xref: /linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c (revision a508da6cc0093171833efb8376b00473f24221b9)
1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2012 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
29 #include "bnx2x_sp.h"
30 
31 
32 
33 /**
34  * bnx2x_move_fp - move content of the fastpath structure.
35  *
36  * @bp:		driver handle
37  * @from:	source FP index
38  * @to:		destination FP index
39  *
40  * Makes sure the contents of the bp->fp[to].napi are kept
41  * intact. This is done by first copying the napi struct from
42  * the target to the source, and then mem-copying the entire
43  * source onto the target.
44  */
45 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46 {
47 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
49 
50 	/* Copy the NAPI object as it has been already initialized */
51 	from_fp->napi = to_fp->napi;
52 
53 	/* Move bnx2x_fastpath contents */
54 	memcpy(to_fp, from_fp, sizeof(*to_fp));
55 	to_fp->index = to;
56 }
57 
58 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
59 
60 /* free skb in the packet ring at pos idx
61  * return idx of last bd freed
62  */
63 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
64 			     u16 idx, unsigned int *pkts_compl,
65 			     unsigned int *bytes_compl)
66 {
67 	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
68 	struct eth_tx_start_bd *tx_start_bd;
69 	struct eth_tx_bd *tx_data_bd;
70 	struct sk_buff *skb = tx_buf->skb;
71 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
72 	int nbd;
73 
74 	/* prefetch skb end pointer to speed up dev_kfree_skb() */
75 	prefetch(&skb->end);
76 
77 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
78 	   txdata->txq_index, idx, tx_buf, skb);
79 
80 	/* unmap first bd */
81 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
82 	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
83 			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
84 
85 
86 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
87 #ifdef BNX2X_STOP_ON_ERROR
88 	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
89 		BNX2X_ERR("BAD nbd!\n");
90 		bnx2x_panic();
91 	}
92 #endif
93 	new_cons = nbd + tx_buf->first_bd;
94 
95 	/* Get the next bd */
96 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
97 
98 	/* Skip a parse bd... */
99 	--nbd;
100 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
101 
102 	/* ...and the TSO split header bd since they have no mapping */
103 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
104 		--nbd;
105 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
106 	}
107 
108 	/* now free frags */
109 	while (nbd > 0) {
110 
111 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
112 		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
113 			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
114 		if (--nbd)
115 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
116 	}
117 
118 	/* release skb */
119 	WARN_ON(!skb);
120 	if (likely(skb)) {
121 		(*pkts_compl)++;
122 		(*bytes_compl) += skb->len;
123 	}
124 
125 	dev_kfree_skb_any(skb);
126 	tx_buf->first_bd = 0;
127 	tx_buf->skb = NULL;
128 
129 	return new_cons;
130 }
131 
132 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
133 {
134 	struct netdev_queue *txq;
135 	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
136 	unsigned int pkts_compl = 0, bytes_compl = 0;
137 
138 #ifdef BNX2X_STOP_ON_ERROR
139 	if (unlikely(bp->panic))
140 		return -1;
141 #endif
142 
143 	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
144 	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
145 	sw_cons = txdata->tx_pkt_cons;
146 
147 	while (sw_cons != hw_cons) {
148 		u16 pkt_cons;
149 
150 		pkt_cons = TX_BD(sw_cons);
151 
152 		DP(NETIF_MSG_TX_DONE,
153 		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
154 		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
155 
156 		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
157 		    &pkts_compl, &bytes_compl);
158 
159 		sw_cons++;
160 	}
161 
162 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
163 
164 	txdata->tx_pkt_cons = sw_cons;
165 	txdata->tx_bd_cons = bd_cons;
166 
167 	/* Need to make the tx_bd_cons update visible to start_xmit()
168 	 * before checking for netif_tx_queue_stopped().  Without the
169 	 * memory barrier, there is a small possibility that
170 	 * start_xmit() will miss it and cause the queue to be stopped
171 	 * forever.
172 	 * On the other hand we need an rmb() here to ensure the proper
173 	 * ordering of bit testing in the following
174 	 * netif_tx_queue_stopped(txq) call.
175 	 */
176 	smp_mb();
177 
178 	if (unlikely(netif_tx_queue_stopped(txq))) {
179 		/* Taking tx_lock() is needed to prevent re-enabling the queue
180 		 * while it's empty. This could happen if rx_action() gets
181 		 * suspended in bnx2x_tx_int() after the condition before
182 		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
183 		 *
184 		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
185 		 * sends some packets consuming the whole queue again->
186 		 * stops the queue
187 		 */
188 
189 		__netif_tx_lock(txq, smp_processor_id());
190 
191 		if ((netif_tx_queue_stopped(txq)) &&
192 		    (bp->state == BNX2X_STATE_OPEN) &&
193 		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
194 			netif_tx_wake_queue(txq);
195 
196 		__netif_tx_unlock(txq);
197 	}
198 	return 0;
199 }
200 
201 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
202 					     u16 idx)
203 {
204 	u16 last_max = fp->last_max_sge;
205 
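	/* SUB_S16() is a signed 16-bit subtraction, so the comparison below
	 * stays correct when the SGE index wraps around.
	 */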
206 	if (SUB_S16(idx, last_max) > 0)
207 		fp->last_max_sge = idx;
208 }
209 
210 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
211 					 u16 sge_len,
212 					 struct eth_end_agg_rx_cqe *cqe)
213 {
214 	struct bnx2x *bp = fp->bp;
215 	u16 last_max, last_elem, first_elem;
216 	u16 delta = 0;
217 	u16 i;
218 
219 	if (!sge_len)
220 		return;
221 
222 	/* First mark all used pages */
223 	for (i = 0; i < sge_len; i++)
224 		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
225 			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
226 
227 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
228 	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
229 
230 	/* Here we assume that the last SGE index is the biggest */
231 	prefetch((void *)(fp->sge_mask));
232 	bnx2x_update_last_max_sge(fp,
233 		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
234 
235 	last_max = RX_SGE(fp->last_max_sge);
236 	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
237 	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
238 
239 	/* If ring is not full */
240 	if (last_elem + 1 != first_elem)
241 		last_elem++;
242 
243 	/* Now update the prod */
244 	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
245 		if (likely(fp->sge_mask[i]))
246 			break;
247 
248 		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
249 		delta += BIT_VEC64_ELEM_SZ;
250 	}
251 
252 	if (delta > 0) {
253 		fp->rx_sge_prod += delta;
254 		/* clear page-end entries */
255 		bnx2x_clear_sge_mask_next_elems(fp);
256 	}
257 
258 	DP(NETIF_MSG_RX_STATUS,
259 	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
260 	   fp->last_max_sge, fp->rx_sge_prod);
261 }
262 
263 /* Set Toeplitz hash value in the skb using the value from the
264  * CQE (calculated by HW).
265  */
266 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
267 			    const struct eth_fast_path_rx_cqe *cqe)
268 {
269 	/* Set Toeplitz hash from CQE */
270 	if ((bp->dev->features & NETIF_F_RXHASH) &&
271 	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
272 		return le32_to_cpu(cqe->rss_hash_result);
273 	return 0;
274 }
275 
276 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
277 			    u16 cons, u16 prod,
278 			    struct eth_fast_path_rx_cqe *cqe)
279 {
280 	struct bnx2x *bp = fp->bp;
281 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
282 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
283 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
284 	dma_addr_t mapping;
285 	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
286 	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
287 
288 	/* print error if current state != stop */
289 	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
290 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
291 
292 	/* Try to map an empty data buffer from the aggregation info  */
293 	mapping = dma_map_single(&bp->pdev->dev,
294 				 first_buf->data + NET_SKB_PAD,
295 				 fp->rx_buf_size, DMA_FROM_DEVICE);
296 	/*
297 	 *  ...if it fails - move the skb from the consumer to the producer
298 	 *  and set the current aggregation state as ERROR to drop it
299 	 *  when TPA_STOP arrives.
300 	 */
301 
302 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
303 		/* Move the BD from the consumer to the producer */
304 		bnx2x_reuse_rx_data(fp, cons, prod);
305 		tpa_info->tpa_state = BNX2X_TPA_ERROR;
306 		return;
307 	}
308 
309 	/* move empty data from pool to prod */
310 	prod_rx_buf->data = first_buf->data;
311 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
312 	/* point prod_bd to new data */
313 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
314 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
315 
316 	/* move partial skb from cons to pool (don't unmap yet) */
317 	*first_buf = *cons_rx_buf;
318 
319 	/* mark bin state as START */
320 	tpa_info->parsing_flags =
321 		le16_to_cpu(cqe->pars_flags.flags);
322 	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
323 	tpa_info->tpa_state = BNX2X_TPA_START;
324 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
325 	tpa_info->placement_offset = cqe->placement_offset;
326 	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
327 	if (fp->mode == TPA_MODE_GRO) {
328 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
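		/* full_page: the SGE buffer area (SGE_PAGE_SIZE * PAGES_PER_SGE)
		 * rounded down to a whole multiple of gro_size (integer divide,
		 * then multiply).
		 */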
329 		tpa_info->full_page =
330 			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
331 		tpa_info->gro_size = gro_size;
332 	}
333 
334 #ifdef BNX2X_STOP_ON_ERROR
335 	fp->tpa_queue_used |= (1 << queue);
336 #ifdef _ASM_GENERIC_INT_L64_H
337 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
338 #else
339 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
340 #endif
341 	   fp->tpa_queue_used);
342 #endif
343 }
344 
345 /* Timestamp option length allowed for TPA aggregation:
346  *
347  *		nop nop kind length echo val
348  */
349 #define TPA_TSTAMP_OPT_LEN	12
350 /**
351  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
352  *
353  * @bp:			driver handle
354  * @parsing_flags:	parsing flags from the START CQE
355  * @len_on_bd:		total length of the first packet for the
356  *			aggregation.
357  *
358  * Approximate value of the MSS for this aggregation calculated using
359  * the first packet of it.
360  */
361 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
362 			     u16 len_on_bd)
363 {
364 	/*
365 	 * TPA aggregation won't have either IP options or TCP options
366 	 * other than timestamp or IPv6 extension headers.
367 	 */
368 	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
369 
370 	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
371 	    PRS_FLAG_OVERETH_IPV6)
372 		hdrs_len += sizeof(struct ipv6hdr);
373 	else /* IPv4 */
374 		hdrs_len += sizeof(struct iphdr);
375 
376 
377 	/* Check if there was a TCP timestamp; if there was, it will
378 	 * always be 12 bytes long: nop nop kind length echo val.
379 	 *
380 	 * Otherwise FW would close the aggregation.
381 	 */
382 	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
383 		hdrs_len += TPA_TSTAMP_OPT_LEN;
384 
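	/* Illustrative arithmetic (not in the original source): for IPv4 with
	 * a timestamp option, hdrs_len = 14 (ETH_HLEN) + 20 (tcphdr) +
	 * 20 (iphdr) + 12 (TPA_TSTAMP_OPT_LEN) = 66, so the MSS returned
	 * below is len_on_bd - 66.
	 */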
385 	return len_on_bd - hdrs_len;
386 }
387 
388 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
389 			      struct bnx2x_fastpath *fp, u16 index)
390 {
391 	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
392 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
393 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
394 	dma_addr_t mapping;
395 
396 	if (unlikely(page == NULL)) {
397 		BNX2X_ERR("Can't alloc sge\n");
398 		return -ENOMEM;
399 	}
400 
401 	mapping = dma_map_page(&bp->pdev->dev, page, 0,
402 			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
403 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
404 		__free_pages(page, PAGES_PER_SGE_SHIFT);
405 		BNX2X_ERR("Can't map sge\n");
406 		return -ENOMEM;
407 	}
408 
409 	sw_buf->page = page;
410 	dma_unmap_addr_set(sw_buf, mapping, mapping);
411 
412 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
413 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
414 
415 	return 0;
416 }
417 
418 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
419 			       struct bnx2x_agg_info *tpa_info,
420 			       u16 pages,
421 			       struct sk_buff *skb,
422 			       struct eth_end_agg_rx_cqe *cqe,
423 			       u16 cqe_idx)
424 {
425 	struct sw_rx_page *rx_pg, old_rx_pg;
426 	u32 i, frag_len, frag_size;
427 	int err, j, frag_id = 0;
428 	u16 len_on_bd = tpa_info->len_on_bd;
429 	u16 full_page = 0, gro_size = 0;
430 
431 	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
432 
433 	if (fp->mode == TPA_MODE_GRO) {
434 		gro_size = tpa_info->gro_size;
435 		full_page = tpa_info->full_page;
436 	}
437 
438 	/* This is needed in order to enable forwarding support */
439 	if (frag_size) {
440 		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
441 					tpa_info->parsing_flags, len_on_bd);
442 
443 		/* set for GRO */
444 		if (fp->mode == TPA_MODE_GRO)
445 			skb_shinfo(skb)->gso_type =
446 			    (GET_FLAG(tpa_info->parsing_flags,
447 				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
448 						PRS_FLAG_OVERETH_IPV6) ?
449 				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
450 	}
451 
452 
453 #ifdef BNX2X_STOP_ON_ERROR
454 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
455 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
456 			  pages, cqe_idx);
457 		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
458 		bnx2x_panic();
459 		return -EINVAL;
460 	}
461 #endif
462 
463 	/* Run through the SGL and compose the fragmented skb */
464 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
465 		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
466 
467 		/* FW gives the indices of the SGE as if the ring is an array
468 		   (meaning that "next" element will consume 2 indices) */
469 		if (fp->mode == TPA_MODE_GRO)
470 			frag_len = min_t(u32, frag_size, (u32)full_page);
471 		else /* LRO */
472 			frag_len = min_t(u32, frag_size,
473 					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
474 
475 		rx_pg = &fp->rx_page_ring[sge_idx];
476 		old_rx_pg = *rx_pg;
477 
478 		/* If we fail to allocate a substitute page, we simply stop
479 		   where we are and drop the whole packet */
480 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
481 		if (unlikely(err)) {
482 			fp->eth_q_stats.rx_skb_alloc_failed++;
483 			return err;
484 		}
485 
486 		/* Unmap the page as we are going to pass it to the stack */
487 		dma_unmap_page(&bp->pdev->dev,
488 			       dma_unmap_addr(&old_rx_pg, mapping),
489 			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
490 		/* Add one frag and update the appropriate fields in the skb */
491 		if (fp->mode == TPA_MODE_LRO)
492 			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
493 		else { /* GRO */
494 			int rem;
495 			int offset = 0;
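			/* Split the SGE page into gro_size-sized frags; every
			 * frag after the first takes an extra page reference
			 * (get_page()), since each skb frag drops its own
			 * reference when the skb is freed.
			 */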
496 			for (rem = frag_len; rem > 0; rem -= gro_size) {
497 				int len = rem > gro_size ? gro_size : rem;
498 				skb_fill_page_desc(skb, frag_id++,
499 						   old_rx_pg.page, offset, len);
500 				if (offset)
501 					get_page(old_rx_pg.page);
502 				offset += len;
503 			}
504 		}
505 
506 		skb->data_len += frag_len;
507 		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
508 		skb->len += frag_len;
509 
510 		frag_size -= frag_len;
511 	}
512 
513 	return 0;
514 }
515 
516 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
517 			   struct bnx2x_agg_info *tpa_info,
518 			   u16 pages,
519 			   struct eth_end_agg_rx_cqe *cqe,
520 			   u16 cqe_idx)
521 {
522 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
523 	u8 pad = tpa_info->placement_offset;
524 	u16 len = tpa_info->len_on_bd;
525 	struct sk_buff *skb = NULL;
526 	u8 *new_data, *data = rx_buf->data;
527 	u8 old_tpa_state = tpa_info->tpa_state;
528 
529 	tpa_info->tpa_state = BNX2X_TPA_STOP;
530 
531 	/* If there was an error during the handling of the TPA_START -
532 	 * drop this aggregation.
533 	 */
534 	if (old_tpa_state == BNX2X_TPA_ERROR)
535 		goto drop;
536 
537 	/* Try to allocate the new data */
538 	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
539 
540 	/* Unmap skb in the pool anyway, as we are going to change
541 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
542 	   fails. */
543 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
544 			 fp->rx_buf_size, DMA_FROM_DEVICE);
545 	if (likely(new_data))
546 		skb = build_skb(data, 0);
547 
548 	if (likely(skb)) {
549 #ifdef BNX2X_STOP_ON_ERROR
550 		if (pad + len > fp->rx_buf_size) {
551 			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
552 				  pad, len, fp->rx_buf_size);
553 			bnx2x_panic();
554 			return;
555 		}
556 #endif
557 
558 		skb_reserve(skb, pad + NET_SKB_PAD);
559 		skb_put(skb, len);
560 		skb->rxhash = tpa_info->rxhash;
561 
562 		skb->protocol = eth_type_trans(skb, bp->dev);
563 		skb->ip_summed = CHECKSUM_UNNECESSARY;
564 
565 		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
566 					 skb, cqe, cqe_idx)) {
567 			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
568 				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
569 			napi_gro_receive(&fp->napi, skb);
570 		} else {
571 			DP(NETIF_MSG_RX_STATUS,
572 			   "Failed to allocate new pages - dropping packet!\n");
573 			dev_kfree_skb_any(skb);
574 		}
575 
576 
577 		/* put new data in bin */
578 		rx_buf->data = new_data;
579 
580 		return;
581 	}
582 	kfree(new_data);
583 drop:
584 	/* drop the packet and keep the buffer in the bin */
585 	DP(NETIF_MSG_RX_STATUS,
586 	   "Failed to allocate or map a new skb - dropping packet!\n");
587 	fp->eth_q_stats.rx_skb_alloc_failed++;
588 }
589 
590 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
591 			       struct bnx2x_fastpath *fp, u16 index)
592 {
593 	u8 *data;
594 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
595 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
596 	dma_addr_t mapping;
597 
598 	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
599 	if (unlikely(data == NULL))
600 		return -ENOMEM;
601 
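	/* The first NET_SKB_PAD bytes are headroom reserved for the stack;
	 * only the rx_buf_size bytes past it are DMA-mapped below.
	 */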
602 	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
603 				 fp->rx_buf_size,
604 				 DMA_FROM_DEVICE);
605 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
606 		kfree(data);
607 		BNX2X_ERR("Can't map rx data\n");
608 		return -ENOMEM;
609 	}
610 
611 	rx_buf->data = data;
612 	dma_unmap_addr_set(rx_buf, mapping, mapping);
613 
614 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
615 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
616 
617 	return 0;
618 }
619 
620 
621 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
622 {
623 	struct bnx2x *bp = fp->bp;
624 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
625 	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
626 	int rx_pkt = 0;
627 
628 #ifdef BNX2X_STOP_ON_ERROR
629 	if (unlikely(bp->panic))
630 		return 0;
631 #endif
632 
633 	/* CQ "next element" is of the same size as a regular element,
634 	   that's why it's ok here */
635 	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
636 	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
637 		hw_comp_cons++;
638 
639 	bd_cons = fp->rx_bd_cons;
640 	bd_prod = fp->rx_bd_prod;
641 	bd_prod_fw = bd_prod;
642 	sw_comp_cons = fp->rx_comp_cons;
643 	sw_comp_prod = fp->rx_comp_prod;
644 
645 	/* Memory barrier necessary as speculative reads of the rx
646 	 * buffer can be ahead of the index in the status block
647 	 */
648 	rmb();
649 
650 	DP(NETIF_MSG_RX_STATUS,
651 	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
652 	   fp->index, hw_comp_cons, sw_comp_cons);
653 
654 	while (sw_comp_cons != hw_comp_cons) {
655 		struct sw_rx_bd *rx_buf = NULL;
656 		struct sk_buff *skb;
657 		union eth_rx_cqe *cqe;
658 		struct eth_fast_path_rx_cqe *cqe_fp;
659 		u8 cqe_fp_flags;
660 		enum eth_rx_cqe_type cqe_fp_type;
661 		u16 len, pad, queue;
662 		u8 *data;
663 
664 #ifdef BNX2X_STOP_ON_ERROR
665 		if (unlikely(bp->panic))
666 			return 0;
667 #endif
668 
669 		comp_ring_cons = RCQ_BD(sw_comp_cons);
670 		bd_prod = RX_BD(bd_prod);
671 		bd_cons = RX_BD(bd_cons);
672 
673 		cqe = &fp->rx_comp_ring[comp_ring_cons];
674 		cqe_fp = &cqe->fast_path_cqe;
675 		cqe_fp_flags = cqe_fp->type_error_flags;
676 		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
677 
678 		DP(NETIF_MSG_RX_STATUS,
679 		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
680 		   CQE_TYPE(cqe_fp_flags),
681 		   cqe_fp_flags, cqe_fp->status_flags,
682 		   le32_to_cpu(cqe_fp->rss_hash_result),
683 		   le16_to_cpu(cqe_fp->vlan_tag),
684 		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
685 
686 		/* is this a slowpath msg? */
687 		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
688 			bnx2x_sp_event(fp, cqe);
689 			goto next_cqe;
690 		}
691 
692 		rx_buf = &fp->rx_buf_ring[bd_cons];
693 		data = rx_buf->data;
694 
695 		if (!CQE_TYPE_FAST(cqe_fp_type)) {
696 			struct bnx2x_agg_info *tpa_info;
697 			u16 frag_size, pages;
698 #ifdef BNX2X_STOP_ON_ERROR
699 			/* sanity check */
700 			if (fp->disable_tpa &&
701 			    (CQE_TYPE_START(cqe_fp_type) ||
702 			     CQE_TYPE_STOP(cqe_fp_type)))
703 				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
704 					  CQE_TYPE(cqe_fp_type));
705 #endif
706 
707 			if (CQE_TYPE_START(cqe_fp_type)) {
708 				u16 queue = cqe_fp->queue_index;
709 				DP(NETIF_MSG_RX_STATUS,
710 				   "calling tpa_start on queue %d\n",
711 				   queue);
712 
713 				bnx2x_tpa_start(fp, queue,
714 						bd_cons, bd_prod,
715 						cqe_fp);
716 
717 				goto next_rx;
718 
719 			}
720 			queue = cqe->end_agg_cqe.queue_index;
721 			tpa_info = &fp->tpa_info[queue];
722 			DP(NETIF_MSG_RX_STATUS,
723 			   "calling tpa_stop on queue %d\n",
724 			   queue);
725 
726 			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
727 				    tpa_info->len_on_bd;
728 
729 			if (fp->mode == TPA_MODE_GRO)
730 				pages = (frag_size + tpa_info->full_page - 1) /
731 					 tpa_info->full_page;
732 			else
733 				pages = SGE_PAGE_ALIGN(frag_size) >>
734 					SGE_PAGE_SHIFT;
735 
736 			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
737 				       &cqe->end_agg_cqe, comp_ring_cons);
738 #ifdef BNX2X_STOP_ON_ERROR
739 			if (bp->panic)
740 				return 0;
741 #endif
742 
743 			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
744 			goto next_cqe;
745 		}
746 		/* non TPA */
747 		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
748 		pad = cqe_fp->placement_offset;
749 		dma_sync_single_for_cpu(&bp->pdev->dev,
750 					dma_unmap_addr(rx_buf, mapping),
751 					pad + RX_COPY_THRESH,
752 					DMA_FROM_DEVICE);
753 		pad += NET_SKB_PAD;
754 		prefetch(data + pad); /* speed up eth_type_trans() */
755 		/* is this an error packet? */
756 		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
757 			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
758 			   "ERROR  flags %x  rx packet %u\n",
759 			   cqe_fp_flags, sw_comp_cons);
760 			fp->eth_q_stats.rx_err_discard_pkt++;
761 			goto reuse_rx;
762 		}
763 		/* Since we don't have a jumbo ring,
764 		/* Since we don't have a jumbo ring
765 		 * copy small packets if mtu > 1500
766 		 */
767 		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
768 		    (len <= RX_COPY_THRESH)) {
769 			skb = netdev_alloc_skb_ip_align(bp->dev, len);
770 			if (skb == NULL) {
771 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
772 				   "ERROR  packet dropped because of alloc failure\n");
773 				fp->eth_q_stats.rx_skb_alloc_failed++;
774 				goto reuse_rx;
775 			}
776 			memcpy(skb->data, data + pad, len);
777 			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
778 		} else {
779 			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
780 				dma_unmap_single(&bp->pdev->dev,
781 						 dma_unmap_addr(rx_buf, mapping),
782 						 fp->rx_buf_size,
783 						 DMA_FROM_DEVICE);
784 				skb = build_skb(data, 0);
785 				if (unlikely(!skb)) {
786 					kfree(data);
787 					fp->eth_q_stats.rx_skb_alloc_failed++;
788 					goto next_rx;
789 				}
790 				skb_reserve(skb, pad);
791 			} else {
792 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
793 				   "ERROR  packet dropped because of alloc failure\n");
794 				fp->eth_q_stats.rx_skb_alloc_failed++;
795 reuse_rx:
796 				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
797 				goto next_rx;
798 			}
799 		}
800 
801 		skb_put(skb, len);
802 		skb->protocol = eth_type_trans(skb, bp->dev);
803 
804 		/* Set Toeplitz hash for a non-LRO skb */
805 		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
806 
807 		skb_checksum_none_assert(skb);
808 
809 		if (bp->dev->features & NETIF_F_RXCSUM) {
810 
811 			if (likely(BNX2X_RX_CSUM_OK(cqe)))
812 				skb->ip_summed = CHECKSUM_UNNECESSARY;
813 			else
814 				fp->eth_q_stats.hw_csum_err++;
815 		}
816 
817 		skb_record_rx_queue(skb, fp->rx_queue);
818 
819 		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
820 		    PARSING_FLAGS_VLAN)
821 			__vlan_hwaccel_put_tag(skb,
822 					       le16_to_cpu(cqe_fp->vlan_tag));
823 		napi_gro_receive(&fp->napi, skb);
824 
825 
826 next_rx:
827 		rx_buf->data = NULL;
828 
829 		bd_cons = NEXT_RX_IDX(bd_cons);
830 		bd_prod = NEXT_RX_IDX(bd_prod);
831 		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
832 		rx_pkt++;
833 next_cqe:
834 		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
835 		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
836 
837 		if (rx_pkt == budget)
838 			break;
839 	} /* while */
840 
841 	fp->rx_bd_cons = bd_cons;
842 	fp->rx_bd_prod = bd_prod_fw;
843 	fp->rx_comp_cons = sw_comp_cons;
844 	fp->rx_comp_prod = sw_comp_prod;
845 
846 	/* Update producers */
847 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
848 			     fp->rx_sge_prod);
849 
850 	fp->rx_pkt += rx_pkt;
851 	fp->rx_calls++;
852 
853 	return rx_pkt;
854 }
855 
856 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
857 {
858 	struct bnx2x_fastpath *fp = fp_cookie;
859 	struct bnx2x *bp = fp->bp;
860 	u8 cos;
861 
862 	DP(NETIF_MSG_INTR,
863 	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
864 	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
865 	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
866 
867 #ifdef BNX2X_STOP_ON_ERROR
868 	if (unlikely(bp->panic))
869 		return IRQ_HANDLED;
870 #endif
871 
872 	/* Handle Rx and Tx according to MSI-X vector */
873 	prefetch(fp->rx_cons_sb);
874 
875 	for_each_cos_in_tx_queue(fp, cos)
876 		prefetch(fp->txdata[cos].tx_cons_sb);
877 
878 	prefetch(&fp->sb_running_index[SM_RX_ID]);
879 	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
880 
881 	return IRQ_HANDLED;
882 }
883 
884 /* HW Lock for shared dual port PHYs */
885 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
886 {
887 	mutex_lock(&bp->port.phy_mutex);
888 
889 	if (bp->port.need_hw_lock)
890 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
891 }
892 
893 void bnx2x_release_phy_lock(struct bnx2x *bp)
894 {
895 	if (bp->port.need_hw_lock)
896 		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
897 
898 	mutex_unlock(&bp->port.phy_mutex);
899 }
900 
901 /* calculates MF speed according to the current line speed and MF configuration */
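/* Illustrative example (values are assumptions, not from the source): with
 * line_speed = 10000 Mbps and maxCfg = 30, SI mode yields 10000 * 30 / 100 =
 * 3000 Mbps, while SD mode caps the speed at vn_max_rate = 30 * 100 = 3000 Mbps.
 */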
902 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
903 {
904 	u16 line_speed = bp->link_vars.line_speed;
905 	if (IS_MF(bp)) {
906 		u16 maxCfg = bnx2x_extract_max_cfg(bp,
907 						   bp->mf_config[BP_VN(bp)]);
908 
909 		/* Calculate the current MAX line speed limit for the MF
910 		 * devices
911 		 */
912 		if (IS_MF_SI(bp))
913 			line_speed = (line_speed * maxCfg) / 100;
914 		else { /* SD mode */
915 			u16 vn_max_rate = maxCfg * 100;
916 
917 			if (vn_max_rate < line_speed)
918 				line_speed = vn_max_rate;
919 		}
920 	}
921 
922 	return line_speed;
923 }
924 
925 /**
926  * bnx2x_fill_report_data - fill link report data to report
927  *
928  * @bp:		driver handle
929  * @data:	link state to update
930  *
931  * It uses non-atomic bit operations because it is called under the mutex.
932  */
933 static void bnx2x_fill_report_data(struct bnx2x *bp,
934 				   struct bnx2x_link_report_data *data)
935 {
936 	u16 line_speed = bnx2x_get_mf_speed(bp);
937 
938 	memset(data, 0, sizeof(*data));
939 
940 	/* Fill the report data: effective line speed */
941 	data->line_speed = line_speed;
942 
943 	/* Link is down */
944 	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
945 		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
946 			  &data->link_report_flags);
947 
948 	/* Full DUPLEX */
949 	if (bp->link_vars.duplex == DUPLEX_FULL)
950 		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
951 
952 	/* Rx Flow Control is ON */
953 	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
954 		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
955 
956 	/* Tx Flow Control is ON */
957 	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
958 		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
959 }
960 
961 /**
962  * bnx2x_link_report - report link status to OS.
963  *
964  * @bp:		driver handle
965  *
966  * Calls the __bnx2x_link_report() under the same locking scheme
967  * as the link/PHY state managing code to ensure consistent link
968  * reporting.
969  */
970 
971 void bnx2x_link_report(struct bnx2x *bp)
972 {
973 	bnx2x_acquire_phy_lock(bp);
974 	__bnx2x_link_report(bp);
975 	bnx2x_release_phy_lock(bp);
976 }
977 
978 /**
979  * __bnx2x_link_report - report link status to OS.
980  *
981  * @bp:		driver handle
982  *
983  * Non-atomic implementation.
984  * Should be called under the phy_lock.
985  */
986 void __bnx2x_link_report(struct bnx2x *bp)
987 {
988 	struct bnx2x_link_report_data cur_data;
989 
990 	/* reread mf_cfg */
991 	if (!CHIP_IS_E1(bp))
992 		bnx2x_read_mf_cfg(bp);
993 
994 	/* Read the current link report info */
995 	bnx2x_fill_report_data(bp, &cur_data);
996 
997 	/* Don't report link down or exactly the same link status twice */
998 	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
999 	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1000 		      &bp->last_reported_link.link_report_flags) &&
1001 	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1002 		      &cur_data.link_report_flags)))
1003 		return;
1004 
1005 	bp->link_cnt++;
1006 
1007 	/* We are going to report new link parameters now -
1008 	 * remember the current data for the next time.
1009 	 */
1010 	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1011 
1012 	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1013 		     &cur_data.link_report_flags)) {
1014 		netif_carrier_off(bp->dev);
1015 		netdev_err(bp->dev, "NIC Link is Down\n");
1016 		return;
1017 	} else {
1018 		const char *duplex;
1019 		const char *flow;
1020 
1021 		netif_carrier_on(bp->dev);
1022 
1023 		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1024 				       &cur_data.link_report_flags))
1025 			duplex = "full";
1026 		else
1027 			duplex = "half";
1028 
1029 		/* Handle the FC at the end so that only these flags could
1030 		 * possibly be set. This way we can easily check whether FC is
1031 		 * enabled.
1032 		 */
1033 		if (cur_data.link_report_flags) {
1034 			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1035 				     &cur_data.link_report_flags)) {
1036 				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1037 				     &cur_data.link_report_flags))
1038 					flow = "ON - receive & transmit";
1039 				else
1040 					flow = "ON - receive";
1041 			} else {
1042 				flow = "ON - transmit";
1043 			}
1044 		} else {
1045 			flow = "none";
1046 		}
1047 		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1048 			    cur_data.line_speed, duplex, flow);
1049 	}
1050 }
1051 
1052 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1053 {
1054 	int i;
1055 
1056 	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1057 		struct eth_rx_sge *sge;
1058 
1059 		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1060 		sge->addr_hi =
1061 			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1062 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1063 
1064 		sge->addr_lo =
1065 			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1066 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1067 	}
1068 }
1069 
1070 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1071 				struct bnx2x_fastpath *fp, int last)
1072 {
1073 	int i;
1074 
1075 	for (i = 0; i < last; i++) {
1076 		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1077 		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1078 		u8 *data = first_buf->data;
1079 
1080 		if (data == NULL) {
1081 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1082 			continue;
1083 		}
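		/* The pool buffer is DMA-mapped only while an aggregation is
		 * in flight (BNX2X_TPA_START); in the STOP and ERROR states it
		 * is an unmapped spare buffer.
		 */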
1084 		if (tpa_info->tpa_state == BNX2X_TPA_START)
1085 			dma_unmap_single(&bp->pdev->dev,
1086 					 dma_unmap_addr(first_buf, mapping),
1087 					 fp->rx_buf_size, DMA_FROM_DEVICE);
1088 		kfree(data);
1089 		first_buf->data = NULL;
1090 	}
1091 }
1092 
1093 void bnx2x_init_rx_rings(struct bnx2x *bp)
1094 {
1095 	int func = BP_FUNC(bp);
1096 	u16 ring_prod;
1097 	int i, j;
1098 
1099 	/* Allocate TPA resources */
1100 	for_each_rx_queue(bp, j) {
1101 		struct bnx2x_fastpath *fp = &bp->fp[j];
1102 
1103 		DP(NETIF_MSG_IFUP,
1104 		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1105 
1106 		if (!fp->disable_tpa) {
1107 			/* Fill the per-aggregation pool */
1108 			for (i = 0; i < MAX_AGG_QS(bp); i++) {
1109 				struct bnx2x_agg_info *tpa_info =
1110 					&fp->tpa_info[i];
1111 				struct sw_rx_bd *first_buf =
1112 					&tpa_info->first_buf;
1113 
1114 				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1115 							  GFP_ATOMIC);
1116 				if (!first_buf->data) {
1117 					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1118 						  j);
1119 					bnx2x_free_tpa_pool(bp, fp, i);
1120 					fp->disable_tpa = 1;
1121 					break;
1122 				}
1123 				dma_unmap_addr_set(first_buf, mapping, 0);
1124 				tpa_info->tpa_state = BNX2X_TPA_STOP;
1125 			}
1126 
1127 			/* "next page" elements initialization */
1128 			bnx2x_set_next_page_sgl(fp);
1129 
1130 			/* set SGEs bit mask */
1131 			bnx2x_init_sge_ring_bit_mask(fp);
1132 
1133 			/* Allocate SGEs and initialize the ring elements */
1134 			for (i = 0, ring_prod = 0;
1135 			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1136 
1137 				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1138 					BNX2X_ERR("was only able to allocate %d rx sges\n",
1139 						  i);
1140 					BNX2X_ERR("disabling TPA for queue[%d]\n",
1141 						  j);
1142 					/* Cleanup already allocated elements */
1143 					bnx2x_free_rx_sge_range(bp, fp,
1144 								ring_prod);
1145 					bnx2x_free_tpa_pool(bp, fp,
1146 							    MAX_AGG_QS(bp));
1147 					fp->disable_tpa = 1;
1148 					ring_prod = 0;
1149 					break;
1150 				}
1151 				ring_prod = NEXT_SGE_IDX(ring_prod);
1152 			}
1153 
1154 			fp->rx_sge_prod = ring_prod;
1155 		}
1156 	}
1157 
1158 	for_each_rx_queue(bp, j) {
1159 		struct bnx2x_fastpath *fp = &bp->fp[j];
1160 
1161 		fp->rx_bd_cons = 0;
1162 
1163 		/* Activate BD ring */
1164 		/* Warning!
1165 		 * This will generate an interrupt (to the TSTORM);
1166 		 * it must only be done after the chip is initialized.
1167 		 */
1168 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1169 				     fp->rx_sge_prod);
1170 
1171 		if (j != 0)
1172 			continue;
1173 
1174 		if (CHIP_IS_E1(bp)) {
1175 			REG_WR(bp, BAR_USTRORM_INTMEM +
1176 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1177 			       U64_LO(fp->rx_comp_mapping));
1178 			REG_WR(bp, BAR_USTRORM_INTMEM +
1179 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1180 			       U64_HI(fp->rx_comp_mapping));
1181 		}
1182 	}
1183 }
1184 
1185 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1186 {
1187 	int i;
1188 	u8 cos;
1189 
1190 	for_each_tx_queue(bp, i) {
1191 		struct bnx2x_fastpath *fp = &bp->fp[i];
1192 		for_each_cos_in_tx_queue(fp, cos) {
1193 			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1194 			unsigned pkts_compl = 0, bytes_compl = 0;
1195 
1196 			u16 sw_prod = txdata->tx_pkt_prod;
1197 			u16 sw_cons = txdata->tx_pkt_cons;
1198 
1199 			while (sw_cons != sw_prod) {
1200 				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1201 				    &pkts_compl, &bytes_compl);
1202 				sw_cons++;
1203 			}
1204 			netdev_tx_reset_queue(
1205 			    netdev_get_tx_queue(bp->dev, txdata->txq_index));
1206 		}
1207 	}
1208 }
1209 
1210 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1211 {
1212 	struct bnx2x *bp = fp->bp;
1213 	int i;
1214 
1215 	/* ring wasn't allocated */
1216 	if (fp->rx_buf_ring == NULL)
1217 		return;
1218 
1219 	for (i = 0; i < NUM_RX_BD; i++) {
1220 		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1221 		u8 *data = rx_buf->data;
1222 
1223 		if (data == NULL)
1224 			continue;
1225 		dma_unmap_single(&bp->pdev->dev,
1226 				 dma_unmap_addr(rx_buf, mapping),
1227 				 fp->rx_buf_size, DMA_FROM_DEVICE);
1228 
1229 		rx_buf->data = NULL;
1230 		kfree(data);
1231 	}
1232 }
1233 
1234 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1235 {
1236 	int j;
1237 
1238 	for_each_rx_queue(bp, j) {
1239 		struct bnx2x_fastpath *fp = &bp->fp[j];
1240 
1241 		bnx2x_free_rx_bds(fp);
1242 
1243 		if (!fp->disable_tpa)
1244 			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1245 	}
1246 }
1247 
1248 void bnx2x_free_skbs(struct bnx2x *bp)
1249 {
1250 	bnx2x_free_tx_skbs(bp);
1251 	bnx2x_free_rx_skbs(bp);
1252 }
1253 
1254 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1255 {
1256 	/* load old values */
1257 	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1258 
1259 	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1260 		/* leave all but MAX value */
1261 		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1262 
1263 		/* set new MAX value */
1264 		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1265 				& FUNC_MF_CFG_MAX_BW_MASK;
1266 
1267 		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1268 	}
1269 }
1270 
1271 /**
1272  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1273  *
1274  * @bp:		driver handle
1275  * @nvecs:	number of vectors to be released
1276  */
1277 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1278 {
1279 	int i, offset = 0;
1280 
1281 	if (nvecs == offset)
1282 		return;
1283 	free_irq(bp->msix_table[offset].vector, bp->dev);
1284 	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1285 	   bp->msix_table[offset].vector);
1286 	offset++;
1287 #ifdef BCM_CNIC
1288 	if (nvecs == offset)
1289 		return;
1290 	offset++;
1291 #endif
1292 
1293 	for_each_eth_queue(bp, i) {
1294 		if (nvecs == offset)
1295 			return;
1296 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1297 		   i, bp->msix_table[offset].vector);
1298 
1299 		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1300 	}
1301 }
1302 
1303 void bnx2x_free_irq(struct bnx2x *bp)
1304 {
1305 	if (bp->flags & USING_MSIX_FLAG &&
1306 	    !(bp->flags & USING_SINGLE_MSIX_FLAG))
1307 		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1308 				     CNIC_PRESENT + 1);
1309 	else
1310 		free_irq(bp->dev->irq, bp->dev);
1311 }
1312 
1313 int __devinit bnx2x_enable_msix(struct bnx2x *bp)
1314 {
1315 	int msix_vec = 0, i, rc, req_cnt;
1316 
1317 	bp->msix_table[msix_vec].entry = msix_vec;
1318 	BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1319 	   bp->msix_table[0].entry);
1320 	msix_vec++;
1321 
1322 #ifdef BCM_CNIC
1323 	bp->msix_table[msix_vec].entry = msix_vec;
1324 	BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1325 	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1326 	msix_vec++;
1327 #endif
1328 	/* We need separate vectors for ETH queues only (not FCoE) */
1329 	for_each_eth_queue(bp, i) {
1330 		bp->msix_table[msix_vec].entry = msix_vec;
1331 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1332 			       msix_vec, msix_vec, i);
1333 		msix_vec++;
1334 	}
1335 
1336 	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1337 
1338 	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
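	/* pci_enable_msix() returns 0 on success, a positive count of the
	 * vectors that could have been allocated when fewer are available,
	 * or a negative errno; the branches below handle each case.
	 */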
1339 
1340 	/*
1341 	 * reconfigure number of tx/rx queues according to available
1342 	 * MSI-X vectors
1343 	 */
1344 	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1345 		/* how many fewer vectors will we have? */
1346 		int diff = req_cnt - rc;
1347 
1348 		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1349 
1350 		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1351 
1352 		if (rc) {
1353 			BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1354 			goto no_msix;
1355 		}
1356 		/*
1357 		 * decrease number of queues by number of unallocated entries
1358 		 */
1359 		bp->num_queues -= diff;
1360 
1361 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
1362 			       bp->num_queues);
1363 	} else if (rc > 0) {
1364 		/* Get by with single vector */
1365 		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1366 		if (rc) {
1367 			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1368 				       rc);
1369 			goto no_msix;
1370 		}
1371 
1372 		BNX2X_DEV_INFO("Using single MSI-X vector\n");
1373 		bp->flags |= USING_SINGLE_MSIX_FLAG;
1374 
1375 	} else if (rc < 0) {
1376 		BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
1377 		goto no_msix;
1378 	}
1379 
1380 	bp->flags |= USING_MSIX_FLAG;
1381 
1382 	return 0;
1383 
1384 no_msix:
1385 	/* fall back to INTx if not enough memory */
1386 	if (rc == -ENOMEM)
1387 		bp->flags |= DISABLE_MSI_FLAG;
1388 
1389 	return rc;
1390 }
1391 
1392 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1393 {
1394 	int i, rc, offset = 0;
1395 
1396 	rc = request_irq(bp->msix_table[offset++].vector,
1397 			 bnx2x_msix_sp_int, 0,
1398 			 bp->dev->name, bp->dev);
1399 	if (rc) {
1400 		BNX2X_ERR("request sp irq failed\n");
1401 		return -EBUSY;
1402 	}
1403 
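	/* Vector layout (mirrors bnx2x_enable_msix()): entry 0 is the slowpath
	 * interrupt, an optional CNIC entry follows, then one entry per ETH
	 * fastpath queue.
	 */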
1404 #ifdef BCM_CNIC
1405 	offset++;
1406 #endif
1407 	for_each_eth_queue(bp, i) {
1408 		struct bnx2x_fastpath *fp = &bp->fp[i];
1409 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1410 			 bp->dev->name, i);
1411 
1412 		rc = request_irq(bp->msix_table[offset].vector,
1413 				 bnx2x_msix_fp_int, 0, fp->name, fp);
1414 		if (rc) {
1415 			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1416 			      bp->msix_table[offset].vector, rc);
1417 			bnx2x_free_msix_irqs(bp, offset);
1418 			return -EBUSY;
1419 		}
1420 
1421 		offset++;
1422 	}
1423 
1424 	i = BNX2X_NUM_ETH_QUEUES(bp);
1425 	offset = 1 + CNIC_PRESENT;
1426 	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1427 	       bp->msix_table[0].vector,
1428 	       0, bp->msix_table[offset].vector,
1429 	       i - 1, bp->msix_table[offset + i - 1].vector);
1430 
1431 	return 0;
1432 }
1433 
1434 int bnx2x_enable_msi(struct bnx2x *bp)
1435 {
1436 	int rc;
1437 
1438 	rc = pci_enable_msi(bp->pdev);
1439 	if (rc) {
1440 		BNX2X_DEV_INFO("MSI is not attainable\n");
1441 		return -1;
1442 	}
1443 	bp->flags |= USING_MSI_FLAG;
1444 
1445 	return 0;
1446 }
1447 
1448 static int bnx2x_req_irq(struct bnx2x *bp)
1449 {
1450 	unsigned long flags;
1451 	unsigned int irq;
1452 
1453 	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1454 		flags = 0;
1455 	else
1456 		flags = IRQF_SHARED;
1457 
1458 	if (bp->flags & USING_MSIX_FLAG)
1459 		irq = bp->msix_table[0].vector;
1460 	else
1461 		irq = bp->pdev->irq;
1462 
1463 	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1464 }
1465 
1466 static int bnx2x_setup_irqs(struct bnx2x *bp)
1467 {
1468 	int rc = 0;
1469 	if (bp->flags & USING_MSIX_FLAG &&
1470 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1471 		rc = bnx2x_req_msix_irqs(bp);
1472 		if (rc)
1473 			return rc;
1474 	} else {
1475 		bnx2x_ack_int(bp);
1476 		rc = bnx2x_req_irq(bp);
1477 		if (rc) {
1478 			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1479 			return rc;
1480 		}
1481 		if (bp->flags & USING_MSI_FLAG) {
1482 			bp->dev->irq = bp->pdev->irq;
1483 			netdev_info(bp->dev, "using MSI IRQ %d\n",
1484 				    bp->dev->irq);
1485 		}
1486 		if (bp->flags & USING_MSIX_FLAG) {
1487 			bp->dev->irq = bp->msix_table[0].vector;
1488 			netdev_info(bp->dev, "using MSIX IRQ %d\n",
1489 				    bp->dev->irq);
1490 		}
1491 	}
1492 
1493 	return 0;
1494 }
1495 
1496 static void bnx2x_napi_enable(struct bnx2x *bp)
1497 {
1498 	int i;
1499 
1500 	for_each_rx_queue(bp, i)
1501 		napi_enable(&bnx2x_fp(bp, i, napi));
1502 }
1503 
1504 static void bnx2x_napi_disable(struct bnx2x *bp)
1505 {
1506 	int i;
1507 
1508 	for_each_rx_queue(bp, i)
1509 		napi_disable(&bnx2x_fp(bp, i, napi));
1510 }
1511 
1512 void bnx2x_netif_start(struct bnx2x *bp)
1513 {
1514 	if (netif_running(bp->dev)) {
1515 		bnx2x_napi_enable(bp);
1516 		bnx2x_int_enable(bp);
1517 		if (bp->state == BNX2X_STATE_OPEN)
1518 			netif_tx_wake_all_queues(bp->dev);
1519 	}
1520 }
1521 
1522 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1523 {
1524 	bnx2x_int_disable_sync(bp, disable_hw);
1525 	bnx2x_napi_disable(bp);
1526 }
1527 
1528 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1529 {
1530 	struct bnx2x *bp = netdev_priv(dev);
1531 
1532 #ifdef BCM_CNIC
1533 	if (!NO_FCOE(bp)) {
1534 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
1535 		u16 ether_type = ntohs(hdr->h_proto);
1536 
1537 		/* Skip VLAN tag if present */
1538 		if (ether_type == ETH_P_8021Q) {
1539 			struct vlan_ethhdr *vhdr =
1540 				(struct vlan_ethhdr *)skb->data;
1541 
1542 			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1543 		}
1544 
1545 		/* If ethertype is FCoE or FIP - use FCoE ring */
1546 		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1547 			return bnx2x_fcoe_tx(bp, txq_index);
1548 	}
1549 #endif
1550 	/* select a non-FCoE queue */
1551 	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1552 }
1553 
1554 
1555 void bnx2x_set_num_queues(struct bnx2x *bp)
1556 {
1557 	/* RSS queues */
1558 	bp->num_queues = bnx2x_calc_num_queues(bp);
1559 
1560 #ifdef BCM_CNIC
1561 	/* override in STORAGE SD modes */
1562 	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1563 		bp->num_queues = 1;
1564 #endif
1565 	/* Add special queues */
1566 	bp->num_queues += NON_ETH_CONTEXT_USE;
1567 }
1568 
1569 /**
1570  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1571  *
1572  * @bp:		Driver handle
1573  *
1574  * We currently support at most 16 Tx queues for each CoS, thus we will
1575  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1576  * bp->max_cos.
1577  *
1578  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1579  * index after all ETH L2 indices.
1580  *
1581  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1582  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1583  * 16..31,...) with indices that are not coupled with any real Tx queue.
1584  *
1585  * The proper configuration of skb->queue_mapping is handled by
1586  * bnx2x_select_queue() and __skb_tx_hash().
1587  *
1588  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1589  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1590  */
1591 static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1592 {
1593 	int rc, tx, rx;
1594 
1595 	tx = MAX_TXQS_PER_COS * bp->max_cos;
1596 	rx = BNX2X_NUM_ETH_QUEUES(bp);
1597 
1598 /* account for fcoe queue */
1599 #ifdef BCM_CNIC
1600 	if (!NO_FCOE(bp)) {
1601 		rx += FCOE_PRESENT;
1602 		tx += FCOE_PRESENT;
1603 	}
1604 #endif
1605 
1606 	rc = netif_set_real_num_tx_queues(bp->dev, tx);
1607 	if (rc) {
1608 		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1609 		return rc;
1610 	}
1611 	rc = netif_set_real_num_rx_queues(bp->dev, rx);
1612 	if (rc) {
1613 		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1614 		return rc;
1615 	}
1616 
1617 	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1618 			  tx, rx);
1619 
1620 	return rc;
1621 }
1622 
1623 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1624 {
1625 	int i;
1626 
1627 	for_each_queue(bp, i) {
1628 		struct bnx2x_fastpath *fp = &bp->fp[i];
1629 		u32 mtu;
1630 
1631 		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
1632 		if (IS_FCOE_IDX(i))
1633 			/*
1634 			 * Although there are no IP frames expected to arrive on
1635 			 * this ring we still want to add an
1636 			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1637 			 * overrun attack.
1638 			 */
1639 			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1640 		else
1641 			mtu = bp->dev->mtu;
1642 		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1643 				  IP_HEADER_ALIGNMENT_PADDING +
1644 				  ETH_OVREHEAD +
1645 				  mtu +
1646 				  BNX2X_FW_RX_ALIGN_END;
1647 		/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
1648 	}
1649 }
1650 
1651 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1652 {
1653 	int i;
1654 	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1655 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1656 
1657 	/* Prepare the initial contents of the indirection table if RSS is
1658 	 * enabled
1659 	 */
1660 	for (i = 0; i < sizeof(ind_table); i++)
1661 		ind_table[i] =
1662 			bp->fp->cl_id +
1663 			ethtool_rxfh_indir_default(i, num_eth_queues);
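	/* e.g. (illustration): with four ETH queues the table cycles through
	 * cl_id + 0..3, since ethtool_rxfh_indir_default(i, n) is simply i % n.
	 */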
1664 
1665 	/*
1666 	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1667 	 * per-port, so if explicit configuration is needed, do it only
1668 	 * for a PMF.
1669 	 *
1670 	 * For 57712 and newer on the other hand it's a per-function
1671 	 * configuration.
1672 	 */
1673 	return bnx2x_config_rss_eth(bp, ind_table,
1674 				    bp->port.pmf || !CHIP_IS_E1x(bp));
1675 }
1676 
1677 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1678 			u8 *ind_table, bool config_hash)
1679 {
1680 	struct bnx2x_config_rss_params params = {NULL};
1681 	int i;
1682 
1683 	/* Although RSS is meaningless when there is a single HW queue we
1684 	 * still need it enabled in order to have HW Rx hash generated.
1685 	 *
1686 	 * if (!is_eth_multi(bp))
1687 	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1688 	 */
1689 
1690 	params.rss_obj = rss_obj;
1691 
1692 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1693 
1694 	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1695 
1696 	/* RSS configuration */
1697 	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1698 	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1699 	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1700 	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1701 
1702 	/* Hash bits */
1703 	params.rss_result_mask = MULTI_MASK;
1704 
1705 	memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1706 
1707 	if (config_hash) {
1708 		/* RSS keys */
1709 		for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1710 			params.rss_key[i] = random32();
1711 
1712 		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1713 	}
1714 
1715 	return bnx2x_config_rss(bp, &params);
1716 }
1717 
1718 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1719 {
1720 	struct bnx2x_func_state_params func_params = {NULL};
1721 
1722 	/* Prepare parameters for function state transitions */
1723 	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1724 
1725 	func_params.f_obj = &bp->func_obj;
1726 	func_params.cmd = BNX2X_F_CMD_HW_INIT;
1727 
1728 	func_params.params.hw_init.load_phase = load_code;
1729 
1730 	return bnx2x_func_state_change(bp, &func_params);
1731 }
1732 
1733 /*
1734  * Cleans the objects that have internal lists without sending
1735  * ramrods. Should be run when interrupts are disabled.
1736  */
1737 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1738 {
1739 	int rc;
1740 	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1741 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
1742 	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1743 
1744 	/***************** Cleanup MACs' object first *************************/
1745 
1746 	/* Wait for completion of the requested commands */
1747 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1748 	/* Perform a dry cleanup */
1749 	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1750 
1751 	/* Clean ETH primary MAC */
1752 	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1753 	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1754 				 &ramrod_flags);
1755 	if (rc != 0)
1756 		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1757 
1758 	/* Cleanup UC list */
1759 	vlan_mac_flags = 0;
1760 	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1761 	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1762 				 &ramrod_flags);
1763 	if (rc != 0)
1764 		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1765 
1766 	/***************** Now clean mcast object *****************************/
1767 	rparam.mcast_obj = &bp->mcast_obj;
1768 	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1769 
1770 	/* Add a DEL command... */
1771 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1772 	if (rc < 0)
1773 		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1774 			  rc);
1775 
1776 	/* ...and wait until all pending commands are cleared */
1777 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1778 	while (rc != 0) {
1779 		if (rc < 0) {
1780 			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1781 				  rc);
1782 			return;
1783 		}
1784 
1785 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1786 	}
1787 }
1788 
1789 #ifndef BNX2X_STOP_ON_ERROR
1790 #define LOAD_ERROR_EXIT(bp, label) \
1791 	do { \
1792 		(bp)->state = BNX2X_STATE_ERROR; \
1793 		goto label; \
1794 	} while (0)
1795 #else
1796 #define LOAD_ERROR_EXIT(bp, label) \
1797 	do { \
1798 		(bp)->state = BNX2X_STATE_ERROR; \
1799 		(bp)->panic = 1; \
1800 		return -EBUSY; \
1801 	} while (0)
1802 #endif
1803 
1804 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1805 {
1806 	/* build FW version dword */
1807 	u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1808 		    (BCM_5710_FW_MINOR_VERSION << 8) +
1809 		    (BCM_5710_FW_REVISION_VERSION << 16) +
1810 		    (BCM_5710_FW_ENGINEERING_VERSION << 24);
1811 
1812 	/* read loaded FW from chip */
1813 	u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1814 
1815 	DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1816 
1817 	if (loaded_fw != my_fw) {
1818 		if (is_err)
1819 			BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1820 				  loaded_fw, my_fw);
1821 		return false;
1822 	}
1823 
1824 	return true;
1825 }
1826 
1827 /**
1828  * bnx2x_bz_fp - zero content of the fastpath structure.
1829  *
1830  * @bp:		driver handle
1831  * @index:	fastpath index to be zeroed
1832  *
1833  * Makes sure the contents of the bp->fp[index].napi are kept
1834  * intact.
1835  */
1836 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1837 {
1838 	struct bnx2x_fastpath *fp = &bp->fp[index];
1839 	struct napi_struct orig_napi = fp->napi;
1840 	/* bzero bnx2x_fastpath contents */
1841 	if (bp->stats_init)
1842 		memset(fp, 0, sizeof(*fp));
1843 	else {
1844 		/* Keep Queue statistics */
1845 		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1846 		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1847 
1848 		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1849 					  GFP_KERNEL);
1850 		if (tmp_eth_q_stats)
1851 			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
1852 			       sizeof(struct bnx2x_eth_q_stats));
1853 
1854 		tmp_eth_q_stats_old =
1855 			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1856 				GFP_KERNEL);
1857 		if (tmp_eth_q_stats_old)
1858 			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
1859 			       sizeof(struct bnx2x_eth_q_stats_old));
1860 
1861 		memset(fp, 0, sizeof(*fp));
1862 
1863 		if (tmp_eth_q_stats) {
1864 			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
1865 				   sizeof(struct bnx2x_eth_q_stats));
1866 			kfree(tmp_eth_q_stats);
1867 		}
1868 
1869 		if (tmp_eth_q_stats_old) {
1870 			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
1871 			       sizeof(struct bnx2x_eth_q_stats_old));
1872 			kfree(tmp_eth_q_stats_old);
1873 		}
1874 
1875 	}
1876 
1877 	/* Restore the NAPI object as it has been already initialized */
1878 	fp->napi = orig_napi;
1879 
1880 	fp->bp = bp;
1881 	fp->index = index;
1882 	if (IS_ETH_FP(fp))
1883 		fp->max_cos = bp->max_cos;
1884 	else
1885 		/* Special queues support only one CoS */
1886 		fp->max_cos = 1;
1887 
1888 	/*
1889 	 * set the tpa flag for each queue. The tpa flag determines the
1890 	 * minimal queue size so it must be set prior to queue memory allocation
1891 	 */
1892 	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1893 				  (bp->flags & GRO_ENABLE_FLAG &&
1894 				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
1895 	if (bp->flags & TPA_ENABLE_FLAG)
1896 		fp->mode = TPA_MODE_LRO;
1897 	else if (bp->flags & GRO_ENABLE_FLAG)
1898 		fp->mode = TPA_MODE_GRO;
1899 
1900 #ifdef BCM_CNIC
1901 	/* We don't want TPA on an FCoE L2 ring */
1902 	if (IS_FCOE_FP(fp))
1903 		fp->disable_tpa = 1;
1904 #endif
1905 }
1906 
1907 
1908 /* must be called with rtnl_lock */
1909 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1910 {
1911 	int port = BP_PORT(bp);
1912 	u32 load_code;
1913 	int i, rc;
1914 
1915 #ifdef BNX2X_STOP_ON_ERROR
1916 	if (unlikely(bp->panic)) {
1917 		BNX2X_ERR("Can't load NIC when there is panic\n");
1918 		return -EPERM;
1919 	}
1920 #endif
1921 
1922 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1923 
1924 	/* Set the initial link reported state to link down */
1925 	bnx2x_acquire_phy_lock(bp);
1926 	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1927 	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1928 		&bp->last_reported_link.link_report_flags);
1929 	bnx2x_release_phy_lock(bp);
1930 
1931 	/* must be called before memory allocation and HW init */
1932 	bnx2x_ilt_set_info(bp);
1933 
1934 	/*
1935 	 * Zero fastpath structures while preserving invariants: the napi
1936 	 * struct (allocated only once), fp index, max_cos and the bp pointer.
1937 	 * Also set fp->disable_tpa.
1938 	 */
1939 	DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
1940 	for_each_queue(bp, i)
1941 		bnx2x_bz_fp(bp, i);
1942 
1943 
1944 	/* Set the receive queues buffer size */
1945 	bnx2x_set_rx_buf_size(bp);
1946 
1947 	if (bnx2x_alloc_mem(bp))
1948 		return -ENOMEM;
1949 
1950 	/* Since bnx2x_alloc_mem() may update bp->num_queues,
1951 	 * bnx2x_set_real_num_queues() should always
1952 	 * come after it.
1953 	 */
1954 	rc = bnx2x_set_real_num_queues(bp);
1955 	if (rc) {
1956 		BNX2X_ERR("Unable to set real_num_queues\n");
1957 		LOAD_ERROR_EXIT(bp, load_error0);
1958 	}
1959 
1960 	/* configure multi cos mappings in kernel.
1961 	 * this configuration may be overridden by a multi class queue discipline
1962 	 * or by a dcbx negotiation result.
1963 	 */
1964 	bnx2x_setup_tc(bp->dev, bp->max_cos);
1965 
1966 	bnx2x_napi_enable(bp);
1967 
1968 	/* set pf load just before approaching the MCP */
1969 	bnx2x_set_pf_load(bp);
1970 
1971 	/* Send LOAD_REQUEST command to the MCP.
1972 	 * The returned LOAD command type indicates whether this is the
1973 	 * first port to be initialized; if so, common blocks should be
1974 	 * initialized as well, otherwise they are skipped.
1975 	 */
1976 	if (!BP_NOMCP(bp)) {
1977 		/* init fw_seq */
1978 		bp->fw_seq =
1979 			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1980 			 DRV_MSG_SEQ_NUMBER_MASK);
1981 		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1982 
1983 		/* Get current FW pulse sequence */
1984 		bp->fw_drv_pulse_wr_seq =
1985 			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
1986 			 DRV_PULSE_SEQ_MASK);
1987 		BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
1988 
1989 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1990 		if (!load_code) {
1991 			BNX2X_ERR("MCP response failure, aborting\n");
1992 			rc = -EBUSY;
1993 			LOAD_ERROR_EXIT(bp, load_error1);
1994 		}
1995 		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1996 			BNX2X_ERR("Driver load refused\n");
1997 			rc = -EBUSY; /* other port in diagnostic mode */
1998 			LOAD_ERROR_EXIT(bp, load_error1);
1999 		}
2000 		if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2001 		    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2002 			/* abort nic load if version mismatch */
2003 			if (!bnx2x_test_firmware_version(bp, true)) {
2004 				rc = -EBUSY;
2005 				LOAD_ERROR_EXIT(bp, load_error2);
2006 			}
2007 		}
2008 
2009 	} else {
2010 		int path = BP_PATH(bp);
2011 
2012 		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2013 		   path, load_count[path][0], load_count[path][1],
2014 		   load_count[path][2]);
2015 		load_count[path][0]++;
2016 		load_count[path][1 + port]++;
2017 		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2018 		   path, load_count[path][0], load_count[path][1],
2019 		   load_count[path][2]);
2020 		if (load_count[path][0] == 1)
2021 			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2022 		else if (load_count[path][1 + port] == 1)
2023 			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2024 		else
2025 			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2026 	}
2027 
2028 	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2029 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2030 	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2031 		bp->port.pmf = 1;
2032 		/*
2033 		 * We need the barrier to ensure the ordering between the
2034 		 * writing to bp->port.pmf here and reading it from the
2035 		 * bnx2x_periodic_task().
2036 		 */
2037 		smp_mb();
2038 	} else
2039 		bp->port.pmf = 0;
2040 
2041 	DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2042 
2043 	/* Init Function state controlling object */
2044 	bnx2x__init_func_obj(bp);
2045 
2046 	/* Initialize HW */
2047 	rc = bnx2x_init_hw(bp, load_code);
2048 	if (rc) {
2049 		BNX2X_ERR("HW init failed, aborting\n");
2050 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2051 		LOAD_ERROR_EXIT(bp, load_error2);
2052 	}
2053 
2054 	/* Connect to IRQs */
2055 	rc = bnx2x_setup_irqs(bp);
2056 	if (rc) {
2057 		BNX2X_ERR("IRQs setup failed\n");
2058 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2059 		LOAD_ERROR_EXIT(bp, load_error2);
2060 	}
2061 
2062 	/* Setup NIC internals and enable interrupts */
2063 	bnx2x_nic_init(bp, load_code);
2064 
2065 	/* Init per-function objects */
2066 	bnx2x_init_bp_objs(bp);
2067 
2068 	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2069 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2070 	    (bp->common.shmem2_base)) {
2071 		if (SHMEM2_HAS(bp, dcc_support))
2072 			SHMEM2_WR(bp, dcc_support,
2073 				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2074 				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2075 		if (SHMEM2_HAS(bp, afex_driver_support))
2076 			SHMEM2_WR(bp, afex_driver_support,
2077 				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2078 	}
2079 
2080 	/* Set AFEX default VLAN tag to an invalid value */
2081 	bp->afex_def_vlan_tag = -1;
2082 
2083 	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2084 	rc = bnx2x_func_start(bp);
2085 	if (rc) {
2086 		BNX2X_ERR("Function start failed!\n");
2087 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2088 		LOAD_ERROR_EXIT(bp, load_error3);
2089 	}
2090 
2091 	/* Send LOAD_DONE command to MCP */
2092 	if (!BP_NOMCP(bp)) {
2093 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2094 		if (!load_code) {
2095 			BNX2X_ERR("MCP response failure, aborting\n");
2096 			rc = -EBUSY;
2097 			LOAD_ERROR_EXIT(bp, load_error3);
2098 		}
2099 	}
2100 
2101 	rc = bnx2x_setup_leading(bp);
2102 	if (rc) {
2103 		BNX2X_ERR("Setup leading failed!\n");
2104 		LOAD_ERROR_EXIT(bp, load_error3);
2105 	}
2106 
2107 #ifdef BCM_CNIC
2108 	/* Enable Timer scan */
2109 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2110 #endif
2111 
2112 	for_each_nondefault_queue(bp, i) {
2113 		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2114 		if (rc) {
2115 			BNX2X_ERR("Queue setup failed\n");
2116 			LOAD_ERROR_EXIT(bp, load_error4);
2117 		}
2118 	}
2119 
2120 	rc = bnx2x_init_rss_pf(bp);
2121 	if (rc) {
2122 		BNX2X_ERR("PF RSS init failed\n");
2123 		LOAD_ERROR_EXIT(bp, load_error4);
2124 	}
2125 
2126 	/* Now that Clients are configured we are ready to work */
2127 	bp->state = BNX2X_STATE_OPEN;
2128 
2129 	/* Configure a ucast MAC */
2130 	rc = bnx2x_set_eth_mac(bp, true);
2131 	if (rc) {
2132 		BNX2X_ERR("Setting Ethernet MAC failed\n");
2133 		LOAD_ERROR_EXIT(bp, load_error4);
2134 	}
2135 
2136 	if (bp->pending_max) {
2137 		bnx2x_update_max_mf_config(bp, bp->pending_max);
2138 		bp->pending_max = 0;
2139 	}
2140 
2141 	if (bp->port.pmf)
2142 		bnx2x_initial_phy_init(bp, load_mode);
2143 
2144 	/* Start fast path */
2145 
2146 	/* Initialize Rx filter. */
2147 	netif_addr_lock_bh(bp->dev);
2148 	bnx2x_set_rx_mode(bp->dev);
2149 	netif_addr_unlock_bh(bp->dev);
2150 
2151 	/* Start the Tx */
2152 	switch (load_mode) {
2153 	case LOAD_NORMAL:
2154 		/* Tx queues should only be re-enabled */
2155 		netif_tx_wake_all_queues(bp->dev);
2156 		break;
2157 
2158 	case LOAD_OPEN:
2159 		netif_tx_start_all_queues(bp->dev);
2160 		smp_mb__after_clear_bit();
2161 		break;
2162 
2163 	case LOAD_DIAG:
2164 		bp->state = BNX2X_STATE_DIAG;
2165 		break;
2166 
2167 	default:
2168 		break;
2169 	}
2170 
2171 	if (bp->port.pmf)
2172 		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
2173 	else
2174 		bnx2x__link_status_update(bp);
2175 
2176 	/* start the timer */
2177 	mod_timer(&bp->timer, jiffies + bp->current_interval);
2178 
2179 #ifdef BCM_CNIC
2180 	/* re-read iscsi info */
2181 	bnx2x_get_iscsi_info(bp);
2182 	bnx2x_setup_cnic_irq_info(bp);
2183 	if (bp->state == BNX2X_STATE_OPEN)
2184 		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2185 #endif
2186 
2187 	/* mark driver is loaded in shmem2 */
2188 	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2189 		u32 val;
2190 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2191 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2192 			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2193 			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
2194 	}
2195 
2196 	/* Wait for all pending SP commands to complete */
2197 	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2198 		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2199 		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2200 		return -EBUSY;
2201 	}
2202 
2203 	bnx2x_dcbx_init(bp);
2204 	return 0;
2205 
2206 #ifndef BNX2X_STOP_ON_ERROR
2207 load_error4:
2208 #ifdef BCM_CNIC
2209 	/* Disable Timer scan */
2210 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2211 #endif
2212 load_error3:
2213 	bnx2x_int_disable_sync(bp, 1);
2214 
2215 	/* Clean queueable objects */
2216 	bnx2x_squeeze_objects(bp);
2217 
2218 	/* Free SKBs, SGEs, TPA pool and driver internals */
2219 	bnx2x_free_skbs(bp);
2220 	for_each_rx_queue(bp, i)
2221 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2222 
2223 	/* Release IRQs */
2224 	bnx2x_free_irq(bp);
2225 load_error2:
2226 	if (!BP_NOMCP(bp)) {
2227 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2228 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2229 	}
2230 
2231 	bp->port.pmf = 0;
2232 load_error1:
2233 	bnx2x_napi_disable(bp);
2234 	/* clear pf_load status, as it was already set */
2235 	bnx2x_clear_pf_load(bp);
2236 load_error0:
2237 	bnx2x_free_mem(bp);
2238 
2239 	return rc;
2240 #endif /* ! BNX2X_STOP_ON_ERROR */
2241 }
2242 
2243 /* must be called with rtnl_lock */
2244 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2245 {
2246 	int i;
2247 	bool global = false;
2248 
2249 	/* mark driver is unloaded in shmem2 */
2250 	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2251 		u32 val;
2252 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2253 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2254 			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2255 	}
2256 
2257 	if ((bp->state == BNX2X_STATE_CLOSED) ||
2258 	    (bp->state == BNX2X_STATE_ERROR)) {
2259 		/* We can get here if the driver has been unloaded
2260 		 * during parity error recovery and is either waiting for a
2261 		 * leader to complete or for other functions to unload and
2262 		 * then ifdown has been issued. In this case we want to
2263 		 * unload and let other functions complete a recovery
2264 		 * process.
2265 		 */
2266 		bp->recovery_state = BNX2X_RECOVERY_DONE;
2267 		bp->is_leader = 0;
2268 		bnx2x_release_leader_lock(bp);
2269 		smp_mb();
2270 
2271 		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2272 		BNX2X_ERR("Can't unload in closed or error state\n");
2273 		return -EINVAL;
2274 	}
2275 
2276 	/*
2277 	 * It's important to set bp->state to a value different from
2278 	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2279 	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2280 	 */
2281 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2282 	smp_mb();
2283 
2284 	/* Stop Tx */
2285 	bnx2x_tx_disable(bp);
2286 
2287 #ifdef BCM_CNIC
2288 	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2289 #endif
2290 
2291 	bp->rx_mode = BNX2X_RX_MODE_NONE;
2292 
2293 	del_timer_sync(&bp->timer);
2294 
2295 	/* Set ALWAYS_ALIVE bit in shmem */
2296 	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2297 
2298 	bnx2x_drv_pulse(bp);
2299 
2300 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2301 	bnx2x_save_statistics(bp);
2302 
2303 	/* Cleanup the chip if needed */
2304 	if (unload_mode != UNLOAD_RECOVERY)
2305 		bnx2x_chip_cleanup(bp, unload_mode);
2306 	else {
2307 		/* Send the UNLOAD_REQUEST to the MCP */
2308 		bnx2x_send_unload_req(bp, unload_mode);
2309 
2310 		/*
2311 		 * Prevent transactions to host from the functions on the
2312 		 * engine that doesn't reset global blocks in case of global
2313 		 * attention once global blocks are reset and gates are opened
2314 		 * (the engine whose leader will perform the recovery
2315 		 * last).
2316 		 */
2317 		if (!CHIP_IS_E1x(bp))
2318 			bnx2x_pf_disable(bp);
2319 
2320 		/* Disable HW interrupts, NAPI */
2321 		bnx2x_netif_stop(bp, 1);
2322 
2323 		/* Release IRQs */
2324 		bnx2x_free_irq(bp);
2325 
2326 		/* Report UNLOAD_DONE to MCP */
2327 		bnx2x_send_unload_done(bp);
2328 	}
2329 
2330 	/*
2331 	 * At this stage no more interrupts will arrive so we may safely clean
2332 	 * the queueable objects here in case they failed to get cleaned so far.
2333 	 */
2334 	bnx2x_squeeze_objects(bp);
2335 
2336 	/* There should be no more pending SP commands at this stage */
2337 	bp->sp_state = 0;
2338 
2339 	bp->port.pmf = 0;
2340 
2341 	/* Free SKBs, SGEs, TPA pool and driver internals */
2342 	bnx2x_free_skbs(bp);
2343 	for_each_rx_queue(bp, i)
2344 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2345 
2346 	bnx2x_free_mem(bp);
2347 
2348 	bp->state = BNX2X_STATE_CLOSED;
2349 
2350 	/* Check if there are pending parity attentions. If there are - set
2351 	 * RECOVERY_IN_PROGRESS.
2352 	 */
2353 	if (bnx2x_chk_parity_attn(bp, &global, false)) {
2354 		bnx2x_set_reset_in_progress(bp);
2355 
2356 		/* Set RESET_IS_GLOBAL if needed */
2357 		if (global)
2358 			bnx2x_set_reset_global(bp);
2359 	}
2360 
2361 
2362 	/* The last driver must disable a "close the gate" if there is no
2363 	 * parity attention or "process kill" pending.
2364 	 */
2365 	if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2366 		bnx2x_disable_close_the_gate(bp);
2367 
2368 	return 0;
2369 }
2370 
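/* Switch the device between D0 and D3hot via the PCI PM control register.
 * D3hot is skipped while other users still hold the device or on slow
 * (emulation/FPGA) chips, and PME is armed only when WoL is enabled.
 */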
2371 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2372 {
2373 	u16 pmcsr;
2374 
2375 	/* If there is no power capability, silently succeed */
2376 	if (!bp->pm_cap) {
2377 		BNX2X_DEV_INFO("No power capability. Breaking.\n");
2378 		return 0;
2379 	}
2380 
2381 	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2382 
2383 	switch (state) {
2384 	case PCI_D0:
2385 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2386 				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2387 				       PCI_PM_CTRL_PME_STATUS));
2388 
2389 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2390 			/* delay required during transition out of D3hot */
2391 			msleep(20);
2392 		break;
2393 
2394 	case PCI_D3hot:
2395 		/* If there are other clients above, don't
2396 		   shut down the power */
2397 		if (atomic_read(&bp->pdev->enable_cnt) != 1)
2398 			return 0;
2399 		/* Don't shut down the power for emulation and FPGA */
2400 		if (CHIP_REV_IS_SLOW(bp))
2401 			return 0;
2402 
2403 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2404 		pmcsr |= 3;
2405 
2406 		if (bp->wol)
2407 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2408 
2409 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2410 				      pmcsr);
2411 
2412 		/* No more memory access after this point until
2413 		 * device is brought back to D0.
2414 		 */
2415 		break;
2416 
2417 	default:
2418 		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2419 		return -EINVAL;
2420 	}
2421 	return 0;
2422 }
2423 
2424 /*
2425  * net_device service functions
2426  */
2427 int bnx2x_poll(struct napi_struct *napi, int budget)
2428 {
2429 	int work_done = 0;
2430 	u8 cos;
2431 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2432 						 napi);
2433 	struct bnx2x *bp = fp->bp;
2434 
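	/* Service Tx completions and Rx work until either the budget is
	 * exhausted or both rings are seen empty again after refreshing the
	 * fastpath status block indices below.
	 */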
2435 	while (1) {
2436 #ifdef BNX2X_STOP_ON_ERROR
2437 		if (unlikely(bp->panic)) {
2438 			napi_complete(napi);
2439 			return 0;
2440 		}
2441 #endif
2442 
2443 		for_each_cos_in_tx_queue(fp, cos)
2444 			if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2445 				bnx2x_tx_int(bp, &fp->txdata[cos]);
2446 
2447 
2448 		if (bnx2x_has_rx_work(fp)) {
2449 			work_done += bnx2x_rx_int(fp, budget - work_done);
2450 
2451 			/* must not complete if we consumed full budget */
2452 			if (work_done >= budget)
2453 				break;
2454 		}
2455 
2456 		/* Fall out from the NAPI loop if needed */
2457 		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2458 #ifdef BCM_CNIC
2459 			/* No need to update SB for FCoE L2 ring as long as
2460 			 * it's connected to the default SB and the SB
2461 			 * has been updated when NAPI was scheduled.
2462 			 */
2463 			if (IS_FCOE_FP(fp)) {
2464 				napi_complete(napi);
2465 				break;
2466 			}
2467 #endif
2468 
2469 			bnx2x_update_fpsb_idx(fp);
2470 			/* bnx2x_has_rx_work() reads the status block,
2471 			 * thus we need to ensure that status block indices
2472 			 * have been actually read (bnx2x_update_fpsb_idx)
2473 			 * prior to this check (bnx2x_has_rx_work) so that
2474 			 * we won't write the "newer" value of the status block
2475 			 * to IGU (if there was a DMA right after
2476 			 * bnx2x_has_rx_work and if there is no rmb, the memory
2477 			 * reading (bnx2x_update_fpsb_idx) may be postponed
2478 			 * to right before bnx2x_ack_sb). In this case there
2479 			 * will never be another interrupt until there is
2480 			 * another update of the status block, while there
2481 			 * is still unhandled work.
2482 			 */
2483 			rmb();
2484 
2485 			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2486 				napi_complete(napi);
2487 				/* Re-enable interrupts */
2488 				DP(NETIF_MSG_RX_STATUS,
2489 				   "Update index to %d\n", fp->fp_hc_idx);
2490 				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2491 					     le16_to_cpu(fp->fp_hc_idx),
2492 					     IGU_INT_ENABLE, 1);
2493 				break;
2494 			}
2495 		}
2496 	}
2497 
2498 	return work_done;
2499 }
2500 
2501 /* we split the first BD into headers and data BDs
2502  * to ease the pain of our fellow microcode engineers;
2503  * we use one mapping for both BDs.
2504  * So far this has only been observed to happen
2505  * in Other Operating Systems(TM)
2506  */
2507 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2508 				   struct bnx2x_fp_txdata *txdata,
2509 				   struct sw_tx_bd *tx_buf,
2510 				   struct eth_tx_start_bd **tx_bd, u16 hlen,
2511 				   u16 bd_prod, int nbd)
2512 {
2513 	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2514 	struct eth_tx_bd *d_tx_bd;
2515 	dma_addr_t mapping;
2516 	int old_len = le16_to_cpu(h_tx_bd->nbytes);
2517 
2518 	/* first fix first BD */
2519 	h_tx_bd->nbd = cpu_to_le16(nbd);
2520 	h_tx_bd->nbytes = cpu_to_le16(hlen);
2521 
2522 	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d (%x:%x) nbd %d\n",
2523 	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2524 
2525 	/* now get a new data BD
2526 	 * (after the pbd) and fill it */
2527 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2528 	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2529 
2530 	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2531 			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2532 
2533 	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2534 	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2535 	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2536 
2537 	/* this marks the BD as one that has no individual mapping */
2538 	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2539 
2540 	DP(NETIF_MSG_TX_QUEUED,
2541 	   "TSO split data size is %d (%x:%x)\n",
2542 	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2543 
2544 	/* update tx_bd */
2545 	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2546 
2547 	return bd_prod;
2548 }
2549 
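/* Adjust a partial checksum by 'fix' signed bytes relative to t_header
 * (drop the contribution of the preceding bytes when fix > 0, add that of
 * the following bytes when fix < 0) and return it folded and byte-swapped
 * for the parsing BD.
 */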
2550 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2551 {
2552 	if (fix > 0)
2553 		csum = (u16) ~csum_fold(csum_sub(csum,
2554 				csum_partial(t_header - fix, fix, 0)));
2555 
2556 	else if (fix < 0)
2557 		csum = (u16) ~csum_fold(csum_add(csum,
2558 				csum_partial(t_header, -fix, 0)));
2559 
2560 	return swab16(csum);
2561 }
2562 
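/* Classify an skb for transmission: XMIT_PLAIN when no checksum offload is
 * requested, otherwise a combination of XMIT_CSUM_V4/V6 and XMIT_CSUM_TCP,
 * plus the matching XMIT_GSO_* flag for GSO skbs.
 */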
2563 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2564 {
2565 	u32 rc;
2566 
2567 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2568 		rc = XMIT_PLAIN;
2569 
2570 	else {
2571 		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2572 			rc = XMIT_CSUM_V6;
2573 			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2574 				rc |= XMIT_CSUM_TCP;
2575 
2576 		} else {
2577 			rc = XMIT_CSUM_V4;
2578 			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2579 				rc |= XMIT_CSUM_TCP;
2580 		}
2581 	}
2582 
2583 	if (skb_is_gso_v6(skb))
2584 		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2585 	else if (skb_is_gso(skb))
2586 		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2587 
2588 	return rc;
2589 }
2590 
2591 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2592 /* check if packet requires linearization (packet is too fragmented)
2593    no need to check fragmentation if page size > 8K (there will be no
2594    violation of FW restrictions) */
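/* The LSO check below slides a window of (MAX_FETCH_BD - 3) consecutive BDs
 * over the linear data and frags: if any such window carries less than one
 * MSS worth of payload the skb is reported as needing linearization.
 */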
2595 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2596 			     u32 xmit_type)
2597 {
2598 	int to_copy = 0;
2599 	int hlen = 0;
2600 	int first_bd_sz = 0;
2601 
2602 	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2603 	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2604 
2605 		if (xmit_type & XMIT_GSO) {
2606 			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2607 			/* Check if LSO packet needs to be copied:
2608 			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2609 			int wnd_size = MAX_FETCH_BD - 3;
2610 			/* Number of windows to check */
2611 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2612 			int wnd_idx = 0;
2613 			int frag_idx = 0;
2614 			u32 wnd_sum = 0;
2615 
2616 			/* Headers length */
2617 			hlen = (int)(skb_transport_header(skb) - skb->data) +
2618 				tcp_hdrlen(skb);
2619 
2620 			/* Amount of data (w/o headers) on linear part of SKB*/
2621 			first_bd_sz = skb_headlen(skb) - hlen;
2622 
2623 			wnd_sum  = first_bd_sz;
2624 
2625 			/* Calculate the first sum - it's special */
2626 			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2627 				wnd_sum +=
2628 					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2629 
2630 			/* If there was data on linear skb data - check it */
2631 			if (first_bd_sz > 0) {
2632 				if (unlikely(wnd_sum < lso_mss)) {
2633 					to_copy = 1;
2634 					goto exit_lbl;
2635 				}
2636 
2637 				wnd_sum -= first_bd_sz;
2638 			}
2639 
2640 			/* Others are easier: run through the frag list and
2641 			   check all windows */
2642 			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2643 				wnd_sum +=
2644 			  skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2645 
2646 				if (unlikely(wnd_sum < lso_mss)) {
2647 					to_copy = 1;
2648 					break;
2649 				}
2650 				wnd_sum -=
2651 					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2652 			}
2653 		} else {
2654 			/* in the non-LSO case a too fragmented packet should always
2655 			   be linearized */
2656 			to_copy = 1;
2657 		}
2658 	}
2659 
2660 exit_lbl:
2661 	if (unlikely(to_copy))
2662 		DP(NETIF_MSG_TX_QUEUED,
2663 		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
2664 		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2665 		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2666 
2667 	return to_copy;
2668 }
2669 #endif
2670 
2671 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2672 					u32 xmit_type)
2673 {
2674 	*parsing_data |= (skb_shinfo(skb)->gso_size <<
2675 			      ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2676 			      ETH_TX_PARSE_BD_E2_LSO_MSS;
2677 	if ((xmit_type & XMIT_GSO_V6) &&
2678 	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2679 		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2680 }
2681 
2682 /**
2683  * bnx2x_set_pbd_gso - update PBD in GSO case.
2684  *
2685  * @skb:	packet skb
2686  * @pbd:	parse BD
2687  * @xmit_type:	xmit flags
2688  */
2689 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2690 				     struct eth_tx_parse_bd_e1x *pbd,
2691 				     u32 xmit_type)
2692 {
2693 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2694 	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2695 	pbd->tcp_flags = pbd_tcp_flags(skb);
2696 
2697 	if (xmit_type & XMIT_GSO_V4) {
2698 		pbd->ip_id = swab16(ip_hdr(skb)->id);
2699 		pbd->tcp_pseudo_csum =
2700 			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2701 						  ip_hdr(skb)->daddr,
2702 						  0, IPPROTO_TCP, 0));
2703 
2704 	} else
2705 		pbd->tcp_pseudo_csum =
2706 			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2707 						&ipv6_hdr(skb)->daddr,
2708 						0, IPPROTO_TCP, 0));
2709 
2710 	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2711 }
2712 
2713 /**
2714  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2715  *
2716  * @bp:			driver handle
2717  * @skb:		packet skb
2718  * @parsing_data:	data to be updated
2719  * @xmit_type:		xmit flags
2720  *
2721  * 57712 related
2722  */
2723 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2724 	u32 *parsing_data, u32 xmit_type)
2725 {
2726 	*parsing_data |=
2727 			((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2728 			ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2729 			ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2730 
2731 	if (xmit_type & XMIT_CSUM_TCP) {
2732 		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2733 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2734 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2735 
2736 		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2737 	} else
2738 		/* We support checksum offload for TCP and UDP only.
2739 		 * No need to pass the UDP header length - it's a constant.
2740 		 */
2741 		return skb_transport_header(skb) +
2742 				sizeof(struct udphdr) - skb->data;
2743 }
2744 
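/* Set the checksum flags in the start BD: L4 checksum is always requested,
 * plus either the IPv4 header checksum flag or the IPv6 indication, and the
 * UDP flag when the L4 protocol is not TCP.
 */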
2745 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2746 	struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2747 {
2748 	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2749 
2750 	if (xmit_type & XMIT_CSUM_V4)
2751 		tx_start_bd->bd_flags.as_bitfield |=
2752 					ETH_TX_BD_FLAGS_IP_CSUM;
2753 	else
2754 		tx_start_bd->bd_flags.as_bitfield |=
2755 					ETH_TX_BD_FLAGS_IPV6;
2756 
2757 	if (!(xmit_type & XMIT_CSUM_TCP))
2758 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2759 }
2760 
2761 /**
2762  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2763  *
2764  * @bp:		driver handle
2765  * @skb:	packet skb
2766  * @pbd:	parse BD to be updated
2767  * @xmit_type:	xmit flags
2768  */
2769 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2770 	struct eth_tx_parse_bd_e1x *pbd,
2771 	u32 xmit_type)
2772 {
2773 	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2774 
2775 	/* for now NS flag is not used in Linux */
2776 	pbd->global_data =
2777 		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2778 			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2779 
2780 	pbd->ip_hlen_w = (skb_transport_header(skb) -
2781 			skb_network_header(skb)) >> 1;
2782 
2783 	hlen += pbd->ip_hlen_w;
2784 
2785 	/* We support checksum offload for TCP and UDP only */
2786 	if (xmit_type & XMIT_CSUM_TCP)
2787 		hlen += tcp_hdrlen(skb) / 2;
2788 	else
2789 		hlen += sizeof(struct udphdr) / 2;
2790 
2791 	pbd->total_hlen_w = cpu_to_le16(hlen);
2792 	hlen = hlen*2;
2793 
2794 	if (xmit_type & XMIT_CSUM_TCP) {
2795 		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2796 
2797 	} else {
2798 		s8 fix = SKB_CS_OFF(skb); /* signed! */
2799 
2800 		DP(NETIF_MSG_TX_QUEUED,
2801 		   "hlen %d  fix %d  csum before fix %x\n",
2802 		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2803 
2804 		/* HW bug: fixup the CSUM */
2805 		pbd->tcp_pseudo_csum =
2806 			bnx2x_csum_fix(skb_transport_header(skb),
2807 				       SKB_CS(skb), fix);
2808 
2809 		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2810 		   pbd->tcp_pseudo_csum);
2811 	}
2812 
2813 	return hlen;
2814 }
2815 
2816 /* called with netif_tx_lock
2817  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2818  * netif_wake_queue()
2819  */
2820 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2821 {
2822 	struct bnx2x *bp = netdev_priv(dev);
2823 
2824 	struct bnx2x_fastpath *fp;
2825 	struct netdev_queue *txq;
2826 	struct bnx2x_fp_txdata *txdata;
2827 	struct sw_tx_bd *tx_buf;
2828 	struct eth_tx_start_bd *tx_start_bd, *first_bd;
2829 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2830 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2831 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2832 	u32 pbd_e2_parsing_data = 0;
2833 	u16 pkt_prod, bd_prod;
2834 	int nbd, txq_index, fp_index, txdata_index;
2835 	dma_addr_t mapping;
2836 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
2837 	int i;
2838 	u8 hlen = 0;
2839 	__le16 pkt_size = 0;
2840 	struct ethhdr *eth;
2841 	u8 mac_type = UNICAST_ADDRESS;
2842 
2843 #ifdef BNX2X_STOP_ON_ERROR
2844 	if (unlikely(bp->panic))
2845 		return NETDEV_TX_BUSY;
2846 #endif
2847 
2848 	txq_index = skb_get_queue_mapping(skb);
2849 	txq = netdev_get_tx_queue(dev, txq_index);
2850 
2851 	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2852 
2853 	/* decode the fastpath index and the cos index from the txq */
2854 	fp_index = TXQ_TO_FP(txq_index);
2855 	txdata_index = TXQ_TO_COS(txq_index);
2856 
2857 #ifdef BCM_CNIC
2858 	/*
2859 	 * Override the above for the FCoE queue:
2860 	 *   - FCoE fp entry is right after the ETH entries.
2861 	 *   - FCoE L2 queue uses bp->txdata[0] only.
2862 	 */
2863 	if (unlikely(!NO_FCOE(bp) && (txq_index ==
2864 				      bnx2x_fcoe_tx(bp, txq_index)))) {
2865 		fp_index = FCOE_IDX;
2866 		txdata_index = 0;
2867 	}
2868 #endif
2869 
2870 	/* enable this debug print to view the transmission queue being used
2871 	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
2872 	   txq_index, fp_index, txdata_index); */
2873 
2874 	/* locate the fastpath and the txdata */
2875 	fp = &bp->fp[fp_index];
2876 	txdata = &fp->txdata[txdata_index];
2877 
2878 	/* enable this debug print to view the transmission details
2879 	DP(NETIF_MSG_TX_QUEUED,
2880 	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
2881 	   txdata->cid, fp_index, txdata_index, txdata, fp); */
2882 
2883 	if (unlikely(bnx2x_tx_avail(bp, txdata) <
2884 		     (skb_shinfo(skb)->nr_frags + 3))) {
2885 		fp->eth_q_stats.driver_xoff++;
2886 		netif_tx_stop_queue(txq);
2887 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2888 		return NETDEV_TX_BUSY;
2889 	}
2890 
2891 	DP(NETIF_MSG_TX_QUEUED,
2892 	   "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x\n",
2893 	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2894 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2895 
2896 	eth = (struct ethhdr *)skb->data;
2897 
2898 	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
2899 	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2900 		if (is_broadcast_ether_addr(eth->h_dest))
2901 			mac_type = BROADCAST_ADDRESS;
2902 		else
2903 			mac_type = MULTICAST_ADDRESS;
2904 	}
2905 
2906 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2907 	/* First, check if we need to linearize the skb (due to FW
2908 	   restrictions). No need to check fragmentation if page size > 8K
2909 	   (there will be no violation of FW restrictions) */
2910 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2911 		/* Statistics of linearization */
2912 		bp->lin_cnt++;
2913 		if (skb_linearize(skb) != 0) {
2914 			DP(NETIF_MSG_TX_QUEUED,
2915 			   "SKB linearization failed - silently dropping this SKB\n");
2916 			dev_kfree_skb_any(skb);
2917 			return NETDEV_TX_OK;
2918 		}
2919 	}
2920 #endif
2921 	/* Map skb linear data for DMA */
2922 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
2923 				 skb_headlen(skb), DMA_TO_DEVICE);
2924 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2925 		DP(NETIF_MSG_TX_QUEUED,
2926 		   "SKB mapping failed - silently dropping this SKB\n");
2927 		dev_kfree_skb_any(skb);
2928 		return NETDEV_TX_OK;
2929 	}
2930 	/*
2931 	Please read carefully. First we use one BD which we mark as start,
2932 	then we have a parsing info BD (used for TSO or xsum),
2933 	and only then we have the rest of the TSO BDs.
2934 	(don't forget to mark the last one as last,
2935 	and to unmap only AFTER you write to the BD ...)
2936 	And above all, all pbd sizes are in words - NOT DWORDS!
2937 	*/
2938 
2939 	/* get current pkt produced now - advance it just before sending packet
2940 	 * since mapping of pages may fail and cause packet to be dropped
2941 	 */
2942 	pkt_prod = txdata->tx_pkt_prod;
2943 	bd_prod = TX_BD(txdata->tx_bd_prod);
2944 
2945 	/* get a tx_buf and first BD
2946 	 * tx_start_bd may be changed during SPLIT,
2947 	 * but first_bd will always stay first
2948 	 */
2949 	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2950 	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2951 	first_bd = tx_start_bd;
2952 
2953 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2954 	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2955 		 mac_type);
2956 
2957 	/* header nbd */
2958 	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2959 
2960 	/* remember the first BD of the packet */
2961 	tx_buf->first_bd = txdata->tx_bd_prod;
2962 	tx_buf->skb = skb;
2963 	tx_buf->flags = 0;
2964 
2965 	DP(NETIF_MSG_TX_QUEUED,
2966 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2967 	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2968 
2969 	if (vlan_tx_tag_present(skb)) {
2970 		tx_start_bd->vlan_or_ethertype =
2971 		    cpu_to_le16(vlan_tx_tag_get(skb));
2972 		tx_start_bd->bd_flags.as_bitfield |=
2973 		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2974 	} else
2975 		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2976 
2977 	/* turn on parsing and get a BD */
2978 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2979 
2980 	if (xmit_type & XMIT_CSUM)
2981 		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2982 
2983 	if (!CHIP_IS_E1x(bp)) {
2984 		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2985 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2986 		/* Set PBD in checksum offload case */
2987 		if (xmit_type & XMIT_CSUM)
2988 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2989 						     &pbd_e2_parsing_data,
2990 						     xmit_type);
2991 		if (IS_MF_SI(bp)) {
2992 			/*
2993 			 * fill in the MAC addresses in the PBD - for local
2994 			 * switching
2995 			 */
2996 			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2997 					      &pbd_e2->src_mac_addr_mid,
2998 					      &pbd_e2->src_mac_addr_lo,
2999 					      eth->h_source);
3000 			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3001 					      &pbd_e2->dst_mac_addr_mid,
3002 					      &pbd_e2->dst_mac_addr_lo,
3003 					      eth->h_dest);
3004 		}
3005 	} else {
3006 		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3007 		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3008 		/* Set PBD in checksum offload case */
3009 		if (xmit_type & XMIT_CSUM)
3010 			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3011 
3012 	}
3013 
3014 	/* Setup the data pointer of the first BD of the packet */
3015 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3016 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3017 	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3018 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3019 	pkt_size = tx_start_bd->nbytes;
3020 
3021 	DP(NETIF_MSG_TX_QUEUED,
3022 	   "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
3023 	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3024 	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3025 	   tx_start_bd->bd_flags.as_bitfield,
3026 	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3027 
3028 	if (xmit_type & XMIT_GSO) {
3029 
3030 		DP(NETIF_MSG_TX_QUEUED,
3031 		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3032 		   skb->len, hlen, skb_headlen(skb),
3033 		   skb_shinfo(skb)->gso_size);
3034 
3035 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3036 
3037 		if (unlikely(skb_headlen(skb) > hlen))
3038 			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3039 						 &tx_start_bd, hlen,
3040 						 bd_prod, ++nbd);
3041 		if (!CHIP_IS_E1x(bp))
3042 			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3043 					     xmit_type);
3044 		else
3045 			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3046 	}
3047 
3048 	/* Set the PBD's parsing_data field if not zero
3049 	 * (for the chips newer than 57711).
3050 	 */
3051 	if (pbd_e2_parsing_data)
3052 		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3053 
3054 	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3055 
3056 	/* Handle fragmented skb */
3057 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3058 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3059 
3060 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3061 					   skb_frag_size(frag), DMA_TO_DEVICE);
3062 		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3063 			unsigned int pkts_compl = 0, bytes_compl = 0;
3064 
3065 			DP(NETIF_MSG_TX_QUEUED,
3066 			   "Unable to map page - dropping packet...\n");
3067 
3068 			/* we need to unmap all buffers already mapped
3069 			 * for this SKB;
3070 			 * first_bd->nbd needs to be properly updated
3071 			 * before the call to bnx2x_free_tx_pkt
3072 			 */
3073 			first_bd->nbd = cpu_to_le16(nbd);
3074 			bnx2x_free_tx_pkt(bp, txdata,
3075 					  TX_BD(txdata->tx_pkt_prod),
3076 					  &pkts_compl, &bytes_compl);
3077 			return NETDEV_TX_OK;
3078 		}
3079 
3080 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3081 		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3082 		if (total_pkt_bd == NULL)
3083 			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3084 
3085 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3086 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3087 		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3088 		le16_add_cpu(&pkt_size, skb_frag_size(frag));
3089 		nbd++;
3090 
3091 		DP(NETIF_MSG_TX_QUEUED,
3092 		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
3093 		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3094 		   le16_to_cpu(tx_data_bd->nbytes));
3095 	}
3096 
3097 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3098 
3099 	/* update with actual num BDs */
3100 	first_bd->nbd = cpu_to_le16(nbd);
3101 
3102 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3103 
3104 	/* now send a tx doorbell, counting the next BD
3105 	 * if the packet contains or ends with it
3106 	 */
3107 	if (TX_BD_POFF(bd_prod) < nbd)
3108 		nbd++;
3109 
3110 	/* total_pkt_bytes should be set on the first data BD if
3111 	 * it's not an LSO packet and there is more than one
3112 	 * data BD. In this case pkt_size is limited by an MTU value.
3113 	 * However we prefer to set it for an LSO packet (while we don't
3114 	 * have to) in order to save some CPU cycles in the non-LSO
3115 	 * case, where we care about them much more.
3116 	 */
3117 	if (total_pkt_bd != NULL)
3118 		total_pkt_bd->total_pkt_bytes = pkt_size;
3119 
3120 	if (pbd_e1x)
3121 		DP(NETIF_MSG_TX_QUEUED,
3122 		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
3123 		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3124 		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3125 		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3126 		    le16_to_cpu(pbd_e1x->total_hlen_w));
3127 	if (pbd_e2)
3128 		DP(NETIF_MSG_TX_QUEUED,
3129 		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
3130 		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3131 		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3132 		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3133 		   pbd_e2->parsing_data);
3134 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
3135 
3136 	netdev_tx_sent_queue(txq, skb->len);
3137 
3138 	skb_tx_timestamp(skb);
3139 
3140 	txdata->tx_pkt_prod++;
3141 	/*
3142 	 * Make sure that the BD data is updated before updating the producer
3143 	 * since FW might read the BD right after the producer is updated.
3144 	 * This is only applicable for weak-ordered memory model archs such
3145 	 * as IA-64. The following barrier is also mandatory since FW
3146 	 * assumes packets must have BDs.
3147 	 */
3148 	wmb();
3149 
3150 	txdata->tx_db.data.prod += nbd;
3151 	barrier();
3152 
3153 	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3154 
3155 	mmiowb();
3156 
3157 	txdata->tx_bd_prod += nbd;
3158 
3159 	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
3160 		netif_tx_stop_queue(txq);
3161 
3162 		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
3163 		 * ordering of set_bit() in netif_tx_stop_queue() and read of
3164 		 * txdata->tx_bd_cons */
3165 		smp_mb();
3166 
3167 		fp->eth_q_stats.driver_xoff++;
3168 		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
3169 			netif_tx_wake_queue(txq);
3170 	}
3171 	txdata->tx_pkt++;
3172 
3173 	return NETDEV_TX_OK;
3174 }
3175 
3176 /**
3177  * bnx2x_setup_tc - routine to configure net_device for multi tc
3178  *
3179  * @netdev: net device to configure
3180  * @tc: number of traffic classes to enable
3181  *
3182  * callback connected to the ndo_setup_tc function pointer
3183  */
3184 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3185 {
3186 	int cos, prio, count, offset;
3187 	struct bnx2x *bp = netdev_priv(dev);
3188 
3189 	/* setup tc must be called under rtnl lock */
3190 	ASSERT_RTNL();
3191 
3192 	/* no traffic classes requested. aborting */
3193 	if (!num_tc) {
3194 		netdev_reset_tc(dev);
3195 		return 0;
3196 	}
3197 
3198 	/* requested to support too many traffic classes */
3199 	if (num_tc > bp->max_cos) {
3200 		BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3201 			  num_tc, bp->max_cos);
3202 		return -EINVAL;
3203 	}
3204 
3205 	/* declare amount of supported traffic classes */
3206 	if (netdev_set_num_tc(dev, num_tc)) {
3207 		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3208 		return -EINVAL;
3209 	}
3210 
3211 	/* configure priority to traffic class mapping */
3212 	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3213 		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3214 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3215 		   "mapping priority %d to tc %d\n",
3216 		   prio, bp->prio_to_cos[prio]);
3217 	}
3218 
3219 
3220 	/* Use this configuration to differentiate tc0 from other COSes
3221 	   This can be used for ets or pfc, and save the effort of setting
3222 	   up a multi class queue disc or negotiating DCBX with a switch
3223 	netdev_set_prio_tc_map(dev, 0, 0);
3224 	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3225 	for (prio = 1; prio < 16; prio++) {
3226 		netdev_set_prio_tc_map(dev, prio, 1);
3227 		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3228 	} */
3229 
3230 	/* configure traffic class to transmission queue mapping */
3231 	for (cos = 0; cos < bp->max_cos; cos++) {
3232 		count = BNX2X_NUM_ETH_QUEUES(bp);
3233 		offset = cos * MAX_TXQS_PER_COS;
3234 		netdev_set_tc_queue(dev, cos, count, offset);
3235 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3236 		   "mapping tc %d to offset %d count %d\n",
3237 		   cos, offset, count);
3238 	}
3239 
3240 	return 0;
3241 }
3242 
3243 /* called with rtnl_lock */
3244 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3245 {
3246 	struct sockaddr *addr = p;
3247 	struct bnx2x *bp = netdev_priv(dev);
3248 	int rc = 0;
3249 
3250 	if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3251 		BNX2X_ERR("Requested MAC address is not valid\n");
3252 		return -EINVAL;
3253 	}
3254 
3255 #ifdef BCM_CNIC
3256 	if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3257 	    !is_zero_ether_addr(addr->sa_data)) {
3258 		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3259 		return -EINVAL;
3260 	}
3261 #endif
3262 
3263 	if (netif_running(dev))  {
3264 		rc = bnx2x_set_eth_mac(bp, false);
3265 		if (rc)
3266 			return rc;
3267 	}
3268 
3269 	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3270 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3271 
3272 	if (netif_running(dev))
3273 		rc = bnx2x_set_eth_mac(bp, true);
3274 
3275 	return rc;
3276 }
3277 
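/* Release the per-queue resources of one fastpath: its status block (only
 * zeroed for the FCoE queue, which has none of its own), the Rx descriptor,
 * completion and SGE rings, and the Tx rings of every CoS.
 */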
3278 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3279 {
3280 	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3281 	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3282 	u8 cos;
3283 
3284 	/* Common */
3285 #ifdef BCM_CNIC
3286 	if (IS_FCOE_IDX(fp_index)) {
3287 		memset(sb, 0, sizeof(union host_hc_status_block));
3288 		fp->status_blk_mapping = 0;
3289 
3290 	} else {
3291 #endif
3292 		/* status blocks */
3293 		if (!CHIP_IS_E1x(bp))
3294 			BNX2X_PCI_FREE(sb->e2_sb,
3295 				       bnx2x_fp(bp, fp_index,
3296 						status_blk_mapping),
3297 				       sizeof(struct host_hc_status_block_e2));
3298 		else
3299 			BNX2X_PCI_FREE(sb->e1x_sb,
3300 				       bnx2x_fp(bp, fp_index,
3301 						status_blk_mapping),
3302 				       sizeof(struct host_hc_status_block_e1x));
3303 #ifdef BCM_CNIC
3304 	}
3305 #endif
3306 	/* Rx */
3307 	if (!skip_rx_queue(bp, fp_index)) {
3308 		bnx2x_free_rx_bds(fp);
3309 
3310 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
3311 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3312 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3313 			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
3314 			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
3315 
3316 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3317 			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
3318 			       sizeof(struct eth_fast_path_rx_cqe) *
3319 			       NUM_RCQ_BD);
3320 
3321 		/* SGE ring */
3322 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3323 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3324 			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
3325 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3326 	}
3327 
3328 	/* Tx */
3329 	if (!skip_tx_queue(bp, fp_index)) {
3330 		/* fastpath tx rings: tx_buf tx_desc */
3331 		for_each_cos_in_tx_queue(fp, cos) {
3332 			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3333 
3334 			DP(NETIF_MSG_IFDOWN,
3335 			   "freeing tx memory of fp %d cos %d cid %d\n",
3336 			   fp_index, cos, txdata->cid);
3337 
3338 			BNX2X_FREE(txdata->tx_buf_ring);
3339 			BNX2X_PCI_FREE(txdata->tx_desc_ring,
3340 				txdata->tx_desc_mapping,
3341 				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3342 		}
3343 	}
3344 	/* end of fastpath */
3345 }
3346 
3347 void bnx2x_free_fp_mem(struct bnx2x *bp)
3348 {
3349 	int i;
3350 	for_each_queue(bp, i)
3351 		bnx2x_free_fp_mem_at(bp, i);
3352 }
3353 
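/* Cache pointers to the status block index arrays in the fastpath structure,
 * picking the E2 or E1x status block layout as appropriate.
 */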
3354 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3355 {
3356 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3357 	if (!CHIP_IS_E1x(bp)) {
3358 		bnx2x_fp(bp, index, sb_index_values) =
3359 			(__le16 *)status_blk.e2_sb->sb.index_values;
3360 		bnx2x_fp(bp, index, sb_running_index) =
3361 			(__le16 *)status_blk.e2_sb->sb.running_index;
3362 	} else {
3363 		bnx2x_fp(bp, index, sb_index_values) =
3364 			(__le16 *)status_blk.e1x_sb->sb.index_values;
3365 		bnx2x_fp(bp, index, sb_running_index) =
3366 			(__le16 *)status_blk.e1x_sb->sb.running_index;
3367 	}
3368 }
3369 
3370 /* Returns the number of actually allocated BDs */
3371 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3372 			      int rx_ring_size)
3373 {
3374 	struct bnx2x *bp = fp->bp;
3375 	u16 ring_prod, cqe_ring_prod;
3376 	int i, failure_cnt = 0;
3377 
3378 	fp->rx_comp_cons = 0;
3379 	cqe_ring_prod = ring_prod = 0;
3380 
3381 	/* This routine is called only during init so
3382 	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3383 	 */
3384 	for (i = 0; i < rx_ring_size; i++) {
3385 		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3386 			failure_cnt++;
3387 			continue;
3388 		}
3389 		ring_prod = NEXT_RX_IDX(ring_prod);
3390 		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3391 		WARN_ON(ring_prod <= (i - failure_cnt));
3392 	}
3393 
3394 	if (failure_cnt)
3395 		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3396 			  i - failure_cnt, fp->index);
3397 
3398 	fp->rx_bd_prod = ring_prod;
3399 	/* Limit the CQE producer by the CQE ring size */
3400 	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3401 			       cqe_ring_prod);
3402 	fp->rx_pkt = fp->rx_calls = 0;
3403 
3404 	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3405 
3406 	return i - failure_cnt;
3407 }
3408 
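/* Chain the RCQ pages: the last CQE of each page becomes a "next page"
 * element pointing at the following page, wrapping back to the first page
 * after the last one.
 */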
3409 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3410 {
3411 	int i;
3412 
3413 	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3414 		struct eth_rx_cqe_next_page *nextpg;
3415 
3416 		nextpg = (struct eth_rx_cqe_next_page *)
3417 			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3418 		nextpg->addr_hi =
3419 			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3420 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3421 		nextpg->addr_lo =
3422 			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3423 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3424 	}
3425 }
3426 
3427 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3428 {
3429 	union host_hc_status_block *sb;
3430 	struct bnx2x_fastpath *fp = &bp->fp[index];
3431 	int ring_size = 0;
3432 	u8 cos;
3433 	int rx_ring_size = 0;
3434 
3435 #ifdef BCM_CNIC
3436 	if (!bp->rx_ring_size &&
3437 	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3438 		rx_ring_size = MIN_RX_SIZE_NONTPA;
3439 		bp->rx_ring_size = rx_ring_size;
3440 	} else
3441 #endif
3442 	if (!bp->rx_ring_size) {
3443 		u32 cfg = SHMEM_RD(bp,
3444 			     dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
3445 
3446 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3447 
3448 		/* Decrease ring size for 1G functions */
3449 		if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3450 		    PORT_HW_CFG_NET_SERDES_IF_SGMII)
3451 			rx_ring_size /= 10;
3452 
3453 		/* allocate at least the number of buffers required by FW */
3454 		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3455 				     MIN_RX_SIZE_TPA, rx_ring_size);
3456 
3457 		bp->rx_ring_size = rx_ring_size;
3458 	} else /* if rx_ring_size specified - use it */
3459 		rx_ring_size = bp->rx_ring_size;
3460 
3461 	/* Common */
3462 	sb = &bnx2x_fp(bp, index, status_blk);
3463 #ifdef BCM_CNIC
3464 	if (!IS_FCOE_IDX(index)) {
3465 #endif
3466 		/* status blocks */
3467 		if (!CHIP_IS_E1x(bp))
3468 			BNX2X_PCI_ALLOC(sb->e2_sb,
3469 				&bnx2x_fp(bp, index, status_blk_mapping),
3470 				sizeof(struct host_hc_status_block_e2));
3471 		else
3472 			BNX2X_PCI_ALLOC(sb->e1x_sb,
3473 				&bnx2x_fp(bp, index, status_blk_mapping),
3474 			    sizeof(struct host_hc_status_block_e1x));
3475 #ifdef BCM_CNIC
3476 	}
3477 #endif
3478 
3479 	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3480 	 * set shortcuts for it.
3481 	 */
3482 	if (!IS_FCOE_IDX(index))
3483 		set_sb_shortcuts(bp, index);
3484 
3485 	/* Tx */
3486 	if (!skip_tx_queue(bp, index)) {
3487 		/* fastpath tx rings: tx_buf tx_desc */
3488 		for_each_cos_in_tx_queue(fp, cos) {
3489 			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3490 
3491 			DP(NETIF_MSG_IFUP,
3492 			   "allocating tx memory of fp %d cos %d\n",
3493 			   index, cos);
3494 
3495 			BNX2X_ALLOC(txdata->tx_buf_ring,
3496 				sizeof(struct sw_tx_bd) * NUM_TX_BD);
3497 			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3498 				&txdata->tx_desc_mapping,
3499 				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3500 		}
3501 	}
3502 
3503 	/* Rx */
3504 	if (!skip_rx_queue(bp, index)) {
3505 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
3506 		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3507 				sizeof(struct sw_rx_bd) * NUM_RX_BD);
3508 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3509 				&bnx2x_fp(bp, index, rx_desc_mapping),
3510 				sizeof(struct eth_rx_bd) * NUM_RX_BD);
3511 
3512 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3513 				&bnx2x_fp(bp, index, rx_comp_mapping),
3514 				sizeof(struct eth_fast_path_rx_cqe) *
3515 				NUM_RCQ_BD);
3516 
3517 		/* SGE ring */
3518 		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3519 				sizeof(struct sw_rx_page) * NUM_RX_SGE);
3520 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3521 				&bnx2x_fp(bp, index, rx_sge_mapping),
3522 				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3523 		/* RX BD ring */
3524 		bnx2x_set_next_page_rx_bd(fp);
3525 
3526 		/* CQ ring */
3527 		bnx2x_set_next_page_rx_cq(fp);
3528 
3529 		/* BDs */
3530 		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3531 		if (ring_size < rx_ring_size)
3532 			goto alloc_mem_err;
3533 	}
3534 
3535 	return 0;
3536 
3537 /* handles low memory cases */
3538 alloc_mem_err:
3539 	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3540 						index, ring_size);
3541 	/* FW will drop all packets if the queue is not big enough.
3542 	 * In these cases we disable the queue.
3543 	 * Min size is different for OOO, TPA and non-TPA queues.
3544 	 */
3545 	if (ring_size < (fp->disable_tpa ?
3546 				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3547 			/* release memory allocated for this queue */
3548 			bnx2x_free_fp_mem_at(bp, index);
3549 			return -ENOMEM;
3550 	}
3551 	return 0;
3552 }
3553 
3554 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3555 {
3556 	int i;
3557 
3558 	/**
3559 	 * 1. Allocate FP for leading - fatal if error
3560 	 * 2. {CNIC} Allocate FCoE FP - fatal if error
3561 	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3562 	 * 4. Allocate RSS - fix number of queues if error
3563 	 */
3564 
3565 	/* leading */
3566 	if (bnx2x_alloc_fp_mem_at(bp, 0))
3567 		return -ENOMEM;
3568 
3569 #ifdef BCM_CNIC
3570 	if (!NO_FCOE(bp))
3571 		/* FCoE */
3572 		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3573 			/* we will fail the load process instead of marking
3574 			 * NO_FCOE_FLAG
3575 			 */
3576 			return -ENOMEM;
3577 #endif
3578 
3579 	/* RSS */
3580 	for_each_nondefault_eth_queue(bp, i)
3581 		if (bnx2x_alloc_fp_mem_at(bp, i))
3582 			break;
3583 
3584 	/* handle memory failures */
3585 	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3586 		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3587 
3588 		WARN_ON(delta < 0);
3589 #ifdef BCM_CNIC
3590 		/**
3591 		 * move non eth FPs next to last eth FP
3592 		 * must be done in that order
3593 		 * FCOE_IDX < FWD_IDX < OOO_IDX
3594 		 */
3595 
3596 		/* move FCoE fp even if NO_FCOE_FLAG is on */
3597 		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3598 #endif
3599 		bp->num_queues -= delta;
3600 		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3601 			  bp->num_queues + delta, bp->num_queues);
3602 	}
3603 
3604 	return 0;
3605 }
3606 
3607 void bnx2x_free_mem_bp(struct bnx2x *bp)
3608 {
3609 	kfree(bp->fp);
3610 	kfree(bp->msix_table);
3611 	kfree(bp->ilt);
3612 }
3613 
3614 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3615 {
3616 	struct bnx2x_fastpath *fp;
3617 	struct msix_entry *tbl;
3618 	struct bnx2x_ilt *ilt;
3619 	int msix_table_size = 0;
3620 
3621 	/*
3622 	 * The biggest MSI-X table we might need is the maximum number of fast
3623 	 * path IGU SBs plus the default SB (for the PF).
3624 	 */
3625 	msix_table_size = bp->igu_sb_cnt + 1;
3626 
3627 	/* fp array: RSS plus CNIC related L2 queues */
3628 	fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
3629 		     sizeof(*fp), GFP_KERNEL);
3630 	if (!fp)
3631 		goto alloc_err;
3632 	bp->fp = fp;
3633 
3634 	/* msix table */
3635 	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3636 	if (!tbl)
3637 		goto alloc_err;
3638 	bp->msix_table = tbl;
3639 
3640 	/* ilt */
3641 	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3642 	if (!ilt)
3643 		goto alloc_err;
3644 	bp->ilt = ilt;
3645 
3646 	return 0;
3647 alloc_err:
3648 	bnx2x_free_mem_bp(bp);
3649 	return -ENOMEM;
3651 }
3652 
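/**
 * bnx2x_reload_if_running - unload and reload the NIC if the device is up.
 *
 * @dev:	net device
 *
 * Does nothing and returns 0 when the interface is down.  Callers such as
 * bnx2x_change_mtu() invoke it under rtnl_lock.
 */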
3653 int bnx2x_reload_if_running(struct net_device *dev)
3654 {
3655 	struct bnx2x *bp = netdev_priv(dev);
3656 
3657 	if (unlikely(!netif_running(dev)))
3658 		return 0;
3659 
3660 	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3661 	return bnx2x_nic_load(bp, LOAD_NORMAL);
3662 }
3663 
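/**
 * bnx2x_get_cur_phy_idx - return the index of the currently active PHY.
 *
 * @bp:		driver handle
 *
 * Returns INT_PHY when only one PHY is configured.  While link is up,
 * EXT_PHY1 is reported unless the link is SERDES and EXT_PHY2 supports
 * fibre; otherwise the PHY chosen by the configured PHY selection is
 * returned.
 */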
3664 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3665 {
3666 	u32 sel_phy_idx = 0;
3667 	if (bp->link_params.num_phys <= 1)
3668 		return INT_PHY;
3669 
3670 	if (bp->link_vars.link_up) {
3671 		sel_phy_idx = EXT_PHY1;
3672 		/* In case the link is SERDES, check if EXT_PHY2 is the one */
3673 		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3674 		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3675 			sel_phy_idx = EXT_PHY2;
3676 	} else {
3677 
3678 		switch (bnx2x_phy_selection(&bp->link_params)) {
3679 		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3680 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3681 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3682 		       sel_phy_idx = EXT_PHY1;
3683 		       break;
3684 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3685 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3686 		       sel_phy_idx = EXT_PHY2;
3687 		       break;
3688 		}
3689 	}
3690 
3691 	return sel_phy_idx;
3692 }
3693 
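/**
 * bnx2x_get_link_cfg_idx - return the link configuration index to use.
 *
 * @bp:		driver handle
 *
 * Maps the currently active PHY index to a link configuration index,
 * reversing EXT_PHY1/EXT_PHY2 when PHY swapping is enabled in
 * multi_phy_config.
 */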
3694 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3695 {
3696 	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3697 	/*
3698 	 * The selected active PHY is always after swapping (in case PHY
3699 	 * swapping is enabled), so when swapping is enabled we need to reverse
3700 	 * the configuration.
3701 	 */
3702 
3703 	if (bp->link_params.multi_phy_config &
3704 	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3705 		if (sel_phy_idx == EXT_PHY1)
3706 			sel_phy_idx = EXT_PHY2;
3707 		else if (sel_phy_idx == EXT_PHY2)
3708 			sel_phy_idx = EXT_PHY1;
3709 	}
3710 	return LINK_CONFIG_IDX(sel_phy_idx);
3711 }
3712 
3713 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
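/**
 * bnx2x_fcoe_get_wwn - report the FCoE WWNN or WWPN.
 *
 * @dev:	net device
 * @wwn:	output buffer for the 64-bit world wide name
 * @type:	NETDEV_FCOE_WWNN or NETDEV_FCOE_WWPN
 *
 * The names are taken from the cnic_eth_dev structure; -EINVAL is
 * returned for any other @type.
 */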
3714 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3715 {
3716 	struct bnx2x *bp = netdev_priv(dev);
3717 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3718 
3719 	switch (type) {
3720 	case NETDEV_FCOE_WWNN:
3721 		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3722 				cp->fcoe_wwn_node_name_lo);
3723 		break;
3724 	case NETDEV_FCOE_WWPN:
3725 		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3726 				cp->fcoe_wwn_port_name_lo);
3727 		break;
3728 	default:
3729 		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
3730 		return -EINVAL;
3731 	}
3732 
3733 	return 0;
3734 }
3735 #endif
3736 
3737 /* called with rtnl_lock */
3738 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3739 {
3740 	struct bnx2x *bp = netdev_priv(dev);
3741 
3742 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3743 		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
3744 		return -EAGAIN;
3745 	}
3746 
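	/* Reject MTUs above the jumbo limit or below the minimum packet size */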
3747 	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3748 	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3749 		BNX2X_ERR("Can't support requested MTU size\n");
3750 		return -EINVAL;
3751 	}
3752 
3753 	/* This does not race with packet allocation
3754 	 * because the actual alloc size is
3755 	 * only updated as part of the load process.
3756 	 */
3757 	dev->mtu = new_mtu;
3758 
3759 	return bnx2x_reload_if_running(dev);
3760 }
3761 
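/**
 * bnx2x_fix_features - sanitize the requested netdev feature set.
 *
 * @dev:	net device
 * @features:	requested features
 *
 * LRO and GRO are cleared whenever Rx checksum offload is being turned
 * off or TPA is disabled, since TPA requires Rx CSUM offloading.
 */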
3762 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3763 				     netdev_features_t features)
3764 {
3765 	struct bnx2x *bp = netdev_priv(dev);
3766 
3767 	/* TPA requires Rx CSUM offloading */
3768 	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3769 		features &= ~NETIF_F_LRO;
3770 		features &= ~NETIF_F_GRO;
3771 	}
3772 
3773 	return features;
3774 }
3775 
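/**
 * bnx2x_set_features - apply a new netdev feature set.
 *
 * @dev:	net device
 * @features:	features to apply
 *
 * Translates NETIF_F_LRO/NETIF_F_GRO into the TPA/GRO driver flags,
 * switches BMAC loopback on or off for NETIF_F_LOOPBACK, and reloads the
 * NIC when anything changed (deferred until parity recovery completes).
 */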
3776 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3777 {
3778 	struct bnx2x *bp = netdev_priv(dev);
3779 	u32 flags = bp->flags;
3780 	bool bnx2x_reload = false;
3781 
3782 	if (features & NETIF_F_LRO)
3783 		flags |= TPA_ENABLE_FLAG;
3784 	else
3785 		flags &= ~TPA_ENABLE_FLAG;
3786 
3787 	if (features & NETIF_F_GRO)
3788 		flags |= GRO_ENABLE_FLAG;
3789 	else
3790 		flags &= ~GRO_ENABLE_FLAG;
3791 
3792 	if (features & NETIF_F_LOOPBACK) {
3793 		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3794 			bp->link_params.loopback_mode = LOOPBACK_BMAC;
3795 			bnx2x_reload = true;
3796 		}
3797 	} else {
3798 		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3799 			bp->link_params.loopback_mode = LOOPBACK_NONE;
3800 			bnx2x_reload = true;
3801 		}
3802 	}
3803 
3804 	if (flags ^ bp->flags) {
3805 		bp->flags = flags;
3806 		bnx2x_reload = true;
3807 	}
3808 
3809 	if (bnx2x_reload) {
3810 		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3811 			return bnx2x_reload_if_running(dev);
3812 		/* else: bnx2x_nic_load() will be called at end of recovery */
3813 	}
3814 
3815 	return 0;
3816 }
3817 
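/**
 * bnx2x_tx_timeout - handle a transmit timeout.
 *
 * @dev:	net device
 *
 * Sets the BNX2X_SP_RTNL_TX_TIMEOUT bit and schedules sp_rtnl_task so the
 * reset is performed from process context; when BNX2X_STOP_ON_ERROR is
 * defined the driver panics first.
 */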
3818 void bnx2x_tx_timeout(struct net_device *dev)
3819 {
3820 	struct bnx2x *bp = netdev_priv(dev);
3821 
3822 #ifdef BNX2X_STOP_ON_ERROR
3823 	if (!bp->panic)
3824 		bnx2x_panic();
3825 #endif
3826 
3827 	smp_mb__before_clear_bit();
3828 	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3829 	smp_mb__after_clear_bit();
3830 
3831 	/* This allows the netif to be shut down gracefully before resetting */
3832 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
3833 }
3834 
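/**
 * bnx2x_suspend - prepare the device for a PCI power transition.
 *
 * @pdev:	PCI device
 * @state:	target PM state
 *
 * Saves the PCI state and, if the interface is running, detaches the net
 * device, unloads the NIC (UNLOAD_CLOSE) and moves the chip to the power
 * state derived from @state.
 */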
3835 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3836 {
3837 	struct net_device *dev = pci_get_drvdata(pdev);
3838 	struct bnx2x *bp;
3839 
3840 	if (!dev) {
3841 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3842 		return -ENODEV;
3843 	}
3844 	bp = netdev_priv(dev);
3845 
3846 	rtnl_lock();
3847 
3848 	pci_save_state(pdev);
3849 
3850 	if (!netif_running(dev)) {
3851 		rtnl_unlock();
3852 		return 0;
3853 	}
3854 
3855 	netif_device_detach(dev);
3856 
3857 	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3858 
3859 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3860 
3861 	rtnl_unlock();
3862 
3863 	return 0;
3864 }
3865 
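/**
 * bnx2x_resume - bring the device back from a PCI power transition.
 *
 * @pdev:	PCI device
 *
 * Returns -EAGAIN while parity recovery is in progress.  Otherwise
 * restores the PCI state and, if the interface is running, powers the
 * chip back to D0, re-attaches the net device and reloads the NIC
 * (LOAD_OPEN).
 */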
3866 int bnx2x_resume(struct pci_dev *pdev)
3867 {
3868 	struct net_device *dev = pci_get_drvdata(pdev);
3869 	struct bnx2x *bp;
3870 	int rc;
3871 
3872 	if (!dev) {
3873 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3874 		return -ENODEV;
3875 	}
3876 	bp = netdev_priv(dev);
3877 
3878 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3879 		BNX2X_ERR("Handling parity error recovery. Try again later\n");
3880 		return -EAGAIN;
3881 	}
3882 
3883 	rtnl_lock();
3884 
3885 	pci_restore_state(pdev);
3886 
3887 	if (!netif_running(dev)) {
3888 		rtnl_unlock();
3889 		return 0;
3890 	}
3891 
3892 	bnx2x_set_power_state(bp, PCI_D0);
3893 	netif_device_attach(dev);
3894 
3895 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
3896 
3897 	rtnl_unlock();
3898 
3899 	return rc;
3900 }
3901 
3902 
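/**
 * bnx2x_set_ctx_validation - write CDU validation data into an ETH context.
 *
 * @bp:		driver handle
 * @cxt:	context to initialize
 * @cid:	connection ID
 *
 * Fills the ustorm and xstorm aggregation sections of @cxt with the CDU
 * reserved values computed for the HW CID of @cid.
 */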
3903 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3904 			      u32 cid)
3905 {
3906 	/* ustorm cxt validation */
3907 	cxt->ustorm_ag_context.cdu_usage =
3908 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3909 			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3910 	/* xcontext validation */
3911 	cxt->xstorm_ag_context.cdu_reserved =
3912 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3913 			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3914 }
3915 
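/* Write the HC timeout (in ticks) for one status block index into
 * CSTORM internal memory.
 */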
3916 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3917 				    u8 fw_sb_id, u8 sb_index,
3918 				    u8 ticks)
3919 {
3921 	u32 addr = BAR_CSTRORM_INTMEM +
3922 		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3923 	REG_WR8(bp, addr, ticks);
3924 	DP(NETIF_MSG_IFUP,
3925 	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
3926 	   port, fw_sb_id, sb_index, ticks);
3927 }
3928 
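/* Set or clear the HC_INDEX_DATA_HC_ENABLED bit in the status block
 * index flags kept in CSTORM internal memory.
 */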
3929 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3930 				    u16 fw_sb_id, u8 sb_index,
3931 				    u8 disable)
3932 {
3933 	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3934 	u32 addr = BAR_CSTRORM_INTMEM +
3935 		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3936 	u16 flags = REG_RD16(bp, addr);
3937 	/* clear and set */
3938 	flags &= ~HC_INDEX_DATA_HC_ENABLED;
3939 	flags |= enable_flag;
3940 	REG_WR16(bp, addr, flags);
3941 	DP(NETIF_MSG_IFUP,
3942 	   "port %x fw_sb_id %d sb_index %d disable %d\n",
3943 	   port, fw_sb_id, sb_index, disable);
3944 }
3945 
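/**
 * bnx2x_update_coalesce_sb_index - program coalescing for one SB index.
 *
 * @bp:		driver handle
 * @fw_sb_id:	FW status block ID
 * @sb_index:	index within the status block
 * @disable:	disable HC for this index
 * @usec:	coalescing interval in microseconds
 *
 * Converts @usec into BNX2X_BTR ticks and writes both the timeout and the
 * enable flag; a @usec of zero disables coalescing for the index as well.
 */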
3946 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3947 				    u8 sb_index, u8 disable, u16 usec)
3948 {
3949 	int port = BP_PORT(bp);
3950 	u8 ticks = usec / BNX2X_BTR;
3951 
3952 	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3953 
3954 	disable = disable ? 1 : (usec ? 0 : 1);
3955 	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3956 }
3957