xref: /linux/drivers/net/ethernet/brocade/bna/bnad.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  * Linux network driver for QLogic BR-series Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15  * Copyright (c) 2014-2015 QLogic Corporation
16  * All rights reserved
17  * www.qlogic.com
18  */
19 #include <linux/bitops.h>
20 #include <linux/netdevice.h>
21 #include <linux/skbuff.h>
22 #include <linux/etherdevice.h>
23 #include <linux/in.h>
24 #include <linux/ethtool.h>
25 #include <linux/if_vlan.h>
26 #include <linux/if_ether.h>
27 #include <linux/ip.h>
28 #include <linux/prefetch.h>
29 #include <linux/module.h>
30 
31 #include "bnad.h"
32 #include "bna.h"
33 #include "cna.h"
34 
35 static DEFINE_MUTEX(bnad_fwimg_mutex);
36 
37 /*
38  * Module params
39  */
40 static uint bnad_msix_disable;
41 module_param(bnad_msix_disable, uint, 0444);
42 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
43 
44 static uint bnad_ioc_auto_recover = 1;
45 module_param(bnad_ioc_auto_recover, uint, 0444);
46 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
47 
48 static uint bna_debugfs_enable = 1;
49 module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
50 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
51 		 " Range[false:0|true:1]");
52 
53 /*
54  * Global variables
55  */
56 static u32 bnad_rxqs_per_cq = 2;
57 static u32 bna_id;
58 static struct mutex bnad_list_mutex;
59 static LIST_HEAD(bnad_list);
60 static const u8 bnad_bcast_addr[] __aligned(2) =
61 	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
62 
63 /*
64  * Local MACROS
65  */
66 #define BNAD_GET_MBOX_IRQ(_bnad)				\
67 	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
68 	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
69 	 ((_bnad)->pcidev->irq))
70 
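/*
 * Fills a bna_res_info entry with the kernel-virtual (KVA) memory
 * requirement for the unmap queues: _num blocks of _size bytes each.
 * Used, for example, by bnad_setup_tx() to size the Tx unmap queues.
 */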
71 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
72 do {								\
73 	(_res_info)->res_type = BNA_RES_T_MEM;			\
74 	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
75 	(_res_info)->res_u.mem_info.num = (_num);		\
76 	(_res_info)->res_u.mem_info.len = (_size);		\
77 } while (0)
78 
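/*
 * bnad_list tracks all adapter instances; bnad_list_mutex protects both
 * the list and the bna_id counter used to assign each instance its id.
 */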
79 static void
80 bnad_add_to_list(struct bnad *bnad)
81 {
82 	mutex_lock(&bnad_list_mutex);
83 	list_add_tail(&bnad->list_entry, &bnad_list);
84 	bnad->id = bna_id++;
85 	mutex_unlock(&bnad_list_mutex);
86 }
87 
88 static void
89 bnad_remove_from_list(struct bnad *bnad)
90 {
91 	mutex_lock(&bnad_list_mutex);
92 	list_del(&bnad->list_entry);
93 	mutex_unlock(&bnad_list_mutex);
94 }
95 
96 /*
97  * Reinitialize completions in CQ, once Rx is taken down
98  */
99 static void
100 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
101 {
102 	struct bna_cq_entry *cmpl;
103 	int i;
104 
105 	for (i = 0; i < ccb->q_depth; i++) {
106 		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
107 		cmpl->valid = 0;
108 	}
109 }
110 
111 /* Tx Datapath functions */
112 
113 
114 /* Caller should ensure that the entry at unmap_q[index] is valid */
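/* Unmaps the skb headlen and every fragment page belonging to the Tx
 * entry at unmap_q[index], advancing to the next work item whenever a
 * work item's vectors are exhausted, and returns the index of the entry
 * following the last one touched.
 */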
115 static u32
116 bnad_tx_buff_unmap(struct bnad *bnad,
117 			      struct bnad_tx_unmap *unmap_q,
118 			      u32 q_depth, u32 index)
119 {
120 	struct bnad_tx_unmap *unmap;
121 	struct sk_buff *skb;
122 	int vector, nvecs;
123 
124 	unmap = &unmap_q[index];
125 	nvecs = unmap->nvecs;
126 
127 	skb = unmap->skb;
128 	unmap->skb = NULL;
129 	unmap->nvecs = 0;
130 	dma_unmap_single(&bnad->pcidev->dev,
131 		dma_unmap_addr(&unmap->vectors[0], dma_addr),
132 		skb_headlen(skb), DMA_TO_DEVICE);
133 	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
134 	nvecs--;
135 
136 	vector = 0;
137 	while (nvecs) {
138 		vector++;
139 		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
140 			vector = 0;
141 			BNA_QE_INDX_INC(index, q_depth);
142 			unmap = &unmap_q[index];
143 		}
144 
145 		dma_unmap_page(&bnad->pcidev->dev,
146 			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
147 			dma_unmap_len(&unmap->vectors[vector], dma_len),
148 			DMA_TO_DEVICE);
149 		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
150 		nvecs--;
151 	}
152 
153 	BNA_QE_INDX_INC(index, q_depth);
154 
155 	return index;
156 }
157 
158 /*
159  * Frees all pending Tx buffers.
160  * At this point no activity is expected on the queue,
161  * so DMA unmapping and freeing them is safe.
162  */
163 static void
164 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
165 {
166 	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
167 	struct sk_buff *skb;
168 	int i;
169 
170 	for (i = 0; i < tcb->q_depth; i++) {
171 		skb = unmap_q[i].skb;
172 		if (!skb)
173 			continue;
174 		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
175 
176 		dev_kfree_skb_any(skb);
177 	}
178 }
179 
180 /*
181  * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
182  * Can be called in a) Interrupt context
183  *		    b) Sending context
184  */
185 static u32
186 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
187 {
188 	u32 sent_packets = 0, sent_bytes = 0;
189 	u32 wis, unmap_wis, hw_cons, cons, q_depth;
190 	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
191 	struct bnad_tx_unmap *unmap;
192 	struct sk_buff *skb;
193 
194 	/* Just return if TX is stopped */
195 	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
196 		return 0;
197 
198 	hw_cons = *(tcb->hw_consumer_index);
199 	cons = tcb->consumer_index;
200 	q_depth = tcb->q_depth;
201 
202 	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
203 	BUG_ON(wis > BNA_QE_IN_USE_CNT(tcb, tcb->q_depth));
204 
205 	while (wis) {
206 		unmap = &unmap_q[cons];
207 
208 		skb = unmap->skb;
209 
210 		sent_packets++;
211 		sent_bytes += skb->len;
212 
213 		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
214 		wis -= unmap_wis;
215 
216 		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
217 		dev_kfree_skb_any(skb);
218 	}
219 
220 	/* Update consumer pointers. */
221 	tcb->consumer_index = hw_cons;
222 
223 	tcb->txq->tx_packets += sent_packets;
224 	tcb->txq->tx_bytes += sent_bytes;
225 
226 	return sent_packets;
227 }
228 
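/* Reclaims completed Tx buffers for a TxQ. The BNAD_TXQ_FREE_SENT bit
 * serializes this against the Tx cleanup path; the netdev queue is woken
 * once enough entries are free, and the IB is acked with the number of
 * packets processed.
 */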
229 static u32
230 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
231 {
232 	struct net_device *netdev = bnad->netdev;
233 	u32 sent = 0;
234 
235 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
236 		return 0;
237 
238 	sent = bnad_txcmpl_process(bnad, tcb);
239 	if (sent) {
240 		if (netif_queue_stopped(netdev) &&
241 		    netif_carrier_ok(netdev) &&
242 		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
243 				    BNAD_NETIF_WAKE_THRESHOLD) {
244 			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
245 				netif_wake_queue(netdev);
246 				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
247 			}
248 		}
249 	}
250 
251 	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
252 		bna_ib_ack(tcb->i_dbell, sent);
253 
254 	smp_mb__before_atomic();
255 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
256 
257 	return sent;
258 }
259 
260 /* MSIX Tx Completion Handler */
261 static irqreturn_t
262 bnad_msix_tx(int irq, void *data)
263 {
264 	struct bna_tcb *tcb = (struct bna_tcb *)data;
265 	struct bnad *bnad = tcb->bnad;
266 
267 	bnad_tx_complete(bnad, tcb);
268 
269 	return IRQ_HANDLED;
270 }
271 
272 static inline void
273 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
274 {
275 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
276 
277 	unmap_q->reuse_pi = -1;
278 	unmap_q->alloc_order = -1;
279 	unmap_q->map_size = 0;
280 	unmap_q->type = BNAD_RXBUF_NONE;
281 }
282 
283 /* Default is page-based allocation; multi-buffer RxQs map one buffer per entry */
284 static int
285 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
286 {
287 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
288 	int order;
289 
290 	bnad_rxq_alloc_uninit(bnad, rcb);
291 
292 	order = get_order(rcb->rxq->buffer_size);
293 
294 	unmap_q->type = BNAD_RXBUF_PAGE;
295 
296 	if (bna_is_small_rxq(rcb->id)) {
297 		unmap_q->alloc_order = 0;
298 		unmap_q->map_size = rcb->rxq->buffer_size;
299 	} else {
300 		if (rcb->rxq->multi_buffer) {
301 			unmap_q->alloc_order = 0;
302 			unmap_q->map_size = rcb->rxq->buffer_size;
303 			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
304 		} else {
305 			unmap_q->alloc_order = order;
306 			unmap_q->map_size =
307 				(rcb->rxq->buffer_size > 2048) ?
308 				PAGE_SIZE << order : 2048;
309 		}
310 	}
311 
312 	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
313 
314 	return 0;
315 }
316 
317 static inline void
318 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
319 {
320 	if (!unmap->page)
321 		return;
322 
323 	dma_unmap_page(&bnad->pcidev->dev,
324 			dma_unmap_addr(&unmap->vector, dma_addr),
325 			unmap->vector.len, DMA_FROM_DEVICE);
326 	put_page(unmap->page);
327 	unmap->page = NULL;
328 	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
329 	unmap->vector.len = 0;
330 }
331 
332 static inline void
333 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
334 {
335 	if (!unmap->skb)
336 		return;
337 
338 	dma_unmap_single(&bnad->pcidev->dev,
339 			dma_unmap_addr(&unmap->vector, dma_addr),
340 			unmap->vector.len, DMA_FROM_DEVICE);
341 	dev_kfree_skb_any(unmap->skb);
342 	unmap->skb = NULL;
343 	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
344 	unmap->vector.len = 0;
345 }
346 
347 static void
348 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
349 {
350 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
351 	int i;
352 
353 	for (i = 0; i < rcb->q_depth; i++) {
354 		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
355 
356 		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
357 			bnad_rxq_cleanup_skb(bnad, unmap);
358 		else
359 			bnad_rxq_cleanup_page(bnad, unmap);
360 	}
361 	bnad_rxq_alloc_uninit(bnad, rcb);
362 }
363 
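/* Refills up to nalloc RxQ entries with page-backed buffers. Pages of
 * (PAGE_SIZE << alloc_order) bytes are carved into map_size chunks; a
 * partially used page is remembered via reuse_pi and reused (taking an
 * extra page reference) before a new page is allocated. The producer
 * doorbell is rung once at the end if anything was posted.
 */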
364 static u32
365 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
366 {
367 	u32 alloced, prod, q_depth;
368 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
369 	struct bnad_rx_unmap *unmap, *prev;
370 	struct bna_rxq_entry *rxent;
371 	struct page *page;
372 	u32 page_offset, alloc_size;
373 	dma_addr_t dma_addr;
374 
375 	prod = rcb->producer_index;
376 	q_depth = rcb->q_depth;
377 
378 	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
379 	alloced = 0;
380 
381 	while (nalloc--) {
382 		unmap = &unmap_q->unmap[prod];
383 
384 		if (unmap_q->reuse_pi < 0) {
385 			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
386 					unmap_q->alloc_order);
387 			page_offset = 0;
388 		} else {
389 			prev = &unmap_q->unmap[unmap_q->reuse_pi];
390 			page = prev->page;
391 			page_offset = prev->page_offset + unmap_q->map_size;
392 			get_page(page);
393 		}
394 
395 		if (unlikely(!page)) {
396 			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
397 			rcb->rxq->rxbuf_alloc_failed++;
398 			goto finishing;
399 		}
400 
401 		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
402 					unmap_q->map_size, DMA_FROM_DEVICE);
403 		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
404 			put_page(page);
405 			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
406 			rcb->rxq->rxbuf_map_failed++;
407 			goto finishing;
408 		}
409 
410 		unmap->page = page;
411 		unmap->page_offset = page_offset;
412 		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
413 		unmap->vector.len = unmap_q->map_size;
414 		page_offset += unmap_q->map_size;
415 
416 		if (page_offset < alloc_size)
417 			unmap_q->reuse_pi = prod;
418 		else
419 			unmap_q->reuse_pi = -1;
420 
421 		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
422 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
423 		BNA_QE_INDX_INC(prod, q_depth);
424 		alloced++;
425 	}
426 
427 finishing:
428 	if (likely(alloced)) {
429 		rcb->producer_index = prod;
430 		smp_mb();
431 		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
432 			bna_rxq_prod_indx_doorbell(rcb);
433 	}
434 
435 	return alloced;
436 }
437 
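/* Refills up to nalloc RxQ entries with skb-backed buffers: allocates an
 * IP-aligned skb per entry, DMA-maps its data area and posts the DMA
 * address to the hardware queue, ringing the doorbell once at the end.
 */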
438 static u32
439 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
440 {
441 	u32 alloced, prod, q_depth, buff_sz;
442 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
443 	struct bnad_rx_unmap *unmap;
444 	struct bna_rxq_entry *rxent;
445 	struct sk_buff *skb;
446 	dma_addr_t dma_addr;
447 
448 	buff_sz = rcb->rxq->buffer_size;
449 	prod = rcb->producer_index;
450 	q_depth = rcb->q_depth;
451 
452 	alloced = 0;
453 	while (nalloc--) {
454 		unmap = &unmap_q->unmap[prod];
455 
456 		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
457 
458 		if (unlikely(!skb)) {
459 			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
460 			rcb->rxq->rxbuf_alloc_failed++;
461 			goto finishing;
462 		}
463 
464 		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
465 					  buff_sz, DMA_FROM_DEVICE);
466 		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
467 			dev_kfree_skb_any(skb);
468 			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
469 			rcb->rxq->rxbuf_map_failed++;
470 			goto finishing;
471 		}
472 
473 		unmap->skb = skb;
474 		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
475 		unmap->vector.len = buff_sz;
476 
477 		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
478 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
479 		BNA_QE_INDX_INC(prod, q_depth);
480 		alloced++;
481 	}
482 
483 finishing:
484 	if (likely(alloced)) {
485 		rcb->producer_index = prod;
486 		smp_mb();
487 		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
488 			bna_rxq_prod_indx_doorbell(rcb);
489 	}
490 
491 	return alloced;
492 }
493 
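/* Replenishes an RxQ with skbs or pages depending on the unmap queue
 * type, but only once the number of free entries reaches
 * (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT).
 */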
494 static inline void
495 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
496 {
497 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
498 	u32 to_alloc;
499 
500 	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
501 	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
502 		return;
503 
504 	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
505 		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
506 	else
507 		bnad_rxq_refill_page(bnad, rcb, to_alloc);
508 }
509 
510 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
511 					BNA_CQ_EF_IPV6 | \
512 					BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
513 					BNA_CQ_EF_L4_CKSUM_OK)
514 
515 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
516 				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
517 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
518 				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
519 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
520 				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
521 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
522 				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
523 
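/* Drops a received frame by releasing the nvecs buffers that make it up,
 * starting at sop_ci, without passing anything up the stack.
 */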
524 static void
525 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
526 		    u32 sop_ci, u32 nvecs)
527 {
528 	struct bnad_rx_unmap_q *unmap_q;
529 	struct bnad_rx_unmap *unmap;
530 	u32 ci, vec;
531 
532 	unmap_q = rcb->unmap_q;
533 	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
534 		unmap = &unmap_q->unmap[ci];
535 		BNA_QE_INDX_INC(ci, rcb->q_depth);
536 
537 		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
538 			bnad_rxq_cleanup_skb(bnad, unmap);
539 		else
540 			bnad_rxq_cleanup_page(bnad, unmap);
541 	}
542 }
543 
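/* Unmaps the nvecs buffers of a multi-vector frame, starting at sop_ci,
 * and attaches them to the skb as page fragments; the last fragment
 * contributes last_fraglen bytes, the earlier ones their full mapped
 * length.
 */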
544 static void
545 bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
546 			u32 sop_ci, u32 nvecs, u32 last_fraglen)
547 {
548 	struct bnad *bnad;
549 	u32 ci, vec, len, totlen = 0;
550 	struct bnad_rx_unmap_q *unmap_q;
551 	struct bnad_rx_unmap *unmap;
552 
553 	unmap_q = rcb->unmap_q;
554 	bnad = rcb->bnad;
555 
556 	/* prefetch header */
557 	prefetch(page_address(unmap_q->unmap[sop_ci].page) +
558 			unmap_q->unmap[sop_ci].page_offset);
559 
560 	for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
561 		unmap = &unmap_q->unmap[ci];
562 		BNA_QE_INDX_INC(ci, rcb->q_depth);
563 
564 		dma_unmap_page(&bnad->pcidev->dev,
565 				dma_unmap_addr(&unmap->vector, dma_addr),
566 				unmap->vector.len, DMA_FROM_DEVICE);
567 
568 		len = (vec == nvecs) ?
569 			last_fraglen : unmap->vector.len;
570 		skb->truesize += unmap->vector.len;
571 		totlen += len;
572 
573 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
574 				unmap->page, unmap->page_offset, len);
575 
576 		unmap->page = NULL;
577 		unmap->vector.len = 0;
578 	}
579 
580 	skb->len += totlen;
581 	skb->data_len += totlen;
582 }
583 
584 static inline void
585 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
586 		  struct bnad_rx_unmap *unmap, u32 len)
587 {
588 	prefetch(skb->data);
589 
590 	dma_unmap_single(&bnad->pcidev->dev,
591 			dma_unmap_addr(&unmap->vector, dma_addr),
592 			unmap->vector.len, DMA_FROM_DEVICE);
593 
594 	skb_put(skb, len);
595 	skb->protocol = eth_type_trans(skb, bnad->netdev);
596 
597 	unmap->skb = NULL;
598 	unmap->vector.len = 0;
599 }
600 
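/* NAPI completion-queue handler: walks valid completion entries up to
 * budget, assembles single-buffer (skb) or multi-buffer (page-fragment)
 * frames, applies the RX checksum and VLAN offload results, hands frames
 * up via netif_receive_skb() or napi_gro_frags(), then acks the IB and
 * replenishes both RxQs.
 */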
601 static u32
602 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
603 {
604 	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
605 	struct bna_rcb *rcb = NULL;
606 	struct bnad_rx_unmap_q *unmap_q;
607 	struct bnad_rx_unmap *unmap = NULL;
608 	struct sk_buff *skb = NULL;
609 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
610 	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
611 	u32 packets = 0, len = 0, totlen = 0;
612 	u32 pi, vec, sop_ci = 0, nvecs = 0;
613 	u32 flags, masked_flags;
614 
615 	prefetch(bnad->netdev);
616 
617 	cq = ccb->sw_q;
618 
619 	while (packets < budget) {
620 		cmpl = &cq[ccb->producer_index];
621 		if (!cmpl->valid)
622 			break;
623 		/* The 'valid' field is set by the adapter, only after writing
624 		 * the other fields of completion entry. Hence, do not load
625 		 * other fields of completion entry *before* the 'valid' is
626 		 * loaded. Adding the rmb() here prevents the compiler and/or
627 		 * CPU from reordering the reads which would potentially result
628 		 * in reading stale values in completion entry.
629 		 */
630 		rmb();
631 
632 		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
633 
634 		if (bna_is_small_rxq(cmpl->rxq_id))
635 			rcb = ccb->rcb[1];
636 		else
637 			rcb = ccb->rcb[0];
638 
639 		unmap_q = rcb->unmap_q;
640 
641 		/* start of packet ci */
642 		sop_ci = rcb->consumer_index;
643 
644 		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
645 			unmap = &unmap_q->unmap[sop_ci];
646 			skb = unmap->skb;
647 		} else {
648 			skb = napi_get_frags(&rx_ctrl->napi);
649 			if (unlikely(!skb))
650 				break;
651 		}
652 		prefetch(skb);
653 
654 		flags = ntohl(cmpl->flags);
655 		len = ntohs(cmpl->length);
656 		totlen = len;
657 		nvecs = 1;
658 
659 		/* Check all the completions for this frame. If one is not
660 		 * valid yet, busy-waiting doesn't help much; break out here.
661 		 */
662 		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
663 		    (flags & BNA_CQ_EF_EOP) == 0) {
664 			pi = ccb->producer_index;
665 			do {
666 				BNA_QE_INDX_INC(pi, ccb->q_depth);
667 				next_cmpl = &cq[pi];
668 
669 				if (!next_cmpl->valid)
670 					break;
671 				/* The 'valid' field is set by the adapter, only
672 				 * after writing the other fields of completion
673 				 * entry. Hence, do not load other fields of
674 				 * completion entry *before* the 'valid' is
675 				 * loaded. Adding the rmb() here prevents the
676 				 * compiler and/or CPU from reordering the reads
677 				 * which would potentially result in reading
678 				 * stale values in completion entry.
679 				 */
680 				rmb();
681 
682 				len = ntohs(next_cmpl->length);
683 				flags = ntohl(next_cmpl->flags);
684 
685 				nvecs++;
686 				totlen += len;
687 			} while ((flags & BNA_CQ_EF_EOP) == 0);
688 
689 			if (!next_cmpl->valid)
690 				break;
691 		}
692 		packets++;
693 
694 		/* TODO: BNA_CQ_EF_LOCAL ? */
695 		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
696 						BNA_CQ_EF_FCS_ERROR |
697 						BNA_CQ_EF_TOO_LONG))) {
698 			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
699 			rcb->rxq->rx_packets_with_error++;
700 
701 			goto next;
702 		}
703 
704 		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
705 			bnad_cq_setup_skb(bnad, skb, unmap, len);
706 		else
707 			bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
708 
709 		rcb->rxq->rx_packets++;
710 		rcb->rxq->rx_bytes += totlen;
711 		ccb->bytes_per_intr += totlen;
712 
713 		masked_flags = flags & flags_cksum_prot_mask;
714 
715 		if (likely
716 		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
717 		     ((masked_flags == flags_tcp4) ||
718 		      (masked_flags == flags_udp4) ||
719 		      (masked_flags == flags_tcp6) ||
720 		      (masked_flags == flags_udp6))))
721 			skb->ip_summed = CHECKSUM_UNNECESSARY;
722 		else
723 			skb_checksum_none_assert(skb);
724 
725 		if ((flags & BNA_CQ_EF_VLAN) &&
726 		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
727 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
728 
729 		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
730 			netif_receive_skb(skb);
731 		else
732 			napi_gro_frags(&rx_ctrl->napi);
733 
734 next:
735 		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
736 		for (vec = 0; vec < nvecs; vec++) {
737 			cmpl = &cq[ccb->producer_index];
738 			cmpl->valid = 0;
739 			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
740 		}
741 	}
742 
743 	napi_gro_flush(&rx_ctrl->napi, false);
744 	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
745 		bna_ib_ack_disable_irq(ccb->i_dbell, packets);
746 
747 	bnad_rxq_post(bnad, ccb->rcb[0]);
748 	if (ccb->rcb[1])
749 		bnad_rxq_post(bnad, ccb->rcb[1]);
750 
751 	return packets;
752 }
753 
754 static void
755 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
756 {
757 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
758 	struct napi_struct *napi = &rx_ctrl->napi;
759 
760 	if (likely(napi_schedule_prep(napi))) {
761 		__napi_schedule(napi);
762 		rx_ctrl->rx_schedule++;
763 	}
764 }
765 
766 /* MSIX Rx Path Handler */
767 static irqreturn_t
768 bnad_msix_rx(int irq, void *data)
769 {
770 	struct bna_ccb *ccb = (struct bna_ccb *)data;
771 
772 	if (ccb) {
773 		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
774 		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
775 	}
776 
777 	return IRQ_HANDLED;
778 }
779 
780 /* Interrupt handlers */
781 
782 /* Mbox Interrupt Handlers */
783 static irqreturn_t
784 bnad_msix_mbox_handler(int irq, void *data)
785 {
786 	u32 intr_status;
787 	unsigned long flags;
788 	struct bnad *bnad = (struct bnad *)data;
789 
790 	spin_lock_irqsave(&bnad->bna_lock, flags);
791 	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
792 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
793 		return IRQ_HANDLED;
794 	}
795 
796 	bna_intr_status_get(&bnad->bna, intr_status);
797 
798 	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
799 		bna_mbox_handler(&bnad->bna, intr_status);
800 
801 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
802 
803 	return IRQ_HANDLED;
804 }
805 
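/* INTx (shared line) interrupt handler: services mailbox events under
 * bna_lock and, for data interrupts, runs Tx completion processing
 * inline and schedules NAPI polling for every Rx path.
 */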
806 static irqreturn_t
807 bnad_isr(int irq, void *data)
808 {
809 	int i, j;
810 	u32 intr_status;
811 	unsigned long flags;
812 	struct bnad *bnad = (struct bnad *)data;
813 	struct bnad_rx_info *rx_info;
814 	struct bnad_rx_ctrl *rx_ctrl;
815 	struct bna_tcb *tcb = NULL;
816 
817 	spin_lock_irqsave(&bnad->bna_lock, flags);
818 	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
819 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
820 		return IRQ_NONE;
821 	}
822 
823 	bna_intr_status_get(&bnad->bna, intr_status);
824 
825 	if (unlikely(!intr_status)) {
826 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
827 		return IRQ_NONE;
828 	}
829 
830 	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
831 		bna_mbox_handler(&bnad->bna, intr_status);
832 
833 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
834 
835 	if (!BNA_IS_INTX_DATA_INTR(intr_status))
836 		return IRQ_HANDLED;
837 
838 	/* Process data interrupts */
839 	/* Tx processing */
840 	for (i = 0; i < bnad->num_tx; i++) {
841 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
842 			tcb = bnad->tx_info[i].tcb[j];
843 			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
844 				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
845 		}
846 	}
847 	/* Rx processing */
848 	for (i = 0; i < bnad->num_rx; i++) {
849 		rx_info = &bnad->rx_info[i];
850 		if (!rx_info->rx)
851 			continue;
852 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
853 			rx_ctrl = &rx_info->rx_ctrl[j];
854 			if (rx_ctrl->ccb)
855 				bnad_netif_rx_schedule_poll(bnad,
856 							    rx_ctrl->ccb);
857 		}
858 	}
859 	return IRQ_HANDLED;
860 }
861 
862 /*
863  * Called in interrupt / callback context
864  * with bna_lock held, so cfg_flags access is OK
865  */
866 static void
867 bnad_enable_mbox_irq(struct bnad *bnad)
868 {
869 	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
870 
871 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
872 }
873 
874 /*
875  * Called with bnad->bna_lock held because of
876  * bnad->cfg_flags access.
877  */
878 static void
879 bnad_disable_mbox_irq(struct bnad *bnad)
880 {
881 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
882 
883 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
884 }
885 
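/* Copies the adapter's permanent MAC address into netdev->perm_addr and,
 * if no address has been configured yet, into netdev->dev_addr as well.
 */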
886 static void
887 bnad_set_netdev_perm_addr(struct bnad *bnad)
888 {
889 	struct net_device *netdev = bnad->netdev;
890 
891 	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
892 	if (is_zero_ether_addr(netdev->dev_addr))
893 		ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
894 }
895 
896 /* Control Path Handlers */
897 
898 /* Callbacks */
899 void
900 bnad_cb_mbox_intr_enable(struct bnad *bnad)
901 {
902 	bnad_enable_mbox_irq(bnad);
903 }
904 
905 void
906 bnad_cb_mbox_intr_disable(struct bnad *bnad)
907 {
908 	bnad_disable_mbox_irq(bnad);
909 }
910 
911 void
912 bnad_cb_ioceth_ready(struct bnad *bnad)
913 {
914 	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
915 	complete(&bnad->bnad_completions.ioc_comp);
916 }
917 
918 void
919 bnad_cb_ioceth_failed(struct bnad *bnad)
920 {
921 	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
922 	complete(&bnad->bnad_completions.ioc_comp);
923 }
924 
925 void
926 bnad_cb_ioceth_disabled(struct bnad *bnad)
927 {
928 	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
929 	complete(&bnad->bnad_completions.ioc_comp);
930 }
931 
932 static void
933 bnad_cb_enet_disabled(void *arg)
934 {
935 	struct bnad *bnad = (struct bnad *)arg;
936 
937 	netif_carrier_off(bnad->netdev);
938 	complete(&bnad->bnad_completions.enet_comp);
939 }
940 
941 void
942 bnad_cb_ethport_link_status(struct bnad *bnad,
943 			enum bna_link_status link_status)
944 {
945 	bool link_up = false;
946 
947 	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
948 
949 	if (link_status == BNA_CEE_UP) {
950 		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
951 			BNAD_UPDATE_CTR(bnad, cee_toggle);
952 		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
953 	} else {
954 		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
955 			BNAD_UPDATE_CTR(bnad, cee_toggle);
956 		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
957 	}
958 
959 	if (link_up) {
960 		if (!netif_carrier_ok(bnad->netdev)) {
961 			uint tx_id, tcb_id;
962 			netdev_info(bnad->netdev, "link up\n");
963 			netif_carrier_on(bnad->netdev);
964 			BNAD_UPDATE_CTR(bnad, link_toggle);
965 			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
966 				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
967 				      tcb_id++) {
968 					struct bna_tcb *tcb =
969 					bnad->tx_info[tx_id].tcb[tcb_id];
970 					u32 txq_id;
971 					if (!tcb)
972 						continue;
973 
974 					txq_id = tcb->id;
975 
976 					if (test_bit(BNAD_TXQ_TX_STARTED,
977 						     &tcb->flags)) {
978 						/*
979 						 * Force an immediate
980 						 * Transmit Schedule */
981 						netif_wake_subqueue(
982 								bnad->netdev,
983 								txq_id);
984 						BNAD_UPDATE_CTR(bnad,
985 							netif_queue_wakeup);
986 					} else {
987 						netif_stop_subqueue(
988 								bnad->netdev,
989 								txq_id);
990 						BNAD_UPDATE_CTR(bnad,
991 							netif_queue_stop);
992 					}
993 				}
994 			}
995 		}
996 	} else {
997 		if (netif_carrier_ok(bnad->netdev)) {
998 			netdev_info(bnad->netdev, "link down\n");
999 			netif_carrier_off(bnad->netdev);
1000 			BNAD_UPDATE_CTR(bnad, link_toggle);
1001 		}
1002 	}
1003 }
1004 
1005 static void
1006 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
1007 {
1008 	struct bnad *bnad = (struct bnad *)arg;
1009 
1010 	complete(&bnad->bnad_completions.tx_comp);
1011 }
1012 
1013 static void
1014 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1015 {
1016 	struct bnad_tx_info *tx_info =
1017 			(struct bnad_tx_info *)tcb->txq->tx->priv;
1018 
1019 	tcb->priv = tcb;
1020 	tx_info->tcb[tcb->id] = tcb;
1021 }
1022 
1023 static void
1024 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1025 {
1026 	struct bnad_tx_info *tx_info =
1027 			(struct bnad_tx_info *)tcb->txq->tx->priv;
1028 
1029 	tx_info->tcb[tcb->id] = NULL;
1030 	tcb->priv = NULL;
1031 }
1032 
1033 static void
1034 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1035 {
1036 	struct bnad_rx_info *rx_info =
1037 			(struct bnad_rx_info *)ccb->cq->rx->priv;
1038 
1039 	rx_info->rx_ctrl[ccb->id].ccb = ccb;
1040 	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1041 }
1042 
1043 static void
1044 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1045 {
1046 	struct bnad_rx_info *rx_info =
1047 			(struct bnad_rx_info *)ccb->cq->rx->priv;
1048 
1049 	rx_info->rx_ctrl[ccb->id].ccb = NULL;
1050 }
1051 
1052 static void
1053 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1054 {
1055 	struct bnad_tx_info *tx_info =
1056 			(struct bnad_tx_info *)tx->priv;
1057 	struct bna_tcb *tcb;
1058 	u32 txq_id;
1059 	int i;
1060 
1061 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1062 		tcb = tx_info->tcb[i];
1063 		if (!tcb)
1064 			continue;
1065 		txq_id = tcb->id;
1066 		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1067 		netif_stop_subqueue(bnad->netdev, txq_id);
1068 	}
1069 }
1070 
1071 static void
1072 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1073 {
1074 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1075 	struct bna_tcb *tcb;
1076 	u32 txq_id;
1077 	int i;
1078 
1079 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1080 		tcb = tx_info->tcb[i];
1081 		if (!tcb)
1082 			continue;
1083 		txq_id = tcb->id;
1084 
1085 		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1086 		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1087 		BUG_ON(*(tcb->hw_consumer_index) != 0);
1088 
1089 		if (netif_carrier_ok(bnad->netdev)) {
1090 			netif_wake_subqueue(bnad->netdev, txq_id);
1091 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1092 		}
1093 	}
1094 
1095 	/*
1096 	 * Workaround: if the first ioceth enable failed and we were
1097 	 * left with a zero MAC address, try to fetch the MAC address
1098 	 * again here.
1099 	 */
1100 	if (is_zero_ether_addr(bnad->perm_addr)) {
1101 		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1102 		bnad_set_netdev_perm_addr(bnad);
1103 	}
1104 }
1105 
1106 /*
1107  * Free all TxQ buffers and then notify TX_E_CLEANUP_DONE to the Tx fsm.
1108  */
1109 static void
1110 bnad_tx_cleanup(struct delayed_work *work)
1111 {
1112 	struct bnad_tx_info *tx_info =
1113 		container_of(work, struct bnad_tx_info, tx_cleanup_work);
1114 	struct bnad *bnad = NULL;
1115 	struct bna_tcb *tcb;
1116 	unsigned long flags;
1117 	u32 i, pending = 0;
1118 
1119 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1120 		tcb = tx_info->tcb[i];
1121 		if (!tcb)
1122 			continue;
1123 
1124 		bnad = tcb->bnad;
1125 
1126 		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1127 			pending++;
1128 			continue;
1129 		}
1130 
1131 		bnad_txq_cleanup(bnad, tcb);
1132 
1133 		smp_mb__before_atomic();
1134 		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1135 	}
1136 
1137 	if (pending) {
1138 		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1139 			msecs_to_jiffies(1));
1140 		return;
1141 	}
1142 
1143 	spin_lock_irqsave(&bnad->bna_lock, flags);
1144 	bna_tx_cleanup_complete(tx_info->tx);
1145 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1146 }
1147 
1148 static void
1149 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1150 {
1151 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1152 	struct bna_tcb *tcb;
1153 	int i;
1154 
1155 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1156 		tcb = tx_info->tcb[i];
1157 		if (!tcb)
1158 			continue;
1159 	}
1160 
1161 	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1162 }
1163 
1164 static void
1165 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1166 {
1167 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1168 	struct bna_ccb *ccb;
1169 	struct bnad_rx_ctrl *rx_ctrl;
1170 	int i;
1171 
1172 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1173 		rx_ctrl = &rx_info->rx_ctrl[i];
1174 		ccb = rx_ctrl->ccb;
1175 		if (!ccb)
1176 			continue;
1177 
1178 		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1179 
1180 		if (ccb->rcb[1])
1181 			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1182 	}
1183 }
1184 
1185 /*
1186  * Free all RxQ buffers and then notify RX_E_CLEANUP_DONE to the Rx fsm.
1187  */
1188 static void
1189 bnad_rx_cleanup(void *work)
1190 {
1191 	struct bnad_rx_info *rx_info =
1192 		container_of(work, struct bnad_rx_info, rx_cleanup_work);
1193 	struct bnad_rx_ctrl *rx_ctrl;
1194 	struct bnad *bnad = NULL;
1195 	unsigned long flags;
1196 	u32 i;
1197 
1198 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1199 		rx_ctrl = &rx_info->rx_ctrl[i];
1200 
1201 		if (!rx_ctrl->ccb)
1202 			continue;
1203 
1204 		bnad = rx_ctrl->ccb->bnad;
1205 
1206 		/*
1207 		 * Wait till the poll handler has exited
1208 		 * and nothing can be scheduled anymore
1209 		 */
1210 		napi_disable(&rx_ctrl->napi);
1211 
1212 		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1213 		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1214 		if (rx_ctrl->ccb->rcb[1])
1215 			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1216 	}
1217 
1218 	spin_lock_irqsave(&bnad->bna_lock, flags);
1219 	bna_rx_cleanup_complete(rx_info->rx);
1220 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1221 }
1222 
1223 static void
1224 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1225 {
1226 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1227 	struct bna_ccb *ccb;
1228 	struct bnad_rx_ctrl *rx_ctrl;
1229 	int i;
1230 
1231 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1232 		rx_ctrl = &rx_info->rx_ctrl[i];
1233 		ccb = rx_ctrl->ccb;
1234 		if (!ccb)
1235 			continue;
1236 
1237 		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1238 
1239 		if (ccb->rcb[1])
1240 			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1241 	}
1242 
1243 	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1244 }
1245 
1246 static void
1247 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1248 {
1249 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1250 	struct bna_ccb *ccb;
1251 	struct bna_rcb *rcb;
1252 	struct bnad_rx_ctrl *rx_ctrl;
1253 	int i, j;
1254 
1255 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1256 		rx_ctrl = &rx_info->rx_ctrl[i];
1257 		ccb = rx_ctrl->ccb;
1258 		if (!ccb)
1259 			continue;
1260 
1261 		napi_enable(&rx_ctrl->napi);
1262 
1263 		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1264 			rcb = ccb->rcb[j];
1265 			if (!rcb)
1266 				continue;
1267 
1268 			bnad_rxq_alloc_init(bnad, rcb);
1269 			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1270 			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1271 			bnad_rxq_post(bnad, rcb);
1272 		}
1273 	}
1274 }
1275 
1276 static void
1277 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1278 {
1279 	struct bnad *bnad = (struct bnad *)arg;
1280 
1281 	complete(&bnad->bnad_completions.rx_comp);
1282 }
1283 
1284 static void
1285 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1286 {
1287 	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1288 	complete(&bnad->bnad_completions.mcast_comp);
1289 }
1290 
1291 void
1292 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1293 		       struct bna_stats *stats)
1294 {
1295 	if (status == BNA_CB_SUCCESS)
1296 		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1297 
1298 	if (!netif_running(bnad->netdev) ||
1299 		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1300 		return;
1301 
1302 	mod_timer(&bnad->stats_timer,
1303 		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1304 }
1305 
1306 static void
1307 bnad_cb_enet_mtu_set(struct bnad *bnad)
1308 {
1309 	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1310 	complete(&bnad->bnad_completions.mtu_comp);
1311 }
1312 
1313 void
1314 bnad_cb_completion(void *arg, enum bfa_status status)
1315 {
1316 	struct bnad_iocmd_comp *iocmd_comp =
1317 			(struct bnad_iocmd_comp *)arg;
1318 
1319 	iocmd_comp->comp_status = (u32) status;
1320 	complete(&iocmd_comp->comp);
1321 }
1322 
1323 /* Resource allocation, free functions */
1324 
1325 static void
1326 bnad_mem_free(struct bnad *bnad,
1327 	      struct bna_mem_info *mem_info)
1328 {
1329 	int i;
1330 	dma_addr_t dma_pa;
1331 
1332 	if (mem_info->mdl == NULL)
1333 		return;
1334 
1335 	for (i = 0; i < mem_info->num; i++) {
1336 		if (mem_info->mdl[i].kva != NULL) {
1337 			if (mem_info->mem_type == BNA_MEM_T_DMA) {
1338 				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1339 						dma_pa);
1340 				dma_free_coherent(&bnad->pcidev->dev,
1341 						  mem_info->mdl[i].len,
1342 						  mem_info->mdl[i].kva, dma_pa);
1343 			} else
1344 				kfree(mem_info->mdl[i].kva);
1345 		}
1346 	}
1347 	kfree(mem_info->mdl);
1348 	mem_info->mdl = NULL;
1349 }
1350 
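/* Allocates mem_info->num blocks of mem_info->len bytes each, using
 * coherent DMA memory for BNA_MEM_T_DMA and kzalloc otherwise; on
 * failure everything allocated so far is freed.
 */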
1351 static int
1352 bnad_mem_alloc(struct bnad *bnad,
1353 	       struct bna_mem_info *mem_info)
1354 {
1355 	int i;
1356 	dma_addr_t dma_pa;
1357 
1358 	if ((mem_info->num == 0) || (mem_info->len == 0)) {
1359 		mem_info->mdl = NULL;
1360 		return 0;
1361 	}
1362 
1363 	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1364 				GFP_KERNEL);
1365 	if (mem_info->mdl == NULL)
1366 		return -ENOMEM;
1367 
1368 	if (mem_info->mem_type == BNA_MEM_T_DMA) {
1369 		for (i = 0; i < mem_info->num; i++) {
1370 			mem_info->mdl[i].len = mem_info->len;
1371 			mem_info->mdl[i].kva =
1372 				dma_alloc_coherent(&bnad->pcidev->dev,
1373 						   mem_info->len, &dma_pa,
1374 						   GFP_KERNEL);
1375 			if (mem_info->mdl[i].kva == NULL)
1376 				goto err_return;
1377 
1378 			BNA_SET_DMA_ADDR(dma_pa,
1379 					 &(mem_info->mdl[i].dma));
1380 		}
1381 	} else {
1382 		for (i = 0; i < mem_info->num; i++) {
1383 			mem_info->mdl[i].len = mem_info->len;
1384 			mem_info->mdl[i].kva = kzalloc(mem_info->len,
1385 							GFP_KERNEL);
1386 			if (mem_info->mdl[i].kva == NULL)
1387 				goto err_return;
1388 		}
1389 	}
1390 
1391 	return 0;
1392 
1393 err_return:
1394 	bnad_mem_free(bnad, mem_info);
1395 	return -ENOMEM;
1396 }
1397 
1398 /* Free IRQ for Mailbox */
1399 static void
1400 bnad_mbox_irq_free(struct bnad *bnad)
1401 {
1402 	int irq;
1403 	unsigned long flags;
1404 
1405 	spin_lock_irqsave(&bnad->bna_lock, flags);
1406 	bnad_disable_mbox_irq(bnad);
1407 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1408 
1409 	irq = BNAD_GET_MBOX_IRQ(bnad);
1410 	free_irq(irq, bnad);
1411 }
1412 
1413 /*
1414  * Allocates the IRQ for the Mailbox, but keeps it disabled.
1415  * It will be enabled once we get the mbox enable callback
1416  * from bna.
1417  */
1418 static int
1419 bnad_mbox_irq_alloc(struct bnad *bnad)
1420 {
1421 	int		err = 0;
1422 	unsigned long	irq_flags, flags;
1423 	u32	irq;
1424 	irq_handler_t	irq_handler;
1425 
1426 	spin_lock_irqsave(&bnad->bna_lock, flags);
1427 	if (bnad->cfg_flags & BNAD_CF_MSIX) {
1428 		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1429 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1430 		irq_flags = 0;
1431 	} else {
1432 		irq_handler = (irq_handler_t)bnad_isr;
1433 		irq = bnad->pcidev->irq;
1434 		irq_flags = IRQF_SHARED;
1435 	}
1436 
1437 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1438 	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1439 
1440 	/*
1441 	 * Set the Mbox IRQ disable flag, so that the IRQ handler
1442 	 * called from request_irq() for SHARED IRQs does not execute
1443 	 */
1444 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1445 
1446 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1447 
1448 	err = request_irq(irq, irq_handler, irq_flags,
1449 			  bnad->mbox_irq_name, bnad);
1450 
1451 	return err;
1452 }
1453 
1454 static void
1455 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1456 {
1457 	kfree(intr_info->idl);
1458 	intr_info->idl = NULL;
1459 }
1460 
1461 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1462 static int
1463 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1464 		    u32 txrx_id, struct bna_intr_info *intr_info)
1465 {
1466 	int i, vector_start = 0;
1467 	u32 cfg_flags;
1468 	unsigned long flags;
1469 
1470 	spin_lock_irqsave(&bnad->bna_lock, flags);
1471 	cfg_flags = bnad->cfg_flags;
1472 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1473 
1474 	if (cfg_flags & BNAD_CF_MSIX) {
1475 		intr_info->intr_type = BNA_INTR_T_MSIX;
1476 		intr_info->idl = kcalloc(intr_info->num,
1477 					sizeof(struct bna_intr_descr),
1478 					GFP_KERNEL);
1479 		if (!intr_info->idl)
1480 			return -ENOMEM;
1481 
1482 		switch (src) {
1483 		case BNAD_INTR_TX:
1484 			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1485 			break;
1486 
1487 		case BNAD_INTR_RX:
1488 			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1489 					(bnad->num_tx * bnad->num_txq_per_tx) +
1490 					txrx_id;
1491 			break;
1492 
1493 		default:
1494 			BUG();
1495 		}
1496 
1497 		for (i = 0; i < intr_info->num; i++)
1498 			intr_info->idl[i].vector = vector_start + i;
1499 	} else {
1500 		intr_info->intr_type = BNA_INTR_T_INTX;
1501 		intr_info->num = 1;
1502 		intr_info->idl = kcalloc(intr_info->num,
1503 					sizeof(struct bna_intr_descr),
1504 					GFP_KERNEL);
1505 		if (!intr_info->idl)
1506 			return -ENOMEM;
1507 
1508 		switch (src) {
1509 		case BNAD_INTR_TX:
1510 			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1511 			break;
1512 
1513 		case BNAD_INTR_RX:
1514 			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1515 			break;
1516 		}
1517 	}
1518 	return 0;
1519 }
1520 
1521 /* NOTE: Should be called for MSIX only
1522  * Unregisters Tx MSIX vector(s) from the kernel
1523  */
1524 static void
1525 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1526 			int num_txqs)
1527 {
1528 	int i;
1529 	int vector_num;
1530 
1531 	for (i = 0; i < num_txqs; i++) {
1532 		if (tx_info->tcb[i] == NULL)
1533 			continue;
1534 
1535 		vector_num = tx_info->tcb[i]->intr_vector;
1536 		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1537 	}
1538 }
1539 
1540 /* NOTE: Should be called for MSIX only
1541  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1542  */
1543 static int
1544 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1545 			u32 tx_id, int num_txqs)
1546 {
1547 	int i;
1548 	int err;
1549 	int vector_num;
1550 
1551 	for (i = 0; i < num_txqs; i++) {
1552 		vector_num = tx_info->tcb[i]->intr_vector;
1553 		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1554 				tx_id + tx_info->tcb[i]->id);
1555 		err = request_irq(bnad->msix_table[vector_num].vector,
1556 				  (irq_handler_t)bnad_msix_tx, 0,
1557 				  tx_info->tcb[i]->name,
1558 				  tx_info->tcb[i]);
1559 		if (err)
1560 			goto err_return;
1561 	}
1562 
1563 	return 0;
1564 
1565 err_return:
1566 	if (i > 0)
1567 		bnad_tx_msix_unregister(bnad, tx_info, i);
1568 	return -1;
1569 }
1570 
1571 /* NOTE: Should be called for MSIX only
1572  * Unregisters Rx MSIX vector(s) from the kernel
1573  */
1574 static void
1575 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1576 			int num_rxps)
1577 {
1578 	int i;
1579 	int vector_num;
1580 
1581 	for (i = 0; i < num_rxps; i++) {
1582 		if (rx_info->rx_ctrl[i].ccb == NULL)
1583 			continue;
1584 
1585 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1586 		free_irq(bnad->msix_table[vector_num].vector,
1587 			 rx_info->rx_ctrl[i].ccb);
1588 	}
1589 }
1590 
1591 /* NOTE: Should be called for MSIX only
1592  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1593  */
1594 static int
1595 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1596 			u32 rx_id, int num_rxps)
1597 {
1598 	int i;
1599 	int err;
1600 	int vector_num;
1601 
1602 	for (i = 0; i < num_rxps; i++) {
1603 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1604 		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1605 			bnad->netdev->name,
1606 			rx_id + rx_info->rx_ctrl[i].ccb->id);
1607 		err = request_irq(bnad->msix_table[vector_num].vector,
1608 				  (irq_handler_t)bnad_msix_rx, 0,
1609 				  rx_info->rx_ctrl[i].ccb->name,
1610 				  rx_info->rx_ctrl[i].ccb);
1611 		if (err)
1612 			goto err_return;
1613 	}
1614 
1615 	return 0;
1616 
1617 err_return:
1618 	if (i > 0)
1619 		bnad_rx_msix_unregister(bnad, rx_info, i);
1620 	return -1;
1621 }
1622 
1623 /* Free Tx object Resources */
1624 static void
1625 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1626 {
1627 	int i;
1628 
1629 	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1630 		if (res_info[i].res_type == BNA_RES_T_MEM)
1631 			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1632 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1633 			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1634 	}
1635 }
1636 
1637 /* Allocates memory and interrupt resources for Tx object */
1638 static int
1639 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1640 		  u32 tx_id)
1641 {
1642 	int i, err = 0;
1643 
1644 	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1645 		if (res_info[i].res_type == BNA_RES_T_MEM)
1646 			err = bnad_mem_alloc(bnad,
1647 					&res_info[i].res_u.mem_info);
1648 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1649 			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1650 					&res_info[i].res_u.intr_info);
1651 		if (err)
1652 			goto err_return;
1653 	}
1654 	return 0;
1655 
1656 err_return:
1657 	bnad_tx_res_free(bnad, res_info);
1658 	return err;
1659 }
1660 
1661 /* Free Rx object Resources */
1662 static void
1663 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1664 {
1665 	int i;
1666 
1667 	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1668 		if (res_info[i].res_type == BNA_RES_T_MEM)
1669 			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1670 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1671 			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1672 	}
1673 }
1674 
1675 /* Allocates memory and interrupt resources for Rx object */
1676 static int
1677 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1678 		  uint rx_id)
1679 {
1680 	int i, err = 0;
1681 
1682 	/* All memory needs to be allocated before setup_ccbs */
1683 	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1684 		if (res_info[i].res_type == BNA_RES_T_MEM)
1685 			err = bnad_mem_alloc(bnad,
1686 					&res_info[i].res_u.mem_info);
1687 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1688 			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1689 					&res_info[i].res_u.intr_info);
1690 		if (err)
1691 			goto err_return;
1692 	}
1693 	return 0;
1694 
1695 err_return:
1696 	bnad_rx_res_free(bnad, res_info);
1697 	return err;
1698 }
1699 
1700 /* Timer callbacks */
1701 /* a) IOC timer */
1702 static void
1703 bnad_ioc_timeout(unsigned long data)
1704 {
1705 	struct bnad *bnad = (struct bnad *)data;
1706 	unsigned long flags;
1707 
1708 	spin_lock_irqsave(&bnad->bna_lock, flags);
1709 	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1710 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1711 }
1712 
1713 static void
1714 bnad_ioc_hb_check(unsigned long data)
1715 {
1716 	struct bnad *bnad = (struct bnad *)data;
1717 	unsigned long flags;
1718 
1719 	spin_lock_irqsave(&bnad->bna_lock, flags);
1720 	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1721 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1722 }
1723 
1724 static void
1725 bnad_iocpf_timeout(unsigned long data)
1726 {
1727 	struct bnad *bnad = (struct bnad *)data;
1728 	unsigned long flags;
1729 
1730 	spin_lock_irqsave(&bnad->bna_lock, flags);
1731 	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1732 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1733 }
1734 
1735 static void
1736 bnad_iocpf_sem_timeout(unsigned long data)
1737 {
1738 	struct bnad *bnad = (struct bnad *)data;
1739 	unsigned long flags;
1740 
1741 	spin_lock_irqsave(&bnad->bna_lock, flags);
1742 	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1743 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1744 }
1745 
1746 /*
1747  * All timer routines use bnad->bna_lock to protect against
1748  * the following race, which may occur in case of no locking:
1749  *	Time	CPU m	CPU n
1750  *	0       1 = test_bit
1751  *	1			clear_bit
1752  *	2			del_timer_sync
1753  *	3	mod_timer
1754  */
1755 
1756 /* b) Dynamic Interrupt Moderation Timer */
1757 static void
1758 bnad_dim_timeout(unsigned long data)
1759 {
1760 	struct bnad *bnad = (struct bnad *)data;
1761 	struct bnad_rx_info *rx_info;
1762 	struct bnad_rx_ctrl *rx_ctrl;
1763 	int i, j;
1764 	unsigned long flags;
1765 
1766 	if (!netif_carrier_ok(bnad->netdev))
1767 		return;
1768 
1769 	spin_lock_irqsave(&bnad->bna_lock, flags);
1770 	for (i = 0; i < bnad->num_rx; i++) {
1771 		rx_info = &bnad->rx_info[i];
1772 		if (!rx_info->rx)
1773 			continue;
1774 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1775 			rx_ctrl = &rx_info->rx_ctrl[j];
1776 			if (!rx_ctrl->ccb)
1777 				continue;
1778 			bna_rx_dim_update(rx_ctrl->ccb);
1779 		}
1780 	}
1781 
1782 	/* Checking BNAD_RF_DIM_TIMER_RUNNING here does not eliminate the race */
1783 	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1784 		mod_timer(&bnad->dim_timer,
1785 			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1786 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1787 }
1788 
1789 /* c)  Statistics Timer */
1790 static void
1791 bnad_stats_timeout(unsigned long data)
1792 {
1793 	struct bnad *bnad = (struct bnad *)data;
1794 	unsigned long flags;
1795 
1796 	if (!netif_running(bnad->netdev) ||
1797 		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1798 		return;
1799 
1800 	spin_lock_irqsave(&bnad->bna_lock, flags);
1801 	bna_hw_stats_get(&bnad->bna);
1802 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1803 }
1804 
1805 /*
1806  * Set up timer for DIM
1807  * Called with bnad->bna_lock held
1808  */
1809 void
1810 bnad_dim_timer_start(struct bnad *bnad)
1811 {
1812 	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1813 	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1814 		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1815 			    (unsigned long)bnad);
1816 		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1817 		mod_timer(&bnad->dim_timer,
1818 			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1819 	}
1820 }
1821 
1822 /*
1823  * Set up timer for statistics
1824  * Called with mutex_lock(&bnad->conf_mutex) held
1825  */
1826 static void
1827 bnad_stats_timer_start(struct bnad *bnad)
1828 {
1829 	unsigned long flags;
1830 
1831 	spin_lock_irqsave(&bnad->bna_lock, flags);
1832 	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1833 		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1834 			    (unsigned long)bnad);
1835 		mod_timer(&bnad->stats_timer,
1836 			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1837 	}
1838 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1839 }
1840 
1841 /*
1842  * Stops the stats timer
1843  * Called with mutex_lock(&bnad->conf_mutex) held
1844  */
1845 static void
1846 bnad_stats_timer_stop(struct bnad *bnad)
1847 {
1848 	int to_del = 0;
1849 	unsigned long flags;
1850 
1851 	spin_lock_irqsave(&bnad->bna_lock, flags);
1852 	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1853 		to_del = 1;
1854 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1855 	if (to_del)
1856 		del_timer_sync(&bnad->stats_timer);
1857 }
1858 
1859 /* Utilities */
1860 
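/* Copies the netdev's multicast addresses into mc_list starting at
 * index 1; index 0 is reserved for the broadcast address.
 */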
1861 static void
1862 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1863 {
1864 	int i = 1; /* Index 0 has broadcast address */
1865 	struct netdev_hw_addr *mc_addr;
1866 
1867 	netdev_for_each_mc_addr(mc_addr, netdev) {
1868 		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1869 		i++;
1870 	}
1871 }
1872 
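/* NAPI poll handler: processes up to budget Rx completions; when fewer
 * are received, completes NAPI and re-enables the CQ interrupt.
 */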
1873 static int
1874 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1875 {
1876 	struct bnad_rx_ctrl *rx_ctrl =
1877 		container_of(napi, struct bnad_rx_ctrl, napi);
1878 	struct bnad *bnad = rx_ctrl->bnad;
1879 	int rcvd = 0;
1880 
1881 	rx_ctrl->rx_poll_ctr++;
1882 
1883 	if (!netif_carrier_ok(bnad->netdev))
1884 		goto poll_exit;
1885 
1886 	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1887 	if (rcvd >= budget)
1888 		return rcvd;
1889 
1890 poll_exit:
1891 	napi_complete(napi);
1892 
1893 	rx_ctrl->rx_complete++;
1894 
1895 	if (rx_ctrl->ccb)
1896 		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1897 
1898 	return rcvd;
1899 }
1900 
1901 #define BNAD_NAPI_POLL_QUOTA		64
1902 static void
1903 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1904 {
1905 	struct bnad_rx_ctrl *rx_ctrl;
1906 	int i;
1907 
1908 	/* Initialize NAPI; it is enabled later from bnad_cb_rx_post() */
1909 	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
1910 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1911 		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1912 			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1913 	}
1914 }
1915 
1916 static void
1917 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1918 {
1919 	int i;
1920 
1921 	/* First disable and then clean up */
1922 	for (i = 0; i < bnad->num_rxp_per_rx; i++)
1923 		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1924 }
1925 
1926 /* Should be called with conf_lock held */
1927 void
1928 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1929 {
1930 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1931 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1932 	unsigned long flags;
1933 
1934 	if (!tx_info->tx)
1935 		return;
1936 
1937 	init_completion(&bnad->bnad_completions.tx_comp);
1938 	spin_lock_irqsave(&bnad->bna_lock, flags);
1939 	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1940 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1941 	wait_for_completion(&bnad->bnad_completions.tx_comp);
1942 
1943 	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1944 		bnad_tx_msix_unregister(bnad, tx_info,
1945 			bnad->num_txq_per_tx);
1946 
1947 	spin_lock_irqsave(&bnad->bna_lock, flags);
1948 	bna_tx_destroy(tx_info->tx);
1949 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1950 
1951 	tx_info->tx = NULL;
1952 	tx_info->tx_id = 0;
1953 
1954 	bnad_tx_res_free(bnad, res_info);
1955 }
1956 
1957 /* Should be called with conf_lock held */
1958 int
1959 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1960 {
1961 	int err;
1962 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1963 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1964 	struct bna_intr_info *intr_info =
1965 			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1966 	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1967 	static const struct bna_tx_event_cbfn tx_cbfn = {
1968 		.tcb_setup_cbfn = bnad_cb_tcb_setup,
1969 		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1970 		.tx_stall_cbfn = bnad_cb_tx_stall,
1971 		.tx_resume_cbfn = bnad_cb_tx_resume,
1972 		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1973 	};
1974 
1975 	struct bna_tx *tx;
1976 	unsigned long flags;
1977 
1978 	tx_info->tx_id = tx_id;
1979 
1980 	/* Initialize the Tx object configuration */
1981 	tx_config->num_txq = bnad->num_txq_per_tx;
1982 	tx_config->txq_depth = bnad->txq_depth;
1983 	tx_config->tx_type = BNA_TX_T_REGULAR;
1984 	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1985 
1986 	/* Get BNA's resource requirement for one tx object */
1987 	spin_lock_irqsave(&bnad->bna_lock, flags);
1988 	bna_tx_res_req(bnad->num_txq_per_tx,
1989 		bnad->txq_depth, res_info);
1990 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1991 
1992 	/* Fill Unmap Q memory requirements */
1993 	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1994 			bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1995 			bnad->txq_depth));
1996 
1997 	/* Allocate resources */
1998 	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1999 	if (err)
2000 		return err;
2001 
2002 	/* Ask BNA to create one Tx object, supplying required resources */
2003 	spin_lock_irqsave(&bnad->bna_lock, flags);
2004 	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
2005 			tx_info);
2006 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2007 	if (!tx) {
2008 		err = -ENOMEM;
2009 		goto err_return;
2010 	}
2011 	tx_info->tx = tx;
2012 
2013 	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2014 			(work_func_t)bnad_tx_cleanup);
2015 
2016 	/* Register ISR for the Tx object */
2017 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2018 		err = bnad_tx_msix_register(bnad, tx_info,
2019 			tx_id, bnad->num_txq_per_tx);
2020 		if (err)
2021 			goto cleanup_tx;
2022 	}
2023 
2024 	spin_lock_irqsave(&bnad->bna_lock, flags);
2025 	bna_tx_enable(tx);
2026 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2027 
2028 	return 0;
2029 
2030 cleanup_tx:
2031 	spin_lock_irqsave(&bnad->bna_lock, flags);
2032 	bna_tx_destroy(tx_info->tx);
2033 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2034 	tx_info->tx = NULL;
2035 	tx_info->tx_id = 0;
2036 err_return:
2037 	bnad_tx_res_free(bnad, res_info);
2038 	return err;
2039 }
2040 
2041 /* Setup the rx config for bna_rx_create */
2042 /* bnad decides the configuration */
2043 static void
2044 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2045 {
2046 	memset(rx_config, 0, sizeof(*rx_config));
2047 	rx_config->rx_type = BNA_RX_T_REGULAR;
2048 	rx_config->num_paths = bnad->num_rxp_per_rx;
2049 	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2050 
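	/* With more than one Rx path, enable RSS: the Toeplitz hash spreads
	 * flows across the paths and hash_mask (num_rxp_per_rx - 1) selects
	 * the destination path from the hash result.
	 */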
2051 	if (bnad->num_rxp_per_rx > 1) {
2052 		rx_config->rss_status = BNA_STATUS_T_ENABLED;
2053 		rx_config->rss_config.hash_type =
2054 				(BFI_ENET_RSS_IPV6 |
2055 				 BFI_ENET_RSS_IPV6_TCP |
2056 				 BFI_ENET_RSS_IPV4 |
2057 				 BFI_ENET_RSS_IPV4_TCP);
2058 		rx_config->rss_config.hash_mask =
2059 				bnad->num_rxp_per_rx - 1;
2060 		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2061 			sizeof(rx_config->rss_config.toeplitz_hash_key));
2062 	} else {
2063 		rx_config->rss_status = BNA_STATUS_T_DISABLED;
2064 		memset(&rx_config->rss_config, 0,
2065 		       sizeof(rx_config->rss_config));
2066 	}
2067 
2068 	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2069 	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2070 
2071 	/* BNA_RXP_SINGLE - one data-buffer queue
2072 	 * BNA_RXP_SLR - one small-buffer and one large-buffer queue
2073 	 * BNA_RXP_HDS - one header-buffer and one data-buffer queue
2074 	 */
2075 	/* TODO: configurable param for queue type */
2076 	rx_config->rxp_type = BNA_RXP_SLR;
2077 
2078 	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2079 	    rx_config->frame_size > 4096) {
2080 		/* Although size routing is enabled in SLR mode, small
2081 		 * packets may still get routed to the same rxq.
2082 		 * Set buf_size to 2048 instead of PAGE_SIZE.
2083 		 */
2084 		rx_config->q0_buf_size = 2048;
2085 		/* this should be a multiple of 2 */
2086 		rx_config->q0_num_vecs = 4;
2087 		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2088 		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2089 	} else {
2090 		rx_config->q0_buf_size = rx_config->frame_size;
2091 		rx_config->q0_num_vecs = 1;
2092 		rx_config->q0_depth = bnad->rxq_depth;
2093 	}
2094 
2095 	/* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2096 	if (rx_config->rxp_type == BNA_RXP_SLR) {
2097 		rx_config->q1_depth = bnad->rxq_depth;
2098 		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2099 	}
2100 
2101 	rx_config->vlan_strip_status =
2102 		(bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2103 		BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2104 }
2105 
2106 static void
2107 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2108 {
2109 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2110 	int i;
2111 
2112 	for (i = 0; i < bnad->num_rxp_per_rx; i++)
2113 		rx_info->rx_ctrl[i].bnad = bnad;
2114 }
2115 
2116 /* Called with mutex_lock(&bnad->conf_mutex) held */
2117 static u32
2118 bnad_reinit_rx(struct bnad *bnad)
2119 {
2120 	struct net_device *netdev = bnad->netdev;
2121 	u32 err = 0, current_err = 0;
2122 	u32 rx_id = 0, count = 0;
2123 	unsigned long flags;
2124 
2125 	/* destroy and create new rx objects */
2126 	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2127 		if (!bnad->rx_info[rx_id].rx)
2128 			continue;
2129 		bnad_destroy_rx(bnad, rx_id);
2130 	}
2131 
2132 	spin_lock_irqsave(&bnad->bna_lock, flags);
2133 	bna_enet_mtu_set(&bnad->bna.enet,
2134 			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2135 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2136 
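	/* Re-create the Rx objects; bnad_setup_rx() re-derives the frame
	 * size and buffer layout from the current MTU.
	 */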
2137 	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2138 		count++;
2139 		current_err = bnad_setup_rx(bnad, rx_id);
2140 		if (current_err && !err) {
2141 			err = current_err;
2142 			netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2143 		}
2144 	}
2145 
2146 	/* restore rx configuration */
2147 	if (bnad->rx_info[0].rx && !err) {
2148 		bnad_restore_vlans(bnad, 0);
2149 		bnad_enable_default_bcast(bnad);
2150 		spin_lock_irqsave(&bnad->bna_lock, flags);
2151 		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2152 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2153 		bnad_set_rx_mode(netdev);
2154 	}
2155 
2156 	return count;
2157 }
2158 
2159 /* Called with bnad_conf_lock() held */
2160 void
2161 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2162 {
2163 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2164 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2165 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2166 	unsigned long flags;
2167 	int to_del = 0;
2168 
2169 	if (!rx_info->rx)
2170 		return;
2171 
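	/* The DIM timer runs only for the default Rx (rx_id 0); clear the
	 * running flag under bna_lock, then stop the timer outside the lock.
	 */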
2172 	if (0 == rx_id) {
2173 		spin_lock_irqsave(&bnad->bna_lock, flags);
2174 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2175 		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2176 			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2177 			to_del = 1;
2178 		}
2179 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2180 		if (to_del)
2181 			del_timer_sync(&bnad->dim_timer);
2182 	}
2183 
2184 	init_completion(&bnad->bnad_completions.rx_comp);
2185 	spin_lock_irqsave(&bnad->bna_lock, flags);
2186 	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2187 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2188 	wait_for_completion(&bnad->bnad_completions.rx_comp);
2189 
2190 	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2191 		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2192 
2193 	bnad_napi_delete(bnad, rx_id);
2194 
2195 	spin_lock_irqsave(&bnad->bna_lock, flags);
2196 	bna_rx_destroy(rx_info->rx);
2197 
2198 	rx_info->rx = NULL;
2199 	rx_info->rx_id = 0;
2200 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2201 
2202 	bnad_rx_res_free(bnad, res_info);
2203 }
2204 
2205 /* Called with mutex_lock(&bnad->conf_mutex) held */
2206 int
2207 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2208 {
2209 	int err;
2210 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2211 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2212 	struct bna_intr_info *intr_info =
2213 			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2214 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2215 	static const struct bna_rx_event_cbfn rx_cbfn = {
2216 		.rcb_setup_cbfn = NULL,
2217 		.rcb_destroy_cbfn = NULL,
2218 		.ccb_setup_cbfn = bnad_cb_ccb_setup,
2219 		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2220 		.rx_stall_cbfn = bnad_cb_rx_stall,
2221 		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2222 		.rx_post_cbfn = bnad_cb_rx_post,
2223 	};
2224 	struct bna_rx *rx;
2225 	unsigned long flags;
2226 
2227 	rx_info->rx_id = rx_id;
2228 
2229 	/* Initialize the Rx object configuration */
2230 	bnad_init_rx_config(bnad, rx_config);
2231 
2232 	/* Get BNA's resource requirement for one Rx object */
2233 	spin_lock_irqsave(&bnad->bna_lock, flags);
2234 	bna_rx_res_req(rx_config, res_info);
2235 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2236 
2237 	/* Fill Unmap Q memory requirements */
2238 	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2239 				 rx_config->num_paths,
2240 			(rx_config->q0_depth *
2241 			 sizeof(struct bnad_rx_unmap)) +
2242 			 sizeof(struct bnad_rx_unmap_q));
2243 
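	/* A second (header/small-buffer) unmap queue is needed only for the
	 * two-queue Rx path types, i.e. anything other than BNA_RXP_SINGLE.
	 */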
2244 	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2245 		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2246 					 rx_config->num_paths,
2247 				(rx_config->q1_depth *
2248 				 sizeof(struct bnad_rx_unmap) +
2249 				 sizeof(struct bnad_rx_unmap_q)));
2250 	}
2251 	/* Allocate resource */
2252 	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2253 	if (err)
2254 		return err;
2255 
2256 	bnad_rx_ctrl_init(bnad, rx_id);
2257 
2258 	/* Ask BNA to create one Rx object, supplying required resources */
2259 	spin_lock_irqsave(&bnad->bna_lock, flags);
2260 	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2261 			rx_info);
2262 	if (!rx) {
2263 		err = -ENOMEM;
2264 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2265 		goto err_return;
2266 	}
2267 	rx_info->rx = rx;
2268 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2269 
2270 	INIT_WORK(&rx_info->rx_cleanup_work,
2271 			(work_func_t)(bnad_rx_cleanup));
2272 
2273 	/*
2274 	 * Init NAPI, so that its state is set to NAPI_STATE_SCHED and
2275 	 * the IRQ handler cannot schedule NAPI at this point.
2276 	 */
2277 	bnad_napi_add(bnad, rx_id);
2278 
2279 	/* Register ISR for the Rx object */
2280 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2281 		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2282 						rx_config->num_paths);
2283 		if (err)
2284 			goto err_return;
2285 	}
2286 
2287 	spin_lock_irqsave(&bnad->bna_lock, flags);
2288 	if (0 == rx_id) {
2289 		/* Set up Dynamic Interrupt Moderation Vector */
2290 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2291 			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2292 
2293 		/* Enable VLAN filtering only on the default Rx */
2294 		bna_rx_vlanfilter_enable(rx);
2295 
2296 		/* Start the DIM timer */
2297 		bnad_dim_timer_start(bnad);
2298 	}
2299 
2300 	bna_rx_enable(rx);
2301 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2302 
2303 	return 0;
2304 
2305 err_return:
2306 	bnad_destroy_rx(bnad, rx_id);
2307 	return err;
2308 }
2309 
2310 /* Called with conf_lock & bnad->bna_lock held */
2311 void
2312 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2313 {
2314 	struct bnad_tx_info *tx_info;
2315 
2316 	tx_info = &bnad->tx_info[0];
2317 	if (!tx_info->tx)
2318 		return;
2319 
2320 	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2321 }
2322 
2323 /* Called with conf_lock & bnad->bna_lock held */
2324 void
2325 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2326 {
2327 	struct bnad_rx_info *rx_info;
2328 	int	i;
2329 
2330 	for (i = 0; i < bnad->num_rx; i++) {
2331 		rx_info = &bnad->rx_info[i];
2332 		if (!rx_info->rx)
2333 			continue;
2334 		bna_rx_coalescing_timeo_set(rx_info->rx,
2335 				bnad->rx_coalescing_timeo);
2336 	}
2337 }
2338 
2339 /*
2340  * Called with bnad->bna_lock held
2341  */
2342 int
2343 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2344 {
2345 	int ret;
2346 
2347 	if (!is_valid_ether_addr(mac_addr))
2348 		return -EADDRNOTAVAIL;
2349 
2350 	/* If datapath is down, pretend everything went through */
2351 	if (!bnad->rx_info[0].rx)
2352 		return 0;
2353 
2354 	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2355 	if (ret != BNA_CB_SUCCESS)
2356 		return -EADDRNOTAVAIL;
2357 
2358 	return 0;
2359 }
2360 
2361 /* Should be called with conf_lock held */
2362 int
2363 bnad_enable_default_bcast(struct bnad *bnad)
2364 {
2365 	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2366 	int ret;
2367 	unsigned long flags;
2368 
2369 	init_completion(&bnad->bnad_completions.mcast_comp);
2370 
2371 	spin_lock_irqsave(&bnad->bna_lock, flags);
2372 	ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2373 			       bnad_cb_rx_mcast_add);
2374 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2375 
2376 	if (ret == BNA_CB_SUCCESS)
2377 		wait_for_completion(&bnad->bnad_completions.mcast_comp);
2378 	else
2379 		return -ENODEV;
2380 
2381 	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2382 		return -ENODEV;
2383 
2384 	return 0;
2385 }
2386 
2387 /* Called with mutex_lock(&bnad->conf_mutex) held */
2388 void
2389 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2390 {
2391 	u16 vid;
2392 	unsigned long flags;
2393 
2394 	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2395 		spin_lock_irqsave(&bnad->bna_lock, flags);
2396 		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2397 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2398 	}
2399 }
2400 
2401 /* Statistics utilities */
2402 void
2403 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2404 {
2405 	int i, j;
2406 
2407 	for (i = 0; i < bnad->num_rx; i++) {
2408 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2409 			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2410 				stats->rx_packets += bnad->rx_info[i].
2411 				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2412 				stats->rx_bytes += bnad->rx_info[i].
2413 					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2414 				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2415 					bnad->rx_info[i].rx_ctrl[j].ccb->
2416 					rcb[1]->rxq) {
2417 					stats->rx_packets +=
2418 						bnad->rx_info[i].rx_ctrl[j].
2419 						ccb->rcb[1]->rxq->rx_packets;
2420 					stats->rx_bytes +=
2421 						bnad->rx_info[i].rx_ctrl[j].
2422 						ccb->rcb[1]->rxq->rx_bytes;
2423 				}
2424 			}
2425 		}
2426 	}
2427 	for (i = 0; i < bnad->num_tx; i++) {
2428 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
2429 			if (bnad->tx_info[i].tcb[j]) {
2430 				stats->tx_packets +=
2431 				bnad->tx_info[i].tcb[j]->txq->tx_packets;
2432 				stats->tx_bytes +=
2433 					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2434 			}
2435 		}
2436 	}
2437 }
2438 
2439 /*
2440  * Must be called with the bna_lock held.
2441  */
2442 void
2443 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2444 {
2445 	struct bfi_enet_stats_mac *mac_stats;
2446 	u32 bmap;
2447 	int i;
2448 
2449 	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2450 	stats->rx_errors =
2451 		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2452 		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2453 		mac_stats->rx_undersize;
2454 	stats->tx_errors = mac_stats->tx_fcs_error +
2455 					mac_stats->tx_undersize;
2456 	stats->rx_dropped = mac_stats->rx_drop;
2457 	stats->tx_dropped = mac_stats->tx_drop;
2458 	stats->multicast = mac_stats->rx_multicast;
2459 	stats->collisions = mac_stats->tx_total_collision;
2460 
2461 	stats->rx_length_errors = mac_stats->rx_frame_length_error;
2462 
2463 	/* receive ring buffer overflow  ?? */
2464 
2465 	stats->rx_crc_errors = mac_stats->rx_fcs_error;
2466 	stats->rx_frame_errors = mac_stats->rx_alignment_error;
2467 	/* receiver FIFO overrun */
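	/* Only the first active Rx function contributes to rx_fifo_errors;
	 * the loop breaks after the first set bit in the rid mask.
	 */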
2468 	bmap = bna_rx_rid_mask(&bnad->bna);
2469 	for (i = 0; bmap; i++) {
2470 		if (bmap & 1) {
2471 			stats->rx_fifo_errors +=
2472 				bnad->stats.bna_stats->
2473 					hw_stats.rxf_stats[i].frame_drops;
2474 			break;
2475 		}
2476 		bmap >>= 1;
2477 	}
2478 }
2479 
2480 static void
2481 bnad_mbox_irq_sync(struct bnad *bnad)
2482 {
2483 	u32 irq;
2484 	unsigned long flags;
2485 
2486 	spin_lock_irqsave(&bnad->bna_lock, flags);
2487 	if (bnad->cfg_flags & BNAD_CF_MSIX)
2488 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2489 	else
2490 		irq = bnad->pcidev->irq;
2491 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2492 
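	/* Wait for any in-flight mailbox interrupt handler to finish. */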
2493 	synchronize_irq(irq);
2494 }
2495 
2496 /* Utility used by bnad_start_xmit, for doing TSO */
2497 static int
2498 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2499 {
2500 	int err;
2501 
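	/* Make sure the headers are writable (and not shared) before the
	 * checksum fields are modified below.
	 */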
2502 	err = skb_cow_head(skb, 0);
2503 	if (err < 0) {
2504 		BNAD_UPDATE_CTR(bnad, tso_err);
2505 		return err;
2506 	}
2507 
2508 	/*
2509 	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2510 	 * excluding the length field.
2511 	 */
2512 	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2513 		struct iphdr *iph = ip_hdr(skb);
2514 
2515 		/* Do we really need these? */
2516 		iph->tot_len = 0;
2517 		iph->check = 0;
2518 
2519 		tcp_hdr(skb)->check =
2520 			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2521 					   IPPROTO_TCP, 0);
2522 		BNAD_UPDATE_CTR(bnad, tso4);
2523 	} else {
2524 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2525 
2526 		ipv6h->payload_len = 0;
2527 		tcp_hdr(skb)->check =
2528 			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2529 					 IPPROTO_TCP, 0);
2530 		BNAD_UPDATE_CTR(bnad, tso6);
2531 	}
2532 
2533 	return 0;
2534 }
2535 
2536 /*
2537  * Initialize Q numbers depending on Rx Paths
2538  * Called with bnad->bna_lock held, because of cfg_flags
2539  * access.
2540  */
2541 static void
2542 bnad_q_num_init(struct bnad *bnad)
2543 {
2544 	int rxps;
2545 
2546 	rxps = min((uint)num_online_cpus(),
2547 			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2548 
2549 	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2550 		rxps = 1;	/* INTx */
2551 
2552 	bnad->num_rx = 1;
2553 	bnad->num_tx = 1;
2554 	bnad->num_rxp_per_rx = rxps;
2555 	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2556 }
2557 
2558 /*
2559  * Adjusts the Q numbers, given a number of MSI-X vectors.
2560  * Give preference to RSS as opposed to Tx priority queues;
2561  * in such a case, just use one Tx Q.
2562  * Called with bnad->bna_lock held because of cfg_flags access.
2563  */
2564 static void
2565 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2566 {
2567 	bnad->num_txq_per_tx = 1;
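	/* Hand the vectors left over after the Tx queues and the mailbox
	 * vector to the Rx paths; otherwise fall back to a single Rx path.
	 */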
2568 	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2569 	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2570 	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
2571 		bnad->num_rxp_per_rx = msix_vectors -
2572 			(bnad->num_tx * bnad->num_txq_per_tx) -
2573 			BNAD_MAILBOX_MSIX_VECTORS;
2574 	} else
2575 		bnad->num_rxp_per_rx = 1;
2576 }
2577 
2578 /* Enable / disable ioceth */
2579 static int
2580 bnad_ioceth_disable(struct bnad *bnad)
2581 {
2582 	unsigned long flags;
2583 	int err = 0;
2584 
2585 	spin_lock_irqsave(&bnad->bna_lock, flags);
2586 	init_completion(&bnad->bnad_completions.ioc_comp);
2587 	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2588 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2589 
2590 	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2591 		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2592 
2593 	err = bnad->bnad_completions.ioc_comp_status;
2594 	return err;
2595 }
2596 
2597 static int
2598 bnad_ioceth_enable(struct bnad *bnad)
2599 {
2600 	int err = 0;
2601 	unsigned long flags;
2602 
2603 	spin_lock_irqsave(&bnad->bna_lock, flags);
2604 	init_completion(&bnad->bnad_completions.ioc_comp);
2605 	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2606 	bna_ioceth_enable(&bnad->bna.ioceth);
2607 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2608 
2609 	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2610 		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2611 
2612 	err = bnad->bnad_completions.ioc_comp_status;
2613 
2614 	return err;
2615 }
2616 
2617 /* Free BNA resources */
2618 static void
2619 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2620 		u32 res_val_max)
2621 {
2622 	int i;
2623 
2624 	for (i = 0; i < res_val_max; i++)
2625 		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2626 }
2627 
2628 /* Allocates memory and interrupt resources for BNA */
2629 static int
2630 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2631 		u32 res_val_max)
2632 {
2633 	int i, err;
2634 
2635 	for (i = 0; i < res_val_max; i++) {
2636 		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2637 		if (err)
2638 			goto err_return;
2639 	}
2640 	return 0;
2641 
2642 err_return:
2643 	bnad_res_free(bnad, res_info, res_val_max);
2644 	return err;
2645 }
2646 
2647 /* Interrupt enable / disable */
2648 static void
2649 bnad_enable_msix(struct bnad *bnad)
2650 {
2651 	int i, ret;
2652 	unsigned long flags;
2653 
2654 	spin_lock_irqsave(&bnad->bna_lock, flags);
2655 	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2656 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2657 		return;
2658 	}
2659 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2660 
2661 	if (bnad->msix_table)
2662 		return;
2663 
2664 	bnad->msix_table =
2665 		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2666 
2667 	if (!bnad->msix_table)
2668 		goto intx_mode;
2669 
2670 	for (i = 0; i < bnad->msix_num; i++)
2671 		bnad->msix_table[i].entry = i;
2672 
2673 	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2674 				    1, bnad->msix_num);
2675 	if (ret < 0) {
2676 		goto intx_mode;
2677 	} else if (ret < bnad->msix_num) {
2678 		dev_warn(&bnad->pcidev->dev,
2679 			 "%d MSI-X vectors allocated < %d requested\n",
2680 			 ret, bnad->msix_num);
2681 
2682 		spin_lock_irqsave(&bnad->bna_lock, flags);
2683 		/* ret = #of vectors that we got */
2684 		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2685 			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2686 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2687 
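		/* Recompute the vector count for the reduced queue
		 * configuration; if even that exceeds what was allocated,
		 * fall back to INTx mode.
		 */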
2688 		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2689 			 BNAD_MAILBOX_MSIX_VECTORS;
2690 
2691 		if (bnad->msix_num > ret) {
2692 			pci_disable_msix(bnad->pcidev);
2693 			goto intx_mode;
2694 		}
2695 	}
2696 
2697 	pci_intx(bnad->pcidev, 0);
2698 
2699 	return;
2700 
2701 intx_mode:
2702 	dev_warn(&bnad->pcidev->dev,
2703 		 "MSI-X enable failed - operating in INTx mode\n");
2704 
2705 	kfree(bnad->msix_table);
2706 	bnad->msix_table = NULL;
2707 	bnad->msix_num = 0;
2708 	spin_lock_irqsave(&bnad->bna_lock, flags);
2709 	bnad->cfg_flags &= ~BNAD_CF_MSIX;
2710 	bnad_q_num_init(bnad);
2711 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2712 }
2713 
2714 static void
2715 bnad_disable_msix(struct bnad *bnad)
2716 {
2717 	u32 cfg_flags;
2718 	unsigned long flags;
2719 
2720 	spin_lock_irqsave(&bnad->bna_lock, flags);
2721 	cfg_flags = bnad->cfg_flags;
2722 	if (bnad->cfg_flags & BNAD_CF_MSIX)
2723 		bnad->cfg_flags &= ~BNAD_CF_MSIX;
2724 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2725 
2726 	if (cfg_flags & BNAD_CF_MSIX) {
2727 		pci_disable_msix(bnad->pcidev);
2728 		kfree(bnad->msix_table);
2729 		bnad->msix_table = NULL;
2730 	}
2731 }
2732 
2733 /* Netdev entry points */
2734 static int
2735 bnad_open(struct net_device *netdev)
2736 {
2737 	int err;
2738 	struct bnad *bnad = netdev_priv(netdev);
2739 	struct bna_pause_config pause_config;
2740 	unsigned long flags;
2741 
2742 	mutex_lock(&bnad->conf_mutex);
2743 
2744 	/* Tx */
2745 	err = bnad_setup_tx(bnad, 0);
2746 	if (err)
2747 		goto err_return;
2748 
2749 	/* Rx */
2750 	err = bnad_setup_rx(bnad, 0);
2751 	if (err)
2752 		goto cleanup_tx;
2753 
2754 	/* Port */
2755 	pause_config.tx_pause = 0;
2756 	pause_config.rx_pause = 0;
2757 
2758 	spin_lock_irqsave(&bnad->bna_lock, flags);
2759 	bna_enet_mtu_set(&bnad->bna.enet,
2760 			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2761 	bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2762 	bna_enet_enable(&bnad->bna.enet);
2763 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2764 
2765 	/* Enable broadcast */
2766 	bnad_enable_default_bcast(bnad);
2767 
2768 	/* Restore VLANs, if any */
2769 	bnad_restore_vlans(bnad, 0);
2770 
2771 	/* Set the UCAST address */
2772 	spin_lock_irqsave(&bnad->bna_lock, flags);
2773 	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2774 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2775 
2776 	/* Start the stats timer */
2777 	bnad_stats_timer_start(bnad);
2778 
2779 	mutex_unlock(&bnad->conf_mutex);
2780 
2781 	return 0;
2782 
2783 cleanup_tx:
2784 	bnad_destroy_tx(bnad, 0);
2785 
2786 err_return:
2787 	mutex_unlock(&bnad->conf_mutex);
2788 	return err;
2789 }
2790 
2791 static int
2792 bnad_stop(struct net_device *netdev)
2793 {
2794 	struct bnad *bnad = netdev_priv(netdev);
2795 	unsigned long flags;
2796 
2797 	mutex_lock(&bnad->conf_mutex);
2798 
2799 	/* Stop the stats timer */
2800 	bnad_stats_timer_stop(bnad);
2801 
2802 	init_completion(&bnad->bnad_completions.enet_comp);
2803 
2804 	spin_lock_irqsave(&bnad->bna_lock, flags);
2805 	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2806 			bnad_cb_enet_disabled);
2807 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2808 
2809 	wait_for_completion(&bnad->bnad_completions.enet_comp);
2810 
2811 	bnad_destroy_tx(bnad, 0);
2812 	bnad_destroy_rx(bnad, 0);
2813 
2814 	/* Synchronize mailbox IRQ */
2815 	bnad_mbox_irq_sync(bnad);
2816 
2817 	mutex_unlock(&bnad->conf_mutex);
2818 
2819 	return 0;
2820 }
2821 
2822 /* TX */
2823 /* Returns 0 for success */
2824 static int
2825 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2826 		    struct sk_buff *skb, struct bna_txq_entry *txqent)
2827 {
2828 	u16 flags = 0;
2829 	u32 gso_size;
2830 	u16 vlan_tag = 0;
2831 
2832 	if (skb_vlan_tag_present(skb)) {
2833 		vlan_tag = (u16)skb_vlan_tag_get(skb);
2834 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2835 	}
2836 	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2837 		vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2838 				| (vlan_tag & 0x1fff);
2839 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2840 	}
2841 	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2842 
2843 	if (skb_is_gso(skb)) {
2844 		gso_size = skb_shinfo(skb)->gso_size;
2845 		if (unlikely(gso_size > bnad->netdev->mtu)) {
2846 			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2847 			return -EINVAL;
2848 		}
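		/* If the whole payload fits in a single segment, send it as
		 * a regular (non-LSO) work item.
		 */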
2849 		if (unlikely((gso_size + skb_transport_offset(skb) +
2850 			      tcp_hdrlen(skb)) >= skb->len)) {
2851 			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2852 			txqent->hdr.wi.lso_mss = 0;
2853 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2854 		} else {
2855 			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2856 			txqent->hdr.wi.lso_mss = htons(gso_size);
2857 		}
2858 
2859 		if (bnad_tso_prepare(bnad, skb)) {
2860 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2861 			return -EINVAL;
2862 		}
2863 
2864 		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2865 		txqent->hdr.wi.l4_hdr_size_n_offset =
2866 			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2867 			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2868 	} else {
2869 		txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2870 		txqent->hdr.wi.lso_mss = 0;
2871 
2872 		if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2873 			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2874 			return -EINVAL;
2875 		}
2876 
2877 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2878 			__be16 net_proto = vlan_get_protocol(skb);
2879 			u8 proto = 0;
2880 
2881 			if (net_proto == htons(ETH_P_IP))
2882 				proto = ip_hdr(skb)->protocol;
2883 #ifdef NETIF_F_IPV6_CSUM
2884 			else if (net_proto == htons(ETH_P_IPV6)) {
2885 				/* nexthdr may not be TCP if extension headers are present. */
2886 				proto = ipv6_hdr(skb)->nexthdr;
2887 			}
2888 #endif
2889 			if (proto == IPPROTO_TCP) {
2890 				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2891 				txqent->hdr.wi.l4_hdr_size_n_offset =
2892 					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2893 					      (0, skb_transport_offset(skb)));
2894 
2895 				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2896 
2897 				if (unlikely(skb_headlen(skb) <
2898 					    skb_transport_offset(skb) +
2899 				    tcp_hdrlen(skb))) {
2900 					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2901 					return -EINVAL;
2902 				}
2903 			} else if (proto == IPPROTO_UDP) {
2904 				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2905 				txqent->hdr.wi.l4_hdr_size_n_offset =
2906 					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2907 					      (0, skb_transport_offset(skb)));
2908 
2909 				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2910 				if (unlikely(skb_headlen(skb) <
2911 					    skb_transport_offset(skb) +
2912 				    sizeof(struct udphdr))) {
2913 					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2914 					return -EINVAL;
2915 				}
2916 			} else {
2917 
2918 				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2919 				return -EINVAL;
2920 			}
2921 		} else
2922 			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2923 	}
2924 
2925 	txqent->hdr.wi.flags = htons(flags);
2926 	txqent->hdr.wi.frame_length = htonl(skb->len);
2927 
2928 	return 0;
2929 }
2930 
2931 /*
2932  * bnad_start_xmit : Netdev entry point for Transmit
2933  *		     Called under lock held by net_device
2934  */
2935 static netdev_tx_t
2936 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2937 {
2938 	struct bnad *bnad = netdev_priv(netdev);
2939 	u32 txq_id = 0;
2940 	struct bna_tcb *tcb = NULL;
2941 	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2942 	u32		prod, q_depth, vect_id;
2943 	u32		wis, vectors, len;
2944 	int		i;
2945 	dma_addr_t		dma_addr;
2946 	struct bna_txq_entry *txqent;
2947 
2948 	len = skb_headlen(skb);
2949 
2950 	/* Sanity checks for the skb */
2951 
2952 	if (unlikely(skb->len <= ETH_HLEN)) {
2953 		dev_kfree_skb_any(skb);
2954 		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2955 		return NETDEV_TX_OK;
2956 	}
2957 	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2958 		dev_kfree_skb_any(skb);
2959 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2960 		return NETDEV_TX_OK;
2961 	}
2962 	if (unlikely(len == 0)) {
2963 		dev_kfree_skb_any(skb);
2964 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2965 		return NETDEV_TX_OK;
2966 	}
2967 
2968 	tcb = bnad->tx_info[0].tcb[txq_id];
2969 
2970 	/*
2971 	 * Takes care of the Tx that is scheduled between clearing the flag
2972 	 * and the netif_tx_stop_all_queues() call.
2973 	 */
2974 	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2975 		dev_kfree_skb_any(skb);
2976 		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2977 		return NETDEV_TX_OK;
2978 	}
2979 
2980 	q_depth = tcb->q_depth;
2981 	prod = tcb->producer_index;
2982 	unmap_q = tcb->unmap_q;
2983 
2984 	vectors = 1 + skb_shinfo(skb)->nr_frags;
2985 	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
2986 
2987 	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2988 		dev_kfree_skb_any(skb);
2989 		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2990 		return NETDEV_TX_OK;
2991 	}
2992 
2993 	/* Check for available TxQ resources */
2994 	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
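		/* Try to reclaim completed descriptors inline before
		 * stopping the queue.
		 */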
2995 		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2996 		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2997 			u32 sent;
2998 			sent = bnad_txcmpl_process(bnad, tcb);
2999 			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3000 				bna_ib_ack(tcb->i_dbell, sent);
3001 			smp_mb__before_atomic();
3002 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
3003 		} else {
3004 			netif_stop_queue(netdev);
3005 			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3006 		}
3007 
3008 		smp_mb();
3009 		/*
3010 		 * Check again to deal with race condition between
3011 		 * netif_stop_queue here, and netif_wake_queue in
3012 		 * interrupt handler which is not inside netif tx lock.
3013 		 */
3014 		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3015 			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3016 			return NETDEV_TX_BUSY;
3017 		} else {
3018 			netif_wake_queue(netdev);
3019 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3020 		}
3021 	}
3022 
3023 	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3024 	head_unmap = &unmap_q[prod];
3025 
3026 	/* Program the opcode, flags, frame_len, num_vectors in WI */
3027 	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3028 		dev_kfree_skb_any(skb);
3029 		return NETDEV_TX_OK;
3030 	}
3031 	txqent->hdr.wi.reserved = 0;
3032 	txqent->hdr.wi.num_vectors = vectors;
3033 
3034 	head_unmap->skb = skb;
3035 	head_unmap->nvecs = 0;
3036 
3037 	/* Program the vectors */
3038 	unmap = head_unmap;
3039 	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3040 				  len, DMA_TO_DEVICE);
3041 	if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3042 		dev_kfree_skb_any(skb);
3043 		BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3044 		return NETDEV_TX_OK;
3045 	}
3046 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3047 	txqent->vector[0].length = htons(len);
3048 	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3049 	head_unmap->nvecs++;
3050 
3051 	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3052 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3053 		u32		size = skb_frag_size(frag);
3054 
3055 		if (unlikely(size == 0)) {
3056 			/* Undo the changes starting at tcb->producer_index */
3057 			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3058 				tcb->producer_index);
3059 			dev_kfree_skb_any(skb);
3060 			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3061 			return NETDEV_TX_OK;
3062 		}
3063 
3064 		len += size;
3065 
3066 		vect_id++;
3067 		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3068 			vect_id = 0;
3069 			BNA_QE_INDX_INC(prod, q_depth);
3070 			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3071 			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3072 			unmap = &unmap_q[prod];
3073 		}
3074 
3075 		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3076 					    0, size, DMA_TO_DEVICE);
3077 		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3078 			/* Undo the changes starting at tcb->producer_index */
3079 			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3080 					   tcb->producer_index);
3081 			dev_kfree_skb_any(skb);
3082 			BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3083 			return NETDEV_TX_OK;
3084 		}
3085 
3086 		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3087 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3088 		txqent->vector[vect_id].length = htons(size);
3089 		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3090 				   dma_addr);
3091 		head_unmap->nvecs++;
3092 	}
3093 
3094 	if (unlikely(len != skb->len)) {
3095 		/* Undo the changes starting at tcb->producer_index */
3096 		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3097 		dev_kfree_skb_any(skb);
3098 		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3099 		return NETDEV_TX_OK;
3100 	}
3101 
3102 	BNA_QE_INDX_INC(prod, q_depth);
3103 	tcb->producer_index = prod;
3104 
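	/* Make the producer index update visible before re-checking the
	 * TX_STARTED flag and ringing the doorbell.
	 */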
3105 	smp_mb();
3106 
3107 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3108 		return NETDEV_TX_OK;
3109 
3110 	skb_tx_timestamp(skb);
3111 
3112 	bna_txq_prod_indx_doorbell(tcb);
3113 	smp_mb();
3114 
3115 	return NETDEV_TX_OK;
3116 }
3117 
3118 /*
3119  * Use spin_lock to synchronize reading of the stats structures, which
3120  * are written by BNA under the same lock.
3121  */
3122 static struct rtnl_link_stats64 *
3123 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3124 {
3125 	struct bnad *bnad = netdev_priv(netdev);
3126 	unsigned long flags;
3127 
3128 	spin_lock_irqsave(&bnad->bna_lock, flags);
3129 
3130 	bnad_netdev_qstats_fill(bnad, stats);
3131 	bnad_netdev_hwstats_fill(bnad, stats);
3132 
3133 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3134 
3135 	return stats;
3136 }
3137 
3138 static void
3139 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3140 {
3141 	struct net_device *netdev = bnad->netdev;
3142 	int uc_count = netdev_uc_count(netdev);
3143 	enum bna_cb_status ret;
3144 	u8 *mac_list;
3145 	struct netdev_hw_addr *ha;
3146 	int entry;
3147 
3148 	if (netdev_uc_empty(bnad->netdev)) {
3149 		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3150 		return;
3151 	}
3152 
3153 	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3154 		goto mode_default;
3155 
3156 	mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3157 	if (mac_list == NULL)
3158 		goto mode_default;
3159 
3160 	entry = 0;
3161 	netdev_for_each_uc_addr(ha, netdev) {
3162 		ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3163 		entry++;
3164 	}
3165 
3166 	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3167 	kfree(mac_list);
3168 
3169 	if (ret != BNA_CB_SUCCESS)
3170 		goto mode_default;
3171 
3172 	return;
3173 
3174 	/* ucast packets not in UCAM are routed to default function */
3175 mode_default:
3176 	bnad->cfg_flags |= BNAD_CF_DEFAULT;
3177 	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3178 }
3179 
3180 static void
3181 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3182 {
3183 	struct net_device *netdev = bnad->netdev;
3184 	int mc_count = netdev_mc_count(netdev);
3185 	enum bna_cb_status ret;
3186 	u8 *mac_list;
3187 
3188 	if (netdev->flags & IFF_ALLMULTI)
3189 		goto mode_allmulti;
3190 
3191 	if (netdev_mc_empty(netdev))
3192 		return;
3193 
3194 	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3195 		goto mode_allmulti;
3196 
3197 	mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3198 
3199 	if (mac_list == NULL)
3200 		goto mode_allmulti;
3201 
3202 	ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3203 
3204 	/* copy rest of the MCAST addresses */
3205 	bnad_netdev_mc_list_get(netdev, mac_list);
3206 	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3207 	kfree(mac_list);
3208 
3209 	if (ret != BNA_CB_SUCCESS)
3210 		goto mode_allmulti;
3211 
3212 	return;
3213 
3214 mode_allmulti:
3215 	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3216 	bna_rx_mcast_delall(bnad->rx_info[0].rx);
3217 }
3218 
3219 void
3220 bnad_set_rx_mode(struct net_device *netdev)
3221 {
3222 	struct bnad *bnad = netdev_priv(netdev);
3223 	enum bna_rxmode new_mode, mode_mask;
3224 	unsigned long flags;
3225 
3226 	spin_lock_irqsave(&bnad->bna_lock, flags);
3227 
3228 	if (bnad->rx_info[0].rx == NULL) {
3229 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
3230 		return;
3231 	}
3232 
3233 	/* clear bnad flags to update it with new settings */
3234 	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3235 			BNAD_CF_ALLMULTI);
3236 
3237 	new_mode = 0;
3238 	if (netdev->flags & IFF_PROMISC) {
3239 		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3240 		bnad->cfg_flags |= BNAD_CF_PROMISC;
3241 	} else {
3242 		bnad_set_rx_mcast_fltr(bnad);
3243 
3244 		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3245 			new_mode |= BNA_RXMODE_ALLMULTI;
3246 
3247 		bnad_set_rx_ucast_fltr(bnad);
3248 
3249 		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3250 			new_mode |= BNA_RXMODE_DEFAULT;
3251 	}
3252 
3253 	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3254 			BNA_RXMODE_ALLMULTI;
3255 	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3256 
3257 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3258 }
3259 
3260 /*
3261  * bna_lock is used to sync writes to netdev->addr
3262  * conf_lock cannot be used since this call may be made
3263  * in a non-blocking context.
3264  */
3265 static int
3266 bnad_set_mac_address(struct net_device *netdev, void *addr)
3267 {
3268 	int err;
3269 	struct bnad *bnad = netdev_priv(netdev);
3270 	struct sockaddr *sa = (struct sockaddr *)addr;
3271 	unsigned long flags;
3272 
3273 	spin_lock_irqsave(&bnad->bna_lock, flags);
3274 
3275 	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3276 	if (!err)
3277 		ether_addr_copy(netdev->dev_addr, sa->sa_data);
3278 
3279 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3280 
3281 	return err;
3282 }
3283 
3284 static int
3285 bnad_mtu_set(struct bnad *bnad, int frame_size)
3286 {
3287 	unsigned long flags;
3288 
3289 	init_completion(&bnad->bnad_completions.mtu_comp);
3290 
3291 	spin_lock_irqsave(&bnad->bna_lock, flags);
3292 	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3293 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3294 
3295 	wait_for_completion(&bnad->bnad_completions.mtu_comp);
3296 
3297 	return bnad->bnad_completions.mtu_comp_status;
3298 }
3299 
3300 static int
3301 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3302 {
3303 	int err, mtu;
3304 	struct bnad *bnad = netdev_priv(netdev);
3305 	u32 rx_count = 0, frame, new_frame;
3306 
3307 	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3308 		return -EINVAL;
3309 
3310 	mutex_lock(&bnad->conf_mutex);
3311 
3312 	mtu = netdev->mtu;
3313 	netdev->mtu = new_mtu;
3314 
3315 	frame = BNAD_FRAME_SIZE(mtu);
3316 	new_frame = BNAD_FRAME_SIZE(new_mtu);
3317 
3318 	/* check if multi-buffer needs to be enabled */
3319 	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3320 	    netif_running(bnad->netdev)) {
3321 		/* only when transition is over 4K */
3322 		if ((frame <= 4096 && new_frame > 4096) ||
3323 		    (frame > 4096 && new_frame <= 4096))
3324 			rx_count = bnad_reinit_rx(bnad);
3325 	}
3326 
3327 	/* rx_count > 0 - new rx objects created;
3328 	 *	Linux sets err = 0 and returns.
3329 	 */
3330 	err = bnad_mtu_set(bnad, new_frame);
3331 	if (err)
3332 		err = -EBUSY;
3333 
3334 	mutex_unlock(&bnad->conf_mutex);
3335 	return err;
3336 }
3337 
3338 static int
3339 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3340 {
3341 	struct bnad *bnad = netdev_priv(netdev);
3342 	unsigned long flags;
3343 
3344 	if (!bnad->rx_info[0].rx)
3345 		return 0;
3346 
3347 	mutex_lock(&bnad->conf_mutex);
3348 
3349 	spin_lock_irqsave(&bnad->bna_lock, flags);
3350 	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3351 	set_bit(vid, bnad->active_vlans);
3352 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3353 
3354 	mutex_unlock(&bnad->conf_mutex);
3355 
3356 	return 0;
3357 }
3358 
3359 static int
3360 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3361 {
3362 	struct bnad *bnad = netdev_priv(netdev);
3363 	unsigned long flags;
3364 
3365 	if (!bnad->rx_info[0].rx)
3366 		return 0;
3367 
3368 	mutex_lock(&bnad->conf_mutex);
3369 
3370 	spin_lock_irqsave(&bnad->bna_lock, flags);
3371 	clear_bit(vid, bnad->active_vlans);
3372 	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3373 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3374 
3375 	mutex_unlock(&bnad->conf_mutex);
3376 
3377 	return 0;
3378 }
3379 
3380 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3381 {
3382 	struct bnad *bnad = netdev_priv(dev);
3383 	netdev_features_t changed = features ^ dev->features;
3384 
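	/* Only the VLAN RX-strip (CTAG_RX) toggle requires reprogramming
	 * the hardware here.
	 */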
3385 	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3386 		unsigned long flags;
3387 
3388 		spin_lock_irqsave(&bnad->bna_lock, flags);
3389 
3390 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
3391 			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3392 		else
3393 			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3394 
3395 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
3396 	}
3397 
3398 	return 0;
3399 }
3400 
3401 #ifdef CONFIG_NET_POLL_CONTROLLER
3402 static void
3403 bnad_netpoll(struct net_device *netdev)
3404 {
3405 	struct bnad *bnad = netdev_priv(netdev);
3406 	struct bnad_rx_info *rx_info;
3407 	struct bnad_rx_ctrl *rx_ctrl;
3408 	u32 curr_mask;
3409 	int i, j;
3410 
3411 	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3412 		bna_intx_disable(&bnad->bna, curr_mask);
3413 		bnad_isr(bnad->pcidev->irq, netdev);
3414 		bna_intx_enable(&bnad->bna, curr_mask);
3415 	} else {
3416 		/*
3417 		 * Tx processing may happen in sending context, so no need
3418 		 * to explicitly process completions here
3419 		 */
3420 
3421 		/* Rx processing */
3422 		for (i = 0; i < bnad->num_rx; i++) {
3423 			rx_info = &bnad->rx_info[i];
3424 			if (!rx_info->rx)
3425 				continue;
3426 			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3427 				rx_ctrl = &rx_info->rx_ctrl[j];
3428 				if (rx_ctrl->ccb)
3429 					bnad_netif_rx_schedule_poll(bnad,
3430 							    rx_ctrl->ccb);
3431 			}
3432 		}
3433 	}
3434 }
3435 #endif
3436 
3437 static const struct net_device_ops bnad_netdev_ops = {
3438 	.ndo_open		= bnad_open,
3439 	.ndo_stop		= bnad_stop,
3440 	.ndo_start_xmit		= bnad_start_xmit,
3441 	.ndo_get_stats64		= bnad_get_stats64,
3442 	.ndo_set_rx_mode	= bnad_set_rx_mode,
3443 	.ndo_validate_addr      = eth_validate_addr,
3444 	.ndo_set_mac_address    = bnad_set_mac_address,
3445 	.ndo_change_mtu		= bnad_change_mtu,
3446 	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3447 	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3448 	.ndo_set_features	= bnad_set_features,
3449 #ifdef CONFIG_NET_POLL_CONTROLLER
3450 	.ndo_poll_controller    = bnad_netpoll
3451 #endif
3452 };
3453 
3454 static void
3455 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3456 {
3457 	struct net_device *netdev = bnad->netdev;
3458 
3459 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3460 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3461 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3462 		NETIF_F_HW_VLAN_CTAG_RX;
3463 
3464 	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3465 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3466 		NETIF_F_TSO | NETIF_F_TSO6;
3467 
3468 	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3469 
3470 	if (using_dac)
3471 		netdev->features |= NETIF_F_HIGHDMA;
3472 
3473 	netdev->mem_start = bnad->mmio_start;
3474 	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3475 
3476 	netdev->netdev_ops = &bnad_netdev_ops;
3477 	bnad_set_ethtool_ops(netdev);
3478 }
3479 
3480 /*
3481  * 1. Initialize the bnad structure
3482  * 2. Setup netdev pointer in pci_dev
3483  * 3. Initialize no. of TxQ & CQs & MSIX vectors
3484  * 4. Initialize work queue.
3485  */
3486 static int
3487 bnad_init(struct bnad *bnad,
3488 	  struct pci_dev *pdev, struct net_device *netdev)
3489 {
3490 	unsigned long flags;
3491 
3492 	SET_NETDEV_DEV(netdev, &pdev->dev);
3493 	pci_set_drvdata(pdev, netdev);
3494 
3495 	bnad->netdev = netdev;
3496 	bnad->pcidev = pdev;
3497 	bnad->mmio_start = pci_resource_start(pdev, 0);
3498 	bnad->mmio_len = pci_resource_len(pdev, 0);
3499 	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3500 	if (!bnad->bar0) {
3501 		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3502 		return -ENOMEM;
3503 	}
3504 	dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3505 		 (unsigned long long) bnad->mmio_len);
3506 
3507 	spin_lock_irqsave(&bnad->bna_lock, flags);
3508 	if (!bnad_msix_disable)
3509 		bnad->cfg_flags = BNAD_CF_MSIX;
3510 
3511 	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3512 
3513 	bnad_q_num_init(bnad);
3514 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3515 
3516 	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3517 		(bnad->num_rx * bnad->num_rxp_per_rx) +
3518 			 BNAD_MAILBOX_MSIX_VECTORS;
3519 
3520 	bnad->txq_depth = BNAD_TXQ_DEPTH;
3521 	bnad->rxq_depth = BNAD_RXQ_DEPTH;
3522 
3523 	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3524 	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3525 
3526 	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3527 	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3528 	if (!bnad->work_q) {
3529 		iounmap(bnad->bar0);
3530 		return -ENOMEM;
3531 	}
3532 
3533 	return 0;
3534 }
3535 
3536 /*
3537  * Must be called after bnad_pci_uninit()
3538  * so that iounmap() and pci_set_drvdata(NULL)
3539  * happens only after PCI uninitialization.
3540  */
3541 static void
3542 bnad_uninit(struct bnad *bnad)
3543 {
3544 	if (bnad->work_q) {
3545 		flush_workqueue(bnad->work_q);
3546 		destroy_workqueue(bnad->work_q);
3547 		bnad->work_q = NULL;
3548 	}
3549 
3550 	if (bnad->bar0)
3551 		iounmap(bnad->bar0);
3552 }
3553 
3554 /*
3555  * Initialize locks
3556  *	a) Per-ioceth mutex used for serializing configuration
3557  *	   changes from the OS interface
3558  *	b) Spin lock used to protect the bna state machine
3559  */
3560 static void
3561 bnad_lock_init(struct bnad *bnad)
3562 {
3563 	spin_lock_init(&bnad->bna_lock);
3564 	mutex_init(&bnad->conf_mutex);
3565 	mutex_init(&bnad_list_mutex);
3566 }
3567 
3568 static void
3569 bnad_lock_uninit(struct bnad *bnad)
3570 {
3571 	mutex_destroy(&bnad->conf_mutex);
3572 	mutex_destroy(&bnad_list_mutex);
3573 }
3574 
3575 /* PCI Initialization */
3576 static int
3577 bnad_pci_init(struct bnad *bnad,
3578 	      struct pci_dev *pdev, bool *using_dac)
3579 {
3580 	int err;
3581 
3582 	err = pci_enable_device(pdev);
3583 	if (err)
3584 		return err;
3585 	err = pci_request_regions(pdev, BNAD_NAME);
3586 	if (err)
3587 		goto disable_device;
3588 	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3589 		*using_dac = true;
3590 	} else {
3591 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3592 		if (err)
3593 			goto release_regions;
3594 		*using_dac = false;
3595 	}
3596 	pci_set_master(pdev);
3597 	return 0;
3598 
3599 release_regions:
3600 	pci_release_regions(pdev);
3601 disable_device:
3602 	pci_disable_device(pdev);
3603 
3604 	return err;
3605 }
3606 
3607 static void
3608 bnad_pci_uninit(struct pci_dev *pdev)
3609 {
3610 	pci_release_regions(pdev);
3611 	pci_disable_device(pdev);
3612 }
3613 
3614 static int
3615 bnad_pci_probe(struct pci_dev *pdev,
3616 		const struct pci_device_id *pcidev_id)
3617 {
3618 	bool	using_dac;
3619 	int	err;
3620 	struct bnad *bnad;
3621 	struct bna *bna;
3622 	struct net_device *netdev;
3623 	struct bfa_pcidev pcidev_info;
3624 	unsigned long flags;
3625 
3626 	mutex_lock(&bnad_fwimg_mutex);
3627 	if (!cna_get_firmware_buf(pdev)) {
3628 		mutex_unlock(&bnad_fwimg_mutex);
3629 		dev_err(&pdev->dev, "failed to load firmware image!\n");
3630 		return -ENODEV;
3631 	}
3632 	mutex_unlock(&bnad_fwimg_mutex);
3633 
3634 	/*
3635 	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3636 	 * bnad = netdev_priv(netdev)
3637 	 */
3638 	netdev = alloc_etherdev(sizeof(struct bnad));
3639 	if (!netdev) {
3640 		err = -ENOMEM;
3641 		return err;
3642 	}
3643 	bnad = netdev_priv(netdev);
3644 	bnad_lock_init(bnad);
3645 	bnad_add_to_list(bnad);
3646 
3647 	mutex_lock(&bnad->conf_mutex);
3648 	/*
3649 	 * PCI initialization
3650 	 *	Output : using_dac = 1 for 64 bit DMA
3651 	 *			   = 0 for 32 bit DMA
3652 	 */
3653 	using_dac = false;
3654 	err = bnad_pci_init(bnad, pdev, &using_dac);
3655 	if (err)
3656 		goto unlock_mutex;
3657 
3658 	/*
3659 	 * Initialize bnad structure
3660 	 * Setup relation between pci_dev & netdev
3661 	 */
3662 	err = bnad_init(bnad, pdev, netdev);
3663 	if (err)
3664 		goto pci_uninit;
3665 
3666 	/* Initialize netdev structure, set up ethtool ops */
3667 	bnad_netdev_init(bnad, using_dac);
3668 
3669 	/* Set link to down state */
3670 	netif_carrier_off(netdev);
3671 
3672 	/* Setup the debugfs node for this bfad */
3673 	if (bna_debugfs_enable)
3674 		bnad_debugfs_init(bnad);
3675 
3676 	/* Get resource requirement form bna */
3677 	spin_lock_irqsave(&bnad->bna_lock, flags);
3678 	bna_res_req(&bnad->res_info[0]);
3679 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3680 
3681 	/* Allocate resources from bna */
3682 	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3683 	if (err)
3684 		goto drv_uninit;
3685 
3686 	bna = &bnad->bna;
3687 
3688 	/* Setup pcidev_info for bna_init() */
3689 	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3690 	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3691 	pcidev_info.device_id = bnad->pcidev->device;
3692 	pcidev_info.pci_bar_kva = bnad->bar0;
3693 
3694 	spin_lock_irqsave(&bnad->bna_lock, flags);
3695 	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3696 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3697 
3698 	bnad->stats.bna_stats = &bna->stats;
3699 
3700 	bnad_enable_msix(bnad);
3701 	err = bnad_mbox_irq_alloc(bnad);
3702 	if (err)
3703 		goto res_free;
3704 
3705 	/* Set up timers */
3706 	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3707 		    (unsigned long)bnad);
3708 	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3709 		    (unsigned long)bnad);
3710 	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3711 		    (unsigned long)bnad);
3712 	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3713 		    (unsigned long)bnad);
3714 
3715 	/*
3716 	 * Start the chip
3717 	 * If the callback comes back with an error, we bail out;
3718 	 * this is a catastrophic error.
3719 	 */
3720 	err = bnad_ioceth_enable(bnad);
3721 	if (err) {
3722 		dev_err(&pdev->dev, "initialization failed err=%d\n", err);
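		/* Note: the probe still completes successfully; the IOC may
		 * be brought up later (e.g. by IOC auto-recovery).
		 */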
3723 		goto probe_success;
3724 	}
3725 
3726 	spin_lock_irqsave(&bnad->bna_lock, flags);
3727 	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3728 		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3729 		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3730 			bna_attr(bna)->num_rxp - 1);
3731 		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3732 			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3733 			err = -EIO;
3734 	}
3735 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3736 	if (err)
3737 		goto disable_ioceth;
3738 
3739 	spin_lock_irqsave(&bnad->bna_lock, flags);
3740 	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3741 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3742 
3743 	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3744 	if (err) {
3745 		err = -EIO;
3746 		goto disable_ioceth;
3747 	}
3748 
3749 	spin_lock_irqsave(&bnad->bna_lock, flags);
3750 	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3751 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3752 
3753 	/* Get the burnt-in mac */
3754 	spin_lock_irqsave(&bnad->bna_lock, flags);
3755 	bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3756 	bnad_set_netdev_perm_addr(bnad);
3757 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3758 
3759 	mutex_unlock(&bnad->conf_mutex);
3760 
3761 	/* Finally, register with the net_device layer */
3762 	err = register_netdev(netdev);
3763 	if (err) {
3764 		dev_err(&pdev->dev, "registering net device failed\n");
3765 		goto probe_uninit;
3766 	}
3767 	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3768 
3769 	return 0;
3770 
3771 probe_success:
3772 	mutex_unlock(&bnad->conf_mutex);
3773 	return 0;
3774 
3775 probe_uninit:
3776 	mutex_lock(&bnad->conf_mutex);
3777 	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3778 disable_ioceth:
3779 	bnad_ioceth_disable(bnad);
3780 	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3781 	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3782 	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3783 	spin_lock_irqsave(&bnad->bna_lock, flags);
3784 	bna_uninit(bna);
3785 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3786 	bnad_mbox_irq_free(bnad);
3787 	bnad_disable_msix(bnad);
3788 res_free:
3789 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3790 drv_uninit:
3791 	/* Remove the debugfs node for this bnad */
3792 	kfree(bnad->regdata);
3793 	bnad_debugfs_uninit(bnad);
3794 	bnad_uninit(bnad);
3795 pci_uninit:
3796 	bnad_pci_uninit(pdev);
3797 unlock_mutex:
3798 	mutex_unlock(&bnad->conf_mutex);
3799 	bnad_remove_from_list(bnad);
3800 	bnad_lock_uninit(bnad);
3801 	free_netdev(netdev);
3802 	return err;
3803 }
3804 
3805 static void
3806 bnad_pci_remove(struct pci_dev *pdev)
3807 {
3808 	struct net_device *netdev = pci_get_drvdata(pdev);
3809 	struct bnad *bnad;
3810 	struct bna *bna;
3811 	unsigned long flags;
3812 
3813 	if (!netdev)
3814 		return;
3815 
3816 	bnad = netdev_priv(netdev);
3817 	bna = &bnad->bna;
3818 
3819 	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3820 		unregister_netdev(netdev);
3821 
3822 	mutex_lock(&bnad->conf_mutex);
3823 	bnad_ioceth_disable(bnad);
3824 	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3825 	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3826 	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3827 	spin_lock_irqsave(&bnad->bna_lock, flags);
3828 	bna_uninit(bna);
3829 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3830 
3831 	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3832 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3833 	bnad_mbox_irq_free(bnad);
3834 	bnad_disable_msix(bnad);
3835 	bnad_pci_uninit(pdev);
3836 	mutex_unlock(&bnad->conf_mutex);
3837 	bnad_remove_from_list(bnad);
3838 	bnad_lock_uninit(bnad);
3839 	/* Remove the debugfs node for this bnad */
3840 	kfree(bnad->regdata);
3841 	bnad_debugfs_uninit(bnad);
3842 	bnad_uninit(bnad);
3843 	free_netdev(netdev);
3844 }
3845 
3846 static const struct pci_device_id bnad_pci_id_table[] = {
3847 	{
3848 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3849 			PCI_DEVICE_ID_BROCADE_CT),
3850 		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3851 		.class_mask =  0xffff00
3852 	},
3853 	{
3854 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3855 			BFA_PCI_DEVICE_ID_CT2),
3856 		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3857 		.class_mask =  0xffff00
3858 	},
3859 	{0,  },
3860 };
3861 
3862 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3863 
3864 static struct pci_driver bnad_pci_driver = {
3865 	.name = BNAD_NAME,
3866 	.id_table = bnad_pci_id_table,
3867 	.probe = bnad_pci_probe,
3868 	.remove = bnad_pci_remove,
3869 };
3870 
3871 static int __init
3872 bnad_module_init(void)
3873 {
3874 	int err;
3875 
3876 	pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
3877 		BNAD_VERSION);
3878 
3879 	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3880 
3881 	err = pci_register_driver(&bnad_pci_driver);
3882 	if (err < 0) {
3883 		pr_err("bna: PCI driver registration failed err=%d\n", err);
3884 		return err;
3885 	}
3886 
3887 	return 0;
3888 }
3889 
3890 static void __exit
3891 bnad_module_exit(void)
3892 {
3893 	pci_unregister_driver(&bnad_pci_driver);
3894 	release_firmware(bfi_fw);
3895 }
3896 
3897 module_init(bnad_module_init);
3898 module_exit(bnad_module_exit);
3899 
3900 MODULE_AUTHOR("Brocade");
3901 MODULE_LICENSE("GPL");
3902 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3903 MODULE_VERSION(BNAD_VERSION);
3904 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3905 MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3906