// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>
#include <net/gro.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, 0644);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

/*
 * Global variables
 */
static u32 bnad_rxqs_per_cq = 2;
static atomic_t bna_id;
static const u8 bnad_bcast_addr[] __aligned(2) =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/*
 * Local MACROS
 */
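/*
 * Mailbox interrupt line: the dedicated MSI-X vector when MSI-X is
 * enabled, otherwise the function's legacy INTx line.
 */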
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
	do {							\
		(_res_info)->res_type = BNA_RES_T_MEM;		\
		(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
		(_res_info)->res_u.mem_info.num = (_num);	\
		(_res_info)->res_u.mem_info.len = (_size);	\
	} while (0)

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}

/* Tx Datapath functions */


/* Caller should ensure that the entry at unmap_q[index] is valid */
static u32
bnad_tx_buff_unmap(struct bnad *bnad,
		   struct bnad_tx_unmap *unmap_q,
		   u32 q_depth, u32 index)
{
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;
	int vector, nvecs;

	unmap = &unmap_q[index];
	nvecs = unmap->nvecs;

	skb = unmap->skb;
	unmap->skb = NULL;
	unmap->nvecs = 0;
	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vectors[0], dma_addr),
			 skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
	nvecs--;

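	/*
	 * Vector 0 (the skb head) was unmapped above; the remaining
	 * vectors are page fragments.  A Tx work item holds at most
	 * BFI_TX_MAX_VECTORS_PER_WI vectors, so step to the next unmap
	 * queue entry whenever that limit is reached.
	 */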
	vector = 0;
	while (nvecs) {
		vector++;
		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
			vector = 0;
			BNA_QE_INDX_INC(index, q_depth);
			unmap = &unmap_q[index];
		}

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vectors[vector], dma_addr),
			       dma_unmap_len(&unmap->vectors[vector], dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
		nvecs--;
	}

	BNA_QE_INDX_INC(index, q_depth);

	return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < tcb->q_depth; i++) {
		skb = unmap_q[i].skb;
		if (!skb)
			continue;
		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);

		dev_kfree_skb_any(skb);
	}
}

/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u32 wis, unmap_wis, hw_cons, cons, q_depth;
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;

	/* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

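	/*
	 * The rmb() below ensures that the read of the hw consumer index
	 * completes before the unmap queue entries it covers are read.
	 */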
	hw_cons = *(tcb->hw_consumer_index);
	rmb();
	cons = tcb->consumer_index;
	q_depth = tcb->q_depth;

	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	while (wis) {
		unmap = &unmap_q[cons];

		skb = unmap->skb;

		sent_packets++;
		sent_bytes += skb->len;

		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
		wis -= unmap_wis;

		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = hw_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

static u32
bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_txcmpl_process(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
		    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

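	/*
	 * Make the completion processing above visible before releasing
	 * BNAD_TXQ_FREE_SENT; pairs with the test_and_set_bit() taken at
	 * entry (and in bnad_tx_cleanup()).
	 */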
	smp_mb__before_atomic();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx_complete(bnad, tcb);

	return IRQ_HANDLED;
}

static inline void
bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->reuse_pi = -1;
	unmap_q->alloc_order = -1;
	unmap_q->map_size = 0;
	unmap_q->type = BNAD_RXBUF_NONE;
}

/* Default is page-based allocation. Multi-buffer support - TBD */
static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int order;

	bnad_rxq_alloc_uninit(bnad, rcb);

	order = get_order(rcb->rxq->buffer_size);

	unmap_q->type = BNAD_RXBUF_PAGE;

	if (bna_is_small_rxq(rcb->id)) {
		unmap_q->alloc_order = 0;
		unmap_q->map_size = rcb->rxq->buffer_size;
	} else {
		if (rcb->rxq->multi_buffer) {
			unmap_q->alloc_order = 0;
			unmap_q->map_size = rcb->rxq->buffer_size;
			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
		} else {
			unmap_q->alloc_order = order;
			unmap_q->map_size =
				(rcb->rxq->buffer_size > 2048) ?
				PAGE_SIZE << order : 2048;
		}
	}

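	/*
	 * map_size must evenly divide the allocation so that a page of
	 * order 'alloc_order' can be carved into whole buffers.
	 */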
	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);

	return 0;
}

static inline void
bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->page)
		return;

	dma_unmap_page(&bnad->pcidev->dev,
		       dma_unmap_addr(&unmap->vector, dma_addr),
		       unmap->vector.len, DMA_FROM_DEVICE);
	put_page(unmap->page);
	unmap->page = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

static inline void
bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->skb)
		return;

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);
	dev_kfree_skb_any(unmap->skb);
	unmap->skb = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

static void
bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int i;

	for (i = 0; i < rcb->q_depth; i++) {
		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
	bnad_rxq_alloc_uninit(bnad, rcb);
}

static u32
bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap, *prev;
	struct bna_rxq_entry *rxent;
	struct page *page;
	u32 page_offset, alloc_size;
	dma_addr_t dma_addr;

	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
	alloced = 0;

	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

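		/*
		 * If the page from the previous fill still has room
		 * (reuse_pi >= 0), carve the next map_size chunk out of it
		 * and take an extra reference; otherwise allocate a fresh
		 * compound page.
		 */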
		if (unmap_q->reuse_pi < 0) {
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					   unmap_q->alloc_order);
			page_offset = 0;
		} else {
			prev = &unmap_q->unmap[unmap_q->reuse_pi];
			page = prev->page;
			page_offset = prev->page_offset + unmap_q->map_size;
			get_page(page);
		}

		if (unlikely(!page)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
					unmap_q->map_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			put_page(page);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->page = page;
		unmap->page_offset = page_offset;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = unmap_q->map_size;
		page_offset += unmap_q->map_size;

		if (page_offset < alloc_size)
			unmap_q->reuse_pi = prod;
		else
			unmap_q->reuse_pi = -1;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
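		/*
		 * Publish the new producer index before testing
		 * BNAD_RXQ_POST_OK and ringing the doorbell.
		 */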
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}

static u32
bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth, buff_sz;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	buff_sz = rcb->rxq->buffer_size;
	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloced = 0;
	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);

		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  buff_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->skb = skb;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = buff_sz;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}

static inline void
bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	u32 to_alloc;

	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
		return;

	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
	else
		bnad_rxq_refill_page(bnad, rcb, to_alloc);
}

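/*
 * RX checksum offload is claimed only when the completion flags match one
 * of the exact TCP/UDP over IPv4/IPv6 patterns below; see the masked_flags
 * checks in bnad_cq_process().
 */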
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
					BNA_CQ_EF_IPV6 | \
					BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
					BNA_CQ_EF_L4_CKSUM_OK)

#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)

static void
bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
		    u32 sop_ci, u32 nvecs)
{
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap;
	u32 ci, vec;

	unmap_q = rcb->unmap_q;
	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
}

static void
bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
{
	struct bna_rcb *rcb;
	struct bnad *bnad;
	struct bnad_rx_unmap_q *unmap_q;
	struct bna_cq_entry *cq, *cmpl;
	u32 ci, pi, totlen = 0;

	cq = ccb->sw_q;
	pi = ccb->producer_index;
	cmpl = &cq[pi];

	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
	unmap_q = rcb->unmap_q;
	bnad = rcb->bnad;
	ci = rcb->consumer_index;

	/* prefetch header */
	prefetch(page_address(unmap_q->unmap[ci].page) +
		 unmap_q->unmap[ci].page_offset);

	while (nvecs--) {
		struct bnad_rx_unmap *unmap;
		u32 len;

		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vector, dma_addr),
			       unmap->vector.len, DMA_FROM_DEVICE);

		len = ntohs(cmpl->length);
		skb->truesize += unmap->vector.len;
		totlen += len;

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   unmap->page, unmap->page_offset, len);

		unmap->page = NULL;
		unmap->vector.len = 0;

		BNA_QE_INDX_INC(pi, ccb->q_depth);
		cmpl = &cq[pi];
	}

	skb->len += totlen;
	skb->data_len += totlen;
}

static inline void
bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
		  struct bnad_rx_unmap *unmap, u32 len)
{
	prefetch(skb->data);

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bnad->netdev);

	unmap->skb = NULL;
	unmap->vector.len = 0;
}

static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap = NULL;
	struct sk_buff *skb = NULL;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
	u32 packets = 0, len = 0, totlen = 0;
	u32 pi, vec, sop_ci = 0, nvecs = 0;
	u32 flags, masked_flags;

	prefetch(bnad->netdev);

	cq = ccb->sw_q;

	while (packets < budget) {
		cmpl = &cq[ccb->producer_index];
		if (!cmpl->valid)
			break;
		/* The 'valid' field is set by the adapter, only after writing
		 * the other fields of completion entry. Hence, do not load
		 * other fields of completion entry *before* the 'valid' is
		 * loaded. Adding the rmb() here prevents the compiler and/or
		 * CPU from reordering the reads which would potentially result
		 * in reading stale values in completion entry.
		 */
		rmb();

		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;

		/* start of packet ci */
		sop_ci = rcb->consumer_index;

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
			unmap = &unmap_q->unmap[sop_ci];
			skb = unmap->skb;
		} else {
			skb = napi_get_frags(&rx_ctrl->napi);
			if (unlikely(!skb))
				break;
		}
		prefetch(skb);

		flags = ntohl(cmpl->flags);
		len = ntohs(cmpl->length);
		totlen = len;
		nvecs = 1;

		/* Check all the completions for this frame.
		 * busy-wait doesn't help much, break here.
		 */
		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
		    (flags & BNA_CQ_EF_EOP) == 0) {
			pi = ccb->producer_index;
			do {
				BNA_QE_INDX_INC(pi, ccb->q_depth);
				next_cmpl = &cq[pi];

				if (!next_cmpl->valid)
					break;
				/* The 'valid' field is set by the adapter, only
				 * after writing the other fields of completion
				 * entry. Hence, do not load other fields of
				 * completion entry *before* the 'valid' is
				 * loaded. Adding the rmb() here prevents the
				 * compiler and/or CPU from reordering the reads
				 * which would potentially result in reading
				 * stale values in completion entry.
				 */
				rmb();

				len = ntohs(next_cmpl->length);
				flags = ntohl(next_cmpl->flags);

				nvecs++;
				totlen += len;
			} while ((flags & BNA_CQ_EF_EOP) == 0);

			if (!next_cmpl->valid)
				break;
		}
		packets++;

		/* TODO: BNA_CQ_EF_LOCAL ? */
		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
				      BNA_CQ_EF_FCS_ERROR |
				      BNA_CQ_EF_TOO_LONG))) {
			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
			rcb->rxq->rx_packets_with_error++;

			goto next;
		}

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_cq_setup_skb(bnad, skb, unmap, len);
		else
			bnad_cq_setup_skb_frags(ccb, skb, nvecs);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += totlen;
		ccb->bytes_per_intr += totlen;

		masked_flags = flags & flags_cksum_prot_mask;

		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     ((masked_flags == flags_tcp4) ||
		      (masked_flags == flags_udp4) ||
		      (masked_flags == flags_tcp6) ||
		      (masked_flags == flags_udp6))))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if ((flags & BNA_CQ_EF_VLAN) &&
		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			netif_receive_skb(skb);
		else
			napi_gro_frags(&rx_ctrl->napi);

next:
		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
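		/*
		 * Invalidate the completions just processed so they are not
		 * seen again on a later pass.
		 */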
		for (vec = 0; vec < nvecs; vec++) {
			cmpl = &cq[ccb->producer_index];
			cmpl->valid = 0;
			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
		}
	}

	napi_gro_flush(&rx_ctrl->napi, false);
	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_rxq_post(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_rxq_post(bnad, ccb->rcb[1]);

	return packets;
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
	if (is_zero_ether_addr(netdev->dev_addr))
		eth_hw_addr_set(netdev, bnad->perm_addr);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

void
bnad_cb_ethport_link_status(struct bnad *bnad,
			    enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;

			netdev_info(bnad->netdev, "link up\n");
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				     tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule */
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			netdev_info(bnad->netdev, "link down\n");
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tcb->priv = tcb;
	tx_info->tcb[tcb->id] = tcb;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tx_info->tcb[tcb->id] = NULL;
	tcb->priv = NULL;
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
	}
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		BUG_ON(*(tcb->hw_consumer_index) != 0);

		if (netif_carrier_ok(bnad->netdev)) {
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround: if the first ioceth enable failed, the MAC address
	 * reads back as all zeros.  Try to fetch the MAC address again here.
	 */
	if (is_zero_ether_addr(bnad->perm_addr)) {
		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}

/*
 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
 */
static void
bnad_tx_cleanup(struct work_struct *work)
{
	struct bnad_tx_info *tx_info =
		container_of(work, struct bnad_tx_info, tx_cleanup_work.work);
	struct bnad *bnad = NULL;
	struct bna_tcb *tcb;
	unsigned long flags;
	u32 i, pending = 0;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;

		bnad = tcb->bnad;

		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			pending++;
			continue;
		}

		bnad_txq_cleanup(bnad, tcb);

		smp_mb__before_atomic();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
	}

	if (pending) {
		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
				   msecs_to_jiffies(1));
		return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_cleanup_complete(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
}

static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}

/*
 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
 */
static void
bnad_rx_cleanup(struct work_struct *work)
{
	struct bnad_rx_info *rx_info =
		container_of(work, struct bnad_rx_info, rx_cleanup_work);
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad *bnad = NULL;
	unsigned long flags;
	u32 i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];

		if (!rx_ctrl->ccb)
			continue;

		bnad = rx_ctrl->ccb->bnad;

		/*
		 * Wait till the poll handler has exited
		 * and nothing can be scheduled anymore
		 */
		napi_disable(&rx_ctrl->napi);

		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
		if (rx_ctrl->ccb->rcb[1])
			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_cleanup_complete(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
	}

	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		napi_enable(&rx_ctrl->napi);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;

			bnad_rxq_alloc_init(bnad, rcb);
			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			bnad_rxq_post(bnad, rcb);
		}
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

void
bnad_cb_completion(void *arg, enum bfa_status status)
{
	struct bnad_iocmd_comp *iocmd_comp =
			(struct bnad_iocmd_comp *)arg;

	iocmd_comp->comp_status = (u32) status;
	complete(&iocmd_comp->comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						 dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates the IRQ for the mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox-enable callback
 * from bna.
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute.
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

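		/*
		 * MSI-X vector layout: mailbox vector(s) first, then one
		 * vector per TxQ across all Tx objects, then the Rx vectors.
		 */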
		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
				       (bnad->num_tx * bnad->num_txq_per_tx) +
				       txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}

/* NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/* NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
		      u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d",
			 bnad->netdev->name,
			 tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/* NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
		      u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE,
			 "%s CQ %d", bnad->netdev->name,
			 rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
						  &res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
						  &res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(struct timer_list *t)
{
	struct bnad *bnad = timer_container_of(bnad, t,
					       bna.ioceth.ioc.ioc_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(struct timer_list *t)
{
	struct bnad *bnad = timer_container_of(bnad, t,
					       bna.ioceth.ioc.hb_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(struct timer_list *t)
{
	struct bnad *bnad = timer_container_of(bnad, t,
					       bna.ioceth.ioc.iocpf_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(struct timer_list *t)
{
	struct bnad *bnad = timer_container_of(bnad, t,
					       bna.ioceth.ioc.sem_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			timer_delete_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(struct timer_list *t)
{
	struct bnad *bnad = timer_container_of(bnad, t, dim_timer);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(struct timer_list *t)
{
	struct bnad *bnad = timer_container_of(bnad, t, stats_timer);
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		timer_delete_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
		i++;
	}
}

static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete_done(napi, rcvd);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

	return rcvd;
}

static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx);
	}
}

static void
bnad_napi_delete(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
}

/* Should be called with conf_lock held */
void
bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
					bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;
	tx_info->tx_id = 0;

	bnad_tx_res_free(bnad, res_info);
}

/* Should be called with conf_lock held */
1945 int
bnad_setup_tx(struct bnad * bnad,u32 tx_id)1946 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1947 {
1948 int err;
1949 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1950 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1951 struct bna_intr_info *intr_info =
1952 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1953 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1954 static const struct bna_tx_event_cbfn tx_cbfn = {
1955 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1956 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1957 .tx_stall_cbfn = bnad_cb_tx_stall,
1958 .tx_resume_cbfn = bnad_cb_tx_resume,
1959 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1960 };
1961
1962 struct bna_tx *tx;
1963 unsigned long flags;
1964
1965 tx_info->tx_id = tx_id;
1966
1967 /* Initialize the Tx object configuration */
1968 tx_config->num_txq = bnad->num_txq_per_tx;
1969 tx_config->txq_depth = bnad->txq_depth;
1970 tx_config->tx_type = BNA_TX_T_REGULAR;
1971 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1972
1973 /* Get BNA's resource requirement for one tx object */
1974 spin_lock_irqsave(&bnad->bna_lock, flags);
1975 bna_tx_res_req(bnad->num_txq_per_tx,
1976 bnad->txq_depth, res_info);
1977 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1978
1979 /* Fill Unmap Q memory requirements */
1980 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1981 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1982 bnad->txq_depth));
1983
1984 /* Allocate resources */
1985 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1986 if (err)
1987 return err;
1988
1989 /* Ask BNA to create one Tx object, supplying required resources */
1990 spin_lock_irqsave(&bnad->bna_lock, flags);
1991 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1992 tx_info);
1993 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1994 if (!tx) {
1995 err = -ENOMEM;
1996 goto err_return;
1997 }
1998 tx_info->tx = tx;
1999
2000 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup);
2001
2002 /* Register ISR for the Tx object */
2003 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2004 err = bnad_tx_msix_register(bnad, tx_info,
2005 tx_id, bnad->num_txq_per_tx);
2006 if (err)
2007 goto cleanup_tx;
2008 }
2009
2010 spin_lock_irqsave(&bnad->bna_lock, flags);
2011 bna_tx_enable(tx);
2012 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2013
2014 return 0;
2015
2016 cleanup_tx:
2017 spin_lock_irqsave(&bnad->bna_lock, flags);
2018 bna_tx_destroy(tx_info->tx);
2019 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2020 tx_info->tx = NULL;
2021 tx_info->tx_id = 0;
2022 err_return:
2023 bnad_tx_res_free(bnad, res_info);
2024 return err;
2025 }
2026
2027 /* Setup the rx config for bna_rx_create */
2028 /* bnad decides the configuration */
2029 static void
2030 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2031 {
2032 memset(rx_config, 0, sizeof(*rx_config));
2033 rx_config->rx_type = BNA_RX_T_REGULAR;
2034 rx_config->num_paths = bnad->num_rxp_per_rx;
2035 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2036
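/*
 * RSS is used only when there are multiple Rx paths. hash_mask is
 * applied as a bitmask over the paths, which assumes num_rxp_per_rx
 * is a power of two.
 */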
2037 if (bnad->num_rxp_per_rx > 1) {
2038 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2039 rx_config->rss_config.hash_type =
2040 (BFI_ENET_RSS_IPV6 |
2041 BFI_ENET_RSS_IPV6_TCP |
2042 BFI_ENET_RSS_IPV4 |
2043 BFI_ENET_RSS_IPV4_TCP);
2044 rx_config->rss_config.hash_mask =
2045 bnad->num_rxp_per_rx - 1;
2046 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2047 sizeof(rx_config->rss_config.toeplitz_hash_key));
2048 } else {
2049 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2050 memset(&rx_config->rss_config, 0,
2051 sizeof(rx_config->rss_config));
2052 }
2053
2054 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2055 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2056
2057 /* BNA_RXP_SINGLE - one data-buffer queue
2058 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2059 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2060 */
2061 /* TODO: configurable param for queue type */
2062 rx_config->rxp_type = BNA_RXP_SLR;
2063
2064 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2065 rx_config->frame_size > 4096) {
2066 /* Even though size-routing is enabled in SLR mode,
2067 * small packets may still get routed to the same rxq.
2068 * Set buf_size to 2048 instead of PAGE_SIZE.
2069 */
2070 rx_config->q0_buf_size = 2048;
2071 /* q0_num_vecs should be a multiple of 2 */
2072 rx_config->q0_num_vecs = 4;
2073 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2074 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2075 } else {
2076 rx_config->q0_buf_size = rx_config->frame_size;
2077 rx_config->q0_num_vecs = 1;
2078 rx_config->q0_depth = bnad->rxq_depth;
2079 }
2080
2081 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2082 if (rx_config->rxp_type == BNA_RXP_SLR) {
2083 rx_config->q1_depth = bnad->rxq_depth;
2084 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2085 }
2086
2087 rx_config->vlan_strip_status =
2088 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2089 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2090 }
2091
2092 static void
2093 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2094 {
2095 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2096 int i;
2097
2098 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2099 rx_info->rx_ctrl[i].bnad = bnad;
2100 }
2101
2102 /* Called with mutex_lock(&bnad->conf_mutex) held */
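/* Destroys and re-creates all Rx objects; returns the number of Rx
 * objects processed. The first setup error, if any, is only logged.
 */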
2103 static u32
2104 bnad_reinit_rx(struct bnad *bnad)
2105 {
2106 struct net_device *netdev = bnad->netdev;
2107 u32 err = 0, current_err = 0;
2108 u32 rx_id = 0, count = 0;
2109 unsigned long flags;
2110
2111 /* destroy and create new rx objects */
2112 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2113 if (!bnad->rx_info[rx_id].rx)
2114 continue;
2115 bnad_destroy_rx(bnad, rx_id);
2116 }
2117
2118 spin_lock_irqsave(&bnad->bna_lock, flags);
2119 bna_enet_mtu_set(&bnad->bna.enet,
2120 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2121 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2122
2123 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2124 count++;
2125 current_err = bnad_setup_rx(bnad, rx_id);
2126 if (current_err && !err) {
2127 err = current_err;
2128 netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2129 }
2130 }
2131
2132 /* restore rx configuration */
2133 if (bnad->rx_info[0].rx && !err) {
2134 bnad_restore_vlans(bnad, 0);
2135 bnad_enable_default_bcast(bnad);
2136 spin_lock_irqsave(&bnad->bna_lock, flags);
2137 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2138 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2139 bnad_set_rx_mode(netdev);
2140 }
2141
2142 return count;
2143 }
2144
2145 /* Called with bnad_conf_lock() held */
2146 void
2147 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2148 {
2149 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2150 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2151 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2152 unsigned long flags;
2153 int to_del = 0;
2154
2155 if (!rx_info->rx)
2156 return;
2157
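/*
 * For the default Rx, stop the DIM timer first. The timer is deleted
 * outside bna_lock since its callback also takes bna_lock.
 */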
2158 if (rx_id == 0) {
2159 spin_lock_irqsave(&bnad->bna_lock, flags);
2160 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2161 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2162 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2163 to_del = 1;
2164 }
2165 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2166 if (to_del)
2167 timer_delete_sync(&bnad->dim_timer);
2168 }
2169
2170 init_completion(&bnad->bnad_completions.rx_comp);
2171 spin_lock_irqsave(&bnad->bna_lock, flags);
2172 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2173 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2174 wait_for_completion(&bnad->bnad_completions.rx_comp);
2175
2176 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2177 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2178
2179 bnad_napi_delete(bnad, rx_id);
2180
2181 spin_lock_irqsave(&bnad->bna_lock, flags);
2182 bna_rx_destroy(rx_info->rx);
2183
2184 rx_info->rx = NULL;
2185 rx_info->rx_id = 0;
2186 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2187
2188 bnad_rx_res_free(bnad, res_info);
2189 }
2190
2191 /* Called with mutex_lock(&bnad->conf_mutex) held */
2192 int
2193 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2194 {
2195 int err;
2196 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2197 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2198 struct bna_intr_info *intr_info =
2199 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2200 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2201 static const struct bna_rx_event_cbfn rx_cbfn = {
2202 .rcb_setup_cbfn = NULL,
2203 .rcb_destroy_cbfn = NULL,
2204 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2205 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2206 .rx_stall_cbfn = bnad_cb_rx_stall,
2207 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2208 .rx_post_cbfn = bnad_cb_rx_post,
2209 };
2210 struct bna_rx *rx;
2211 unsigned long flags;
2212
2213 rx_info->rx_id = rx_id;
2214
2215 /* Initialize the Rx object configuration */
2216 bnad_init_rx_config(bnad, rx_config);
2217
2218 /* Get BNA's resource requirement for one Rx object */
2219 spin_lock_irqsave(&bnad->bna_lock, flags);
2220 bna_rx_res_req(rx_config, res_info);
2221 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2222
2223 /* Fill Unmap Q memory requirements */
2224 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2225 rx_config->num_paths,
2226 (rx_config->q0_depth *
2227 sizeof(struct bnad_rx_unmap)) +
2228 sizeof(struct bnad_rx_unmap_q));
2229
2230 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2231 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2232 rx_config->num_paths,
2233 (rx_config->q1_depth *
2234 sizeof(struct bnad_rx_unmap) +
2235 sizeof(struct bnad_rx_unmap_q)));
2236 }
2237 /* Allocate resources */
2238 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2239 if (err)
2240 return err;
2241
2242 bnad_rx_ctrl_init(bnad, rx_id);
2243
2244 /* Ask BNA to create one Rx object, supplying required resources */
2245 spin_lock_irqsave(&bnad->bna_lock, flags);
2246 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2247 rx_info);
2248 if (!rx) {
2249 err = -ENOMEM;
2250 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2251 goto err_return;
2252 }
2253 rx_info->rx = rx;
2254 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2255
2256 INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup);
2257
2258 /*
2259 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2260 * so that IRQ handler cannot schedule NAPI at this point.
2261 */
2262 bnad_napi_add(bnad, rx_id);
2263
2264 /* Register ISR for the Rx object */
2265 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2266 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2267 rx_config->num_paths);
2268 if (err)
2269 goto err_return;
2270 }
2271
2272 spin_lock_irqsave(&bnad->bna_lock, flags);
2273 if (rx_id == 0) {
2274 /* Set up Dynamic Interrupt Moderation Vector */
2275 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2276 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2277
2278 /* Enable VLAN filtering only on the default Rx */
2279 bna_rx_vlanfilter_enable(rx);
2280
2281 /* Start the DIM timer */
2282 bnad_dim_timer_start(bnad);
2283 }
2284
2285 bna_rx_enable(rx);
2286 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2287
2288 return 0;
2289
2290 err_return:
2291 bnad_destroy_rx(bnad, rx_id);
2292 return err;
2293 }
2294
2295 /* Called with conf_lock & bnad->bna_lock held */
2296 void
2297 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2298 {
2299 struct bnad_tx_info *tx_info;
2300
2301 tx_info = &bnad->tx_info[0];
2302 if (!tx_info->tx)
2303 return;
2304
2305 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2306 }
2307
2308 /* Called with conf_lock & bnad->bna_lock held */
2309 void
2310 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2311 {
2312 struct bnad_rx_info *rx_info;
2313 int i;
2314
2315 for (i = 0; i < bnad->num_rx; i++) {
2316 rx_info = &bnad->rx_info[i];
2317 if (!rx_info->rx)
2318 continue;
2319 bna_rx_coalescing_timeo_set(rx_info->rx,
2320 bnad->rx_coalescing_timeo);
2321 }
2322 }
2323
2324 /*
2325 * Called with bnad->bna_lock held
2326 */
2327 int
2328 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2329 {
2330 int ret;
2331
2332 if (!is_valid_ether_addr(mac_addr))
2333 return -EADDRNOTAVAIL;
2334
2335 /* If datapath is down, pretend everything went through */
2336 if (!bnad->rx_info[0].rx)
2337 return 0;
2338
2339 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2340 if (ret != BNA_CB_SUCCESS)
2341 return -EADDRNOTAVAIL;
2342
2343 return 0;
2344 }
2345
2346 /* Should be called with conf_lock held */
2347 int
2348 bnad_enable_default_bcast(struct bnad *bnad)
2349 {
2350 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2351 int ret;
2352 unsigned long flags;
2353
2354 init_completion(&bnad->bnad_completions.mcast_comp);
2355
2356 spin_lock_irqsave(&bnad->bna_lock, flags);
2357 ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2358 bnad_cb_rx_mcast_add);
2359 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2360
2361 if (ret == BNA_CB_SUCCESS)
2362 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2363 else
2364 return -ENODEV;
2365
2366 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2367 return -ENODEV;
2368
2369 return 0;
2370 }
2371
2372 /* Called with mutex_lock(&bnad->conf_mutex) held */
2373 void
2374 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2375 {
2376 u16 vid;
2377 unsigned long flags;
2378
2379 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2380 spin_lock_irqsave(&bnad->bna_lock, flags);
2381 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2382 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2383 }
2384 }
2385
2386 /* Statistics utilities */
2387 void
2388 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2389 {
2390 int i, j;
2391
2392 for (i = 0; i < bnad->num_rx; i++) {
2393 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2394 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2395 stats->rx_packets += bnad->rx_info[i].
2396 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2397 stats->rx_bytes += bnad->rx_info[i].
2398 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2399 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2400 bnad->rx_info[i].rx_ctrl[j].ccb->
2401 rcb[1]->rxq) {
2402 stats->rx_packets +=
2403 bnad->rx_info[i].rx_ctrl[j].
2404 ccb->rcb[1]->rxq->rx_packets;
2405 stats->rx_bytes +=
2406 bnad->rx_info[i].rx_ctrl[j].
2407 ccb->rcb[1]->rxq->rx_bytes;
2408 }
2409 }
2410 }
2411 }
2412 for (i = 0; i < bnad->num_tx; i++) {
2413 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2414 if (bnad->tx_info[i].tcb[j]) {
2415 stats->tx_packets +=
2416 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2417 stats->tx_bytes +=
2418 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2419 }
2420 }
2421 }
2422 }
2423
2424 /*
2425 * Must be called with the bna_lock held.
2426 */
2427 void
2428 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2429 {
2430 struct bfi_enet_stats_mac *mac_stats;
2431 u32 bmap;
2432 int i;
2433
2434 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2435 stats->rx_errors =
2436 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2437 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2438 mac_stats->rx_undersize;
2439 stats->tx_errors = mac_stats->tx_fcs_error +
2440 mac_stats->tx_undersize;
2441 stats->rx_dropped = mac_stats->rx_drop;
2442 stats->tx_dropped = mac_stats->tx_drop;
2443 stats->multicast = mac_stats->rx_multicast;
2444 stats->collisions = mac_stats->tx_total_collision;
2445
2446 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2447
2448 /* receive ring buffer overflow ?? */
2449
2450 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2451 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2452 /* receiver FIFO overrun */
2453 bmap = bna_rx_rid_mask(&bnad->bna);
2454 for (i = 0; bmap; i++) {
2455 if (bmap & 1) {
2456 stats->rx_fifo_errors +=
2457 bnad->stats.bna_stats->
2458 hw_stats.rxf_stats[i].frame_drops;
2459 break;
2460 }
2461 bmap >>= 1;
2462 }
2463 }
2464
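/*
 * Wait for any in-flight mailbox interrupt handler to finish. The
 * vector is read under bna_lock because cfg_flags selects between
 * the MSI-X table entry and the legacy PCI irq.
 */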
2465 static void
2466 bnad_mbox_irq_sync(struct bnad *bnad)
2467 {
2468 u32 irq;
2469 unsigned long flags;
2470
2471 spin_lock_irqsave(&bnad->bna_lock, flags);
2472 if (bnad->cfg_flags & BNAD_CF_MSIX)
2473 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2474 else
2475 irq = bnad->pcidev->irq;
2476 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2477
2478 synchronize_irq(irq);
2479 }
2480
2481 /* Utility used by bnad_start_xmit, for doing TSO */
2482 static int
2483 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2484 {
2485 int err;
2486
2487 err = skb_cow_head(skb, 0);
2488 if (err < 0) {
2489 BNAD_UPDATE_CTR(bnad, tso_err);
2490 return err;
2491 }
2492
2493 /*
2494 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2495 * excluding the length field.
2496 */
2497 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2498 struct iphdr *iph = ip_hdr(skb);
2499
2500 /* Cleared here; hardware recomputes them for each segment */
2501 iph->tot_len = 0;
2502 iph->check = 0;
2503
2504 tcp_hdr(skb)->check =
2505 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2506 IPPROTO_TCP, 0);
2507 BNAD_UPDATE_CTR(bnad, tso4);
2508 } else {
2509 tcp_v6_gso_csum_prep(skb);
2510 BNAD_UPDATE_CTR(bnad, tso6);
2511 }
2512
2513 return 0;
2514 }
2515
2516 /*
2517 * Initialize Q numbers depending on Rx Paths
2518 * Called with bnad->bna_lock held, because of cfg_flags
2519 * access.
2520 */
2521 static void
2522 bnad_q_num_init(struct bnad *bnad)
2523 {
2524 int rxps;
2525
2526 rxps = min((uint)num_online_cpus(),
2527 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2528
2529 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2530 rxps = 1; /* INTx */
2531
2532 bnad->num_rx = 1;
2533 bnad->num_tx = 1;
2534 bnad->num_rxp_per_rx = rxps;
2535 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2536 }
2537
2538 /*
2539 * Adjusts the Q numbers, given a number of MSI-X vectors.
2540 * Gives preference to RSS over Tx priority queues; in that
2541 * case just one Tx Q is used.
2542 * Called with bnad->bna_lock held because of cfg_flags access.
2543 */
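/*
 * Worked example (assuming BNAD_MAILBOX_MSIX_VECTORS == 1): with 8
 * MSI-X vectors and a single TxQ, 8 - 1 - 1 = 6 Rx paths are used.
 */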
2544 static void
2545 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2546 {
2547 bnad->num_txq_per_tx = 1;
2548 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2549 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2550 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2551 bnad->num_rxp_per_rx = msix_vectors -
2552 (bnad->num_tx * bnad->num_txq_per_tx) -
2553 BNAD_MAILBOX_MSIX_VECTORS;
2554 } else
2555 bnad->num_rxp_per_rx = 1;
2556 }
2557
2558 /* Enable / disable ioceth */
2559 static int
2560 bnad_ioceth_disable(struct bnad *bnad)
2561 {
2562 unsigned long flags;
2563 int err = 0;
2564
2565 spin_lock_irqsave(&bnad->bna_lock, flags);
2566 init_completion(&bnad->bnad_completions.ioc_comp);
2567 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2568 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2569
2570 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2571 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2572
2573 err = bnad->bnad_completions.ioc_comp_status;
2574 return err;
2575 }
2576
2577 static int
2578 bnad_ioceth_enable(struct bnad *bnad)
2579 {
2580 int err = 0;
2581 unsigned long flags;
2582
2583 spin_lock_irqsave(&bnad->bna_lock, flags);
2584 init_completion(&bnad->bnad_completions.ioc_comp);
2585 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2586 bna_ioceth_enable(&bnad->bna.ioceth);
2587 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2588
2589 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2590 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2591
2592 err = bnad->bnad_completions.ioc_comp_status;
2593
2594 return err;
2595 }
2596
2597 /* Free BNA resources */
2598 static void
2599 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2600 u32 res_val_max)
2601 {
2602 int i;
2603
2604 for (i = 0; i < res_val_max; i++)
2605 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2606 }
2607
2608 /* Allocates memory and interrupt resources for BNA */
2609 static int
2610 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2611 u32 res_val_max)
2612 {
2613 int i, err;
2614
2615 for (i = 0; i < res_val_max; i++) {
2616 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2617 if (err)
2618 goto err_return;
2619 }
2620 return 0;
2621
2622 err_return:
2623 bnad_res_free(bnad, res_info, res_val_max);
2624 return err;
2625 }
2626
2627 /* Interrupt enable / disable */
2628 static void
2629 bnad_enable_msix(struct bnad *bnad)
2630 {
2631 int i, ret;
2632 unsigned long flags;
2633
2634 spin_lock_irqsave(&bnad->bna_lock, flags);
2635 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2636 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2637 return;
2638 }
2639 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2640
2641 if (bnad->msix_table)
2642 return;
2643
2644 bnad->msix_table =
2645 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2646
2647 if (!bnad->msix_table)
2648 goto intx_mode;
2649
2650 for (i = 0; i < bnad->msix_num; i++)
2651 bnad->msix_table[i].entry = i;
2652
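/*
 * pci_enable_msix_range() returns the number of vectors granted,
 * anywhere in [1, msix_num], or a negative errno.
 */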
2653 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2654 1, bnad->msix_num);
2655 if (ret < 0) {
2656 goto intx_mode;
2657 } else if (ret < bnad->msix_num) {
2658 dev_warn(&bnad->pcidev->dev,
2659 "%d MSI-X vectors allocated < %d requested\n",
2660 ret, bnad->msix_num);
2661
2662 spin_lock_irqsave(&bnad->bna_lock, flags);
2663 /* ret = #of vectors that we got */
2664 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2665 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2666 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2667
2668 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2669 BNAD_MAILBOX_MSIX_VECTORS;
2670
2671 if (bnad->msix_num > ret) {
2672 pci_disable_msix(bnad->pcidev);
2673 goto intx_mode;
2674 }
2675 }
2676
2677 pci_intx(bnad->pcidev, 0);
2678
2679 return;
2680
2681 intx_mode:
2682 dev_warn(&bnad->pcidev->dev,
2683 "MSI-X enable failed - operating in INTx mode\n");
2684
2685 kfree(bnad->msix_table);
2686 bnad->msix_table = NULL;
2687 bnad->msix_num = 0;
2688 spin_lock_irqsave(&bnad->bna_lock, flags);
2689 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2690 bnad_q_num_init(bnad);
2691 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2692 }
2693
2694 static void
2695 bnad_disable_msix(struct bnad *bnad)
2696 {
2697 u32 cfg_flags;
2698 unsigned long flags;
2699
2700 spin_lock_irqsave(&bnad->bna_lock, flags);
2701 cfg_flags = bnad->cfg_flags;
2702 if (bnad->cfg_flags & BNAD_CF_MSIX)
2703 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2704 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2705
2706 if (cfg_flags & BNAD_CF_MSIX) {
2707 pci_disable_msix(bnad->pcidev);
2708 kfree(bnad->msix_table);
2709 bnad->msix_table = NULL;
2710 }
2711 }
2712
2713 /* Netdev entry points */
2714 static int
2715 bnad_open(struct net_device *netdev)
2716 {
2717 int err;
2718 struct bnad *bnad = netdev_priv(netdev);
2719 struct bna_pause_config pause_config;
2720 unsigned long flags;
2721
2722 mutex_lock(&bnad->conf_mutex);
2723
2724 /* Tx */
2725 err = bnad_setup_tx(bnad, 0);
2726 if (err)
2727 goto err_return;
2728
2729 /* Rx */
2730 err = bnad_setup_rx(bnad, 0);
2731 if (err)
2732 goto cleanup_tx;
2733
2734 /* Port */
2735 pause_config.tx_pause = 0;
2736 pause_config.rx_pause = 0;
2737
2738 spin_lock_irqsave(&bnad->bna_lock, flags);
2739 bna_enet_mtu_set(&bnad->bna.enet,
2740 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2741 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2742 bna_enet_enable(&bnad->bna.enet);
2743 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2744
2745 /* Enable broadcast */
2746 bnad_enable_default_bcast(bnad);
2747
2748 /* Restore VLANs, if any */
2749 bnad_restore_vlans(bnad, 0);
2750
2751 /* Set the UCAST address */
2752 spin_lock_irqsave(&bnad->bna_lock, flags);
2753 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2754 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2755
2756 /* Start the stats timer */
2757 bnad_stats_timer_start(bnad);
2758
2759 mutex_unlock(&bnad->conf_mutex);
2760
2761 return 0;
2762
2763 cleanup_tx:
2764 bnad_destroy_tx(bnad, 0);
2765
2766 err_return:
2767 mutex_unlock(&bnad->conf_mutex);
2768 return err;
2769 }
2770
2771 static int
2772 bnad_stop(struct net_device *netdev)
2773 {
2774 struct bnad *bnad = netdev_priv(netdev);
2775 unsigned long flags;
2776
2777 mutex_lock(&bnad->conf_mutex);
2778
2779 /* Stop the stats timer */
2780 bnad_stats_timer_stop(bnad);
2781
2782 init_completion(&bnad->bnad_completions.enet_comp);
2783
2784 spin_lock_irqsave(&bnad->bna_lock, flags);
2785 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2786 bnad_cb_enet_disabled);
2787 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2788
2789 wait_for_completion(&bnad->bnad_completions.enet_comp);
2790
2791 bnad_destroy_tx(bnad, 0);
2792 bnad_destroy_rx(bnad, 0);
2793
2794 /* Synchronize mailbox IRQ */
2795 bnad_mbox_irq_sync(bnad);
2796
2797 mutex_unlock(&bnad->conf_mutex);
2798
2799 return 0;
2800 }
2801
2802 /* TX */
2803 /* Returns 0 for success */
2804 static int
2805 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2806 struct sk_buff *skb, struct bna_txq_entry *txqent)
2807 {
2808 u16 flags = 0;
2809 u32 gso_size;
2810 u16 vlan_tag = 0;
2811
2812 if (skb_vlan_tag_present(skb)) {
2813 vlan_tag = (u16)skb_vlan_tag_get(skb);
2814 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2815 }
2816 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2817 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2818 | (vlan_tag & 0x1fff);
2819 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2820 }
2821 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2822
2823 if (skb_is_gso(skb)) {
2824 gso_size = skb_shinfo(skb)->gso_size;
2825 if (unlikely(gso_size > bnad->netdev->mtu)) {
2826 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2827 return -EINVAL;
2828 }
2829 if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
2830 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2831 txqent->hdr.wi.lso_mss = 0;
2832 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2833 } else {
2834 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2835 txqent->hdr.wi.lso_mss = htons(gso_size);
2836 }
2837
2838 if (bnad_tso_prepare(bnad, skb)) {
2839 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2840 return -EINVAL;
2841 }
2842
2843 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2844 txqent->hdr.wi.l4_hdr_size_n_offset =
2845 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2846 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2847 } else {
2848 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2849 txqent->hdr.wi.lso_mss = 0;
2850
2851 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2852 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2853 return -EINVAL;
2854 }
2855
2856 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2857 __be16 net_proto = vlan_get_protocol(skb);
2858 u8 proto = 0;
2859
2860 if (net_proto == htons(ETH_P_IP))
2861 proto = ip_hdr(skb)->protocol;
2862 #ifdef NETIF_F_IPV6_CSUM
2863 else if (net_proto == htons(ETH_P_IPV6)) {
2864 /* nexthdr may not be TCP immediately. */
2865 proto = ipv6_hdr(skb)->nexthdr;
2866 }
2867 #endif
2868 if (proto == IPPROTO_TCP) {
2869 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2870 txqent->hdr.wi.l4_hdr_size_n_offset =
2871 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2872 (0, skb_transport_offset(skb)));
2873
2874 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2875
2876 if (unlikely(skb_headlen(skb) <
2877 skb_tcp_all_headers(skb))) {
2878 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2879 return -EINVAL;
2880 }
2881 } else if (proto == IPPROTO_UDP) {
2882 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2883 txqent->hdr.wi.l4_hdr_size_n_offset =
2884 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2885 (0, skb_transport_offset(skb)));
2886
2887 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2888 if (unlikely(skb_headlen(skb) <
2889 skb_transport_offset(skb) +
2890 sizeof(struct udphdr))) {
2891 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2892 return -EINVAL;
2893 }
2894 } else {
2895
2896 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2897 return -EINVAL;
2898 }
2899 } else
2900 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2901 }
2902
2903 txqent->hdr.wi.flags = htons(flags);
2904 txqent->hdr.wi.frame_length = htonl(skb->len);
2905
2906 return 0;
2907 }
2908
2909 /*
2910 * bnad_start_xmit : Netdev entry point for Transmit
2911 * Called with the Tx queue lock held by the net_device layer
2912 */
2913 static netdev_tx_t
2914 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2915 {
2916 struct bnad *bnad = netdev_priv(netdev);
2917 u32 txq_id = 0;
2918 struct bna_tcb *tcb = NULL;
2919 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2920 u32 prod, q_depth, vect_id;
2921 u32 wis, vectors, len;
2922 int i;
2923 dma_addr_t dma_addr;
2924 struct bna_txq_entry *txqent;
2925
2926 len = skb_headlen(skb);
2927
2928 /* Sanity checks for the skb */
2929
2930 if (unlikely(skb->len <= ETH_HLEN)) {
2931 dev_kfree_skb_any(skb);
2932 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2933 return NETDEV_TX_OK;
2934 }
2935 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2936 dev_kfree_skb_any(skb);
2937 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2938 return NETDEV_TX_OK;
2939 }
2940 if (unlikely(len == 0)) {
2941 dev_kfree_skb_any(skb);
2942 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2943 return NETDEV_TX_OK;
2944 }
2945
2946 tcb = bnad->tx_info[0].tcb[txq_id];
2947
2948 /*
2949 * Catches a Tx scheduled between clearing BNAD_TXQ_TX_STARTED
2950 * and the netif_tx_stop_all_queues() call.
2951 */
2952 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2953 dev_kfree_skb_any(skb);
2954 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2955 return NETDEV_TX_OK;
2956 }
2957
2958 q_depth = tcb->q_depth;
2959 prod = tcb->producer_index;
2960 unmap_q = tcb->unmap_q;
2961
2962 vectors = 1 + skb_shinfo(skb)->nr_frags;
2963 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2964
2965 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2966 dev_kfree_skb_any(skb);
2967 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2968 return NETDEV_TX_OK;
2969 }
2970
2971 /* Check for available TxQ resources */
2972 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2973 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2974 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2975 u32 sent;
2976 sent = bnad_txcmpl_process(bnad, tcb);
2977 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2978 bna_ib_ack(tcb->i_dbell, sent);
2979 smp_mb__before_atomic();
2980 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2981 } else {
2982 netif_stop_queue(netdev);
2983 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2984 }
2985
2986 smp_mb();
2987 /*
2988 * Check again to deal with race condition between
2989 * netif_stop_queue here, and netif_wake_queue in
2990 * interrupt handler which is not inside netif tx lock.
2991 */
2992 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2993 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2994 return NETDEV_TX_BUSY;
2995 } else {
2996 netif_wake_queue(netdev);
2997 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2998 }
2999 }
3000
3001 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3002 head_unmap = &unmap_q[prod];
3003
3004 /* Program the opcode, flags, frame_len, num_vectors in WI */
3005 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3006 dev_kfree_skb_any(skb);
3007 return NETDEV_TX_OK;
3008 }
3009 txqent->hdr.wi.reserved = 0;
3010 txqent->hdr.wi.num_vectors = vectors;
3011
3012 head_unmap->skb = skb;
3013 head_unmap->nvecs = 0;
3014
3015 /* Program the vectors */
3016 unmap = head_unmap;
3017 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3018 len, DMA_TO_DEVICE);
3019 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3020 dev_kfree_skb_any(skb);
3021 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3022 return NETDEV_TX_OK;
3023 }
3024 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3025 txqent->vector[0].length = htons(len);
3026 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3027 head_unmap->nvecs++;
3028
3029 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3030 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3031 u32 size = skb_frag_size(frag);
3032
3033 if (unlikely(size == 0)) {
3034 /* Undo the changes starting at tcb->producer_index */
3035 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3036 tcb->producer_index);
3037 dev_kfree_skb_any(skb);
3038 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3039 return NETDEV_TX_OK;
3040 }
3041
3042 len += size;
3043
3044 vect_id++;
3045 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3046 vect_id = 0;
3047 BNA_QE_INDX_INC(prod, q_depth);
3048 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3049 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3050 unmap = &unmap_q[prod];
3051 }
3052
3053 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3054 0, size, DMA_TO_DEVICE);
3055 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3056 /* Undo the changes starting at tcb->producer_index */
3057 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3058 tcb->producer_index);
3059 dev_kfree_skb_any(skb);
3060 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3061 return NETDEV_TX_OK;
3062 }
3063
3064 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3065 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3066 txqent->vector[vect_id].length = htons(size);
3067 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3068 dma_addr);
3069 head_unmap->nvecs++;
3070 }
3071
3072 if (unlikely(len != skb->len)) {
3073 /* Undo the changes starting at tcb->producer_index */
3074 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3075 dev_kfree_skb_any(skb);
3076 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3077 return NETDEV_TX_OK;
3078 }
3079
3080 BNA_QE_INDX_INC(prod, q_depth);
3081 tcb->producer_index = prod;
3082
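/* Make sure the work items are written out before the doorbell */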
3083 wmb();
3084
3085 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3086 return NETDEV_TX_OK;
3087
3088 skb_tx_timestamp(skb);
3089
3090 bna_txq_prod_indx_doorbell(tcb);
3091
3092 return NETDEV_TX_OK;
3093 }
3094
3095 /*
3096 * A spin_lock is used to synchronize reading of the stats structures,
3097 * which are written by BNA under the same lock.
3098 */
3099 static void
3100 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3101 {
3102 struct bnad *bnad = netdev_priv(netdev);
3103 unsigned long flags;
3104
3105 spin_lock_irqsave(&bnad->bna_lock, flags);
3106
3107 bnad_netdev_qstats_fill(bnad, stats);
3108 bnad_netdev_hwstats_fill(bnad, stats);
3109
3110 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3111 }
3112
3113 static void
3114 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3115 {
3116 struct net_device *netdev = bnad->netdev;
3117 int uc_count = netdev_uc_count(netdev);
3118 enum bna_cb_status ret;
3119 u8 *mac_list;
3120 struct netdev_hw_addr *ha;
3121 int entry;
3122
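/*
 * Called under bna_lock from bnad_set_rx_mode(), hence the
 * GFP_ATOMIC allocation below.
 */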
3123 if (netdev_uc_empty(bnad->netdev)) {
3124 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3125 return;
3126 }
3127
3128 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3129 goto mode_default;
3130
3131 mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC);
3132 if (mac_list == NULL)
3133 goto mode_default;
3134
3135 entry = 0;
3136 netdev_for_each_uc_addr(ha, netdev) {
3137 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3138 entry++;
3139 }
3140
3141 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3142 kfree(mac_list);
3143
3144 if (ret != BNA_CB_SUCCESS)
3145 goto mode_default;
3146
3147 return;
3148
3149 /* ucast packets not in UCAM are routed to default function */
3150 mode_default:
3151 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3152 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3153 }
3154
3155 static void
3156 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3157 {
3158 struct net_device *netdev = bnad->netdev;
3159 int mc_count = netdev_mc_count(netdev);
3160 enum bna_cb_status ret;
3161 u8 *mac_list;
3162
3163 if (netdev->flags & IFF_ALLMULTI)
3164 goto mode_allmulti;
3165
3166 if (netdev_mc_empty(netdev))
3167 return;
3168
3169 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3170 goto mode_allmulti;
3171
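/* One extra slot for the broadcast address prepended below */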
3172 mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
3173
3174 if (mac_list == NULL)
3175 goto mode_allmulti;
3176
3177 ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3178
3179 /* copy rest of the MCAST addresses */
3180 bnad_netdev_mc_list_get(netdev, mac_list);
3181 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3182 kfree(mac_list);
3183
3184 if (ret != BNA_CB_SUCCESS)
3185 goto mode_allmulti;
3186
3187 return;
3188
3189 mode_allmulti:
3190 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3191 bna_rx_mcast_delall(bnad->rx_info[0].rx);
3192 }
3193
3194 void
3195 bnad_set_rx_mode(struct net_device *netdev)
3196 {
3197 struct bnad *bnad = netdev_priv(netdev);
3198 enum bna_rxmode new_mode, mode_mask;
3199 unsigned long flags;
3200
3201 spin_lock_irqsave(&bnad->bna_lock, flags);
3202
3203 if (bnad->rx_info[0].rx == NULL) {
3204 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3205 return;
3206 }
3207
3208 /* clear bnad flags to update it with new settings */
3209 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3210 BNAD_CF_ALLMULTI);
3211
3212 new_mode = 0;
3213 if (netdev->flags & IFF_PROMISC) {
3214 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3215 bnad->cfg_flags |= BNAD_CF_PROMISC;
3216 } else {
3217 bnad_set_rx_mcast_fltr(bnad);
3218
3219 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3220 new_mode |= BNA_RXMODE_ALLMULTI;
3221
3222 bnad_set_rx_ucast_fltr(bnad);
3223
3224 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3225 new_mode |= BNA_RXMODE_DEFAULT;
3226 }
3227
3228 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3229 BNA_RXMODE_ALLMULTI;
3230 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3231
3232 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3233 }
3234
3235 /*
3236 * bna_lock is used to sync writes to netdev->addr
3237 * conf_lock cannot be used since this call may be made
3238 * in a non-blocking context.
3239 */
3240 static int
3241 bnad_set_mac_address(struct net_device *netdev, void *addr)
3242 {
3243 int err;
3244 struct bnad *bnad = netdev_priv(netdev);
3245 struct sockaddr *sa = (struct sockaddr *)addr;
3246 unsigned long flags;
3247
3248 spin_lock_irqsave(&bnad->bna_lock, flags);
3249
3250 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3251 if (!err)
3252 eth_hw_addr_set(netdev, sa->sa_data);
3253
3254 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3255
3256 return err;
3257 }
3258
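/*
 * Program the new frame size into the enet object and wait for the
 * firmware completion; returns the completion status.
 */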
3259 static int
3260 bnad_mtu_set(struct bnad *bnad, int frame_size)
3261 {
3262 unsigned long flags;
3263
3264 init_completion(&bnad->bnad_completions.mtu_comp);
3265
3266 spin_lock_irqsave(&bnad->bna_lock, flags);
3267 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3268 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3269
3270 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3271
3272 return bnad->bnad_completions.mtu_comp_status;
3273 }
3274
3275 static int
3276 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3277 {
3278 int err, mtu;
3279 struct bnad *bnad = netdev_priv(netdev);
3280 u32 frame, new_frame;
3281
3282 mutex_lock(&bnad->conf_mutex);
3283
3284 mtu = netdev->mtu;
3285 WRITE_ONCE(netdev->mtu, new_mtu);
3286
3287 frame = BNAD_FRAME_SIZE(mtu);
3288 new_frame = BNAD_FRAME_SIZE(new_mtu);
3289
3290 /* check if multi-buffer needs to be enabled */
3291 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3292 netif_running(bnad->netdev)) {
3293 /* only when transition is over 4K */
3294 if ((frame <= 4096 && new_frame > 4096) ||
3295 (frame > 4096 && new_frame <= 4096))
3296 bnad_reinit_rx(bnad);
3297 }
3298
3299 err = bnad_mtu_set(bnad, new_frame);
3300 if (err)
3301 err = -EBUSY;
3302
3303 mutex_unlock(&bnad->conf_mutex);
3304 return err;
3305 }
3306
3307 static int
3308 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3309 {
3310 struct bnad *bnad = netdev_priv(netdev);
3311 unsigned long flags;
3312
3313 if (!bnad->rx_info[0].rx)
3314 return 0;
3315
3316 mutex_lock(&bnad->conf_mutex);
3317
3318 spin_lock_irqsave(&bnad->bna_lock, flags);
3319 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3320 set_bit(vid, bnad->active_vlans);
3321 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3322
3323 mutex_unlock(&bnad->conf_mutex);
3324
3325 return 0;
3326 }
3327
3328 static int
3329 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3330 {
3331 struct bnad *bnad = netdev_priv(netdev);
3332 unsigned long flags;
3333
3334 if (!bnad->rx_info[0].rx)
3335 return 0;
3336
3337 mutex_lock(&bnad->conf_mutex);
3338
3339 spin_lock_irqsave(&bnad->bna_lock, flags);
3340 clear_bit(vid, bnad->active_vlans);
3341 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3342 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3343
3344 mutex_unlock(&bnad->conf_mutex);
3345
3346 return 0;
3347 }
3348
3349 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3350 {
3351 struct bnad *bnad = netdev_priv(dev);
3352 netdev_features_t changed = features ^ dev->features;
3353
3354 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3355 unsigned long flags;
3356
3357 spin_lock_irqsave(&bnad->bna_lock, flags);
3358
3359 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3360 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3361 else
3362 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3363
3364 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3365 }
3366
3367 return 0;
3368 }
3369
3370 #ifdef CONFIG_NET_POLL_CONTROLLER
3371 static void
3372 bnad_netpoll(struct net_device *netdev)
3373 {
3374 struct bnad *bnad = netdev_priv(netdev);
3375 struct bnad_rx_info *rx_info;
3376 struct bnad_rx_ctrl *rx_ctrl;
3377 u32 curr_mask;
3378 int i, j;
3379
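/*
 * bna_intx_disable() is a macro that saves the current INTx mask
 * into curr_mask before masking interrupts, so curr_mask does not
 * need prior initialization.
 */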
3380 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3381 bna_intx_disable(&bnad->bna, curr_mask);
3382 bnad_isr(bnad->pcidev->irq, netdev);
3383 bna_intx_enable(&bnad->bna, curr_mask);
3384 } else {
3385 /*
3386 * Tx processing may happen in sending context, so no need
3387 * to explicitly process completions here
3388 */
3389
3390 /* Rx processing */
3391 for (i = 0; i < bnad->num_rx; i++) {
3392 rx_info = &bnad->rx_info[i];
3393 if (!rx_info->rx)
3394 continue;
3395 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3396 rx_ctrl = &rx_info->rx_ctrl[j];
3397 if (rx_ctrl->ccb)
3398 bnad_netif_rx_schedule_poll(bnad,
3399 rx_ctrl->ccb);
3400 }
3401 }
3402 }
3403 }
3404 #endif
3405
3406 static const struct net_device_ops bnad_netdev_ops = {
3407 .ndo_open = bnad_open,
3408 .ndo_stop = bnad_stop,
3409 .ndo_start_xmit = bnad_start_xmit,
3410 .ndo_get_stats64 = bnad_get_stats64,
3411 .ndo_set_rx_mode = bnad_set_rx_mode,
3412 .ndo_validate_addr = eth_validate_addr,
3413 .ndo_set_mac_address = bnad_set_mac_address,
3414 .ndo_change_mtu = bnad_change_mtu,
3415 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3416 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3417 .ndo_set_features = bnad_set_features,
3418 #ifdef CONFIG_NET_POLL_CONTROLLER
3419 .ndo_poll_controller = bnad_netpoll
3420 #endif
3421 };
3422
3423 static void
3424 bnad_netdev_init(struct bnad *bnad)
3425 {
3426 struct net_device *netdev = bnad->netdev;
3427
3428 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3429 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3430 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3431 NETIF_F_HW_VLAN_CTAG_RX;
3432
3433 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3434 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3435 NETIF_F_TSO | NETIF_F_TSO6;
3436
3437 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER |
3438 NETIF_F_HIGHDMA;
3439
3440 netdev->mem_start = bnad->mmio_start;
3441 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3442
3443 /* MTU range: 46 - 9000 */
3444 netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
3445 netdev->max_mtu = BNAD_JUMBO_MTU;
3446
3447 netdev->netdev_ops = &bnad_netdev_ops;
3448 bnad_set_ethtool_ops(netdev);
3449 }
3450
3451 /*
3452 * 1. Initialize the bnad structure
3453 * 2. Setup netdev pointer in pci_dev
3454 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3455 * 4. Initialize work queue.
3456 */
3457 static int
3458 bnad_init(struct bnad *bnad,
3459 struct pci_dev *pdev, struct net_device *netdev)
3460 {
3461 unsigned long flags;
3462
3463 SET_NETDEV_DEV(netdev, &pdev->dev);
3464 pci_set_drvdata(pdev, netdev);
3465
3466 bnad->netdev = netdev;
3467 bnad->pcidev = pdev;
3468 bnad->mmio_start = pci_resource_start(pdev, 0);
3469 bnad->mmio_len = pci_resource_len(pdev, 0);
3470 bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
3471 if (!bnad->bar0) {
3472 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3473 return -ENOMEM;
3474 }
3475 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3476 (unsigned long long) bnad->mmio_len);
3477
3478 spin_lock_irqsave(&bnad->bna_lock, flags);
3479 if (!bnad_msix_disable)
3480 bnad->cfg_flags = BNAD_CF_MSIX;
3481
3482 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3483
3484 bnad_q_num_init(bnad);
3485 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3486
3487 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3488 (bnad->num_rx * bnad->num_rxp_per_rx) +
3489 BNAD_MAILBOX_MSIX_VECTORS;
3490
3491 bnad->txq_depth = BNAD_TXQ_DEPTH;
3492 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3493
3494 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3495 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3496
3497 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3498 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3499 if (!bnad->work_q) {
3500 iounmap(bnad->bar0);
3501 return -ENOMEM;
3502 }
3503
3504 return 0;
3505 }
3506
3507 /*
3508 * Must be called after bnad_pci_uninit()
3509 * so that iounmap() and pci_set_drvdata(NULL)
3510 * happens only after PCI uninitialization.
3511 */
3512 static void
3513 bnad_uninit(struct bnad *bnad)
3514 {
3515 if (bnad->work_q) {
3516 destroy_workqueue(bnad->work_q);
3517 bnad->work_q = NULL;
3518 }
3519
3520 if (bnad->bar0)
3521 iounmap(bnad->bar0);
3522 }
3523
3524 /*
3525 * Initialize locks
3526 a) Per-ioceth mutex used for serializing configuration
3527 changes from the OS interface
3528 b) spin lock used to protect bna state machine
3529 */
3530 static void
3531 bnad_lock_init(struct bnad *bnad)
3532 {
3533 spin_lock_init(&bnad->bna_lock);
3534 mutex_init(&bnad->conf_mutex);
3535 }
3536
3537 static void
3538 bnad_lock_uninit(struct bnad *bnad)
3539 {
3540 mutex_destroy(&bnad->conf_mutex);
3541 }
3542
3543 /* PCI Initialization */
3544 static int
3545 bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev)
3546 {
3547 int err;
3548
3549 err = pci_enable_device(pdev);
3550 if (err)
3551 return err;
3552 err = pci_request_regions(pdev, BNAD_NAME);
3553 if (err)
3554 goto disable_device;
3555 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3556 if (err)
3557 goto release_regions;
3558 pci_set_master(pdev);
3559 return 0;
3560
3561 release_regions:
3562 pci_release_regions(pdev);
3563 disable_device:
3564 pci_disable_device(pdev);
3565
3566 return err;
3567 }
3568
3569 static void
3570 bnad_pci_uninit(struct pci_dev *pdev)
3571 {
3572 pci_release_regions(pdev);
3573 pci_disable_device(pdev);
3574 }
3575
3576 static int
3577 bnad_pci_probe(struct pci_dev *pdev,
3578 const struct pci_device_id *pcidev_id)
3579 {
3580 int err;
3581 struct bnad *bnad;
3582 struct bna *bna;
3583 struct net_device *netdev;
3584 struct bfa_pcidev pcidev_info;
3585 unsigned long flags;
3586
3587 mutex_lock(&bnad_fwimg_mutex);
3588 if (!cna_get_firmware_buf(pdev)) {
3589 mutex_unlock(&bnad_fwimg_mutex);
3590 dev_err(&pdev->dev, "failed to load firmware image!\n");
3591 return -ENODEV;
3592 }
3593 mutex_unlock(&bnad_fwimg_mutex);
3594
3595 /*
3596 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3597 * bnad = netdev_priv(netdev)
3598 */
3599 netdev = alloc_etherdev(sizeof(struct bnad));
3600 if (!netdev) {
3601 err = -ENOMEM;
3602 return err;
3603 }
3604 bnad = netdev_priv(netdev);
3605 bnad_lock_init(bnad);
3606 bnad->id = atomic_inc_return(&bna_id) - 1;
3607
3608 mutex_lock(&bnad->conf_mutex);
3609 /* PCI initialization */
3610 err = bnad_pci_init(bnad, pdev);
3611 if (err)
3612 goto unlock_mutex;
3613
3614 /*
3615 * Initialize bnad structure
3616 * Setup relation between pci_dev & netdev
3617 */
3618 err = bnad_init(bnad, pdev, netdev);
3619 if (err)
3620 goto pci_uninit;
3621
3622 /* Initialize netdev structure, set up ethtool ops */
3623 bnad_netdev_init(bnad);
3624
3625 /* Set link to down state */
3626 netif_carrier_off(netdev);
3627
3628 /* Set up the debugfs node for this bnad */
3629 if (bna_debugfs_enable)
3630 bnad_debugfs_init(bnad);
3631
3632 /* Get resource requirement from bna */
3633 spin_lock_irqsave(&bnad->bna_lock, flags);
3634 bna_res_req(&bnad->res_info[0]);
3635 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3636
3637 /* Allocate resources from bna */
3638 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3639 if (err)
3640 goto drv_uninit;
3641
3642 bna = &bnad->bna;
3643
3644 /* Setup pcidev_info for bna_init() */
3645 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3646 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3647 pcidev_info.device_id = bnad->pcidev->device;
3648 pcidev_info.pci_bar_kva = bnad->bar0;
3649
3650 spin_lock_irqsave(&bnad->bna_lock, flags);
3651 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3652 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3653
3654 bnad->stats.bna_stats = &bna->stats;
3655
3656 bnad_enable_msix(bnad);
3657 err = bnad_mbox_irq_alloc(bnad);
3658 if (err)
3659 goto res_free;
3660
3661 /* Set up timers */
3662 timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3663 timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3664 timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3665 timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3666 0);
3667
3668 /*
3669 * Start the chip
3670 * If the callback comes back with an error, bail out of the
3671 * bring-up; probe still returns 0 so IOC auto-recovery can retry.
3672 */
3673 err = bnad_ioceth_enable(bnad);
3674 if (err) {
3675 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3676 goto probe_success;
3677 }
3678
3679 spin_lock_irqsave(&bnad->bna_lock, flags);
3680 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3681 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3682 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3683 bna_attr(bna)->num_rxp - 1);
3684 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3685 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3686 err = -EIO;
3687 }
3688 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3689 if (err)
3690 goto disable_ioceth;
3691
3692 spin_lock_irqsave(&bnad->bna_lock, flags);
3693 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3694 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3695
3696 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3697 if (err) {
3698 err = -EIO;
3699 goto disable_ioceth;
3700 }
3701
3702 spin_lock_irqsave(&bnad->bna_lock, flags);
3703 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3704 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3705
3706 /* Get the burnt-in mac */
3707 spin_lock_irqsave(&bnad->bna_lock, flags);
3708 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3709 bnad_set_netdev_perm_addr(bnad);
3710 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3711
3712 mutex_unlock(&bnad->conf_mutex);
3713
3714 /* Finally, register with the net_device layer */
3715 err = register_netdev(netdev);
3716 if (err) {
3717 dev_err(&pdev->dev, "registering net device failed\n");
3718 goto probe_uninit;
3719 }
3720 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3721
3722 return 0;
3723
3724 probe_success:
3725 mutex_unlock(&bnad->conf_mutex);
3726 return 0;
3727
3728 probe_uninit:
3729 mutex_lock(&bnad->conf_mutex);
3730 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3731 disable_ioceth:
3732 bnad_ioceth_disable(bnad);
3733 timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3734 timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer);
3735 timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer);
3736 spin_lock_irqsave(&bnad->bna_lock, flags);
3737 bna_uninit(bna);
3738 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3739 bnad_mbox_irq_free(bnad);
3740 bnad_disable_msix(bnad);
3741 res_free:
3742 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3743 drv_uninit:
3744 /* Remove the debugfs node for this bnad */
3745 kfree(bnad->regdata);
3746 bnad_debugfs_uninit(bnad);
3747 bnad_uninit(bnad);
3748 pci_uninit:
3749 bnad_pci_uninit(pdev);
3750 unlock_mutex:
3751 mutex_unlock(&bnad->conf_mutex);
3752 bnad_lock_uninit(bnad);
3753 free_netdev(netdev);
3754 return err;
3755 }
3756
3757 static void
3758 bnad_pci_remove(struct pci_dev *pdev)
3759 {
3760 struct net_device *netdev = pci_get_drvdata(pdev);
3761 struct bnad *bnad;
3762 struct bna *bna;
3763 unsigned long flags;
3764
3765 if (!netdev)
3766 return;
3767
3768 bnad = netdev_priv(netdev);
3769 bna = &bnad->bna;
3770
3771 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3772 unregister_netdev(netdev);
3773
3774 mutex_lock(&bnad->conf_mutex);
3775 bnad_ioceth_disable(bnad);
3776 timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3777 timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer);
3778 timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer);
3779 spin_lock_irqsave(&bnad->bna_lock, flags);
3780 bna_uninit(bna);
3781 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3782
3783 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3784 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3785 bnad_mbox_irq_free(bnad);
3786 bnad_disable_msix(bnad);
3787 bnad_pci_uninit(pdev);
3788 mutex_unlock(&bnad->conf_mutex);
3789 bnad_lock_uninit(bnad);
3790 /* Remove the debugfs node for this bnad */
3791 kfree(bnad->regdata);
3792 bnad_debugfs_uninit(bnad);
3793 bnad_uninit(bnad);
3794 free_netdev(netdev);
3795 }
3796
3797 static const struct pci_device_id bnad_pci_id_table[] = {
3798 {
3799 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3800 PCI_DEVICE_ID_BROCADE_CT),
3801 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3802 .class_mask = 0xffff00
3803 },
3804 {
3805 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3806 BFA_PCI_DEVICE_ID_CT2),
3807 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3808 .class_mask = 0xffff00
3809 },
3810 {0, },
3811 };
3812
3813 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3814
3815 static struct pci_driver bnad_pci_driver = {
3816 .name = BNAD_NAME,
3817 .id_table = bnad_pci_id_table,
3818 .probe = bnad_pci_probe,
3819 .remove = bnad_pci_remove,
3820 };
3821
3822 static int __init
3823 bnad_module_init(void)
3824 {
3825 int err;
3826
3827 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3828
3829 err = pci_register_driver(&bnad_pci_driver);
3830 if (err < 0) {
3831 pr_err("bna: PCI driver registration failed err=%d\n", err);
3832 return err;
3833 }
3834
3835 return 0;
3836 }
3837
3838 static void __exit
3839 bnad_module_exit(void)
3840 {
3841 pci_unregister_driver(&bnad_pci_driver);
3842 release_firmware(bfi_fw);
3843 }
3844
3845 module_init(bnad_module_init);
3846 module_exit(bnad_module_exit);
3847
3848 MODULE_AUTHOR("Brocade");
3849 MODULE_LICENSE("GPL");
3850 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3851 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3852 MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3853