/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
 */

#include "bnxe.h"


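/*
 * DMA attributes for rx buffer allocation.  Buffers may land anywhere in
 * the 64-bit address space but must honor BNXE_DMA_ALIGNMENT, and each
 * buffer must be physically contiguous (dma_attr_sgllen of 1).
 */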
ddi_dma_attr_t bnxeRxDmaAttrib =
{
    DMA_ATTR_V0,         /* dma_attr_version */
    0,                   /* dma_attr_addr_lo */
    0xffffffffffffffff,  /* dma_attr_addr_hi */
    0xffffffffffffffff,  /* dma_attr_count_max */
    BNXE_DMA_ALIGNMENT,  /* dma_attr_align */
    0xffffffff,          /* dma_attr_burstsizes */
    1,                   /* dma_attr_minxfer */
    0xffffffffffffffff,  /* dma_attr_maxxfer */
    0xffffffffffffffff,  /* dma_attr_seg */
    1,                   /* dma_attr_sgllen */
    1,                   /* dma_attr_granular */
    0,                   /* dma_attr_flags */
};


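/*
 * Return a list of reclaimed packet descriptors to the LM's free queue,
 * credit the LM with the number of bytes being returned, and then attempt
 * to post fresh rx buffers to the chain.  Called whenever a batch of
 * packets has been copied or released back to the driver.
 */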
static void BnxeRxPostBuffers(um_device_t * pUM,
                              int           idx,
                              s_list_t *    pReclaimList)
{
    lm_rx_chain_t * pLmRxChain = &LM_RXQ(&pUM->lm_dev, idx);
    u32_t           returnedBytes = 0;
    lm_packet_t *   pLmPkt;

    /* return bytes from reclaimed list to LM */
    pLmPkt = (lm_packet_t *)s_list_peek_head(pReclaimList);
    while (pLmPkt)
    {
        returnedBytes += pLmPkt->size;
        pLmPkt = (lm_packet_t *)s_list_next_entry(&pLmPkt->link);
    }

    BNXE_LOCK_ENTER_RX(pUM, idx);

    if (pUM->rxq[idx].rxLowWater > s_list_entry_cnt(&pLmRxChain->active_descq))
    {
        pUM->rxq[idx].rxLowWater = s_list_entry_cnt(&pLmRxChain->active_descq);
    }

    lm_return_packet_bytes(&pUM->lm_dev, idx, returnedBytes);

    s_list_add_tail(&pLmRxChain->common.free_descq, pReclaimList);
    s_list_clear(pReclaimList);

#if 0
    /*
     * Don't post buffers when a lot are already posted, i.e. when only a
     * few BDs remain available in the chain.
     */
    if (lm_bd_chain_avail_bds(&pLmRxChain->bd_chain) < 32)
    {
        BNXE_LOCK_EXIT_RX(pUM, idx);
        return;
    }

    /*
     * Don't post buffers if there aren't really that many to post yet.
     */
    if (s_list_entry_cnt(&pLmRxChain->common.free_descq) < 32)
    {
        BNXE_LOCK_EXIT_RX(pUM, idx);
        return;
    }
#endif

    lm_post_buffers(&pUM->lm_dev, idx, NULL, 0);

    BNXE_LOCK_EXIT_RX(pUM, idx);
}


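/*
 * Size of a driver rx packet descriptor: the um_rxpacket_t plus the
 * signature word stashed immediately in front of it, rounded up to a
 * word boundary.
 */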
static u32_t BnxeRxPktDescrSize(um_device_t * pUM)
{
    u32_t descSize;

    (void)pUM;

    descSize = sizeof(um_rxpacket_t) + SIZEOF_SIG;

    return ALIGN_VALUE_TO_WORD_BOUNDARY(descSize);
}


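/*
 * Free a packet descriptor allocated by BnxeRxPktsInitIdx.  The signature
 * check catches descriptors that have been corrupted or double freed.
 */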
static void BnxeRxPktDescrFree(um_device_t *   pUM,
                               um_rxpacket_t * pRxPkt)
{
    u32_t descSize;
    caddr_t pMem;

    BnxeDbgBreakIfFastPath(pUM, SIG(pRxPkt) != L2PACKET_RX_SIG);

    descSize = BnxeRxPktDescrSize(pUM);
    pMem = (caddr_t)pRxPkt - SIZEOF_SIG;

    kmem_free(pMem, descSize);
}


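/*
 * Free routine passed to desballoc() when loaning an rx buffer up the
 * stack; invoked by the stack when it is done with the mblk.  If the
 * buffer belongs to the current plumb (signatures match) the descriptor
 * is queued for reposting; otherwise its DMA resources are torn down and
 * the descriptor is freed.
 */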
static void BnxeRxPktFree(char * free_arg)
{
    um_rxpacket_t * pRxPkt = (um_rxpacket_t *)free_arg;
    um_device_t *   pUM    = (um_device_t *)pRxPkt->pUM;
    int             idx    = pRxPkt->idx;
    s_list_t        doneRxQ;

    if (pUM->magic != BNXE_MAGIC)
    {
        /*
         * Oh my!  The free_arg data got corrupted.  Log a message and leak this
         * packet.  We don't decrement the 'up in the stack count' since we
         * can't be sure this packet really was a packet we previously sent up.
         */
        BnxeLogWarn(NULL, "ERROR freeing packet - UM is invalid! (%p)", pRxPkt);
        return;
    }

    if (pUM->rxBufSignature[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)] !=
        pRxPkt->signature)
    {
        /*
         * The stack is freeing a packet that was from a previous plumb of
         * the interface.
         */
        pRxPkt->lm_pkt.u1.rx.mem_phys[0].as_u64 = 0;
        pRxPkt->rx_info.mem_virt = NULL;
        pRxPkt->rx_info.mem_size = 0;

        ddi_dma_unbind_handle(pRxPkt->dmaHandle);
        ddi_dma_mem_free(&pRxPkt->dmaAccHandle);
        ddi_dma_free_handle(&pRxPkt->dmaHandle);

        BnxeRxPktDescrFree(pUM, pRxPkt);
    }
    else
    {
        s_list_clear(&doneRxQ);

        BNXE_LOCK_ENTER_DONERX(pUM, idx);

        s_list_push_tail(&pUM->rxq[idx].doneRxQ,
                         &((lm_packet_t *)pRxPkt)->link);

        /* post packets when a bunch are ready */
        if (s_list_entry_cnt(&pUM->rxq[idx].doneRxQ) >= pUM->devParams.maxRxFree)
        {
            doneRxQ = pUM->rxq[idx].doneRxQ;
            s_list_clear(&pUM->rxq[idx].doneRxQ);
        }

        BNXE_LOCK_EXIT_DONERX(pUM, idx);

        if (s_list_entry_cnt(&doneRxQ))
        {
            BnxeRxPostBuffers(pUM, idx, &doneRxQ);
        }
    }

    atomic_dec_32(&pUM->rxq[idx].rxBufUpInStack);
}


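/*
 * Wait (up to five seconds per chain) for the client to release any rx
 * buffers it is still holding.  Returns B_FALSE if buffers remain loaned
 * out after the timeout, B_TRUE otherwise.
 */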
boolean_t BnxeWaitForPacketsFromClient(um_device_t * pUM,
                                       int           cliIdx)
{
    int i, idx, cnt=0, tot=0;

    switch (cliIdx)
    {
    case LM_CLI_IDX_FCOE:

        for (i = 0; i < 5; i++)
        {
            if ((cnt = pUM->rxq[FCOE_CID(&pUM->lm_dev)].rxBufUpInStack) == 0)
            {
                break;
            }

            /* twiddle our thumbs for one second */
            delay(drv_usectohz(1000000));
        }

        if (cnt)
        {
            BnxeLogWarn(pUM, "%d packets still held by FCoE (chain %d)!",
                        cnt, FCOE_CID(&pUM->lm_dev));
            return B_FALSE;
        }

        break;

    case LM_CLI_IDX_NDIS:

        tot = 0;

        LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
        {
            for (i = 0; i < 5; i++)
            {
                if ((cnt = pUM->rxq[idx].rxBufUpInStack) == 0)
                {
                    break;
                }

                /* twiddle our thumbs for one second */
                delay(drv_usectohz(1000000));
            }

            tot += cnt;
        }

        if (tot)
        {
            /* tot is summed across all RSS chains, so don't report a
             * single (stale) chain index here. */
            BnxeLogWarn(pUM, "%d packets still held by the stack!", tot);
            return B_FALSE;
        }

        break;

    default:

        BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeWaitForPacketsFromClient (%d)", cliIdx);
        break;
    }

    return B_TRUE;
}


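/*
 * Pull received packets from the LM chain, loan or copy them into mblks,
 * set any checksum offload results, and pass the chain up the stack (or
 * return it to the caller when polling).  Packets that cannot be sent up
 * are placed on a reclaim list and reposted to the hardware.
 */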
/* numBytes is only valid when polling is TRUE */
mblk_t * BnxeRxRingProcess(um_device_t * pUM,
                           int           idx,
                           boolean_t     polling,
                           int           numBytes)
{
    RxQueue *       pRxQ;
    lm_rx_chain_t * pLmRxChain;
    u32_t           activeDescqCount;
    boolean_t       forceCopy;
    um_rxpacket_t * pRxPkt;
    lm_packet_t *   pLmPkt;
    u32_t           pktLen;
    boolean_t       dataCopied;
    u32_t           notCopiedCount;
    mblk_t *        pMblk;
    int             ofldFlags;
    mblk_t *        head = NULL;
    mblk_t *        tail = NULL;
    s_list_t        rxList;
    s_list_t        reclaimList;
    int             procBytes = 0;
    s_list_t        tmpList;
    sp_cqes_info    sp_cqes;
    u32_t           pktsRxed;

    pRxQ = &pUM->rxq[idx];

    s_list_clear(&tmpList);

    /* get the list of packets received */
    BNXE_LOCK_ENTER_RX(pUM, idx);

    pktsRxed = lm_get_packets_rcvd(&pUM->lm_dev, idx, &tmpList, &sp_cqes);

    /* grab any waiting packets */
    rxList = pRxQ->waitRxQ;
    s_list_clear(&pRxQ->waitRxQ);

    /* put any new packets at the end of the queue */
    s_list_add_tail(&rxList, &tmpList);

    BNXE_LOCK_EXIT_RX(pUM, idx);

    /* now complete the ramrods */
    lm_complete_ramrods(&pUM->lm_dev, &sp_cqes);

    if (s_list_entry_cnt(&rxList) == 0)
    {
        return NULL;
    }

    s_list_clear(&reclaimList);
    notCopiedCount = 0;

    pLmRxChain = &LM_RXQ(&pUM->lm_dev, idx);

    activeDescqCount = s_list_entry_cnt(&pLmRxChain->active_descq);

    forceCopy = (activeDescqCount <
                 (pUM->lm_dev.params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)] >> 3));

    /* send the packets up the stack */
    while (1)
    {
        pRxPkt = (um_rxpacket_t *)s_list_pop_head(&rxList);
        if (pRxPkt == NULL)
        {
            break;
        }

        pLmPkt = &(pRxPkt->lm_pkt);

        if (pLmPkt->status != LM_STATUS_SUCCESS)
        {
            /* XXX increment error stat? */
            s_list_push_tail(&reclaimList, &pLmPkt->link);
            continue;
        }

        pktLen = pLmPkt->size;

        if (polling == TRUE)
        {
            /* When polling an rx ring we can only process up to numBytes */
            if ((procBytes + pktLen) <= numBytes)
            {
                /* continue to process this packet */
                procBytes += pktLen;
            }
            else
            {
                /* put this packet not processed back on the list (front) */
                s_list_push_head(&rxList, &pRxPkt->lm_pkt.link);
                break;
            }
        }

        (void)ddi_dma_sync(pRxPkt->dmaHandle,
                           0,
                           pktLen,
                           DDI_DMA_SYNC_FORKERNEL);

        if (pUM->fmCapabilities &&
            BnxeCheckDmaHandle(pRxPkt->dmaHandle) != DDI_FM_OK)
        {
            ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
        }

        dataCopied = B_FALSE;

        if (forceCopy ||
            (pUM->devParams.rxCopyThreshold &&
             (pktLen < pUM->devParams.rxCopyThreshold)))
        {
            if ((pMblk = allocb(pktLen, BPRI_MED)) == NULL)
            {
                pRxQ->rxDiscards++;
                s_list_push_tail(&reclaimList, &pLmPkt->link);
                continue;
            }

            /* copy the packet into the new mblk */
            bcopy((pRxPkt->rx_info.mem_virt + BNXE_DMA_RX_OFFSET),
                  pMblk->b_rptr, pktLen);
            pMblk->b_wptr = (pMblk->b_rptr + pktLen);
            dataCopied = B_TRUE;

            pRxQ->rxCopied++;

            goto BnxeRxRingProcess_sendup;
        }

        if ((activeDescqCount == 0) && (s_list_entry_cnt(&rxList) == 0))
        {
            /*
             * If the hardware is out of receive buffers and we are on the last
             * receive packet then drop the packet.  We do this because we might
             * not be able to allocate any new receive buffers before the ISR
             * completes.  If this happens, the driver will enter an infinite
             * interrupt loop where the hardware is requesting rx buffers the
             * driver cannot allocate.  To prevent a system livelock we leave
             * one buffer perpetually available.  Note that we do this after
             * giving the double copy code a chance to claim the packet.
             */

            /* FIXME
             * Make sure to add one more to the rx packet descriptor count
             * before allocating them.
             */

            pRxQ->rxDiscards++;
            s_list_push_tail(&reclaimList, &pLmPkt->link);
            continue;
        }

        /*
         * If we got here then the packet wasn't copied so we need to create a
         * new mblk_t which references the lm_packet_t buffer.
         */

        pRxPkt->freeRtn.free_func = BnxeRxPktFree;
        pRxPkt->freeRtn.free_arg  = (char *)pRxPkt;
        pRxPkt->pUM               = (void *)pUM;
        pRxPkt->idx               = idx;

        if ((pMblk = desballoc((pRxPkt->rx_info.mem_virt + BNXE_DMA_RX_OFFSET),
                               pktLen,
                               BPRI_MED,
                               &pRxPkt->freeRtn)) == NULL)
        {
            pRxQ->rxDiscards++;
            s_list_push_tail(&reclaimList, &pLmPkt->link);
            continue;
        }

        pMblk->b_wptr = (pMblk->b_rptr + pktLen);

BnxeRxRingProcess_sendup:

        /*
         * Check if the checksum was offloaded so we can pass the result to
         * the stack.
         */
        ofldFlags = 0;

        if ((pUM->devParams.enabled_oflds & LM_OFFLOAD_RX_IP_CKSUM) &&
            (pRxPkt->rx_info.flags & LM_RX_FLAG_IP_CKSUM_IS_GOOD))
        {
            ofldFlags |= HCK_IPV4_HDRCKSUM_OK;
        }

        if (((pUM->devParams.enabled_oflds & LM_OFFLOAD_RX_TCP_CKSUM) &&
             (pRxPkt->rx_info.flags & LM_RX_FLAG_TCP_CKSUM_IS_GOOD)) ||
            ((pUM->devParams.enabled_oflds & LM_OFFLOAD_RX_UDP_CKSUM) &&
             (pRxPkt->rx_info.flags & LM_RX_FLAG_UDP_CKSUM_IS_GOOD)))
        {
            ofldFlags |= HCK_FULLCKSUM_OK;
        }

        if (ofldFlags != 0)
        {
            mac_hcksum_set(pMblk, 0, 0, 0, 0, ofldFlags);
        }

        /*
         * If the packet data was copied into a new receive buffer then put this
         * descriptor in a list to be reclaimed later.  If not, then increment a
         * counter so we can track how many of our descriptors are held by the
         * stack.
         */
        if (dataCopied == B_TRUE)
        {
            s_list_push_tail(&reclaimList, &pLmPkt->link);
        }
        else
        {
            notCopiedCount++;
        }

        if (head == NULL)
        {
            head = pMblk;
        }
        else
        {
            tail->b_next = pMblk;
        }

        tail         = pMblk;
        tail->b_next = NULL;

#if 0
        BnxeDumpPkt(pUM,
                    (BNXE_FCOE(pUM) && (idx == FCOE_CID(&pUM->lm_dev))) ?
                        "<- FCoE L2 RX <-" : "<- L2 RX <-",
                    pMblk, B_TRUE);
#endif
    }

    if (head)
    {
        if (notCopiedCount)
        {
            /* track all non-copied packets that will be held by the stack */
            atomic_add_32(&pUM->rxq[idx].rxBufUpInStack, notCopiedCount);
        }

        /* pass the mblk chain up the stack */
        if (polling == FALSE)
        {

/* XXX NEED TO ADD STATS FOR RX PATH UPCALLS */

            if (BNXE_FCOE(pUM) && (idx == FCOE_CID(&pUM->lm_dev)))
            {
                /* XXX verify fcoe frees all packets on success or error */
                if (pUM->fcoe.pDev && pUM->fcoe.bind.cliIndicateRx)
                {
                    pUM->fcoe.bind.cliIndicateRx(pUM->fcoe.pDev, head);
                }
                else
                {
                    /* FCoE isn't bound?  Reclaim the chain... */
                    freemsgchain(head);
                    head = NULL;
                }
            }
            else
            {
#if defined(BNXE_RINGS) && (defined(__S11) || defined(__S12))
                mac_rx_ring(pUM->pMac,
                            pUM->rxq[idx].ringHandle,
                            head,
                            pUM->rxq[idx].genNumber);
#else
                mac_rx(pUM->pMac,
                       pUM->macRxResourceHandles[idx],
                       head);
#endif
            }
        }
    }

    if ((polling == TRUE) && s_list_entry_cnt(&rxList))
    {
        /* put the packets not processed back on the list (front) */
        BNXE_LOCK_ENTER_RX(pUM, idx);
        s_list_add_head(&pRxQ->waitRxQ, &rxList);
        BNXE_LOCK_EXIT_RX(pUM, idx);
    }

    if (s_list_entry_cnt(&reclaimList))
    {
        BnxeRxPostBuffers(pUM, idx, &reclaimList);
    }

    return (polling == TRUE) ? head : NULL;
}


/*
 * Dumping packets simply moves all packets from the waiting queue to the free
 * queue.  Note that the packets are not posted back to the LM.
 */
static void BnxeRxRingDump(um_device_t * pUM,
                           int           idx)
{
    s_list_t tmpList;

    BNXE_LOCK_ENTER_RX(pUM, idx);

    tmpList = pUM->rxq[idx].waitRxQ;
    s_list_clear(&pUM->rxq[idx].waitRxQ);

    s_list_add_tail(&LM_RXQ(&pUM->lm_dev, idx).common.free_descq, &tmpList);

    BNXE_LOCK_EXIT_RX(pUM, idx);
}


/*
 * Aborting packets stops all rx processing by dumping the currently waiting
 * packets and aborting all the rx descriptors currently posted in the LM.
 */
static void BnxeRxPktsAbortIdx(um_device_t * pUM,
                               int           idx)
{
    BnxeRxRingDump(pUM, idx);

    BNXE_LOCK_ENTER_RX(pUM, idx);
    lm_abort(&pUM->lm_dev, ABORT_OP_RX_CHAIN, idx);
    BNXE_LOCK_EXIT_RX(pUM, idx);
}


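/*
 * Abort rx processing for all chains owned by the given client: the
 * single FCoE chain, or every RSS chain for the NDIS client.
 */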
void BnxeRxPktsAbort(um_device_t * pUM,
                     int           cliIdx)
{
    int idx;

    switch (cliIdx)
    {
    case LM_CLI_IDX_FCOE:

        BnxeRxPktsAbortIdx(pUM, FCOE_CID(&pUM->lm_dev));
        break;

    case LM_CLI_IDX_NDIS:

        LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
        {
            BnxeRxPktsAbortIdx(pUM, idx);
        }

        break;

    default:

        BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsAbort (%d)", cliIdx);
        break;
    }
}


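/*
 * Allocate the DMA resources for a single rx buffer: a DMA handle, the
 * buffer memory itself, and a binding for device-to-memory (DDI_DMA_READ)
 * transfers.  The resulting physical address is recorded in the
 * lm_packet_t.  Returns 0 on success and -1 on failure, with any partial
 * allocations unwound.
 */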
static int BnxeRxBufAlloc(um_device_t *   pUM,
                          int             idx,
                          um_rxpacket_t * pRxPkt)
{
    ddi_dma_cookie_t cookie;
    u32_t            count;
    size_t           length;
    int rc;

    if ((rc = ddi_dma_alloc_handle(pUM->pDev,
                                   &bnxeRxDmaAttrib,
                                   DDI_DMA_DONTWAIT,
                                   NULL,
                                   &pRxPkt->dmaHandle)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to alloc DMA handle for rx buffer");
        return -1;
    }

    pRxPkt->rx_info.mem_size = MAX_L2_CLI_BUFFER_SIZE(&pUM->lm_dev, idx);

    if ((rc = ddi_dma_mem_alloc(pRxPkt->dmaHandle,
                                pRxPkt->rx_info.mem_size,
                                &bnxeAccessAttribBUF,
                                DDI_DMA_STREAMING,
                                DDI_DMA_DONTWAIT,
                                NULL,
                                (caddr_t *)&pRxPkt->rx_info.mem_virt,
                                &length,
                                &pRxPkt->dmaAccHandle)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to alloc DMA memory for rx buffer");
        ddi_dma_free_handle(&pRxPkt->dmaHandle);
        return -1;
    }

    if ((rc = ddi_dma_addr_bind_handle(pRxPkt->dmaHandle,
                                       NULL,
                                       (caddr_t)pRxPkt->rx_info.mem_virt,
                                       pRxPkt->rx_info.mem_size,
                                       DDI_DMA_READ | DDI_DMA_STREAMING,
                                       DDI_DMA_DONTWAIT,
                                       NULL,
                                       &cookie,
                                       &count)) != DDI_DMA_MAPPED)
    {
        BnxeLogWarn(pUM, "Failed to bind DMA address for rx buffer");
        ddi_dma_mem_free(&pRxPkt->dmaAccHandle);
        ddi_dma_free_handle(&pRxPkt->dmaHandle);
        return -1;
    }

    pRxPkt->lm_pkt.u1.rx.mem_phys[0].as_u64 = cookie.dmac_laddress;

    return 0;
}


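/*
 * Post all of the rx buffers currently sitting on a chain's free queue
 * to the hardware.
 */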
static int BnxeRxPktsInitPostBuffersIdx(um_device_t * pUM,
                                        int           idx)
{
    BNXE_LOCK_ENTER_RX(pUM, idx);
    lm_post_buffers(&pUM->lm_dev, idx, NULL, 0);
    BNXE_LOCK_EXIT_RX(pUM, idx);

    return 0;
}


int BnxeRxPktsInitPostBuffers(um_device_t * pUM,
                              int           cliIdx)
{
    int idx;

    switch (cliIdx)
    {
    case LM_CLI_IDX_FCOE:

        BnxeRxPktsInitPostBuffersIdx(pUM, FCOE_CID(&pUM->lm_dev));
        break;

    case LM_CLI_IDX_NDIS:

        LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
        {
            BnxeRxPktsInitPostBuffersIdx(pUM, idx);
        }

        break;

    default:

        BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsInitPostBuffers (%d)", cliIdx);
        break;
    }

    return 0;
}


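/*
 * Allocate the rx packet descriptors and DMA buffers for a chain and
 * place them on the LM's free queue, ready to be posted.  Allocation
 * failures are logged but not fatal; the chain simply starts out with
 * fewer buffers.
 */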
static int BnxeRxPktsInitIdx(um_device_t * pUM,
                             int           idx)
{
    lm_device_t *   pLM = &pUM->lm_dev;
    lm_rx_chain_t * pLmRxChain;
    um_rxpacket_t * pRxPkt;
    lm_packet_t *   pLmPkt;
    u8_t *          pTmp;
    int postCnt, i;

    BNXE_LOCK_ENTER_RX(pUM, idx);

    pLmRxChain = &LM_RXQ(pLM, idx);

    s_list_clear(&pUM->rxq[idx].doneRxQ);
    pUM->rxq[idx].rxLowWater = pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)];
    pUM->rxq[idx].rxDiscards = 0;
    pUM->rxq[idx].rxCopied   = 0;

    s_list_clear(&pUM->rxq[idx].waitRxQ);

    /* allocate the packet descriptors */
    for (i = 0;
         i < pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)];
         i++)
    {
        if ((pTmp = kmem_zalloc(BnxeRxPktDescrSize(pUM),
                                KM_NOSLEEP)) == NULL)
        {
            BnxeLogWarn(pUM, "Failed to alloc an rx packet descriptor!!!");
            break; /* continue without error */
        }

        pRxPkt            = (um_rxpacket_t *)(pTmp + SIZEOF_SIG);
        SIG(pRxPkt)       = L2PACKET_RX_SIG;
        pRxPkt->signature = pUM->rxBufSignature[LM_CHAIN_IDX_CLI(pLM, idx)];

        pLmPkt                     = (lm_packet_t *)pRxPkt;
        pLmPkt->u1.rx.hash_val_ptr = &pRxPkt->hash_value;
        pLmPkt->l2pkt_rx_info      = &pRxPkt->rx_info;

        if (BnxeRxBufAlloc(pUM, idx, pRxPkt) != 0)
        {
            BnxeRxPktDescrFree(pUM, pRxPkt);
            break; /* continue without error */
        }

        s_list_push_tail(&pLmRxChain->common.free_descq, &pLmPkt->link);
    }

    postCnt = s_list_entry_cnt(&pLmRxChain->common.free_descq);

    if (postCnt != pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)])
    {
        BnxeLogWarn(pUM, "%d rx buffers requested and only %d allocated!!!",
                    pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)],
                    postCnt);
    }

    BNXE_LOCK_EXIT_RX(pUM, idx);

    return 0;
}


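/*
 * Initialize the rx descriptors for all chains owned by the given client.
 * A new buffer signature is generated for each plumb so that buffers
 * loaned to the stack by a previous plumb can be recognized and freed in
 * BnxeRxPktFree when they finally come back.
 */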
int BnxeRxPktsInit(um_device_t * pUM,
                   int           cliIdx)
{
    int idx;

    /* set the rx buffer signature for this plumb */
    atomic_swap_32(&pUM->rxBufSignature[cliIdx], (u32_t)ddi_get_time());

    switch (cliIdx)
    {
    case LM_CLI_IDX_FCOE:

        BnxeRxPktsInitIdx(pUM, FCOE_CID(&pUM->lm_dev));
        break;

    case LM_CLI_IDX_NDIS:

        LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
        {
            BnxeRxPktsInitIdx(pUM, idx);
        }

        break;

    default:

        BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsInit (%d)", cliIdx);
        break;
    }

    return 0;
}


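/*
 * Tear down the rx descriptors for a chain: collect everything from the
 * free and done queues, warn if any descriptors are still outstanding
 * (i.e. held by the stack), and free the DMA resources and descriptor
 * memory for each packet collected.
 */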
static void BnxeRxPktsFiniIdx(um_device_t * pUM,
                              int           idx)
{
    lm_rx_chain_t * pLmRxChain;
    um_rxpacket_t * pRxPkt;
    s_list_t        tmpList;

    pLmRxChain = &LM_RXQ(&pUM->lm_dev, idx);

    s_list_clear(&tmpList);

    BNXE_LOCK_ENTER_RX(pUM, idx);
    s_list_add_tail(&tmpList, &pLmRxChain->common.free_descq);
    s_list_clear(&pLmRxChain->common.free_descq);
    BNXE_LOCK_EXIT_RX(pUM, idx);

    BNXE_LOCK_ENTER_DONERX(pUM, idx);
    s_list_add_tail(&tmpList, &pUM->rxq[idx].doneRxQ);
    s_list_clear(&pUM->rxq[idx].doneRxQ);
    BNXE_LOCK_EXIT_DONERX(pUM, idx);

    if (s_list_entry_cnt(&tmpList) !=
        pUM->lm_dev.params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)])
    {
        BnxeLogWarn(pUM, "WARNING: Missing RX packets (idx:%d) (%lu / %d - %u in stack)",
                    idx, s_list_entry_cnt(&tmpList),
                    pUM->lm_dev.params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)],
                    pUM->rxq[idx].rxBufUpInStack);
    }

    /*
     * Back out all the packets in the "available for hardware use" queue.
     * Free the buffers associated with the descriptors as we go.
     */
    while (1)
    {
        pRxPkt = (um_rxpacket_t *)s_list_pop_head(&tmpList);
        if (pRxPkt == NULL)
        {
            break;
        }

        pRxPkt->lm_pkt.u1.rx.mem_phys[0].as_u64 = 0;
        pRxPkt->rx_info.mem_virt = NULL;
        pRxPkt->rx_info.mem_size = 0;

        ddi_dma_unbind_handle(pRxPkt->dmaHandle);
        ddi_dma_mem_free(&pRxPkt->dmaAccHandle);
        ddi_dma_free_handle(&pRxPkt->dmaHandle);

        BnxeRxPktDescrFree(pUM, pRxPkt);
    }
}


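/*
 * Free the rx descriptors for all chains owned by the given client.  The
 * buffer signature is reset first so that any buffers still held by the
 * stack are freed on their way back in BnxeRxPktFree rather than queued
 * for reposting.
 */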
void BnxeRxPktsFini(um_device_t * pUM,
                    int           cliIdx)
{
    int idx;

    /* reset the signature for this unplumb */
    atomic_swap_32(&pUM->rxBufSignature[cliIdx], 0);

    switch (cliIdx)
    {
    case LM_CLI_IDX_FCOE:

        BnxeRxPktsFiniIdx(pUM, FCOE_CID(&pUM->lm_dev));
        break;

    case LM_CLI_IDX_NDIS:

        LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
        {
            BnxeRxPktsFiniIdx(pUM, idx);
        }

        break;

    default:

        BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsFini (%d)", cliIdx);
        break;
    }
}