xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c (revision ead1f93ee620d7580f7e53350fe5a884fc4f158a)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Emulex.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Source file containing the Receive Path handling
 * functions
 */
#include <oce_impl.h>


static void rx_pool_free(char *arg);
static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);
static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
    struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int oce_rq_charge(struct oce_dev *dev, struct oce_rq *rq,
    uint32_t nbufs);
static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
    size_t size, int flags);
static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
static inline void oce_rx_drop_pkt(struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);

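/*
 * Note on RX buffer handling (summary of the code below): receive buffers are
 * DMA buffers pre-allocated onto rq->rq_buf_list by oce_rqb_cache_create()
 * and posted to the hardware ring by oce_rq_charge().  On completion, large
 * frames are loaned upstream as desballoc'd mblks whose free routine,
 * rx_pool_free(), returns the buffer to the pool; small frames (and frames
 * arriving while the pool runs low) are bcopy'd into freshly allocated mblks
 * so their buffers can be recycled immediately.
 */
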
/*
 * function to create a DMA buffer pool for an RQ
 *
 * rq - pointer to the RQ for which the pool is created
 * buf_size - size of each buffer
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
int
oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
{
	struct oce_dev *dev = rq->parent;
	int size;
	int cnt;
	int ret;
	int nitems;

	nitems = rq->cfg.nbufs;
	size = nitems * sizeof (oce_rq_bdesc_t);
	rq->rq_bdesc_array = kmem_zalloc(size, KM_SLEEP);

	/* Create the free buffer list */
	OCE_LIST_CREATE(&rq->rq_buf_list, DDI_INTR_PRI(dev->intr_pri));

	for (cnt = 0; cnt < nitems; cnt++) {
		ret = oce_rqb_ctor(&rq->rq_bdesc_array[cnt],
		    rq, buf_size, DDI_DMA_STREAMING);
		if (ret != DDI_SUCCESS) {
			goto rqb_fail;
		}
		OCE_LIST_INSERT_TAIL(&rq->rq_buf_list,
		    &(rq->rq_bdesc_array[cnt].link));
	}
	return (DDI_SUCCESS);

rqb_fail:
	oce_rqb_cache_destroy(rq);
	return (DDI_FAILURE);
} /* oce_rqb_cache_create */

/*
 * function to destroy the RQ DMA buffer cache
 *
 * rq - pointer to rq structure
 *
 * return none
 */
void
oce_rqb_cache_destroy(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd = NULL;

	while ((rqbd = (oce_rq_bdesc_t *)OCE_LIST_REM_HEAD(&rq->rq_buf_list))
	    != NULL) {
		oce_rqb_dtor(rqbd);
	}
	kmem_free(rq->rq_bdesc_array,
	    rq->cfg.nbufs * sizeof (oce_rq_bdesc_t));
	OCE_LIST_DESTROY(&rq->rq_buf_list);
} /* oce_rqb_cache_destroy */

/*
 * RQ buffer destructor function
 *
 * rqbd - pointer to rq buffer descriptor
 *
 * return none
 */
static void
oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
{
	if ((rqbd == NULL) || (rqbd->rq == NULL)) {
		return;
	}
	oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
	if (rqbd->mp != NULL) {
		/* DMA buffer is already freed; disarm the free routine */
		rqbd->fr_rtn.free_arg = NULL;
		freeb(rqbd->mp);
	}
} /* oce_rqb_dtor */

/*
 * RQ buffer constructor function
 *
 * rqbd - pointer to rq buffer descriptor
 * rq - pointer to RQ structure
 * size - size of the buffer
 * flags - DMA buffer allocation flags (the caller passes DDI_DMA_STREAMING)
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
static int
oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
{
	struct oce_dev *dev;
	oce_dma_buf_t *dbuf;

	dev = rq->parent;

	dbuf  = oce_alloc_dma_buffer(dev, size, flags);
	if (dbuf == NULL) {
		return (DDI_FAILURE);
	}

	/* override usable length */
	rqbd->rqb = dbuf;
	rqbd->rq = rq;
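
	/*
	 * frag_addr is the DMA address programmed into the RQE; it is offset
	 * by OCE_RQE_BUF_HEADROOM so that room remains in front of the
	 * received data (oce_rx_insert_tag(), for example, moves b_rptr back
	 * into this area to restore the VLAN tag).  fr_rtn is the free
	 * routine the desballoc'd mblk hands to rx_pool_free() when the
	 * stack frees the loaned message.
	 */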
	rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
	rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
	rqbd->fr_rtn.free_func = (void (*)())rx_pool_free;
	rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
	rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
	    rqbd->rqb->size, 0, &rqbd->fr_rtn);
	/* desballoc() may fail; oce_rq_charge() retries the allocation later */
	if (rqbd->mp != NULL) {
		rqbd->mp->b_rptr =
		    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
	}

	return (DDI_SUCCESS);
} /* oce_rqb_ctor */

/*
 * RQ buffer allocator function
 *
 * rq - pointer to RQ structure
 *
 * return pointer to RQ buffer descriptor
 */
static inline oce_rq_bdesc_t *
oce_rqb_alloc(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd;
	rqbd = OCE_LIST_REM_HEAD(&rq->rq_buf_list);
	return (rqbd);
} /* oce_rqb_alloc */

/*
 * function to free the RQ buffer
 *
 * rq - pointer to RQ structure
 * rqbd - pointer to receive buffer descriptor
 *
 * return none
 */
static inline void
oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
{
	OCE_LIST_INSERT_TAIL(&rq->rq_buf_list, rqbd);
} /* oce_rqb_free */


/*
 * function to charge a given RQ with buffers from the pool's free list
 *
 * dev - software handle to the device
 * rq - pointer to the RQ to charge
 * nbufs - number of buffers to be charged
 *
 * return number of RQEs charged.
 */
static inline int
oce_rq_charge(struct oce_dev *dev,
    struct oce_rq *rq, uint32_t nbufs)
{
	struct oce_nic_rqe *rqe;
	oce_rq_bdesc_t *rqbd;
	struct rq_shadow_entry	*shadow_rq;
	int32_t num_bufs = 0;
	int32_t total_bufs = 0;
	pd_rxulp_db_t rxdb_reg;
	uint32_t cnt;

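	/*
	 * shadow_rq mirrors the hardware RQ ring entry by entry; the buffer
	 * descriptor posted at each producer index is remembered here so the
	 * completion path can find it again at the consumer index.
	 */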
	shadow_rq = rq->shadow_ring;
	/* check number of slots free and recharge */
	nbufs = ((rq->buf_avail + nbufs) > rq->cfg.q_len) ?
	    (rq->cfg.q_len - rq->buf_avail) : nbufs;
	for (cnt = 0; cnt < nbufs; cnt++) {
		rqbd = oce_rqb_alloc(rq);
		if (rqbd == NULL) {
			oce_log(dev, CE_NOTE, MOD_RX, "%s %x",
			    "rqb pool empty @ ticks",
			    (uint32_t)ddi_get_lbolt());
			break;
		}
		if (rqbd->mp == NULL) {
			rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
			    rqbd->rqb->size, 0, &rqbd->fr_rtn);
			if (rqbd->mp != NULL) {
				rqbd->mp->b_rptr =
				    (uchar_t *)rqbd->rqb->base +
				    OCE_RQE_BUF_HEADROOM;
			}

			/*
			 * desballoc() failed again; put the buffer back
			 * and continue.  The loop is bounded by nbufs,
			 * so it stays finite.
			 */

			if (rqbd->mp == NULL) {
				oce_rqb_free(rq, rqbd);
				continue;
			}
		}

		/* fill the rqes */
		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
		    struct oce_nic_rqe);
		rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
		rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
		shadow_rq[rq->ring->pidx].rqbd = rqbd;
		DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);

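		/*
		 * RQEs are made visible to the hardware in batches: every
		 * OCE_MAX_RQ_POSTS entries the accumulated count is written
		 * to the RX ULP doorbell, and any remainder is posted after
		 * the loop.
		 */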
		/* if we have reached the max allowed posts, post */
		if (cnt && !(cnt % OCE_MAX_RQ_POSTS)) {
			rxdb_reg.dw0 = 0;
			rxdb_reg.bits.num_posted = num_bufs;
			rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
			OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
			num_bufs = 0;
		}
		num_bufs++;
		total_bufs++;
	}

	/* post pending bufs */
	if (num_bufs) {
		rxdb_reg.dw0 = 0;
		rxdb_reg.bits.num_posted = num_bufs;
		rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
		OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
	}
	atomic_add_32(&rq->buf_avail, total_bufs);
	return (total_bufs);
} /* oce_rq_charge */

/*
 * function to release the posted buffers
 *
 * rq - pointer to the RQ to discharge
 *
 * return none
 */
void
oce_rq_discharge(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd;
	struct rq_shadow_entry *shadow_rq;

	shadow_rq = rq->shadow_ring;
	/* Free the posted buffers since the RQ is already being destroyed */
	while ((int32_t)rq->buf_avail > 0) {
		rqbd = shadow_rq[rq->ring->cidx].rqbd;
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
		rq->buf_avail--;
	}
}
/*
 * function to process a single packet
 *
 * dev - software handle to the device
 * rq - pointer to the RQ on which the packet was received
 * cqe - pointer to the completion queue entry
 *
 * return mblk pointer => success, NULL => error
 */
static inline mblk_t *
oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	mblk_t *mp;
	int pkt_len;
	int32_t frag_cnt = 0;
	mblk_t *mblk_prev = NULL;
	mblk_t *mblk_head = NULL;
	int frag_size;
	struct rq_shadow_entry *shadow_rq;
	struct rq_shadow_entry *shadow_rqe;
	oce_rq_bdesc_t *rqbd;

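	/*
	 * Walk the fragments reported by the CQE: for each one look up the
	 * posted buffer in the shadow ring at the consumer index, trim its
	 * mblk to the fragment length and chain it with b_cont.  The buffers
	 * stay loaned to the stack until rx_pool_free() returns them.
	 */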
	/* Get the relevant Queue pointers */
	shadow_rq = rq->shadow_ring;
	pkt_len = cqe->u0.s.pkt_size;
	for (; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
		shadow_rqe = &shadow_rq[rq->ring->cidx];
		rqbd = shadow_rqe->rqbd;
		mp = rqbd->mp;
		if (mp == NULL)
			return (NULL);
		frag_size  = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		mp->b_wptr = mp->b_rptr + frag_size;
		pkt_len   -= frag_size;
		/* Chain the message mblks */
		if (mblk_head == NULL) {
			mblk_head = mblk_prev = mp;
		} else {
			mblk_prev->b_cont = mp;
			mblk_prev = mp;
		}
		(void) ddi_dma_sync(rqbd->rqb->dma_handle, 0, frag_size,
		    DDI_DMA_SYNC_FORKERNEL);
		RING_GET(rq->ring, 1);
	}

	if (mblk_head == NULL) {
		oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
		return (NULL);
	}
	atomic_add_32(&rq->pending, (cqe->u0.s.num_fragments & 0x7));
	mblk_head->b_next = NULL;
	return (mblk_head);
} /* oce_rx */

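/*
 * function to process a single packet by copying its fragments into a
 * freshly allocated mblk (used for small frames and when the RQ buffer
 * pool is running low)
 *
 * dev - software handle to the device
 * rq - pointer to the RQ on which the packet was received
 * cqe - pointer to the completion queue entry
 *
 * return mblk pointer => success, NULL => error
 */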
/* ARGSUSED */
static inline mblk_t *
oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	mblk_t *mp;
	int pkt_len;
	int alloc_len;
	int32_t frag_cnt = 0;
	int frag_size;
	struct rq_shadow_entry *shadow_rq;
	struct rq_shadow_entry *shadow_rqe;
	oce_rq_bdesc_t *rqbd;
	boolean_t tag_present = B_FALSE;
	unsigned char *rptr;

	shadow_rq = rq->shadow_ring;
	pkt_len = cqe->u0.s.pkt_size;
	alloc_len = pkt_len;

	/* The hardware always strips the VLAN tag, so reserve room to insert it back */
	if (cqe->u0.s.vlan_tag_present) {
		alloc_len += VLAN_TAGSZ;
		tag_present = B_TRUE;
	}
	mp = allocb(alloc_len, BPRI_HI);
	if (mp == NULL)
		return (NULL);
	if (tag_present) {
		/* offset the read pointer by VLAN_TAGSZ (4) bytes to make room for the tag */
		mp->b_rptr += VLAN_TAGSZ;
	}
	rptr = mp->b_rptr;
	mp->b_wptr = mp->b_wptr + alloc_len;

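	/*
	 * Copy each fragment out of its DMA buffer and return the buffer to
	 * the pool right away; nothing is loaned to the stack on this path.
	 */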
	for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
		shadow_rqe = &shadow_rq[rq->ring->cidx];
		rqbd = shadow_rqe->rqbd;
		frag_size  = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		(void) ddi_dma_sync(rqbd->rqb->dma_handle, 0, frag_size,
		    DDI_DMA_SYNC_FORKERNEL);
		bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM,
		    rptr, frag_size);
		rptr += frag_size;
		pkt_len   -= frag_size;
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
	}
	return (mp);
}

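/*
 * function to set the hardware checksum offload flags on a received mblk
 * based on the checksum status bits in the completion queue entry
 *
 * mp - mblk carrying the received frame
 * cqe - pointer to the completion queue entry
 *
 * return none
 */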
static inline void
oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
{
	int csum_flags = 0;

	/* set flags */
	if (cqe->u0.s.ip_cksum_pass) {
		csum_flags |= HCK_IPV4_HDRCKSUM;
	}

	if (cqe->u0.s.l4_cksum_pass) {
		csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
	}

	if (csum_flags) {
		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
		    csum_flags, 0);
	}
}

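/*
 * function to re-insert the VLAN tag that the hardware stripped, by moving
 * the destination and source MAC addresses back by VLAN_TAGSZ and writing
 * the TPID/TCI fields
 *
 * mp - mblk carrying the received frame
 * vtag - VLAN tag from the completion queue entry
 *
 * return none
 */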
static inline void
oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
{
	struct ether_vlan_header *ehp;

	(void) memmove(mp->b_rptr - VLAN_TAGSZ,
	    mp->b_rptr, 2 * ETHERADDRL);
	mp->b_rptr -= VLAN_TAGSZ;
	ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
	ehp->ether_tpid = htons(ETHERTYPE_VLAN);
	ehp->ether_tci = LE_16(vtag);
}


/*
 * function to process a Receive queue
 *
 * arg - pointer to the RQ to drain
 *
 * return number of cqes processed
 */
uint16_t
oce_drain_rq_cq(void *arg)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_rq *rq;
	mblk_t *mp = NULL;
	mblk_t *mblk_head  = NULL;
	mblk_t *mblk_prev  = NULL;
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;

	if (arg == NULL)
		return (0);

	rq = (struct oce_rq *)arg;
	dev = rq->parent;
	cq = rq->cq;
	mutex_enter(&rq->rx_lock);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);

	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe) && (num_cqe < rq->cfg.q_len)) {
		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
		/* bcopy small frames, or frames for which too few buffers remain to loan */
		if (cqe->u0.s.pkt_size < dev->rx_bcopy_limit ||
		    OCE_LIST_SIZE(&rq->rq_buf_list) < cqe->u0.s.num_fragments) {
			mp = oce_rx_bcopy(dev, rq, cqe);
		} else {
			mp = oce_rx(dev, rq, cqe);
		}
		if (mp != NULL) {
			if (cqe->u0.s.vlan_tag_present) {
				oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
			}
			oce_set_rx_oflags(mp, cqe);
			if (mblk_head == NULL) {
				mblk_head = mblk_prev  = mp;
			} else {
				mblk_prev->b_next = mp;
				mblk_prev = mp;
			}

		} else {
			oce_rx_drop_pkt(rq, cqe);
		}
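		/*
		 * Account for the buffers this CQE consumed and immediately
		 * recharge the RQ with the same number of fresh buffers.
		 */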
		atomic_add_32(&rq->buf_avail, -(cqe->u0.s.num_fragments & 0x7));
		(void) oce_rq_charge(dev, rq,
		    (cqe->u0.s.num_fragments & 0x7));
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
	} /* for all valid CQEs */
	mutex_exit(&rq->rx_lock);
	if (mblk_head) {
		mac_rx(dev->mac_handle, NULL, mblk_head);
	}
	oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
	return (num_cqe);
} /* oce_drain_rq_cq */

/*
 * function to free mblk databuffer to the RQ pool
 *
 * arg - pointer to the receive buffer descriptor
 *
 * return none
 */
static void
rx_pool_free(char *arg)
{
	oce_rq_bdesc_t *rqbd;
	struct oce_rq  *rq;

	/* During destroy, arg will be NULL */
	if (arg == NULL) {
		return;
	}

	/* retrieve the pointers from arg */
	rqbd = (oce_rq_bdesc_t *)(void *)arg;
	rq = rqbd->rq;

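	/*
	 * The mblk that carried this buffer upstream has just been freed, so
	 * arm the descriptor with a fresh desballoc'd mblk before putting it
	 * back on the free list.  If desballoc() fails here, oce_rq_charge()
	 * will retry it when the buffer is next posted.
	 */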
	rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
	    rqbd->rqb->size, 0, &rqbd->fr_rtn);
	if (rqbd->mp != NULL) {
		rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base +
		    OCE_RQE_BUF_HEADROOM;
	}
	oce_rqb_free(rq, rqbd);
	(void) atomic_add_32(&rq->pending, -1);
} /* rx_pool_free */

/*
 * function to drain pending completions while stopping the RX
 *
 * rq - pointer to RQ structure
 *
 * return none
 */
void
oce_clean_rq(struct oce_rq *rq)
{
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	struct oce_nic_rx_cqe *cqe;
	int32_t ti = 0;

	dev = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {

		while (RQ_CQE_VALID(cqe)) {
			DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
			oce_rx_drop_pkt(rq, cqe);
			atomic_add_32(&rq->buf_avail,
			    -(cqe->u0.s.num_fragments & 0x7));
			oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
			cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
			    struct oce_nic_rx_cqe);
			num_cqe++;
		}
		OCE_MSDELAY(1);
	}
#if 0
	if (num_cqe) {
		oce_arm_cq(dev, cq->cq_id, num_cqe, B_FALSE);
	}
	/* Drain the Event queue now */
	oce_drain_eq(rq->cq->eq);
	return (num_cqe);
#endif
} /* oce_clean_rq */

/*
 * function to start the RX
 *
 * rq - pointer to RQ structure
 *
 * return 0 on success
 */
int
oce_start_rq(struct oce_rq *rq)
{
	int ret = 0;
	struct oce_dev *dev = rq->parent;

	(void) oce_rq_charge(dev, rq, rq->cfg.q_len);
	oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
	return (ret);
} /* oce_start_rq */

/* Checks for RX buffers still pending with the stack */
int
oce_rx_pending(struct oce_dev *dev)
{
	int ti;

	for (ti = 0; ti < 200; ti++) {
		if (dev->rq[0]->pending > 0) {
			OCE_MSDELAY(1);
			continue;
		} else {
			dev->rq[0]->pending = 0;
			break;
		}
	}
	return (dev->rq[0]->pending);
}

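/*
 * function to drop a received packet by returning all of its fragment
 * buffers to the RQ buffer pool
 *
 * rq - pointer to the RQ on which the packet arrived
 * cqe - pointer to the completion queue entry
 *
 * return none
 */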
static inline void
oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	int frag_cnt;
	oce_rq_bdesc_t *rqbd;
	struct rq_shadow_entry *shadow_rq;

	shadow_rq = rq->shadow_ring;
	for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
		rqbd = shadow_rq[rq->ring->cidx].rqbd;
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
	}
}
648