xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c (revision fcdb3229a31dd4ff700c69238814e326aad49098)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright © 2003-2011 Emulex. All rights reserved.  */
23 
24 /*
25  * Source file containing the Receive Path handling
26  * functions
27  */
28 #include <oce_impl.h>
29 
30 
31 void oce_rx_pool_free(char *arg);
32 static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
33 static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
34     size_t size, int flags);
35 
36 static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
37     struct oce_nic_rx_cqe *cqe);
38 static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
39 	struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
40 static int oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost);
41 static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
42 static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
43 static inline void oce_rx_drop_pkt(struct oce_rq *rq,
44     struct oce_nic_rx_cqe *cqe);
45 static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
46 static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
47 static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs);
48 
49 static ddi_dma_attr_t oce_rx_buf_attr = {
50 	DMA_ATTR_V0,		/* version number */
51 	0x0000000000000000ull,	/* low address */
52 	0xFFFFFFFFFFFFFFFFull,	/* high address */
53 	0x00000000FFFFFFFFull,	/* dma counter max */
54 	OCE_DMA_ALIGNMENT,	/* alignment */
55 	0x000007FF,		/* burst sizes */
56 	0x00000001,		/* minimum transfer size */
57 	0x00000000FFFFFFFFull,	/* maximum transfer size */
58 	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
59 	1,			/* scatter/gather list length */
60 	0x00000001,		/* granularity */
61 	DDI_DMA_FLAGERR|DDI_DMA_RELAXED_ORDERING		/* DMA flags */
62 };
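/*
 * Note: the scatter/gather list length above is 1 because each receive
 * queue entry (RQE) carries a single fragment address, so every RX
 * buffer must map to one physically contiguous DMA cookie.
 */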
63 
64 /*
65  * function to create the RQ DMA buffer pool
66  *
67  * rq - pointer to the RQ that owns the buffer pool
68  * buf_size - requested buffer size (unused; the RQ's configured
69  *	frag_size plus OCE_RQE_BUF_HEADROOM is used instead)
70  *
71  * return DDI_SUCCESS => success, DDI_FAILURE otherwise
72  */
73 int
74 oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
75 {
76 	int size;
77 	int cnt;
78 	int ret;
79 	oce_rq_bdesc_t *rqbd;
80 
81 	_NOTE(ARGUNUSED(buf_size));
82 	rqbd = rq->rq_bdesc_array;
83 	size = rq->cfg.frag_size + OCE_RQE_BUF_HEADROOM;
84 	for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
85 		rq->rqb_freelist[cnt] = rqbd;
86 		ret = oce_rqb_ctor(rqbd, rq,
87 		    size, (DDI_DMA_RDWR|DDI_DMA_STREAMING));
88 		if (ret != DDI_SUCCESS) {
89 			goto rqb_fail;
90 		}
91 	}
92 	rq->rqb_free = rq->cfg.nbufs;
93 	rq->rqb_rc_head = 0;
94 	rq->rqb_next_free = 0;
95 	return (DDI_SUCCESS);
96 
97 rqb_fail:
98 	oce_rqb_cache_destroy(rq);
99 	return (DDI_FAILURE);
100 } /* oce_rqb_cache_create */
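/*
 * Buffer pool layout as initialized above: rq->rq_bdesc_array holds the
 * rq->cfg.nbufs descriptors themselves, rq->rqb_freelist is a ring of
 * pointers to the free descriptors, and rq->rqb_free is an atomic count
 * of how many are available.  A buffer's life cycle looks roughly like:
 *
 *	rqbd = oce_rqb_alloc(rq);		(charged into an RQE)
 *	... loaned upstream as a desballoc'd mblk ...
 *	oce_rx_pool_free((char *)rqbd);		(freemsg() callback)
 *	-> oce_rqb_free(rq, rqbd);		(back on the free list)
 */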
101 
102 /*
103  * function to destroy the RQ DMA buffer cache
104  *
105  * rq - pointer to rq structure
106  *
107  * return none
108  */
109 void
110 oce_rqb_cache_destroy(struct oce_rq *rq)
111 {
112 	oce_rq_bdesc_t *rqbd = NULL;
113 	int cnt;
114 
115 	rqbd = rq->rq_bdesc_array;
116 	for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
117 		oce_rqb_dtor(rqbd);
118 	}
119 } /* oce_rqb_cache_destroy */
120 
121 /*
122  * RQ buffer destructor function
123  *
124  * rqbd - pointer to rq buffer descriptor
125  *
126  * return none
127  */
128 static	void
129 oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
130 {
131 	if ((rqbd == NULL) || (rqbd->rq == NULL)) {
132 		return;
133 	}
134 	if (rqbd->mp != NULL) {
135 		rqbd->fr_rtn.free_arg = NULL;
136 		freemsg(rqbd->mp);
137 		rqbd->mp = NULL;
138 	}
139 	oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
140 } /* oce_rqb_dtor */
141 
142 /*
143  * RQ buffer constructor function
144  *
145  * rqbd - pointer to rq buffer descriptor
146  * rq - pointer to RQ structure
147  * size - size of the buffer
148  * flags - DDI DMA flags passed through to the DMA buffer allocation
149  *
150  * return DDI_SUCCESS => success, DDI_FAILURE otherwise
151  */
152 static int
153 oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
154 {
155 	struct oce_dev *dev;
156 	oce_dma_buf_t *dbuf;
157 
158 	dev = rq->parent;
159 
160 	dbuf  = oce_alloc_dma_buffer(dev, size, &oce_rx_buf_attr, flags);
161 	if (dbuf == NULL) {
162 		return (DDI_FAILURE);
163 	}
164 
165 	/* Set the call back function parameters */
166 	rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
167 	rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
168 	rqbd->mp = desballoc((uchar_t *)(dbuf->base),
169 	    dbuf->size, 0, &rqbd->fr_rtn);
170 	if (rqbd->mp == NULL) {
171 		oce_free_dma_buffer(dev, dbuf);
172 		return (DDI_FAILURE);
173 	}
174 	rqbd->rqb = dbuf;
175 	rqbd->rq = rq;
176 	rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
177 	rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
178 	rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
179 
180 	return (DDI_SUCCESS);
181 } /* oce_rqb_ctor */
182 
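/*
 * Free-list concurrency: allocation (oce_rqb_alloc) happens only from the
 * charge path, i.e. the CQ drain routine or oce_start_rq() before RX is
 * running, so rqb_next_free is updated without a lock.  Recycling
 * (oce_rqb_free) can also come from the mblk free callback
 * oce_rx_pool_free(), so rqb_rc_head updates are serialized with rc_lock.
 * Callers reserve buffers against rq->rqb_free before allocating.
 */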
183 /*
184  * RQ buffer allocator function
185  *
186  * rq - pointer to RQ structure
187  *
188  * return pointer to RQ buffer descriptor
189  */
190 static inline oce_rq_bdesc_t *
191 oce_rqb_alloc(struct oce_rq *rq)
192 {
193 	oce_rq_bdesc_t *rqbd;
194 	uint32_t free_index;
195 	free_index = rq->rqb_next_free;
196 	rqbd = rq->rqb_freelist[free_index];
197 	rq->rqb_freelist[free_index] = NULL;
198 	rq->rqb_next_free = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
199 	return (rqbd);
200 } /* oce_rqb_alloc */
201 
202 /*
203  * function to free the RQ buffer
204  *
205  * rq - pointer to RQ structure
206  * rqbd - pointer to receive buffer descriptor
207  *
208  * return none
209  */
210 static inline void
211 oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
212 {
213 	uint32_t free_index;
214 	mutex_enter(&rq->rc_lock);
215 	free_index = rq->rqb_rc_head;
216 	rq->rqb_freelist[free_index] = rqbd;
217 	rq->rqb_rc_head = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
218 	mutex_exit(&rq->rc_lock);
219 	atomic_inc_32(&rq->rqb_free);
220 } /* oce_rqb_free */
221 
222 
223 
224 
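/*
 * function to notify the adapter of buffers newly placed on the RQ ring
 *
 * rq - pointer to the RQ whose doorbell is rung
 * nbufs - number of buffers just queued
 *
 * The RXULP doorbell is written in chunks of at most OCE_MAX_RQ_POSTS
 * buffers; rq->buf_avail is updated as each chunk is posted.
 *
 * return none
 */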
225 static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs)
226 {
227 	pd_rxulp_db_t rxdb_reg;
228 	int count;
229 	struct oce_dev *dev =  rq->parent;
230 
231 
232 	rxdb_reg.dw0 = 0;
233 	rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
234 
235 	for (count = nbufs/OCE_MAX_RQ_POSTS; count > 0; count--) {
236 		rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
237 		OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
238 		rq->buf_avail += OCE_MAX_RQ_POSTS;
239 		nbufs -= OCE_MAX_RQ_POSTS;
240 	}
241 	if (nbufs > 0) {
242 		rxdb_reg.bits.num_posted = nbufs;
243 		OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
244 		rq->buf_avail += nbufs;
245 	}
246 }
247 /*
248  * function to charge a given rq with buffers from a pool's free list
249  *
250  * rq - pointer to the RQ to charge
251  * nbufs - number of buffers to be charged
252  * repost - B_TRUE to refill RQEs from the shadow ring instead of the
253  *	buffer free list
254  * return number of rqe's charged.
255  */
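/*
 * The shadow ring mirrors the hardware RQ ring entry for entry: for each
 * RQE handed to the adapter, shadow_rq[pidx] records which buffer
 * descriptor backs it, so the completion path can translate a consumer
 * index back into its oce_rq_bdesc_t.
 */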
256 static inline int
257 oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost)
258 {
259 	struct oce_nic_rqe *rqe;
260 	oce_rq_bdesc_t *rqbd;
261 	oce_rq_bdesc_t **shadow_rq;
262 	int cnt;
263 	int cur_index;
264 	oce_ring_buffer_t *ring;
265 
266 	shadow_rq = rq->shadow_ring;
267 	ring = rq->ring;
268 	cur_index = ring->cidx;
269 
270 	for (cnt = 0; cnt < nbufs; cnt++) {
271 		if (!repost) {
272 			rqbd = oce_rqb_alloc(rq);
273 		} else {
274 			/* just repost the buffers from shadow ring */
275 			rqbd = shadow_rq[cur_index];
276 			cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
277 		}
278 		/* fill the rqes */
279 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
280 		    struct oce_nic_rqe);
281 		rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
282 		rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
283 		shadow_rq[rq->ring->pidx] = rqbd;
284 		DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
285 		RING_PUT(rq->ring, 1);
286 	}
287 
288 	return (cnt);
289 } /* oce_rq_charge */
290 
291 /*
292  * function to release the posted buffers
293  *
294  * rq - pointer to the RQ to discharge
295  *
296  * return none
297  */
298 void
299 oce_rq_discharge(struct oce_rq *rq)
300 {
301 	oce_rq_bdesc_t *rqbd;
302 	oce_rq_bdesc_t **shadow_rq;
303 
304 	shadow_rq = rq->shadow_ring;
305 	/* Free the posted buffer since RQ is destroyed already */
306 	while ((int32_t)rq->buf_avail > 0) {
307 		rqbd = shadow_rq[rq->ring->cidx];
308 		oce_rqb_free(rq, rqbd);
309 		RING_GET(rq->ring, 1);
310 		rq->buf_avail--;
311 	}
312 }
313 /*
314  * function to process a single packet
315  *
316  * dev - software handle to the device
317  * rq - pointer to the RQ the packet was received on
318  * cqe - Pointer to Completion Q entry
319  *
320  * return mblk pointer =>  success, NULL  => error
321  */
322 static inline mblk_t *
323 oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
324 {
325 	mblk_t *mp;
326 	int pkt_len;
327 	int32_t frag_cnt = 0;
328 	mblk_t **mblk_tail;
329 	mblk_t	*mblk_head;
330 	int frag_size;
331 	oce_rq_bdesc_t *rqbd;
332 	uint16_t cur_index;
333 	oce_ring_buffer_t *ring;
334 	int i;
335 
336 	frag_cnt  = cqe->u0.s.num_fragments & 0x7;
337 	mblk_head = NULL;
338 	mblk_tail = &mblk_head;
339 
340 	ring = rq->ring;
341 	cur_index = ring->cidx;
342 
343 	/* Get the relevant Queue pointers */
344 	pkt_len = cqe->u0.s.pkt_size;
345 	for (i = 0; i < frag_cnt; i++) {
346 		rqbd = rq->shadow_ring[cur_index];
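		/*
		 * rqbd->mp can be NULL if the desballoc() attempted in
		 * oce_rx_pool_free() failed when this buffer was last
		 * recycled; retry the allocation before loaning it upstream.
		 */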
347 		if (rqbd->mp == NULL) {
348 			rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
349 			    rqbd->rqb->size, 0, &rqbd->fr_rtn);
350 			if (rqbd->mp == NULL) {
351 				return (NULL);
352 			}
353 
354 			rqbd->mp->b_rptr =
355 			    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
356 		}
357 
358 		mp = rqbd->mp;
359 		frag_size  = (pkt_len > rq->cfg.frag_size) ?
360 		    rq->cfg.frag_size : pkt_len;
361 		mp->b_wptr = mp->b_rptr + frag_size;
362 		pkt_len   -= frag_size;
363 		mp->b_next = mp->b_cont = NULL;
364 		/* Chain the message mblks */
365 		*mblk_tail = mp;
366 		mblk_tail = &mp->b_cont;
367 		(void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
368 		cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
369 	}
370 
371 	if (mblk_head == NULL) {
372 		oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
373 		return (NULL);
374 	}
375 
376 	/* replace the buffer with new ones */
377 	(void) oce_rq_charge(rq, frag_cnt, B_FALSE);
378 	atomic_add_32(&rq->pending, frag_cnt);
379 	return (mblk_head);
380 } /* oce_rx */
381 
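/*
 * function to process a single packet by copying it out of the RQ buffers
 *
 * dev - software handle to the device (unused)
 * rq - pointer to the RQ the packet was received on
 * cqe - pointer to the completion queue entry
 *
 * Used when the packet is smaller than dev->rx_bcopy_limit or when too few
 * free buffers remain to replace the loaned ones; the fragments are copied
 * into a freshly allocated mblk and the original buffers are re-charged.
 *
 * return mblk pointer => success, NULL => error
 */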
382 static inline mblk_t *
383 oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
384 {
385 	mblk_t *mp;
386 	int pkt_len;
387 	int alloc_len;
388 	int32_t frag_cnt = 0;
389 	int frag_size;
390 	oce_rq_bdesc_t *rqbd;
391 	unsigned char  *rptr;
392 	uint32_t cur_index;
393 	oce_ring_buffer_t *ring;
394 	oce_rq_bdesc_t **shadow_rq;
395 	int cnt = 0;
396 
397 	_NOTE(ARGUNUSED(dev));
398 
399 	shadow_rq = rq->shadow_ring;
400 	pkt_len = cqe->u0.s.pkt_size;
401 	alloc_len = pkt_len + OCE_RQE_BUF_HEADROOM;
402 	frag_cnt = cqe->u0.s.num_fragments & 0x7;
403 
404 	mp = allocb(alloc_len, BPRI_HI);
405 	if (mp == NULL) {
406 		return (NULL);
407 	}
408 
409 	mp->b_rptr += OCE_RQE_BUF_HEADROOM;
410 	rptr = mp->b_rptr;
411 	mp->b_wptr = mp->b_rptr + pkt_len;
412 	ring = rq->ring;
413 
414 	cur_index = ring->cidx;
415 	for (cnt = 0; cnt < frag_cnt; cnt++) {
416 		rqbd = shadow_rq[cur_index];
417 		frag_size  = (pkt_len > rq->cfg.frag_size) ?
418 		    rq->cfg.frag_size : pkt_len;
419 		(void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
420 		bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM, rptr, frag_size);
421 		rptr += frag_size;
422 		pkt_len   -= frag_size;
423 		cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
424 	}
425 	(void) oce_rq_charge(rq, frag_cnt, B_TRUE);
426 	return (mp);
427 }
428 
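/*
 * function to propagate the IP and L4 checksum results from the CQE to
 * the mblk's hardware checksum flags
 */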
429 static inline void
430 oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
431 {
432 	int csum_flags = 0;
433 
434 	/* set flags */
435 	if (cqe->u0.s.ip_cksum_pass) {
436 		csum_flags |= HCK_IPV4_HDRCKSUM_OK;
437 	}
438 
439 	if (cqe->u0.s.l4_cksum_pass) {
440 		csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
441 	}
442 
443 	if (csum_flags) {
444 		(void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
445 	}
446 }
447 
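/*
 * function to re-insert a VLAN tag that the hardware stripped
 *
 * The destination and source MAC addresses are slid back by VTAG_SIZE
 * bytes (into headroom reserved when the buffer was set up) and an
 * 802.1Q TPID/TCI pair is written in between.
 *
 * mp - packet mblk
 * vtag - VLAN tag reported in the CQE
 */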
448 static inline void
449 oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
450 {
451 	struct ether_vlan_header *ehp;
452 
453 	(void) memmove(mp->b_rptr - VTAG_SIZE,
454 	    mp->b_rptr, 2 * ETHERADDRL);
455 	mp->b_rptr -= VTAG_SIZE;
456 	ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
457 	ehp->ether_tpid = htons(ETHERTYPE_VLAN);
458 	ehp->ether_tci = LE_16(vtag);
459 }
460 
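/*
 * function to drop a completed packet by returning its fragment buffers
 * straight to the RQ free list (used while cleaning the RQ)
 *
 * rq - pointer to the RQ the packet arrived on
 * cqe - pointer to the completion queue entry
 */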
461 static inline void
462 oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
463 {
464 	int frag_cnt;
465 	oce_rq_bdesc_t *rqbd;
466 	oce_rq_bdesc_t  **shadow_rq;
467 	shadow_rq = rq->shadow_ring;
468 	for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
469 		rqbd = shadow_rq[rq->ring->cidx];
470 		oce_rqb_free(rq, rqbd);
471 		RING_GET(rq->ring, 1);
472 	}
473 }
474 
475 
476 /*
477  * function to process a receive queue's completions
478  *
479  * arg - pointer to the RQ whose completion queue is to be drained
480  *
481  * return number of cqes processed
482  */
483 uint16_t
484 oce_drain_rq_cq(void *arg)
485 {
486 	struct oce_nic_rx_cqe *cqe;
487 	struct oce_rq *rq;
488 	mblk_t *mp = NULL;
489 	mblk_t *mblk_head;
490 	mblk_t **mblk_tail;
491 	uint16_t num_cqe = 0;
492 	struct oce_cq  *cq;
493 	struct oce_dev *dev;
494 	int32_t frag_cnt;
495 	uint32_t nbufs = 0;
496 
497 	rq = (struct oce_rq *)arg;
498 	dev = rq->parent;
499 	cq = rq->cq;
500 	mblk_head = NULL;
501 	mblk_tail = &mblk_head;
502 
503 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
504 
505 	(void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);
506 	/* dequeue till you reach an invalid cqe */
507 	while (RQ_CQE_VALID(cqe)) {
508 		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
509 		frag_cnt = cqe->u0.s.num_fragments & 0x7;
510 		/* if insufficient buffers to charge then do copy */
511 		if ((cqe->u0.s.pkt_size < dev->rx_bcopy_limit) ||
512 		    (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
513 			mp = oce_rx_bcopy(dev, rq, cqe);
514 		} else {
515 			mp = oce_rx(dev, rq, cqe);
516 			if (mp == NULL) {
517 				atomic_add_32(&rq->rqb_free, frag_cnt);
518 				mp = oce_rx_bcopy(dev, rq, cqe);
519 			}
520 		}
521 		if (mp != NULL) {
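			/*
			 * In FLEX10 (multi-channel) mode the tag is only
			 * re-inserted when the CQE also reports qnq, i.e. the
			 * frame carried two tags, presumably because a lone
			 * tag there belongs to the internal channel rather
			 * than the host; otherwise any reported VLAN tag is
			 * restored.
			 */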
522 			if (dev->function_mode & FLEX10_MODE) {
523 				if (cqe->u0.s.vlan_tag_present &&
524 				    cqe->u0.s.qnq) {
525 					oce_rx_insert_tag(mp,
526 					    cqe->u0.s.vlan_tag);
527 				}
528 			} else if (cqe->u0.s.vlan_tag_present) {
529 				oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
530 			}
531 			oce_set_rx_oflags(mp, cqe);
532 
533 			*mblk_tail = mp;
534 			mblk_tail = &mp->b_next;
535 		} else {
536 			(void) oce_rq_charge(rq, frag_cnt, B_TRUE);
537 		}
538 		RING_GET(rq->ring, frag_cnt);
539 		rq->buf_avail -= frag_cnt;
540 		nbufs += frag_cnt;
541 
542 		oce_rq_post_buffer(rq, frag_cnt);
543 		RQ_CQE_INVALIDATE(cqe);
544 		RING_GET(cq->ring, 1);
545 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
546 		    struct oce_nic_rx_cqe);
547 		num_cqe++;
548 		/* process max ring size */
549 		if (num_cqe > dev->rx_pkt_per_intr) {
550 			break;
551 		}
552 	} /* for all valid CQEs */
553 
554 	if (mblk_head) {
555 		mac_rx(dev->mac_handle, NULL, mblk_head);
556 	}
557 	oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
558 	return (num_cqe);
559 } /* oce_drain_rq_cq */
560 
561 /*
562  * function to free mblk databuffer to the RQ pool
563  *
564  * arg - pointer to the receive buffer descriptor
565  *
566  * return none
567  */
568 void
569 oce_rx_pool_free(char *arg)
570 {
571 	oce_rq_bdesc_t *rqbd;
572 	struct oce_rq  *rq;
573 
574 	/* During destroy, arg will be NULL */
575 	if (arg == NULL) {
576 		return;
577 	}
578 
579 	/* retrieve the pointers from arg */
580 	rqbd = (oce_rq_bdesc_t *)(void *)arg;
581 	rq = rqbd->rq;
582 	rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
583 	    rqbd->rqb->size, 0, &rqbd->fr_rtn);
584 
585 	if (rqbd->mp) {
586 		rqbd->mp->b_rptr =
587 		    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
588 	}
589 
590 	oce_rqb_free(rq, rqbd);
591 	(void) atomic_dec_32(&rq->pending);
592 } /* rx_pool_free */
593 
594 /*
595  * function to stop the RX
596  *
597  * rq - pointer to RQ structure
598  *
599  * return none
600  */
601 void
602 oce_clean_rq(struct oce_rq *rq)
603 {
604 	uint16_t num_cqe = 0;
605 	struct oce_cq  *cq;
606 	struct oce_dev *dev;
607 	struct oce_nic_rx_cqe *cqe;
608 	int32_t ti = 0;
609 
610 	dev = rq->parent;
611 	cq = rq->cq;
612 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
613 	/* dequeue till you reach an invalid cqe */
614 	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
615 
616 		while (RQ_CQE_VALID(cqe)) {
617 			DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
618 			oce_rx_drop_pkt(rq, cqe);
619 			atomic_add_32(&rq->buf_avail,
620 			    -(cqe->u0.s.num_fragments & 0x7));
621 			oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
622 			RQ_CQE_INVALIDATE(cqe);
623 			RING_GET(cq->ring, 1);
624 			cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
625 			    struct oce_nic_rx_cqe);
626 			num_cqe++;
627 		}
628 		OCE_MSDELAY(1);
629 	}
630 } /* oce_clean_rq */
631 
632 /*
633  * function to start the RX
634  *
635  * rq - pointer to RQ structure
636  *
637  * return 0
638  */
639 int
640 oce_start_rq(struct oce_rq *rq)
641 {
642 	int ret = 0;
643 	int to_charge = 0;
644 	struct oce_dev *dev = rq->parent;
645 	to_charge = rq->cfg.q_len - rq->buf_avail;
646 	to_charge = min(to_charge, rq->rqb_free);
647 	atomic_add_32(&rq->rqb_free, -to_charge);
648 	(void) oce_rq_charge(rq, to_charge, B_FALSE);
649 	/* ok to do it here since Rx has not even started */
650 	oce_rq_post_buffer(rq, to_charge);
651 	oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
652 	return (ret);
653 } /* oce_start_rq */
654 
655 /* Checks for rx buffers still pending with the stack */
656 int
657 oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
658 {
659 	int ti;
660 	_NOTE(ARGUNUSED(dev));
661 
662 	for (ti = 0; ti < timeout; ti++) {
663 		if (rq->pending > 0) {
664 			OCE_MSDELAY(10);
665 			continue;
666 		} else {
667 			rq->pending = 0;
668 			break;
669 		}
670 	}
671 	return (rq->pending);
672 }
673