xref: /freebsd/sys/dev/oce/oce_queue.c (revision 71625ec9ad2a9bc8c09784fbd23b759830e0ee5f)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "oce_if.h"

/*****************************************************
 * local queue functions
 *****************************************************/

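/*
 * A note on the queue topology built here (summarizing the code below,
 * not authoritative hardware documentation): each MSI-X vector is given
 * an event queue (EQ); every NIC Tx work queue (WQ), NIC Rx receive
 * queue (RQ) and the mailbox queue (MQ) owns a completion queue (CQ)
 * attached to one of those EQs.  The *_init() routines only allocate
 * host-side resources, while the *_create() routines issue the mailbox
 * commands that instantiate the objects in firmware (RQs are the
 * exception: their firmware creation is deferred to oce_start_rx()).
 */
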
static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
				  uint32_t q_len, uint32_t wq_type);
static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
static void oce_wq_free(struct oce_wq *wq);
static void oce_wq_del(struct oce_wq *wq);
static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
				  uint32_t q_len,
				  uint32_t frag_size,
				  uint32_t mtu, uint32_t rss);
static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
static void oce_rq_free(struct oce_rq *rq);
static void oce_rq_del(struct oce_rq *rq);
static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
				    uint32_t q_len,
				    uint32_t item_size,
				    uint32_t eq_delay,
				    uint32_t vector);
static void oce_eq_del(struct oce_eq *eq);
static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
				    struct oce_eq *eq, uint32_t q_len);
static void oce_mq_free(struct oce_mq *mq);
static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx,
			 size_t req_size, enum qtype qtype, int version);
struct oce_cq *oce_cq_create(POCE_SOFTC sc,
			     struct oce_eq *eq,
			     uint32_t q_len,
			     uint32_t item_size,
			     uint32_t sol_event,
			     uint32_t is_eventable,
			     uint32_t nodelay, uint32_t ncoalesce);
static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);

/**
 * @brief	Create and initialize all the queues on the board
 * @param sc	software handle to the device
 * @returns	0 if successful, or error
 **/
int
oce_queue_init_all(POCE_SOFTC sc)
{
	int rc = 0, i, vector;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_aic_obj *aic;

	/* alloc TX/RX queues */
	for_all_wq_queues(sc, wq, i) {
		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
					 NIC_WQ_TYPE_STANDARD);
		if (!sc->wq[i])
			goto error;
	}

	for_all_rq_queues(sc, rq, i) {
		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
					OCE_MAX_JUMBO_FRAME_SIZE,
					(i == 0) ? 0 : is_rss_enabled(sc));
		if (!sc->rq[i])
			goto error;
	}

	/* Create network interface on card */
	if (oce_create_nw_interface(sc))
		goto error;

	/* create all of the event queues */
	for (vector = 0; vector < sc->intr_count; vector++) {
		/* setup aic defaults for each event queue */
		aic = &sc->aic_obj[vector];
		aic->max_eqd = OCE_MAX_EQD;
		aic->min_eqd = OCE_MIN_EQD;
		aic->et_eqd = OCE_MIN_EQD;
		aic->enable = TRUE;

		sc->eq[vector] = oce_eq_create(sc,
					sc->enable_hwlro ? EQ_LEN_2048 : EQ_LEN_1024,
					EQE_SIZE_4, 0, vector);

		if (!sc->eq[vector])
			goto error;
	}

	/* create Tx, Rx and mcc queues */
	for_all_wq_queues(sc, wq, i) {
		rc = oce_wq_create(wq, sc->eq[i]);
		if (rc)
			goto error;
		wq->queue_index = i;
		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
	}

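	/*
	 * RQ-to-EQ mapping (from the expression below): the default RQ
	 * (index 0) shares EQ 0 with WQ 0, and each RSS RQ i is attached
	 * to eq[i - 1], so the RSS queues land on the remaining vectors.
	 */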
	for_all_rq_queues(sc, rq, i) {
		rc = oce_rq_create(rq, sc->if_id,
					sc->eq[(i == 0) ? 0 : (i - 1)]);
		if (rc)
			goto error;
		rq->queue_index = i;
	}

	sc->mq = oce_mq_create(sc, sc->eq[0], 64);
	if (!sc->mq)
		goto error;

	return rc;

error:
	oce_queue_release_all(sc);
	return 1;
}

/**
 * @brief Releases all the queues created
 * @param sc		software handle to the device
 */
void
oce_queue_release_all(POCE_SOFTC sc)
{
	int i = 0;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_eq *eq;

	/* before deleting lro queues, we have to disable hwlro */
	if (sc->enable_hwlro)
		oce_mbox_nic_set_iface_lro_config(sc, 0);

	for_all_rq_queues(sc, rq, i) {
		if (rq) {
			oce_rq_del(sc->rq[i]);
			oce_rq_free(sc->rq[i]);
		}
	}

	for_all_wq_queues(sc, wq, i) {
		if (wq) {
			oce_wq_del(sc->wq[i]);
			oce_wq_free(sc->wq[i]);
		}
	}

	if (sc->mq)
		oce_mq_free(sc->mq);

	for_all_evnt_queues(sc, eq, i) {
		if (eq)
			oce_eq_del(sc->eq[i]);
	}
}

/**
 * @brief 		Function to create a WQ for NIC Tx
 * @param sc 		software handle to the device
 * @param q_len		number of entries in the queue
 * @param wq_type	work queue type
 * @returns		the pointer to the WQ created or NULL on failure
 */
static struct oce_wq *
oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
{
	struct oce_wq *wq;
	int rc = 0, i;

	/* q_len must be min 256 and max 2k */
	if (q_len < 256 || q_len > 2048) {
		device_printf(sc->dev,
			  "Invalid q length. Must be "
			  "[256, 2048]: 0x%x\n", q_len);
		return NULL;
	}

	/* allocate wq */
	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!wq)
		return NULL;

	/* Set the wq config */
	wq->cfg.q_len = q_len;
	wq->cfg.wq_type = (uint8_t) wq_type;
	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
	wq->cfg.nbufs = 2 * wq->cfg.q_len;
	wq->cfg.nhdl = 2 * wq->cfg.q_len;

	wq->parent = (void *)sc;

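	/*
	 * Tx DMA tag: byte alignment, no boundary restriction; one
	 * mapping may cover up to OCE_MAX_TX_SIZE bytes in at most
	 * OCE_MAX_TX_ELEMENTS segments of PAGE_SIZE each, presumably
	 * matching the scatter-gather limits of a Tx WQE fragment list.
	 */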
	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
				1, 0,
				BUS_SPACE_MAXADDR,
				BUS_SPACE_MAXADDR,
				NULL, NULL,
				OCE_MAX_TX_SIZE,
				OCE_MAX_TX_ELEMENTS,
				PAGE_SIZE, 0, NULL, NULL, &wq->tag);

	if (rc)
		goto free_wq;

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
		if (rc)
			goto free_wq;
	}

	wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
	if (!wq->ring)
		goto free_wq;

	LOCK_CREATE(&wq->tx_lock, "TX_lock");
	LOCK_CREATE(&wq->tx_compl_lock, "WQ_HANDLER_LOCK");

	/* Allocate buf ring for multiqueue */
	wq->br = buf_ring_alloc(4096, M_DEVBUF,
			M_WAITOK, &wq->tx_lock.mutex);
	if (!wq->br)
		goto free_wq;
	return wq;

free_wq:
	device_printf(sc->dev, "Create WQ failed\n");
	oce_wq_free(wq);
	return NULL;
}

/**
 * @brief 		Frees the work queue
 * @param wq		pointer to work queue to free
 */
static void
oce_wq_free(struct oce_wq *wq)
{
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	int i;

	taskqueue_drain(taskqueue_swi, &wq->txtask);

	if (wq->ring != NULL) {
		oce_destroy_ring_buffer(sc, wq->ring);
		wq->ring = NULL;
	}

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		if (wq->pckts[i].map != NULL) {
			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
			wq->pckts[i].map = NULL;
		}
	}

	if (wq->tag != NULL)
		bus_dma_tag_destroy(wq->tag);
	if (wq->br != NULL)
		buf_ring_free(wq->br, M_DEVBUF);

	LOCK_DESTROY(&wq->tx_lock);
	LOCK_DESTROY(&wq->tx_compl_lock);
	free(wq, M_DEVBUF);
}

/**
 * @brief 		Create a work queue
 * @param wq		pointer to work queue
 * @param eq		pointer to associated event queue
 * @returns		0 on success, error code otherwise
 */
static int
oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq;
	int rc = 0;

	/* create the CQ */
	cq = oce_cq_create(sc,
			   eq,
			   CQ_LEN_1024,
			   sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
	if (!cq)
		return ENXIO;

	wq->cq = cq;

	rc = oce_mbox_create_wq(wq);
	if (rc)
		goto error;

	wq->qstate = QCREATED;
	wq->wq_free = wq->cfg.q_len;
	wq->ring->cidx = 0;
	wq->ring->pidx = 0;

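	/*
	 * Publish the CQ to its parent EQ and hook up the Tx completion
	 * handler; oce_wq_handler() runs when the EQ dispatches events
	 * for this CQ.  The same registration pattern is repeated for
	 * RQs and the MQ below.
	 */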
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = wq;
	cq->cq_handler = oce_wq_handler;

	return 0;

error:
	device_printf(sc->dev, "WQ create failed\n");
	oce_wq_del(wq);
	return rc;
}

/**
 * @brief 		Delete a work queue
 * @param wq		pointer to work queue
 */
static void
oce_wq_del(struct oce_wq *wq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_wq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;

	if (wq->qstate == QCREATED) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
		fwcmd->params.req.wq_id = wq->wq_id;
		(void)oce_destroy_q(sc, &mbx,
				sizeof(struct mbx_delete_nic_wq), QTYPE_WQ, 0);
		wq->qstate = QDELETED;
	}

	if (wq->cq != NULL) {
		oce_cq_del(sc, wq->cq);
		wq->cq = NULL;
	}
}

/**
 * @brief 		function to allocate receive queue resources
 * @param sc		software handle to the device
 * @param q_len		length of receive queue
 * @param frag_size	size of a receive queue fragment
 * @param mtu		maximum transmission unit
 * @param rss		is-rss-queue flag
 * @returns		the pointer to the RQ created or NULL on failure
 */
static struct oce_rq *
oce_rq_init(POCE_SOFTC sc,
	    uint32_t q_len,
	    uint32_t frag_size,
	    uint32_t mtu, uint32_t rss)
{
	struct oce_rq *rq;
	int rc = 0, i;

	if (OCE_LOG2(frag_size) <= 0)
		return NULL;

	if ((q_len == 0) || (q_len > 1024))
		return NULL;

	/* allocate the rq */
	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!rq)
		return NULL;

	rq->cfg.q_len = q_len;
	rq->cfg.frag_size = frag_size;
	rq->cfg.mtu = mtu;
	rq->cfg.eqd = 0;
	rq->lro_pkts_queued = 0;
	rq->cfg.is_rss_queue = rss;
	rq->pending = 0;

	rq->parent = (void *)sc;

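	/*
	 * Rx DMA tag: every receive buffer maps as a single physically
	 * contiguous segment of oce_rq_buf_size bytes (nsegments = 1),
	 * which appears to be what each posted RQE describes to the
	 * hardware.
	 */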
	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
			1, 0,
			BUS_SPACE_MAXADDR,
			BUS_SPACE_MAXADDR,
			NULL, NULL,
			oce_rq_buf_size,
			1, oce_rq_buf_size, 0, NULL, NULL, &rq->tag);
	if (rc)
		goto free_rq;

	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
		if (rc)
			goto free_rq;
	}

	/* create the ring buffer */
	rq->ring = oce_create_ring_buffer(sc, q_len,
				 sizeof(struct oce_nic_rqe));
	if (!rq->ring)
		goto free_rq;

	LOCK_CREATE(&rq->rx_lock, "RX_lock");

	return rq;

free_rq:
	device_printf(sc->dev, "Create RQ failed\n");
	oce_rq_free(rq);
	return NULL;
}

/**
 * @brief 		Free a receive queue
 * @param rq		pointer to receive queue
 */
static void
oce_rq_free(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i = 0;

	if (rq->ring != NULL) {
		oce_destroy_ring_buffer(sc, rq->ring);
		rq->ring = NULL;
	}
	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		if (rq->pckts[i].map != NULL) {
			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
			rq->pckts[i].map = NULL;
		}
		if (rq->pckts[i].mbuf) {
			m_free(rq->pckts[i].mbuf);
			rq->pckts[i].mbuf = NULL;
		}
	}

	if (rq->tag != NULL)
		bus_dma_tag_destroy(rq->tag);

	LOCK_DESTROY(&rq->rx_lock);
	free(rq, M_DEVBUF);
}

/**
 * @brief 		Create a receive queue
 * @param rq 		receive queue
 * @param if_id		interface identifier index
 * @param eq		pointer to event queue
 * @returns		0 on success, error code otherwise
 */
static int
oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
{
	POCE_SOFTC sc = rq->parent;
	struct oce_cq *cq;

	cq = oce_cq_create(sc, eq,
			sc->enable_hwlro ? CQ_LEN_2048 : CQ_LEN_1024,
			sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);

	if (!cq)
		return ENXIO;

	rq->cq = cq;
	rq->cfg.if_id = if_id;

	/* Don't create RQ here. Create in if_activate */
	rq->qstate     = 0;
	rq->ring->cidx = 0;
	rq->ring->pidx = 0;
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = rq;
	cq->cq_handler = oce_rq_handler;

	return 0;
}

/**
 * @brief 		Delete a receive queue
 * @param rq		receive queue
 */
static void
oce_rq_del(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;
	struct mbx_delete_nic_rq_v1 *fwcmd1;

	if (rq->qstate == QCREATED) {
		bzero(&mbx, sizeof(mbx));
		if (!rq->islro) {
			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
			fwcmd->params.req.rq_id = rq->rq_id;
			(void)oce_destroy_q(sc, &mbx,
				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
		} else {
			fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
			fwcmd1->params.req.rq_id = rq->rq_id;
			fwcmd1->params.req.rq_flags =
				(NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
			(void)oce_destroy_q(sc, &mbx,
				sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
		}
		rq->qstate = QDELETED;
	}

	if (rq->cq != NULL) {
		oce_cq_del(sc, rq->cq);
		rq->cq = NULL;
	}
}

/**
 * @brief		function to create an event queue
 * @param sc		software handle to the device
 * @param q_len		length of event queue
 * @param item_size	size of an event queue item
 * @param eq_delay	event queue delay
 * @param vector	interrupt vector index
 * @retval eq      	success, pointer to event queue
 * @retval NULL		failure
 */
static struct oce_eq *
oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
	      uint32_t item_size,
	      uint32_t eq_delay,
	      uint32_t vector)
{
	struct oce_eq *eq;
	int rc = 0;

	/* allocate an eq */
	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (eq == NULL)
		return NULL;

	eq->parent = (void *)sc;
	eq->eq_id = 0xffff;
	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!eq->ring)
		goto free_eq;

	eq->eq_cfg.q_len = q_len;
	eq->eq_cfg.item_size = item_size;
	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;

	rc = oce_mbox_create_eq(eq);
	if (rc)
		goto free_eq;

	sc->intrs[sc->neqs++].eq = eq;

	return eq;

free_eq:
	oce_eq_del(eq);
	return NULL;
}

/**
 * @brief 		Function to delete an event queue
 * @param eq		pointer to an event queue
 */
static void
oce_eq_del(struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_eq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;

	if (eq->eq_id != 0xffff) {
		bzero(&mbx, sizeof(mbx));
		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
		fwcmd->params.req.id = eq->eq_id;
		(void)oce_destroy_q(sc, &mbx,
			sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0);
	}

	if (eq->ring != NULL) {
		oce_destroy_ring_buffer(sc, eq->ring);
		eq->ring = NULL;
	}

	free(eq, M_DEVBUF);
}

/**
 * @brief		Function to create an MQ
 * @param sc		software handle to the device
 * @param eq		the EQ to associate with the MQ for event notification
 * @param q_len		the number of entries to create in the MQ
 * @returns		pointer to the created MQ, or NULL on failure
 */
static struct oce_mq *
oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
{
	struct oce_mbx mbx;
	struct mbx_create_common_mq_ex *fwcmd = NULL;
	struct oce_mq *mq = NULL;
	int rc = 0;
	struct oce_cq *cq;
	oce_mq_ext_ctx_t *ctx;
	uint32_t num_pages;
	int version;

	cq = oce_cq_create(sc, eq, CQ_LEN_256,
			sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
	if (!cq)
		return NULL;

	/* allocate the mq */
	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!mq) {
		oce_cq_del(sc, cq);
		goto error;
	}

	mq->parent = sc;

	mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
	if (!mq->ring)
		goto error;

	bzero(&mbx, sizeof(struct oce_mbx));

	version = IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0;
	fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
				MBX_SUBSYSTEM_COMMON,
				OPCODE_COMMON_CREATE_MQ_EXT,
				MBX_TIMEOUT_SEC,
				sizeof(struct mbx_create_common_mq_ex),
				version);

	num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);

	ctx = &fwcmd->params.req.context;

	if (IS_XE201(sc)) {
		ctx->v1.num_pages = num_pages;
		ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
		ctx->v1.cq_id = cq->cq_id;
		ctx->v1.valid = 1;
		ctx->v1.async_cq_id = cq->cq_id;
		ctx->v1.async_cq_valid = 1;
		/* Subscribe to Link State and Group 5 Events (bits 1 & 5 set) */
		ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
		ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
		ctx->v1.async_evt_bitmap |=
					LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
	} else {
		ctx->v0.num_pages = num_pages;
		ctx->v0.cq_id = cq->cq_id;
		ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
		ctx->v0.valid = 1;
		/* Subscribe to all async events (every bitmap bit set) */
		ctx->v0.async_evt_bitmap = 0xffffffff;
	}

	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, &mbx, NULL);
	if (!rc)
		rc = fwcmd->hdr.u0.rsp.status;
	if (rc) {
		device_printf(sc->dev, "%s failed - cmd status: %d\n",
			      __FUNCTION__, rc);
		goto error;
	}
	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
	mq->cq = cq;
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	mq->cq->eq = eq;
	mq->cfg.q_len = (uint8_t) q_len;
	mq->cfg.eqd = 0;
	mq->qstate = QCREATED;

	mq->cq->cb_arg = mq;
	mq->cq->cq_handler = oce_mq_handler;

	return mq;

error:
	device_printf(sc->dev, "MQ create failed\n");
	oce_mq_free(mq);
	mq = NULL;
	return mq;
}

/**
 * @brief		Function to free a mailbox queue
 * @param mq		pointer to a mailbox queue
 */
static void
oce_mq_free(struct oce_mq *mq)
{
	POCE_SOFTC sc;
	struct oce_mbx mbx;
	struct mbx_destroy_common_mq *fwcmd;

	if (!mq)		/* may be NULL on the MQ create error path */
		return;
	sc = (POCE_SOFTC) mq->parent;

	if (mq->ring != NULL) {
		oce_destroy_ring_buffer(sc, mq->ring);
		mq->ring = NULL;
		if (mq->qstate == QCREATED) {
			bzero(&mbx, sizeof (struct oce_mbx));
			fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
			fwcmd->params.req.id = mq->mq_id;
			(void) oce_destroy_q(sc, &mbx,
				sizeof (struct mbx_destroy_common_mq),
				QTYPE_MQ, 0);
		}
		mq->qstate = QDELETED;
	}

	if (mq->cq != NULL) {
		oce_cq_del(sc, mq->cq);
		mq->cq = NULL;
	}

	free(mq, M_DEVBUF);
	mq = NULL;
}

/**
 * @brief		Function to delete a EQ, CQ, MQ, WQ or RQ
 * @param sc		software handle to the device
 * @param mbx		mailbox command to send to the fw to delete the queue
 *			(mbx contains the queue information to delete)
 * @param req_size	the size of the mbx payload dependent on the qtype
 * @param qtype		the type of queue i.e. EQ, CQ, MQ, WQ or RQ
 * @param version	version of the mailbox command
 * @returns 		0 on success, failure otherwise
 */
static int
oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
		enum qtype qtype, int version)
{
	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
	int opcode;
	int subsys;
	int rc = 0;

	switch (qtype) {
	case QTYPE_EQ:
		opcode = OPCODE_COMMON_DESTROY_EQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_CQ:
		opcode = OPCODE_COMMON_DESTROY_CQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_MQ:
		opcode = OPCODE_COMMON_DESTROY_MQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_WQ:
		opcode = NIC_DELETE_WQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	case QTYPE_RQ:
		opcode = NIC_DELETE_RQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	default:
		return EINVAL;
	}

	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
				opcode, MBX_TIMEOUT_SEC, req_size,
				version);

	mbx->u0.s.embedded = 1;
	mbx->payload_length = (uint32_t) req_size;
	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, mbx, NULL);
	if (!rc)
		rc = hdr->u0.rsp.status;
	if (rc)
		device_printf(sc->dev, "%s failed - cmd status: %d\n",
			      __FUNCTION__, rc);
	return rc;
}

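/*
 * Typical oce_destroy_q() usage (mirroring oce_eq_del() above): the
 * caller zeroes an oce_mbx, fills the queue id into the delete request
 * in the payload, then passes the payload size, queue type and command
 * version:
 *
 *	struct oce_mbx mbx;
 *	struct mbx_destroy_common_eq *fwcmd;
 *
 *	bzero(&mbx, sizeof(mbx));
 *	fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
 *	fwcmd->params.req.id = eq->eq_id;
 *	(void)oce_destroy_q(sc, &mbx,
 *	    sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0);
 */
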
/**
 * @brief		Function to create a completion queue
 * @param sc		software handle to the device
 * @param eq		optional eq to be associated with to the cq
 * @param q_len		length of completion queue
 * @param item_size	size of completion queue items
 * @param sol_event	command context event
 * @param is_eventable	eventable flag
 * @param nodelay	no delay flag
 * @param ncoalesce	coalescence setting
 * @returns 		pointer to the cq created, NULL on failure
 */
struct oce_cq *
oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
	      uint32_t q_len,
	      uint32_t item_size,
	      uint32_t sol_event,
	      uint32_t is_eventable,
	      uint32_t nodelay, uint32_t ncoalesce)
{
	struct oce_cq *cq = NULL;
	int rc = 0;

	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!cq)
		return NULL;

	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!cq->ring)
		goto error;

	cq->parent = sc;
	cq->eq = eq;
	cq->cq_cfg.q_len = q_len;
	cq->cq_cfg.item_size = item_size;
	cq->cq_cfg.nodelay = (uint8_t) nodelay;

	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
	if (rc)
		goto error;

	sc->cq[sc->ncqs++] = cq;

	return cq;

error:
	device_printf(sc->dev, "CQ create failed\n");
	oce_cq_del(sc, cq);
	return NULL;
}

/**
 * @brief		Deletes the completion queue
 * @param sc		software handle to the device
 * @param cq		pointer to a completion queue
 */
static void
oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_cq *fwcmd;

	if (cq->ring != NULL) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
		fwcmd->params.req.id = cq->cq_id;
		(void)oce_destroy_q(sc, &mbx,
			sizeof(struct mbx_destroy_common_cq), QTYPE_CQ, 0);
		/* now destroy the ring */
		oce_destroy_ring_buffer(sc, cq->ring);
		cq->ring = NULL;
	}

	free(cq, M_DEVBUF);
	cq = NULL;
}

/**
 * @brief		Start a receive queue
 * @param rq		pointer to a receive queue
 */
int
oce_start_rq(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int rc;

	if (sc->enable_hwlro)
		rc = oce_alloc_rx_bufs(rq, 960);
	else
		rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len - 1);

	if (rc == 0)
		oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);

	return rc;
}

/**
 * @brief		Start a work queue
 * @param wq		pointer to a work queue
 */
int
oce_start_wq(struct oce_wq *wq)
{
	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
	return 0;
}

/**
 * @brief		Start a mailbox queue
 * @param mq		pointer to a mailbox queue
 */
int
oce_start_mq(struct oce_mq *mq)
{
	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
	return 0;
}

/**
 * @brief		Function to arm an EQ so that it can generate events
 * @param sc		software handle to the device
 * @param qid		id of the EQ returned by the fw at the time of creation
 * @param npopped	number of EQEs to arm
 * @param rearm		rearm bit enable/disable
 * @param clearint	bit to clear the interrupt condition because of which
 *			EQEs are generated
 */
void
oce_arm_eq(POCE_SOFTC sc,
	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
{
	eq_db_t eq_db = { 0 };

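	/*
	 * Compose the EQ doorbell word: the write to PD_EQ_DB below acks
	 * `npopped' consumed EQEs, optionally clears the interrupt
	 * condition (clrint) and, if rearm is set, re-enables event
	 * generation for this EQ.
	 */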
	eq_db.bits.rearm = rearm;
	eq_db.bits.event = 1;
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;
	eq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
}

/**
 * @brief		Function to arm a CQ with CQEs
 * @param sc		software handle to the device
 * @param qid		id of the CQ returned by the fw at the time of creation
 * @param npopped	number of CQEs to arm
 * @param rearm		rearm bit enable/disable
 */
void
oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
{
	cq_db_t cq_db = { 0 };

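	/* Same doorbell scheme as oce_arm_eq(), but with the event bit
	 * clear so the write addresses the CQ rather than its EQ. */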
	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;
	cq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
}

/*
 * @brief		function to cleanup the eqs used during stop
 * @param eq		pointer to event queue structure
 */
void
oce_drain_eq(struct oce_eq *eq)
{
	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	POCE_SOFTC sc = eq->parent;

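	/*
	 * Consume every posted entry (a non-zero evnt field marks a
	 * valid EQE), then ack them with rearm == FALSE so the EQ stays
	 * quiet while the interface is being stopped.
	 */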
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		num_eqe++;
		RING_GET(eq->ring, 1);
	} while (TRUE);

	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
}

void
oce_drain_wq_cq(struct oce_wq *wq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);

	do {
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		if (cqe->u0.dw[3] == 0)
			break;
		cqe->u0.dw[3] = 0;
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
		RING_GET(cq->ring, 1);
		num_cqes++;
	} while (TRUE);

	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
}

/*
 * @brief		function to drain a MCQ and process its CQEs
 * @param arg		pointer to the MQ to drain
 *
 * Currently a stub; see the TODO in the body.
 */
void
oce_drain_mq_cq(void *arg)
{
	/* TODO: additional code. */
	return;
}

/**
 * @brief		function to drain a receive queue's completion queue
 * @param rq		pointer to the RQ whose CQ is drained
 */
void
oce_drain_rq_cq(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	POCE_SOFTC sc;

	sc = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
	}
	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);
}

void
oce_free_posted_rxbuf(struct oce_rq *rq)
{
	struct oce_packet_desc *pd;

	while (rq->pending) {
		pd = &rq->pckts[rq->ring->cidx];
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		if (pd->mbuf != NULL) {
			m_freem(pd->mbuf);
			pd->mbuf = NULL;
		}

		RING_GET(rq->ring, 1);
		rq->pending--;
	}
}

void
oce_rx_cq_clean_hwlro(struct oce_rq *rq)
{
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct nic_hwlro_singleton_cqe *cqe;
	struct nic_hwlro_cqe_part2 *cqe2;
	int flush_wait = 0;
	int flush_compl = 0;
	int num_frags = 0;

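	/*
	 * HW LRO completions arrive either as a single "singleton" CQE
	 * (cqe_type 0) or as a part1/part2 pair (cqe_type 1 followed by
	 * 2).  Discard the Rx fragments each completion covers until the
	 * zero-length flush completion shows up, giving up after roughly
	 * 100 x 1ms polls if it never does.
	 */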
	for (;;) {
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
		    BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct nic_hwlro_singleton_cqe);
		if (cqe->valid) {
			if (cqe->cqe_type == 0) { /* singleton cqe */
				/* we should not get singleton cqe after cqe1 on same rq */
				if (rq->cqe_firstpart != NULL) {
					device_printf(sc->dev, "Got singleton cqe after cqe1\n");
					goto exit_rx_cq_clean_hwlro;
				}
				num_frags = cqe->pkt_size / rq->cfg.frag_size;
				if (cqe->pkt_size % rq->cfg.frag_size)
					num_frags++;
				oce_discard_rx_comp(rq, num_frags);
				/* Check if CQE is flush completion */
				if (!cqe->pkt_size)
					flush_compl = 1;
				cqe->valid = 0;
				RING_GET(cq->ring, 1);
			} else if (cqe->cqe_type == 0x1) { /* first part */
				/* we should not get cqe1 after cqe1 on same rq */
				if (rq->cqe_firstpart != NULL) {
					device_printf(sc->dev, "Got cqe1 after cqe1\n");
					goto exit_rx_cq_clean_hwlro;
				}
				rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
				RING_GET(cq->ring, 1);
			} else if (cqe->cqe_type == 0x2) { /* second part */
				cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
				/* We should not get cqe2 without cqe1 */
				if (rq->cqe_firstpart == NULL) {
					device_printf(sc->dev, "Got cqe2 without cqe1\n");
					goto exit_rx_cq_clean_hwlro;
				}
				num_frags = cqe2->coalesced_size / rq->cfg.frag_size;
				if (cqe2->coalesced_size % rq->cfg.frag_size)
					num_frags++;

				/* Flush completion will always come in singleton CQE */
				oce_discard_rx_comp(rq, num_frags);

				rq->cqe_firstpart->valid = 0;
				cqe2->valid = 0;
				rq->cqe_firstpart = NULL;
				RING_GET(cq->ring, 1);
			}
			oce_arm_cq(sc, cq->cq_id, 1, FALSE);
			if (flush_compl)
				break;
		} else {
			if (flush_wait++ > 100) {
				device_printf(sc->dev, "did not receive hwlro flush compl\n");
				break;
			}
			oce_arm_cq(sc, cq->cq_id, 0, TRUE);
			DELAY(1000);
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);

exit_rx_cq_clean_hwlro:
	return;
}

void
oce_rx_cq_clean(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_cq *cq;
	POCE_SOFTC sc;
	int flush_wait = 0;
	int flush_compl = 0;

	sc = rq->parent;
	cq = rq->cq;

	for (;;) {
		bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		if (RQ_CQE_VALID(cqe)) {
			DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
			oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
			/* Check if CQE is flush completion */
			if ((cqe->u0.s.num_fragments == 0) &&
			    (cqe->u0.s.pkt_size == 0) &&
			    (cqe->u0.s.error == 0))
				flush_compl = 1;

			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
#if defined(INET6) || defined(INET)
			if (IF_LRO_ENABLED(sc))
				oce_rx_flush_lro(rq);
#endif
			oce_arm_cq(sc, cq->cq_id, 1, FALSE);
			if (flush_compl)
				break;
		} else {
			if (flush_wait++ > 100) {
				device_printf(sc->dev, "did not receive flush compl\n");
				break;
			}
			oce_arm_cq(sc, cq->cq_id, 0, TRUE);
			DELAY(1000);
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);
}

void
oce_stop_rx(POCE_SOFTC sc)
{
	struct epoch_tracker et;
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;
	struct mbx_delete_nic_rq_v1 *fwcmd1;
	struct oce_rq *rq;
	int i = 0;

	NET_EPOCH_ENTER(et);
	/* before deleting disable hwlro */
	if (sc->enable_hwlro)
		oce_mbox_nic_set_iface_lro_config(sc, 0);

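	/*
	 * Deleting an RQ in firmware flushes it; oce_rx_cq_clean*() then
	 * polls the CQ until the zero-length flush completion arrives,
	 * after which the still-posted receive buffers can be freed
	 * safely.
	 */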
	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED) {
			/* Delete rxq in firmware */
			LOCK(&rq->rx_lock);

			bzero(&mbx, sizeof(mbx));
			if (!rq->islro) {
				fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
				fwcmd->params.req.rq_id = rq->rq_id;
				(void)oce_destroy_q(sc, &mbx,
				    sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
			} else {
				fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
				fwcmd1->params.req.rq_id = rq->rq_id;
				fwcmd1->params.req.rq_flags =
				    (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
				(void)oce_destroy_q(sc, &mbx,
				    sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
			}
			rq->qstate = QDELETED;

			DELAY(1000);

			if (!rq->islro)
				oce_rx_cq_clean(rq);
			else
				oce_rx_cq_clean_hwlro(rq);

			/* Free posted RX buffers that are not used */
			oce_free_posted_rxbuf(rq);
			UNLOCK(&rq->rx_lock);
		}
	}
	NET_EPOCH_EXIT(et);
}

int
oce_start_rx(POCE_SOFTC sc)
{
	struct oce_rq *rq;
	int rc = 0, i;

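	/*
	 * The default RQ (index 0) is always created with the base
	 * command and never does hardware LRO; with HW LRO enabled the
	 * remaining (RSS) RQs use the v2 create command and are flagged
	 * islro.
	 */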
	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED)
			continue;
		if ((i == 0) || (!sc->enable_hwlro)) {
			rc = oce_mbox_create_rq(rq);
			if (rc)
				goto error;
			rq->islro = 0;
		} else {
			rc = oce_mbox_create_rq_v2(rq);
			if (rc)
				goto error;
			rq->islro = 1;
		}
		/* reset queue pointers */
		rq->qstate       = QCREATED;
		rq->pending      = 0;
		rq->ring->cidx   = 0;
		rq->ring->pidx   = 0;
	}

	if (sc->enable_hwlro) {
		rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
		if (rc)
			goto error;
	}

	DELAY(1);

	/* RSS config */
	if (is_rss_enabled(sc)) {
		rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
		if (rc)
			goto error;
	}

	DELAY(1);
	return rc;

error:
	device_printf(sc->dev, "Start RX failed\n");
	return rc;
}
1328