xref: /freebsd/sys/dev/oce/oce_queue.c (revision 7750ad47a9a7dbc83f87158464170c8640723293)
1 /*-
2  * Copyright (C) 2012 Emulex
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the Emulex Corporation nor the names of its
16  *    contributors may be used to endorse or promote products derived from
17  *    this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Contact Information:
32  * freebsd-drivers@emulex.com
33  *
34  * Emulex
35  * 3333 Susan Street
36  * Costa Mesa, CA 92626
37  */
38 
39 /* $FreeBSD$ */
40 
41 #include "oce_if.h"
42 
43 /*****************************************************
44  * local queue functions
45  *****************************************************/
46 
47 static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
48 				  uint32_t q_len, uint32_t wq_type);
49 static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
50 static void oce_wq_free(struct oce_wq *wq);
51 static void oce_wq_del(struct oce_wq *wq);
52 static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
53 				  uint32_t q_len,
54 				  uint32_t frag_size,
55 				  uint32_t mtu, uint32_t rss);
56 static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
57 static void oce_rq_free(struct oce_rq *rq);
58 static void oce_rq_del(struct oce_rq *rq);
59 static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
60 				    uint32_t q_len,
61 				    uint32_t item_size,
62 				    uint32_t eq_delay,
63 				    uint32_t vector);
64 static void oce_eq_del(struct oce_eq *eq);
65 static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
66 				    struct oce_eq *eq, uint32_t q_len);
67 static void oce_mq_free(struct oce_mq *mq);
68 static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx,
69 			 size_t req_size, enum qtype qtype);
70 struct oce_cq *oce_cq_create(POCE_SOFTC sc,
71 			     struct oce_eq *eq,
72 			     uint32_t q_len,
73 			     uint32_t item_size,
74 			     uint32_t sol_event,
75 			     uint32_t is_eventable,
76 			     uint32_t nodelay, uint32_t ncoalesce);
77 static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
78 
79 
80 
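/*
 * Usage sketch (an assumption, not verified against oce_if.c): the interface
 * attach path is expected to call oce_queue_init_all() once interrupt vectors
 * have been allocated, and oce_queue_release_all() on detach or when attach
 * fails part-way through.
 */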
81 /**
82  * @brief	Create and initialize all the queues on the board
83  * @param sc	software handle to the device
84  * @returns		0 if successful, 1 otherwise
85  **/
86 int
87 oce_queue_init_all(POCE_SOFTC sc)
88 {
89 	int rc = 0, i, vector;
90 	struct oce_wq *wq;
91 	struct oce_rq *rq;
92 
93 	/* alloc TX/RX queues */
94 	for_all_wq_queues(sc, wq, i) {
95 		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
96 					 NIC_WQ_TYPE_STANDARD);
97 		if (!sc->wq[i])
98 			goto error;
99 
100 	}
101 
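	/*
	 * RQ 0 is always created as the default (non-RSS) receive queue;
	 * the remaining RQs are marked as RSS queues only when
	 * sc->rss_enable is set.
	 */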
102 	for_all_rq_queues(sc, rq, i) {
103 		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
104 					OCE_MAX_JUMBO_FRAME_SIZE,
105 					(i == 0) ? 0 : sc->rss_enable);
106 		if (!sc->rq[i])
107 			goto error;
108 	}
109 
110 	/* Create network interface on card */
111 	if (oce_create_nw_interface(sc))
112 		goto error;
113 
114 	/* create all of the event queues */
115 	for (vector = 0; vector < sc->intr_count; vector++) {
116 		sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
117 						 0, vector);
118 		if (!sc->eq[vector])
119 			goto error;
120 	}
121 
122 	/* create Tx, Rx and mcc queues */
123 	for_all_wq_queues(sc, wq, i) {
124 		rc = oce_wq_create(wq, sc->eq[i]);
125 		if (rc)
126 			goto error;
127 		wq->queue_index = i;
128 		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
129 	}
130 
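	/*
	 * RQ-to-EQ mapping: the default RQ 0 shares eq[0] (also used by
	 * WQ 0), while each RSS RQ i (i > 0) completes on eq[i - 1], so
	 * receive work is spread across the remaining vectors.
	 */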
131 	for_all_rq_queues(sc, rq, i) {
132 		rc = oce_rq_create(rq, sc->if_id,
133 					sc->eq[(i == 0) ? 0:(i-1)]);
134 		if (rc)
135 			goto error;
136 		rq->queue_index = i;
137 	}
138 
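	/* The MQ (mailbox queue, also used for async events) shares eq[0] and is fixed at 64 entries. */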
139 	sc->mq = oce_mq_create(sc, sc->eq[0], 64);
140 	if (!sc->mq)
141 		goto error;
142 
143 	return rc;
144 
145 error:
146 	oce_queue_release_all(sc);
147 	return 1;
148 }
149 
150 
151 
152 /**
153  * @brief Releases all the queues created (RQs, WQs, MQ and EQs)
154  * @param sc		software handle to the device
155  */
156 void
157 oce_queue_release_all(POCE_SOFTC sc)
158 {
159 	int i = 0;
160 	struct oce_wq *wq;
161 	struct oce_rq *rq;
162 	struct oce_eq *eq;
163 
164 	for_all_rq_queues(sc, rq, i) {
165 		if (rq) {
166 			oce_rq_del(sc->rq[i]);
167 			oce_rq_free(sc->rq[i]);
168 		}
169 	}
170 
171 	for_all_wq_queues(sc, wq, i) {
172 		if (wq) {
173 			oce_wq_del(sc->wq[i]);
174 			oce_wq_free(sc->wq[i]);
175 		}
176 	}
177 
178 	if (sc->mq)
179 		oce_mq_free(sc->mq);
180 
181 	for_all_evnt_queues(sc, eq, i) {
182 		if (eq)
183 			oce_eq_del(sc->eq[i]);
184 	}
185 }
186 
187 
188 
189 /**
190  * @brief 		Function to create a WQ for NIC Tx
191  * @param sc 		software handle to the device
192  * @param q_len	number of entries in the queue
193  * @param wq_type	work queue type
194  * @returns		the pointer to the WQ created or NULL on failure
195  */
196 static struct oce_wq *
197 oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
198 {
199 	struct oce_wq *wq;
200 	int rc = 0, i;
201 
202 	/* q_len must be min 256 and max 2k */
203 	if (q_len < 256 || q_len > 2048) {
204 		device_printf(sc->dev,
205 			  "Invalid q length. Must be "
206 			  "[256, 2048]: 0x%x\n", q_len);
207 		return NULL;
208 	}
209 
210 	/* allocate wq */
211 	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
212 	if (!wq)
213 		return NULL;
214 
215 	/* Set the wq config */
216 	wq->cfg.q_len = q_len;
217 	wq->cfg.wq_type = (uint8_t) wq_type;
218 	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
219 	wq->cfg.nbufs = 2 * wq->cfg.q_len;
220 	wq->cfg.nhdl = 2 * wq->cfg.q_len;
221 
222 	wq->parent = (void *)sc;
223 
224 	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
225 				1, 0,
226 				BUS_SPACE_MAXADDR,
227 				BUS_SPACE_MAXADDR,
228 				NULL, NULL,
229 				OCE_MAX_TX_SIZE,
230 				OCE_MAX_TX_ELEMENTS,
231 				PAGE_SIZE, 0, NULL, NULL, &wq->tag);
232 
233 	if (rc)
234 		goto free_wq;
235 
236 
237 	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
238 		rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
239 		if (rc)
240 			goto free_wq;
241 	}
242 
243 	wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
244 	if (!wq->ring)
245 		goto free_wq;
246 
247 
248 	LOCK_CREATE(&wq->tx_lock, "TX_lock");
249 
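	/* buf_ring(9) is only available from FreeBSD 8.0 onward, hence the version guard below. */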
250 #if __FreeBSD_version >= 800000
251 	/* Allocate buf ring for multiqueue*/
252 	wq->br = buf_ring_alloc(4096, M_DEVBUF,
253 			M_WAITOK, &wq->tx_lock.mutex);
254 	if (!wq->br)
255 		goto free_wq;
256 #endif
257 	return wq;
258 
259 
260 free_wq:
261 	device_printf(sc->dev, "Create WQ failed\n");
262 	oce_wq_free(wq);
263 	return NULL;
264 }
265 
266 
267 
268 /**
269  * @brief 		Frees the work queue
270  * @param wq		pointer to work queue to free
271  */
272 static void
273 oce_wq_free(struct oce_wq *wq)
274 {
275 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
276 	int i;
277 
278 	taskqueue_drain(taskqueue_swi, &wq->txtask);
279 
280 	if (wq->ring != NULL) {
281 		oce_destroy_ring_buffer(sc, wq->ring);
282 		wq->ring = NULL;
283 	}
284 
285 	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
286 		if (wq->pckts[i].map != NULL) {
287 			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
288 			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
289 			wq->pckts[i].map = NULL;
290 		}
291 	}
292 
293 	if (wq->tag != NULL)
294 		bus_dma_tag_destroy(wq->tag);
295 	if (wq->br != NULL)
296 		buf_ring_free(wq->br, M_DEVBUF);
297 
298 	LOCK_DESTROY(&wq->tx_lock);
299 	free(wq, M_DEVBUF);
300 }
301 
302 
303 
304 /**
305  * @brief 		Create a work queue
306  * @param wq		pointer to work queue
307  * @param eq		pointer to associated event queue
308  */
309 static int
310 oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
311 {
312 	POCE_SOFTC sc = wq->parent;
313 	struct oce_cq *cq;
314 	int rc = 0;
315 
316 	/* create the CQ */
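	/* Tx completions use an eventable 1024-entry CQ (nodelay 0, ncoalesce 3). */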
317 	cq = oce_cq_create(sc,
318 			   eq,
319 			   CQ_LEN_1024,
320 			   sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
321 	if (!cq)
322 		return ENXIO;
323 
324 
325 	wq->cq = cq;
326 
327 	rc = oce_mbox_create_wq(wq);
328 	if (rc)
329 		goto error;
330 
331 	wq->qstate = QCREATED;
332 	wq->wq_free = wq->cfg.q_len;
333 	wq->ring->cidx = 0;
334 	wq->ring->pidx = 0;
335 
336 	eq->cq[eq->cq_valid] = cq;
337 	eq->cq_valid++;
338 	cq->cb_arg = wq;
339 	cq->cq_handler = oce_wq_handler;
340 
341 	return 0;
342 
343 error:
344 	device_printf(sc->dev, "WQ create failed\n");
345 	oce_wq_del(wq);
346 	return rc;
347 }
348 
349 
350 
351 
352 /**
353  * @brief 		Delete a work queue
354  * @param wq		pointer to work queue
355  */
356 static void
357 oce_wq_del(struct oce_wq *wq)
358 {
359 	struct oce_mbx mbx;
360 	struct mbx_delete_nic_wq *fwcmd;
361 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
362 
363 	if (wq->qstate == QCREATED) {
364 		bzero(&mbx, sizeof(struct oce_mbx));
365 		/* now fill the command */
366 		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
367 		fwcmd->params.req.wq_id = wq->wq_id;
368 		(void)oce_destroy_q(sc, &mbx,
369 				sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
370 		wq->qstate = QDELETED;
371 	}
372 
373 	if (wq->cq != NULL) {
374 		oce_cq_del(sc, wq->cq);
375 		wq->cq = NULL;
376 	}
377 }
378 
379 
380 
381 /**
382  * @brief 		function to allocate receive queue resources
383  * @param sc		software handle to the device
384  * @param q_len		length of receive queue
385  * @param frag_size	size of a receive queue fragment
386  * @param mtu		maximum transmission unit
387  * @param rss		is-rss-queue flag
388  * @returns		the pointer to the RQ created or NULL on failure
389  */
390 static struct oce_rq *
391 oce_rq_init(POCE_SOFTC sc,
392 	    uint32_t q_len,
393 	    uint32_t frag_size,
394 	    uint32_t mtu, uint32_t rss)
395 {
396 	struct oce_rq *rq;
397 	int rc = 0, i;
398 
399 	if (OCE_LOG2(frag_size) <= 0)
400 		return NULL;
401 
402 	if ((q_len == 0) || (q_len > 1024))
403 		return NULL;
404 
405 	/* allocate the rq */
406 	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
407 	if (!rq)
408 		return NULL;
409 
410 
411 	rq->cfg.q_len = q_len;
412 	rq->cfg.frag_size = frag_size;
413 	rq->cfg.mtu = mtu;
414 	rq->cfg.eqd = 0;
415 	rq->lro_pkts_queued = 0;
416 	rq->cfg.is_rss_queue = rss;
417 	rq->packets_in = 0;
418 	rq->packets_out = 0;
419 	rq->pending = 0;
420 
421 	rq->parent = (void *)sc;
422 
423 	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
424 				1, 0,
425 				BUS_SPACE_MAXADDR,
426 				BUS_SPACE_MAXADDR,
427 				NULL, NULL,
428 				OCE_MAX_RX_SIZE,
429 				1, PAGE_SIZE, 0, NULL, NULL, &rq->tag);
430 
431 	if (rc)
432 		goto free_rq;
433 
434 	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
435 		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
436 		if (rc)
437 			goto free_rq;
438 	}
439 
440 	/* create the ring buffer */
441 	rq->ring = oce_create_ring_buffer(sc, q_len,
442 				 sizeof(struct oce_nic_rqe));
443 	if (!rq->ring)
444 		goto free_rq;
445 
446 	LOCK_CREATE(&rq->rx_lock, "RX_lock");
447 
448 	return rq;
449 
450 free_rq:
451 	device_printf(sc->dev, "Create RQ failed\n");
452 	oce_rq_free(rq);
453 	return NULL;
454 }
455 
456 
457 
458 
459 /**
460  * @brief 		Free a receive queue
461  * @param rq		pointer to receive queue
462  */
463 static void
464 oce_rq_free(struct oce_rq *rq)
465 {
466 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
467 	int i = 0 ;
468 
469 	if (rq->ring != NULL) {
470 		oce_destroy_ring_buffer(sc, rq->ring);
471 		rq->ring = NULL;
472 	}
473 	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
474 		if (rq->pckts[i].map != NULL) {
475 			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
476 			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
477 			rq->pckts[i].map = NULL;
478 		}
479 		if (rq->pckts[i].mbuf) {
480 			m_free(rq->pckts[i].mbuf);
481 			rq->pckts[i].mbuf = NULL;
482 		}
483 	}
484 
485 	if (rq->tag != NULL)
486 		bus_dma_tag_destroy(rq->tag);
487 
488 	LOCK_DESTROY(&rq->rx_lock);
489 	free(rq, M_DEVBUF);
490 }
491 
492 
493 
494 
495 /**
496  * @brief 		Create a receive queue
497  * @param rq 		receive queue
498  * @param if_id		interface identifier index
499  * @param eq		pointer to event queue
500  */
501 static int
502 oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
503 {
504 	POCE_SOFTC sc = rq->parent;
505 	struct oce_cq *cq;
506 
507 	cq = oce_cq_create(sc,
508 			   eq,
509 			   CQ_LEN_1024,
510 			   sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
511 	if (!cq)
512 		return ENXIO;
513 
514 	rq->cq = cq;
515 	rq->cfg.if_id = if_id;
516 
517 	/* Don't create the RQ in firmware here; it is created in if_activate */
518 	rq->qstate     = 0;
519 	rq->ring->cidx = 0;
520 	rq->ring->pidx = 0;
521 	eq->cq[eq->cq_valid] = cq;
522 	eq->cq_valid++;
523 	cq->cb_arg = rq;
524 	cq->cq_handler = oce_rq_handler;
525 
526 	return 0;
527 
528 }
529 
530 
531 
532 
533 /**
534  * @brief 		Delete a receive queue
535  * @param rq		receive queue
536  */
537 static void
538 oce_rq_del(struct oce_rq *rq)
539 {
540 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
541 	struct oce_mbx mbx;
542 	struct mbx_delete_nic_rq *fwcmd;
543 
544 	if (rq->qstate == QCREATED) {
545 		bzero(&mbx, sizeof(mbx));
546 
547 		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
548 		fwcmd->params.req.rq_id = rq->rq_id;
549 		(void)oce_destroy_q(sc, &mbx,
550 				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
551 		rq->qstate = QDELETED;
552 	}
553 
554 	if (rq->cq != NULL) {
555 		oce_cq_del(sc, rq->cq);
556 		rq->cq = NULL;
557 	}
558 }
559 
560 
561 
562 /**
563  * @brief		function to create an event queue
564  * @param sc		software handle to the device
565  * @param q_len		length of event queue
566  * @param item_size	size of an event queue item
567  * @param eq_delay	event queue delay
 * @param vector	interrupt vector index
568  * @retval eq      	success, pointer to event queue
569  * @retval NULL		failure
570  */
571 static struct oce_eq *
572 oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
573 	      uint32_t item_size,
574 	      uint32_t eq_delay,
575 	      uint32_t vector)
576 {
577 	struct oce_eq *eq;
578 	int rc = 0;
579 
580 	/* allocate an eq */
581 	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
582 	if (eq == NULL)
583 		return NULL;
584 
585 	eq->parent = (void *)sc;
586 	eq->eq_id = 0xffff;
587 	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
588 	if (!eq->ring)
589 		goto free_eq;
590 
591 	eq->eq_cfg.q_len = q_len;
592 	eq->eq_cfg.item_size = item_size;
593 	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;
594 
595 	rc = oce_mbox_create_eq(eq);
596 	if (rc)
597 		goto free_eq;
598 
599 	sc->intrs[sc->neqs++].eq = eq;
600 
601 	return eq;
602 
603 free_eq:
604 	oce_eq_del(eq);
605 	return NULL;
606 }
607 
608 
609 
610 
611 /**
612  * @brief 		Function to delete an event queue
613  * @param eq		pointer to an event queue
614  */
615 static void
616 oce_eq_del(struct oce_eq *eq)
617 {
618 	struct oce_mbx mbx;
619 	struct mbx_destroy_common_eq *fwcmd;
620 	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;
621 
622 	if (eq->eq_id != 0xffff) {
623 		bzero(&mbx, sizeof(mbx));
624 		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
625 		fwcmd->params.req.id = eq->eq_id;
626 		(void)oce_destroy_q(sc, &mbx,
627 			sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
628 	}
629 
630 	if (eq->ring != NULL) {
631 		oce_destroy_ring_buffer(sc, eq->ring);
632 		eq->ring = NULL;
633 	}
634 
635 	free(eq, M_DEVBUF);
636 
637 }
638 
639 
640 
641 
642 /**
643  * @brief		Function to create an MQ
644  * @param sc		software handle to the device
645  * @param eq		the EQ to associate with the MQ for event notification
646  * @param q_len		the number of entries to create in the MQ
647  * @returns		pointer to the created MQ, or NULL on failure
648  */
649 static struct oce_mq *
650 oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
651 {
652 	struct oce_mbx mbx;
653 	struct mbx_create_common_mq_ex *fwcmd = NULL;
654 	struct oce_mq *mq = NULL;
655 	int rc = 0;
656 	struct oce_cq *cq;
657 	oce_mq_ext_ctx_t *ctx;
658 	uint32_t num_pages;
659 	uint32_t page_size;
660 	uint32_t version;
661 
662 
663 	cq = oce_cq_create(sc, eq, CQ_LEN_256,
664 			sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
665 	if (!cq)
666 		return NULL;
667 
668 	/* allocate the mq */
669 	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
670 	if (!mq) {
671 		oce_cq_del(sc, cq);
672 		goto error;
673 	}
674 
675 	mq->parent = sc;
676 
677 	mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
678 	if (!mq->ring)
679 		goto error;
680 
681 	bzero(&mbx, sizeof(struct oce_mbx));
682 
683 	fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
684 	version = OCE_MBX_VER_V0;
685 	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
686 				MBX_SUBSYSTEM_COMMON,
687 				OPCODE_COMMON_CREATE_MQ_EXT,
688 				MBX_TIMEOUT_SEC,
689 				sizeof(struct mbx_create_common_mq_ex),
690 				version);
691 
692 	num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
693 	page_size = mq->ring->num_items * mq->ring->item_size;
694 
695 	ctx = &fwcmd->params.req.context;
696 	ctx->v0.num_pages = num_pages;
697 	ctx->v0.cq_id = cq->cq_id;
698 	ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
699 	ctx->v0.valid = 1;
700 	/* Subscribe to all async events (link state and group 5 events included) */
701 	ctx->v0.async_evt_bitmap = 0xffffffff;
702 
703 	mbx.u0.s.embedded = 1;
704 	mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
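	/*
	 * DW_SWAP adjusts the mailbox dword byte order for big-endian hosts;
	 * it is expected to be a no-op on little-endian machines.
	 */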
705 	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
706 
707 	rc = oce_mbox_post(sc, &mbx, NULL);
708 	if (rc)
709 		goto error;
710 
711 	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
712 	mq->cq = cq;
713 	eq->cq[eq->cq_valid] = cq;
714 	eq->cq_valid++;
715 	mq->cq->eq = eq;
716 	mq->cfg.q_len = (uint8_t) q_len;
717 	mq->cfg.eqd = 0;
718 	mq->qstate = QCREATED;
719 
720 	mq->cq->cb_arg = mq;
721 	mq->cq->cq_handler = oce_mq_handler;
722 
723 	return mq;
724 
725 error:
726 	device_printf(sc->dev, "MQ create failed\n");
727 	oce_mq_free(mq);
728 	mq = NULL;
729 	return mq;
730 }
731 
732 
733 
734 
735 
736 /**
737  * @brief		Function to free a mailbox queue
738  * @param mq		pointer to a mailbox queue
739  */
740 static void
741 oce_mq_free(struct oce_mq *mq)
742 {
743 	POCE_SOFTC sc;
744 	struct oce_mbx mbx;
745 	struct mbx_destroy_common_mq *fwcmd;
746 
747 	if (!mq)
748 		return;
749 	sc = (POCE_SOFTC) mq->parent;
750 	if (mq->ring != NULL) {
751 		oce_destroy_ring_buffer(sc, mq->ring);
752 		mq->ring = NULL;
753 		if (mq->qstate == QCREATED) {
754 			bzero(&mbx, sizeof (struct oce_mbx));
755 			fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
756 			fwcmd->params.req.id = mq->mq_id;
757 			(void) oce_destroy_q(sc, &mbx,
758 				sizeof (struct mbx_destroy_common_mq),
759 				QTYPE_MQ);
760 		}
761 		mq->qstate = QDELETED;
762 	}
763 
764 	if (mq->cq != NULL) {
765 		oce_cq_del(sc, mq->cq);
766 		mq->cq = NULL;
767 	}
768 
769 	free(mq, M_DEVBUF);
770 	mq = NULL;
771 }
772 
773 
774 
775 /**
776  * @brief		Function to delete a EQ, CQ, MQ, WQ or RQ
777  * @param sc		software handle to the device
778  * @param mbx		mailbox command to send to the fw to delete the queue
779  *			(mbx contains the queue information to delete)
780  * @param req_size	the size of the mbx payload dependent on the qtype
781  * @param qtype		the type of queue i.e. EQ, CQ, MQ, WQ or RQ
782  * @returns 		0 on success, failure otherwise
783  */
784 static int
785 oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
786 		enum qtype qtype)
787 {
788 	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
789 	int opcode;
790 	int subsys;
791 	int rc = 0;
792 
793 	switch (qtype) {
794 	case QTYPE_EQ:
795 		opcode = OPCODE_COMMON_DESTROY_EQ;
796 		subsys = MBX_SUBSYSTEM_COMMON;
797 		break;
798 	case QTYPE_CQ:
799 		opcode = OPCODE_COMMON_DESTROY_CQ;
800 		subsys = MBX_SUBSYSTEM_COMMON;
801 		break;
802 	case QTYPE_MQ:
803 		opcode = OPCODE_COMMON_DESTROY_MQ;
804 		subsys = MBX_SUBSYSTEM_COMMON;
805 		break;
806 	case QTYPE_WQ:
807 		opcode = NIC_DELETE_WQ;
808 		subsys = MBX_SUBSYSTEM_NIC;
809 		break;
810 	case QTYPE_RQ:
811 		opcode = NIC_DELETE_RQ;
812 		subsys = MBX_SUBSYSTEM_NIC;
813 		break;
814 	default:
815 		return EINVAL;
816 	}
817 
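	/* Build the common request header and post the command as an embedded mailbox. */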
818 	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
819 				opcode, MBX_TIMEOUT_SEC, req_size,
820 				OCE_MBX_VER_V0);
821 
822 	mbx->u0.s.embedded = 1;
823 	mbx->payload_length = (uint32_t) req_size;
824 	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
825 
826 	rc = oce_mbox_post(sc, mbx, NULL);
827 
828 	if (rc != 0)
829 		device_printf(sc->dev, "Failed to del q\n");
830 
831 	return rc;
832 }
833 
834 
835 
836 /**
837  * @brief		Function to create a completion queue
838  * @param sc		software handle to the device
839  * @param eq		optional EQ to be associated with the CQ
840  * @param q_len		length of completion queue
841  * @param item_size	size of completion queue items
842  * @param sol_event	command context event
843  * @param is_eventable	eventable CQ flag
844  * @param nodelay	no delay flag
845  * @param ncoalesce	CQE coalescing count
846  * @returns 		pointer to the cq created, NULL on failure
847  */
848 struct oce_cq *
849 oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
850 			     uint32_t q_len,
851 			     uint32_t item_size,
852 			     uint32_t sol_event,
853 			     uint32_t is_eventable,
854 			     uint32_t nodelay, uint32_t ncoalesce)
855 {
856 	struct oce_cq *cq = NULL;
857 	int rc = 0;
858 
859 	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
860 	if (!cq)
861 		return NULL;
862 
863 	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
864 	if (!cq->ring)
865 		goto error;
866 
867 	cq->parent = sc;
868 	cq->eq = eq;
869 	cq->cq_cfg.q_len = q_len;
870 	cq->cq_cfg.item_size = item_size;
871 	cq->cq_cfg.nodelay = (uint8_t) nodelay;
872 
873 	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
874 	if (rc)
875 		goto error;
876 
877 	sc->cq[sc->ncqs++] = cq;
878 
879 	return cq;
880 
881 error:
882 	device_printf(sc->dev, "CQ create failed\n");
883 	oce_cq_del(sc, cq);
884 	return NULL;
885 }
886 
887 
888 
889 /**
890  * @brief		Deletes the completion queue
891  * @param sc		software handle to the device
892  * @param cq		pointer to a completion queue
893  */
894 static void
895 oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
896 {
897 	struct oce_mbx mbx;
898 	struct mbx_destroy_common_cq *fwcmd;
899 
900 	if (cq->ring != NULL) {
901 
902 		bzero(&mbx, sizeof(struct oce_mbx));
903 		/* now fill the command */
904 		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
905 		fwcmd->params.req.id = cq->cq_id;
906 		(void)oce_destroy_q(sc, &mbx,
907 			sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
908 		/* Now destroy the ring buffer */
909 		oce_destroy_ring_buffer(sc, cq->ring);
910 		cq->ring = NULL;
911 	}
912 
913 	free(cq, M_DEVBUF);
914 	cq = NULL;
915 }
916 
917 
918 
919 /**
920  * @brief		Start a receive queue
921  * @param rq		pointer to a receive queue
922  */
923 int
924 oce_start_rq(struct oce_rq *rq)
925 {
926 	int rc;
927 
928 	rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);
929 
930 	if (rc == 0)
931 		oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
932 	return rc;
933 }
934 
935 
936 
937 /**
938  * @brief		Start a work queue
939  * @param wq		pointer to a work queue
940  */
941 int
942 oce_start_wq(struct oce_wq *wq)
943 {
944 	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
945 	return 0;
946 }
947 
948 
949 
950 /**
951  * @brief		Start a mailbox queue
952  * @param mq		pointer to a mailbox queue
953  */
954 int
955 oce_start_mq(struct oce_mq *mq)
956 {
957 	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
958 	return 0;
959 }
960 
961 
962 
963 /**
964  * @brief		Function to arm an EQ so that it can generate events
965  * @param sc		software handle to the device
966  * @param qid		id of the EQ returned by the fw at the time of creation
967  * @param npopped	number of EQEs processed (popped)
968  * @param rearm		rearm bit enable/disable
969  * @param clearint	bit to clear the interrupt condition because of which
970  *			EQEs are generated
971  */
972 void
973 oce_arm_eq(POCE_SOFTC sc,
974 	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
975 {
976 	eq_db_t eq_db = { 0 };
977 
978 	eq_db.bits.rearm = rearm;
979 	eq_db.bits.event = 1;
980 	eq_db.bits.num_popped = npopped;
981 	eq_db.bits.clrint = clearint;
982 	eq_db.bits.qid = qid;
983 	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
984 
985 }
986 
987 
988 
989 
990 /**
991  * @brief		Function to arm a CQ with CQEs
992  * @param sc		software handle to the device
993  * @param qid		id of the CQ returned by the fw at the time of creation
994  * @param npopped	number of CQEs processed (popped)
995  * @param rearm		rearm bit enable/disable
996  */
997 void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
998 {
999 	cq_db_t cq_db = { 0 };
1000 
1001 	cq_db.bits.rearm = rearm;
1002 	cq_db.bits.num_popped = npopped;
1003 	cq_db.bits.event = 0;
1004 	cq_db.bits.qid = qid;
1005 	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
1006 
1007 }
1008 
1009 
1010 
1011 
1012 /*
1013  * @brief		function to drain an EQ during stop
1014  * @param eq		pointer to the event queue structure
1015  *			(pending EQEs are consumed and the EQ is rearmed)
1016  */
1017 void
1018 oce_drain_eq(struct oce_eq *eq)
1019 {
1020 
1021 	struct oce_eqe *eqe;
1022 	uint16_t num_eqe = 0;
1023 	POCE_SOFTC sc = eq->parent;
1024 
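	/* Consume any pending EQEs without dispatching them, then rearm the EQ with the clear-interrupt bit set. */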
1025 	do {
1026 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1027 		if (eqe->evnt == 0)
1028 			break;
1029 		eqe->evnt = 0;
1030 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
1031 					BUS_DMASYNC_POSTWRITE);
1032 		num_eqe++;
1033 		RING_GET(eq->ring, 1);
1034 
1035 	} while (TRUE);
1036 
1037 	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
1038 
1039 }
1040 
1041 
1042 
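/**
 * @brief		function to drain a WQ completion queue during stop
 * @param wq		pointer to the work queue whose CQ is drained
 */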
1043 void
1044 oce_drain_wq_cq(struct oce_wq *wq)
1045 {
1046 	POCE_SOFTC sc = wq->parent;
1047 	struct oce_cq *cq = wq->cq;
1048 	struct oce_nic_tx_cqe *cqe;
1049 	int num_cqes = 0;
1050 
1051 	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1052 				 BUS_DMASYNC_POSTWRITE);
1053 
1054 	do {
1055 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1056 		if (cqe->u0.dw[3] == 0)
1057 			break;
1058 		cqe->u0.dw[3] = 0;
1059 		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1060 				 BUS_DMASYNC_POSTWRITE);
1061 		RING_GET(cq->ring, 1);
1062 		num_cqes++;
1063 
1064 	} while (TRUE);
1065 
1066 	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1067 
1068 }
1069 
1070 
1071 /*
1072  * @brief		function to drain the MQ completion queue
1073  * @param arg		pointer to the MQ whose CQ is to be drained
1074  * @note		currently a stub; the MQ CQ is not drained here
1075  *			(see the TODO in the function body)
1076  */
1077 void
1078 oce_drain_mq_cq(void *arg)
1079 {
1080 	/* TODO: additional code. */
1081 	return;
1082 }
1083 
1084 
1085 
1086 /**
1087  * @brief		function to drain a receive queue CQ
1088  * @param rq		pointer to the RQ whose CQ is to be drained
1089  *			(valid CQEs are invalidated and the CQ is rearmed)
1090  */
1091 void
1092 oce_drain_rq_cq(struct oce_rq *rq)
1093 {
1094 	struct oce_nic_rx_cqe *cqe;
1095 	uint16_t num_cqe = 0;
1096 	struct oce_cq  *cq;
1097 	POCE_SOFTC sc;
1098 
1099 	sc = rq->parent;
1100 	cq = rq->cq;
1101 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1102 	/* dequeue till you reach an invalid cqe */
1103 	while (RQ_CQE_VALID(cqe)) {
1104 		RQ_CQE_INVALIDATE(cqe);
1105 		RING_GET(cq->ring, 1);
1106 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
1107 		    struct oce_nic_rx_cqe);
1108 		num_cqe++;
1109 	}
1110 	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);
1111 
1112 	return;
1113 }
1114 
1115 
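/**
 * @brief		Free the receive buffers posted to a receive queue
 * @param rq		pointer to the receive queue
 */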
1116 void
1117 oce_free_posted_rxbuf(struct oce_rq *rq)
1118 {
1119 	struct oce_packet_desc *pd;
1120 
1121 	while (rq->pending) {
1122 
1123 		pd = &rq->pckts[rq->packets_out];
1124 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1125 		bus_dmamap_unload(rq->tag, pd->map);
1126 		if (pd->mbuf != NULL) {
1127 			m_freem(pd->mbuf);
1128 			pd->mbuf = NULL;
1129 		}
1130 
1131 		if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
1132 			rq->packets_out = 0;
1133 		else
1134 			rq->packets_out++;
1135 
1136 		rq->pending--;
1137 	}
1138 
1139 }
1140 
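/**
 * @brief		Delete all receive queues in firmware and free posted buffers
 * @param sc		software handle to the device
 */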
1141 void
1142 oce_stop_rx(POCE_SOFTC sc)
1143 {
1144 	struct oce_mbx mbx;
1145 	struct mbx_delete_nic_rq *fwcmd;
1146 	struct oce_rq *rq;
1147 	int i = 0;
1148 
1149 	for_all_rq_queues(sc, rq, i) {
1150 		if (rq->qstate == QCREATED) {
1151 			/* Delete rxq in firmware */
1152 
1153 			bzero(&mbx, sizeof(mbx));
1154 			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
1155 			fwcmd->params.req.rq_id = rq->rq_id;
1156 
1157 			(void)oce_destroy_q(sc, &mbx,
1158 				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
1159 
1160 			rq->qstate = QDELETED;
1161 
1162 			DELAY(1);
1163 
1164 			/* Free posted RX buffers that are not used */
1165 			oce_free_posted_rxbuf(rq);
1166 
1167 		}
1168 	}
1169 }
1170 
1171 
1172 
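/**
 * @brief		Re-create the receive queues in firmware and reconfigure RSS
 * @param sc		software handle to the device
 * @returns		0 on success, error code otherwise
 */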
1173 int
1174 oce_start_rx(POCE_SOFTC sc)
1175 {
1176 	struct oce_rq *rq;
1177 	int rc = 0, i;
1178 
1179 	for_all_rq_queues(sc, rq, i) {
1180 		if (rq->qstate == QCREATED)
1181 			continue;
1182 		rc = oce_mbox_create_rq(rq);
1183 		if (rc)
1184 			goto error;
1185 		/* reset queue pointers */
1186 		rq->qstate 	 = QCREATED;
1187 		rq->pending	 = 0;
1188 		rq->ring->cidx	 = 0;
1189 		rq->ring->pidx	 = 0;
1190 		rq->packets_in	 = 0;
1191 		rq->packets_out	 = 0;
1192 	}
1193 
1194 	DELAY(1);
1195 
1196 	/* RSS config */
1197 	if (sc->rss_enable) {
1198 		rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
1199 		if (rc)
1200 			goto error;
1201 
1202 	}
1203 
1204 	return rc;
1205 error:
1206 	device_printf(sc->dev, "Start RX failed\n");
1207 	return rc;
1208 
1209 }
1210 
1211 
1212 
1213