1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /**
33  * @file
34  * Queue topology parsing and SLI queue (EQ/CQ/WQ/RQ/MQ) creation for the OCS hardware layer.
35  */
36 
37 #include "ocs_os.h"
38 #include "ocs_hw.h"
39 #include "ocs_hw_queues.h"
40 
41 #define HW_QTOP_DEBUG		0
42 
43 /**
44  * @brief Initialize queues
45  *
46  * Given the parsed queue topology spec, the SLI queues are created and
47  * initialized
48  *
49  * @param hw pointer to HW object
50  * @param qtop pointer to queue topology
51  *
52  * @return returns 0 for success, an error code value for failure.
53  */
54 ocs_hw_rtn_e
55 ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
56 {
57 	uint32_t i, j;
58 	uint32_t default_lengths[QTOP_LAST], len;
59 	uint32_t rqset_len = 0, rqset_ulp = 0, rqset_count = 0;
60 	uint8_t rqset_filter_mask = 0;
61 	hw_eq_t *eqs[hw->config.n_rq];
62 	hw_cq_t *cqs[hw->config.n_rq];
63 	hw_rq_t *rqs[hw->config.n_rq];
64 	ocs_hw_qtop_entry_t *qt, *next_qt;
65 	ocs_hw_mrq_t mrq;
66 	bool use_mrq = FALSE;
67 
68 	hw_eq_t *eq = NULL;
69 	hw_cq_t *cq = NULL;
70 	hw_wq_t *wq = NULL;
71 	hw_rq_t *rq = NULL;
72 	hw_mq_t *mq = NULL;
73 
74 	mrq.num_pairs = 0;
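	/*
	 * Default entry counts per queue type. A topology entry that carries
	 * set_default (for example a "len:cq=<n>" specification) overwrites the
	 * value here and is then used for every subsequent queue of that type
	 * that does not give an explicit length.
	 */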
75 	default_lengths[QTOP_EQ] = 1024;
76 	default_lengths[QTOP_CQ] = hw->num_qentries[SLI_QTYPE_CQ];
77 	default_lengths[QTOP_WQ] = hw->num_qentries[SLI_QTYPE_WQ];
78 	default_lengths[QTOP_RQ] = hw->num_qentries[SLI_QTYPE_RQ];
79 	default_lengths[QTOP_MQ] = OCS_HW_MQ_DEPTH;
80 
81 	ocs_hw_verify(hw != NULL, OCS_HW_RTN_INVALID_ARG);
82 
83 	hw->eq_count = 0;
84 	hw->cq_count = 0;
85 	hw->mq_count = 0;
86 	hw->wq_count = 0;
87 	hw->rq_count = 0;
88 	hw->hw_rq_count = 0;
89 	ocs_list_init(&hw->eq_list, hw_eq_t, link);
90 
91 	/* If MRQ is requested, check if it is supported by SLI. */
92 	if ((hw->config.n_rq > 1 ) && !hw->sli.config.features.flag.mrqp) {
93 		ocs_log_err(hw->os, "MRQ topology not supported by SLI4.\n");
94 		return OCS_HW_RTN_ERROR;
95 	}
96 
97 	if (hw->config.n_rq > 1)
98 		use_mrq = TRUE;
99 
100 	/* Allocate class WQ pools */
101 	for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
102 		hw->wq_class_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
103 		if (hw->wq_class_array[i] == NULL) {
104 			ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n");
105 			return OCS_HW_RTN_NO_MEMORY;
106 		}
107 	}
108 
109 	/* Allocate per CPU WQ pools */
110 	for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
111 		hw->wq_cpu_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
112 		if (hw->wq_cpu_array[i] == NULL) {
113 			ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n");
114 			return OCS_HW_RTN_NO_MEMORY;
115 		}
116 	}
117 
118 	ocs_hw_assert(qtop != NULL);
119 
120 	for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
121 		if (i == qtop->inuse_count - 1)
122 			next_qt = NULL;
123 		else
124 			next_qt = qt + 1;
125 
126 		switch(qt->entry) {
127 		case QTOP_EQ:
128 			len = (qt->len) ? qt->len : default_lengths[QTOP_EQ];
129 
130 			if (qt->set_default) {
131 				default_lengths[QTOP_EQ] = len;
132 				break;
133 			}
134 
135 			eq = hw_new_eq(hw, len);
136 			if (eq == NULL) {
137 				hw_queue_teardown(hw);
138 				return OCS_HW_RTN_NO_MEMORY;
139 			}
140 			break;
141 
142 		case QTOP_CQ:
143 			len = (qt->len) ? qt->len : default_lengths[QTOP_CQ];
144 
145 			if (qt->set_default) {
146 				default_lengths[QTOP_CQ] = len;
147 				break;
148 			}
149 
150 			if (!eq || !next_qt) {
151 				goto fail;
152 			}
153 
154 			/* If this CQ is for MRQ, then delay the creation */
155 			if (!use_mrq || next_qt->entry != QTOP_RQ) {
156 				cq = hw_new_cq(eq, len);
157 				if (cq == NULL) {
158 					goto fail;
159 				}
160 			}
161 			break;
162 
163 		case QTOP_WQ: {
164 			len = (qt->len) ? qt->len : default_lengths[QTOP_WQ];
165 			if (qt->set_default) {
166 				default_lengths[QTOP_WQ] = len;
167 				break;
168 			}
169 
170 			if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
171 				ocs_log_err(hw->os, "invalid ULP %d for WQ\n", qt->ulp);
172 				hw_queue_teardown(hw);
173 				return OCS_HW_RTN_NO_MEMORY;
174 			}
175 
176 			if (cq == NULL)
177 				goto fail;
178 
179 			wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp);
180 			if (wq == NULL) {
181 				goto fail;
182 			}
183 
184 			/* Place this WQ on the EQ WQ array */
185 			if (ocs_varray_add(eq->wq_array, wq)) {
186 				ocs_log_err(hw->os, "QTOP_WQ: EQ ocs_varray_add failed\n");
187 				hw_queue_teardown(hw);
188 				return OCS_HW_RTN_ERROR;
189 			}
190 
191 			/* Place this WQ on the HW class array */
192 			if (qt->class < ARRAY_SIZE(hw->wq_class_array)) {
193 				if (ocs_varray_add(hw->wq_class_array[qt->class], wq)) {
194 					ocs_log_err(hw->os, "HW wq_class_array ocs_varray_add failed\n");
195 					hw_queue_teardown(hw);
196 					return OCS_HW_RTN_ERROR;
197 				}
198 			} else {
199 				ocs_log_err(hw->os, "Invalid class value: %d\n", qt->class);
200 				hw_queue_teardown(hw);
201 				return OCS_HW_RTN_ERROR;
202 			}
203 
204 			/*
205 			 * Place this WQ on the per-CPU list, assuming that EQs are mapped to CPUs
206 			 * given by the EQ instance modulo the number of CPUs
207 			 */
208 			if (ocs_varray_add(hw->wq_cpu_array[eq->instance % ocs_get_num_cpus()], wq)) {
209 				ocs_log_err(hw->os, "HW wq_cpu_array ocs_varray_add failed\n");
210 				hw_queue_teardown(hw);
211 				return OCS_HW_RTN_ERROR;
212 			}
213 
214 			break;
215 		}
216 		case QTOP_RQ: {
217 			len = (qt->len) ? qt->len : default_lengths[QTOP_RQ];
218 			if (qt->set_default) {
219 				default_lengths[QTOP_RQ] = len;
220 				break;
221 			}
222 
223 			if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
224 				ocs_log_err(hw->os, "invalid ULP %d for RQ\n", qt->ulp);
225 				hw_queue_teardown(hw);
226 				return OCS_HW_RTN_NO_MEMORY;
227 			}
228 
229 			if (use_mrq) {
230 				mrq.rq_cfg[mrq.num_pairs].len = len;
231 				mrq.rq_cfg[mrq.num_pairs].ulp = hw->ulp_start + qt->ulp;
232 				mrq.rq_cfg[mrq.num_pairs].filter_mask = qt->filter_mask;
233 				mrq.rq_cfg[mrq.num_pairs].eq = eq;
234 				mrq.num_pairs ++;
235 			} else {
236 				rq = hw_new_rq(cq, len, hw->ulp_start + qt->ulp);
237 				if (rq == NULL) {
238 					hw_queue_teardown(hw);
239 					return OCS_HW_RTN_NO_MEMORY;
240 				}
241 				rq->filter_mask = qt->filter_mask;
242 			}
243 			break;
244 		}
245 
246 		case QTOP_MQ:
247 			len = (qt->len) ? qt->len : default_lengths[QTOP_MQ];
248 			if (qt->set_default) {
249 				default_lengths[QTOP_MQ] = len;
250 				break;
251 			}
252 
253 			if (cq == NULL)
254 				goto fail;
255 
256 			mq = hw_new_mq(cq, len);
257 			if (mq == NULL) {
258 				goto fail;
259 			}
260 			break;
261 
262 		default:
263 			ocs_hw_assert(0);
264 			break;
265 		}
266 	}
267 
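	/*
	 * MRQ mode: the RQ topology entries above were only recorded in
	 * mrq.rq_cfg[]. Entries whose filter_mask matches at least one other
	 * entry are grouped into a single RQ set (created below through
	 * hw_new_cq_set() and hw_new_rq_set()), while an entry with a unique
	 * filter_mask gets an ordinary CQ/RQ pair. Only one distinct filter
	 * mask may be used for the set.
	 */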
268 	if (mrq.num_pairs) {
269 		/* First create normal RQs. */
270 		for (i = 0; i < mrq.num_pairs; i++) {
271 			for (j = 0; j < mrq.num_pairs; j++) {
272 				if ((i != j) && (mrq.rq_cfg[i].filter_mask == mrq.rq_cfg[j].filter_mask)) {
273 					/* This should be created using set */
274 					if (rqset_filter_mask && (rqset_filter_mask != mrq.rq_cfg[i].filter_mask)) {
275 						ocs_log_crit(hw->os, "Cant create morethan one RQ Set\n");
276 						hw_queue_teardown(hw);
277 						return OCS_HW_RTN_ERROR;
278 					} else if (!rqset_filter_mask){
279 						rqset_filter_mask = mrq.rq_cfg[i].filter_mask;
280 						rqset_len = mrq.rq_cfg[i].len;
281 						rqset_ulp = mrq.rq_cfg[i].ulp;
282 					}
283 					eqs[rqset_count] = mrq.rq_cfg[i].eq;
284 					rqset_count++;
285 					break;
286 				}
287 			}
288 			if (j == mrq.num_pairs) {
289 				/* Normal RQ */
290 				cq = hw_new_cq(mrq.rq_cfg[i].eq, default_lengths[QTOP_CQ]);
291 				if (cq == NULL) {
292 					hw_queue_teardown(hw);
293 					return OCS_HW_RTN_NO_MEMORY;
294 				}
295 
296 				rq = hw_new_rq(cq, mrq.rq_cfg[i].len, mrq.rq_cfg[i].ulp);
297 				if (rq == NULL) {
298 					hw_queue_teardown(hw);
299 					return OCS_HW_RTN_NO_MEMORY;
300 				}
301 				rq->filter_mask = mrq.rq_cfg[i].filter_mask;
302 			}
303 		}
304 
305 		/* Now create RQ Set */
306 		if (rqset_count) {
307 			if (rqset_count > OCE_HW_MAX_NUM_MRQ_PAIRS) {
308 				ocs_log_crit(hw->os,
309 					     "Max Supported MRQ pairs = %d\n",
310 					     OCE_HW_MAX_NUM_MRQ_PAIRS);
311 				hw_queue_teardown(hw);
312 				return OCS_HW_RTN_ERROR;
313 			}
314 
315 			/* Create CQ set */
316 			if (hw_new_cq_set(eqs, cqs, rqset_count, default_lengths[QTOP_CQ])) {
317 				hw_queue_teardown(hw);
318 				return OCS_HW_RTN_ERROR;
319 			}
320 
321 			/* Create RQ set */
322 			if (hw_new_rq_set(cqs, rqs, rqset_count, rqset_len, rqset_ulp)) {
323 				hw_queue_teardown(hw);
324 				return OCS_HW_RTN_ERROR;
325 			}
326 
327 			for (i = 0; i < rqset_count ; i++) {
328 				rqs[i]->filter_mask = rqset_filter_mask;
329 				rqs[i]->is_mrq = TRUE;
330 				rqs[i]->base_mrq_id = rqs[0]->hdr->id;
331 			}
332 
333 			hw->hw_mrq_count = rqset_count;
334 		}
335 	}
336 
337 	return OCS_HW_RTN_SUCCESS;
338 fail:
339 	hw_queue_teardown(hw);
340 	return OCS_HW_RTN_NO_MEMORY;
341 
342 }
343 
344 /**
345  * @brief Allocate a new EQ object
346  *
347  * A new EQ object is instantiated
348  *
349  * @param hw pointer to HW object
350  * @param entry_count number of entries in the EQ
351  *
352  * @return pointer to allocated EQ object
353  */
354 hw_eq_t*
355 hw_new_eq(ocs_hw_t *hw, uint32_t entry_count)
356 {
357 	hw_eq_t *eq = ocs_malloc(hw->os, sizeof(*eq), OCS_M_ZERO | OCS_M_NOWAIT);
358 
359 	if (eq != NULL) {
360 		eq->type = SLI_QTYPE_EQ;
361 		eq->hw = hw;
362 		eq->entry_count = entry_count;
363 		eq->instance = hw->eq_count++;
364 		eq->queue = &hw->eq[eq->instance];
365 		ocs_list_init(&eq->cq_list, hw_cq_t, link);
366 
367 		eq->wq_array = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
368 		if (eq->wq_array == NULL) {
369 			ocs_free(hw->os, eq, sizeof(*eq));
370 			eq = NULL;
371 		} else {
372 			if (sli_queue_alloc(&hw->sli, SLI_QTYPE_EQ, eq->queue, entry_count, NULL, 0)) {
373 				ocs_log_err(hw->os, "EQ[%d] allocation failure\n", eq->instance);
374 				ocs_free(hw->os, eq, sizeof(*eq));
375 				eq = NULL;
376 			} else {
377 				sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
378 				hw->hw_eq[eq->instance] = eq;
379 				ocs_list_add_tail(&hw->eq_list, eq);
380 				ocs_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, eq->queue->id,
381 					eq->entry_count);
382 			}
383 		}
384 	}
385 	return eq;
386 }
387 
388 /**
389  * @brief Allocate a new CQ object
390  *
391  * A new CQ object is instantiated
392  *
393  * @param eq pointer to parent EQ object
394  * @param entry_count number of entries in the CQ
395  *
396  * @return pointer to allocated CQ object
397  */
398 hw_cq_t*
399 hw_new_cq(hw_eq_t *eq, uint32_t entry_count)
400 {
401 	ocs_hw_t *hw = eq->hw;
402 	hw_cq_t *cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);
403 
404 	if (cq != NULL) {
405 		cq->eq = eq;
406 		cq->type = SLI_QTYPE_CQ;
407 		cq->instance = eq->hw->cq_count++;
408 		cq->entry_count = entry_count;
409 		cq->queue = &hw->cq[cq->instance];
410 
411 		ocs_list_init(&cq->q_list, hw_q_t, link);
412 
413 		if (sli_queue_alloc(&hw->sli, SLI_QTYPE_CQ, cq->queue, cq->entry_count, eq->queue, 0)) {
414 			ocs_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
415 				cq->instance,
416 				cq->entry_count);
417 			ocs_free(hw->os, cq, sizeof(*cq));
418 			cq = NULL;
419 		} else {
420 			hw->hw_cq[cq->instance] = cq;
421 			ocs_list_add_tail(&eq->cq_list, cq);
422 			ocs_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, cq->queue->id,
423 				cq->entry_count);
424 		}
425 	}
426 	return cq;
427 }
428 
429 /**
430  * @brief Allocate a new CQ Set of objects.
431  *
432  * @param eqs pointer to a set of EQ objects.
433  * @param cqs pointer to a set of CQ objects to be returned.
434  * @param num_cqs number of CQ queues in the set.
435  * @param entry_count number of entries in the CQ.
436  *
437  * @return 0 on success and -1 on failure.
438  */
439 uint32_t
440 hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count)
441 {
442 	uint32_t i;
443 	ocs_hw_t *hw = eqs[0]->hw;
444 	sli4_t *sli4 = &hw->sli;
445 	hw_cq_t *cq = NULL;
446 	sli4_queue_t *qs[SLI_MAX_CQ_SET_COUNT], *assocs[SLI_MAX_CQ_SET_COUNT];
447 
448 	/* Initialise CQS pointers to NULL */
449 	for (i = 0; i < num_cqs; i++) {
450 		cqs[i] = NULL;
451 	}
452 
453 	for (i = 0; i < num_cqs; i++) {
454 		cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);
455 		if (cq == NULL)
456 			goto error;
457 
458 		cqs[i]          = cq;
459 		cq->eq          = eqs[i];
460 		cq->type        = SLI_QTYPE_CQ;
461 		cq->instance    = hw->cq_count++;
462 		cq->entry_count = entry_count;
463 		cq->queue       = &hw->cq[cq->instance];
464 		qs[i]           = cq->queue;
465 		assocs[i]       = eqs[i]->queue;
466 		ocs_list_init(&cq->q_list, hw_q_t, link);
467 	}
468 
469 	if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) {
470 		ocs_log_err(hw->os, "Failed to create CQ Set\n");
471 		goto error;
472 	}
473 
474 	for (i = 0; i < num_cqs; i++) {
475 		hw->hw_cq[cqs[i]->instance] = cqs[i];
476 		ocs_list_add_tail(&cqs[i]->eq->cq_list, cqs[i]);
477 	}
478 
479 	return 0;
480 
481 error:
482 	for (i = 0; i < num_cqs; i++) {
483 		if (cqs[i]) {
484 			ocs_free(hw->os, cqs[i], sizeof(*cqs[i]));
485 			cqs[i] = NULL;
486 		}
487 	}
488 	return -1;
489 }
490 
491 /**
492  * @brief Allocate a new MQ object
493  *
494  * A new MQ object is instantiated
495  *
496  * @param cq pointer to parent CQ object
497  * @param entry_count number of entries in the MQ
498  *
499  * @return pointer to allocated MQ object
500  */
501 hw_mq_t*
502 hw_new_mq(hw_cq_t *cq, uint32_t entry_count)
503 {
504 	ocs_hw_t *hw = cq->eq->hw;
505 	hw_mq_t *mq = ocs_malloc(hw->os, sizeof(*mq), OCS_M_ZERO | OCS_M_NOWAIT);
506 
507 	if (mq != NULL) {
508 		mq->cq = cq;
509 		mq->type = SLI_QTYPE_MQ;
510 		mq->instance = cq->eq->hw->mq_count++;
511 		mq->entry_count = entry_count;
512 		mq->entry_size = OCS_HW_MQ_DEPTH;
513 		mq->queue = &hw->mq[mq->instance];
514 
515 		if (sli_queue_alloc(&hw->sli, SLI_QTYPE_MQ,
516 				    mq->queue,
517 				    mq->entry_size,
518 				    cq->queue, 0)) {
519 			ocs_log_err(hw->os, "MQ allocation failure\n");
520 			ocs_free(hw->os, mq, sizeof(*mq));
521 			mq = NULL;
522 		} else {
523 			hw->hw_mq[mq->instance] = mq;
524 			ocs_list_add_tail(&cq->q_list, mq);
525 			ocs_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, mq->queue->id,
526 				mq->entry_count);
527 		}
528 	}
529 	return mq;
530 }
531 
532 /**
533  * @brief Allocate a new WQ object
534  *
535  * A new WQ object is instantiated
536  *
537  * @param cq pointer to parent CQ object
538  * @param entry_count number of entries in the WQ
539  * @param class WQ class
540  * @param ulp index of chute
541  *
542  * @return pointer to allocated WQ object
543  */
544 hw_wq_t*
545 hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp)
546 {
547 	ocs_hw_t *hw = cq->eq->hw;
548 	hw_wq_t *wq = ocs_malloc(hw->os, sizeof(*wq), OCS_M_ZERO | OCS_M_NOWAIT);
549 
550 	if (wq != NULL) {
551 		wq->hw = cq->eq->hw;
552 		wq->cq = cq;
553 		wq->type = SLI_QTYPE_WQ;
554 		wq->instance = cq->eq->hw->wq_count++;
555 		wq->entry_count = entry_count;
556 		wq->queue = &hw->wq[wq->instance];
557 		wq->ulp = ulp;
558 		wq->wqec_set_count = OCS_HW_WQEC_SET_COUNT;
559 		wq->wqec_count = wq->wqec_set_count;
560 		wq->free_count = wq->entry_count - 1;
561 		wq->class = class;
562 		ocs_list_init(&wq->pending_list, ocs_hw_wqe_t, link);
563 
564 		if (sli_queue_alloc(&hw->sli, SLI_QTYPE_WQ, wq->queue, wq->entry_count, cq->queue, ulp)) {
565 			ocs_log_err(hw->os, "WQ allocation failure\n");
566 			ocs_free(hw->os, wq, sizeof(*wq));
567 			wq = NULL;
568 		} else {
569 			hw->hw_wq[wq->instance] = wq;
570 			ocs_list_add_tail(&cq->q_list, wq);
571 			ocs_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d ulp %d\n", wq->instance, wq->queue->id,
572 				wq->entry_count, wq->class, wq->ulp);
573 		}
574 	}
575 	return wq;
576 }
577 
578 /**
579  * @brief Allocate a hw_rq_t object
580  *
581  * Allocate an RQ object, which encapsulates 2 SLI queues (for rq pair)
582  *
583  * @param cq pointer to parent CQ object
584  * @param entry_count number of entries in the RQs
585  * @param ulp ULP index for this RQ
586  *
587  * @return pointer to newly allocated hw_rq_t
588  */
589 hw_rq_t*
590 hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp)
591 {
592 	ocs_hw_t *hw = cq->eq->hw;
593 	hw_rq_t *rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
594 	uint32_t max_hw_rq;
595 
596 	ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);
597 
598 	if (rq != NULL) {
599 		rq->instance = hw->hw_rq_count++;
600 		rq->cq = cq;
601 		rq->type = SLI_QTYPE_RQ;
602 		rq->ulp = ulp;
603 
604 		rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));
605 
606 		/* Create the header RQ */
607 		ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
608 		rq->hdr = &hw->rq[hw->rq_count];
609 		rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
610 
611 		if (sli_fc_rq_alloc(&hw->sli, rq->hdr,
612 				    rq->entry_count,
613 				    rq->hdr_entry_size,
614 				    cq->queue,
615 				    ulp, TRUE)) {
616 			ocs_log_err(hw->os, "RQ allocation failure - header\n");
617 			ocs_free(hw->os, rq, sizeof(*rq));
618 			return NULL;
619 		}
620 		hw->hw_rq_lookup[hw->rq_count] = rq->instance;	/* Update hw_rq_lookup[] */
621 		hw->rq_count++;
622 		ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d hdr  size %4d ulp %d\n",
623 			rq->instance, rq->hdr->id, rq->entry_count, rq->hdr_entry_size, rq->ulp);
624 
625 		/* Create the default data RQ */
626 		ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
627 		rq->data = &hw->rq[hw->rq_count];
628 		rq->data_entry_size = hw->config.rq_default_buffer_size;
629 
630 		if (sli_fc_rq_alloc(&hw->sli, rq->data,
631 				    rq->entry_count,
632 				    rq->data_entry_size,
633 				    cq->queue,
634 				    ulp, FALSE)) {
635 			ocs_log_err(hw->os, "RQ allocation failure - first burst\n");
636 			ocs_free(hw->os, rq, sizeof(*rq));
637 			return NULL;
638 		}
639 		hw->hw_rq_lookup[hw->rq_count] = rq->instance;	/* Update hw_rq_lookup[] */
640 		hw->rq_count++;
641 		ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d data size %4d ulp %d\n", rq->instance,
642 			rq->data->id, rq->entry_count, rq->data_entry_size, rq->ulp);
643 
644 		hw->hw_rq[rq->instance] = rq;
645 		ocs_list_add_tail(&cq->q_list, rq);
646 
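		/*
		 * Per-entry sequence tracker: one ocs_hw_sequence_t pointer per
		 * RQ entry, presumably used by ocs_hw_rqpair_get() to map a
		 * completion index back to the buffers posted at that index
		 * (the consumers live later in this file and in the RQ-pair
		 * unsolicited path).
		 */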
647 		rq->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
648 					    rq->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
649 		if (rq->rq_tracker == NULL) {
650 			ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
651 			return NULL;
652 		}
653 	}
654 	return rq;
655 }
656 
657 /**
658  * @brief Allocate a hw_rq_t object SET
659  *
660  * Allocate an RQ object SET, where each element in the set
661  * encapsulates 2 SLI queues (for an RQ pair)
662  *
663  * @param cqs CQ pointers to be associated with RQs.
664  * @param rqs RQ pointers to be returned on success.
665  * @param num_rq_pairs number of rq pairs in the Set.
666  * @param entry_count number of entries in the RQs
667  * @param ulp ULP index for this RQ
668  *
669  * @return 0 in success and -1 on failure.
670  */
671 uint32_t
672 hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp)
673 {
674 	ocs_hw_t *hw = cqs[0]->eq->hw;
675 	hw_rq_t *rq = NULL;
676 	sli4_queue_t *qs[SLI_MAX_RQ_SET_COUNT * 2] = { NULL };
677 	uint32_t max_hw_rq, i, q_count;
678 
679 	ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);
680 
681 	/* Initialise RQS pointers */
682 	for (i = 0; i < num_rq_pairs; i++) {
683 		rqs[i] = NULL;
684 	}
685 
686 	for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
687 		rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
688 		if (rq == NULL)
689 			goto error;
690 
691 		rqs[i] = rq;
692 		rq->instance = hw->hw_rq_count++;
693 		rq->cq = cqs[i];
694 		rq->type = SLI_QTYPE_RQ;
695 		rq->ulp = ulp;
696 		rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));
697 
698 		/* Header RQ */
699 		rq->hdr = &hw->rq[hw->rq_count];
700 		rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
701 		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
702 		hw->rq_count++;
703 		qs[q_count] = rq->hdr;
704 
705 		/* Data RQ */
706 		rq->data = &hw->rq[hw->rq_count];
707 		rq->data_entry_size = hw->config.rq_default_buffer_size;
708 		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
709 		hw->rq_count++;
710 		qs[q_count + 1] = rq->data;
711 
712 		rq->rq_tracker = NULL;
713 	}
714 
715 	if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
716 			    cqs[0]->queue->id,
717 			    rqs[0]->entry_count,
718 			    rqs[0]->hdr_entry_size,
719 			    rqs[0]->data_entry_size,
720 			    ulp)) {
721 		ocs_log_err(hw->os, "RQ Set allocation failure for base CQ=%d\n", cqs[0]->queue->id);
722 		goto error;
723 	}
724 
725 	for (i = 0; i < num_rq_pairs; i++) {
726 		hw->hw_rq[rqs[i]->instance] = rqs[i];
727 		ocs_list_add_tail(&cqs[i]->q_list, rqs[i]);
728 		rqs[i]->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
729 					    rqs[i]->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
730 		if (rqs[i]->rq_tracker == NULL) {
731 			ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
732 			goto error;
733 		}
734 	}
735 
736 	return 0;
737 
738 error:
739 	for (i = 0; i < num_rq_pairs; i++) {
740 		if (rqs[i] != NULL) {
741 			if (rqs[i]->rq_tracker != NULL) {
742 				ocs_free(hw->os, rqs[i]->rq_tracker,
743 					 sizeof(ocs_hw_sequence_t*) *
744 					 rqs[i]->entry_count);
745 			}
746 			ocs_free(hw->os, rqs[i], sizeof(*rqs[i]));
747 		}
748 	}
749 
750 	return -1;
751 }
752 
753 /**
754  * @brief Free an EQ object
755  *
756  * The EQ object and any child queue objects are freed
757  *
758  * @param eq pointer to EQ object
759  *
760  * @return none
761  */
762 void
763 hw_del_eq(hw_eq_t *eq)
764 {
765 	if (eq != NULL) {
766 		hw_cq_t *cq;
767 		hw_cq_t *cq_next;
768 
769 		ocs_list_foreach_safe(&eq->cq_list, cq, cq_next) {
770 			hw_del_cq(cq);
771 		}
772 		ocs_varray_free(eq->wq_array);
773 		ocs_list_remove(&eq->hw->eq_list, eq);
774 		eq->hw->hw_eq[eq->instance] = NULL;
775 		ocs_free(eq->hw->os, eq, sizeof(*eq));
776 	}
777 }
778 
779 /**
780  * @brief Free a CQ object
781  *
782  * The CQ object and any child queue objects are freed
783  *
784  * @param cq pointer to CQ object
785  *
786  * @return none
787  */
788 void
789 hw_del_cq(hw_cq_t *cq)
790 {
791 	if (cq != NULL) {
792 		hw_q_t *q;
793 		hw_q_t *q_next;
794 
795 		ocs_list_foreach_safe(&cq->q_list, q, q_next) {
796 			switch(q->type) {
797 			case SLI_QTYPE_MQ:
798 				hw_del_mq((hw_mq_t*) q);
799 				break;
800 			case SLI_QTYPE_WQ:
801 				hw_del_wq((hw_wq_t*) q);
802 				break;
803 			case SLI_QTYPE_RQ:
804 				hw_del_rq((hw_rq_t*) q);
805 				break;
806 			default:
807 				break;
808 			}
809 		}
810 		ocs_list_remove(&cq->eq->cq_list, cq);
811 		cq->eq->hw->hw_cq[cq->instance] = NULL;
812 		ocs_free(cq->eq->hw->os, cq, sizeof(*cq));
813 	}
814 }
815 
816 /**
817  * @brief Free a MQ object
818  *
819  * The MQ object is freed
820  *
821  * @param mq pointer to MQ object
822  *
823  * @return none
824  */
825 void
826 hw_del_mq(hw_mq_t *mq)
827 {
828 	if (mq != NULL) {
829 		ocs_list_remove(&mq->cq->q_list, mq);
830 		mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
831 		ocs_free(mq->cq->eq->hw->os, mq, sizeof(*mq));
832 	}
833 }
834 
835 /**
836  * @brief Free a WQ object
837  *
838  * The WQ object is freed
839  *
840  * @param wq pointer to WQ object
841  *
842  * @return none
843  */
844 void
845 hw_del_wq(hw_wq_t *wq)
846 {
847 	if (wq != NULL) {
848 		ocs_list_remove(&wq->cq->q_list, wq);
849 		wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
850 		ocs_free(wq->cq->eq->hw->os, wq, sizeof(*wq));
851 	}
852 }
853 
854 /**
855  * @brief Free an RQ object
856  *
857  * The RQ object is freed
858  *
859  * @param rq pointer to RQ object
860  *
861  * @return none
862  */
863 void
864 hw_del_rq(hw_rq_t *rq)
865 {
866 
867 	if (rq != NULL) {
868 		ocs_hw_t *hw = rq->cq->eq->hw;
869 		/* Free RQ tracker */
870 		if (rq->rq_tracker != NULL) {
871 			ocs_free(hw->os, rq->rq_tracker, sizeof(ocs_hw_sequence_t*) * rq->entry_count);
872 			rq->rq_tracker = NULL;
873 		}
874 		ocs_list_remove(&rq->cq->q_list, rq);
875 		hw->hw_rq[rq->instance] = NULL;
876 		ocs_free(hw->os, rq, sizeof(*rq));
877 	}
878 }
879 
880 /**
881  * @brief Display HW queue objects
882  *
883  * The HW queue objects are displayed using ocs_log
884  *
885  * @param hw pointer to HW object
886  *
887  * @return none
888  */
889 void
890 hw_queue_dump(ocs_hw_t *hw)
891 {
892 	hw_eq_t *eq;
893 	hw_cq_t *cq;
894 	hw_q_t *q;
895 	hw_mq_t *mq;
896 	hw_wq_t *wq;
897 	hw_rq_t *rq;
898 
899 	ocs_list_foreach(&hw->eq_list, eq) {
900 		ocs_printf("eq[%d] id %2d\n", eq->instance, eq->queue->id);
901 		ocs_list_foreach(&eq->cq_list, cq) {
902 			ocs_printf("  cq[%d] id %2d current\n", cq->instance, cq->queue->id);
903 			ocs_list_foreach(&cq->q_list, q) {
904 				switch(q->type) {
905 				case SLI_QTYPE_MQ:
906 					mq = (hw_mq_t *) q;
907 					ocs_printf("    mq[%d] id %2d\n", mq->instance, mq->queue->id);
908 					break;
909 				case SLI_QTYPE_WQ:
910 					wq = (hw_wq_t *) q;
911 					ocs_printf("    wq[%d] id %2d\n", wq->instance, wq->queue->id);
912 					break;
913 				case SLI_QTYPE_RQ:
914 					rq = (hw_rq_t *) q;
915 					ocs_printf("    rq[%d] hdr id %2d\n", rq->instance, rq->hdr->id);
916 					break;
917 				default:
918 					break;
919 				}
920 			}
921 		}
922 	}
923 }
924 
925 /**
926  * @brief Teardown HW queue objects
927  *
928  * The HW queue objects are freed
929  *
930  * @param hw pointer to HW object
931  *
932  * @return none
933  */
934 void
935 hw_queue_teardown(ocs_hw_t *hw)
936 {
937 	uint32_t i;
938 	hw_eq_t *eq;
939 	hw_eq_t *eq_next;
940 
941 	if (ocs_list_valid(&hw->eq_list)) {
942 		ocs_list_foreach_safe(&hw->eq_list, eq, eq_next) {
943 			hw_del_eq(eq);
944 		}
945 	}
946 	for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
947 		ocs_varray_free(hw->wq_cpu_array[i]);
948 		hw->wq_cpu_array[i] = NULL;
949 	}
950 	for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
951 		ocs_varray_free(hw->wq_class_array[i]);
952 		hw->wq_class_array[i] = NULL;
953 	}
954 }
955 
956 /**
957  * @brief Allocate a WQ to an IO object
958  *
959  * The next work queue index is used to assign a WQ to an IO.
960  *
961  * If wq_steering is OCS_HW_WQ_STEERING_CLASS, a WQ from io->wq_class is
962  * selected.
963  *
964  * If wq_steering is OCS_HW_WQ_STEERING_REQUEST, then a WQ from the EQ that
965  * the IO request came in on is selected.
966  *
967  * If wq_steering is OCS_HW_WQ_STEERING_CPU, then a WQ associated with the
968  * CPU the request is made on is selected.
969  *
970  * @param hw pointer to HW object
971  * @param io pointer to IO object
972  *
973  * @return Return pointer to next WQ
974  */
975 hw_wq_t *
976 ocs_hw_queue_next_wq(ocs_hw_t *hw, ocs_hw_io_t *io)
977 {
978 	hw_eq_t *eq;
979 	hw_wq_t *wq = NULL;
980 
981 	switch(io->wq_steering) {
982 	case OCS_HW_WQ_STEERING_CLASS:
983 		if (likely(io->wq_class < ARRAY_SIZE(hw->wq_class_array))) {
984 			wq = ocs_varray_iter_next(hw->wq_class_array[io->wq_class]);
985 		}
986 		break;
987 	case OCS_HW_WQ_STEERING_REQUEST:
988 		eq = io->eq;
989 		if (likely(eq != NULL)) {
990 			wq = ocs_varray_iter_next(eq->wq_array);
991 		}
992 		break;
993 	case OCS_HW_WQ_STEERING_CPU: {
994 		uint32_t cpuidx = ocs_thread_getcpu();
995 
996 		if (likely(cpuidx < ARRAY_SIZE(hw->wq_cpu_array))) {
997 			wq = ocs_varray_iter_next(hw->wq_cpu_array[cpuidx]);
998 		}
999 		break;
1000 	}
1001 	}
1002 
1003 	if (unlikely(wq == NULL)) {
1004 		wq = hw->hw_wq[0];
1005 	}
1006 
1007 	return wq;
1008 }
1009 
1010 /**
1011  * @brief Return count of EQs for a queue topology object
1012  *
1013  * The EQ count in the HW's queue topology (hw->qtop) object is returned
1014  *
1015  * @param hw pointer to HW object
1016  *
1017  * @return count of EQs
1018  */
1019 uint32_t
1020 ocs_hw_qtop_eq_count(ocs_hw_t *hw)
1021 {
1022 	return hw->qtop->entry_counts[QTOP_EQ];
1023 }
1024 
1025 #define TOKEN_LEN		32
1026 
1027 /**
1028  * @brief return string given a QTOP entry
1029  *
1030  * @param entry QTOP entry
1031  *
1032  * @return returns string or "unknown"
1033  */
1034 #if HW_QTOP_DEBUG
1035 static char *
1036 qtopentry2s(ocs_hw_qtop_entry_e entry) {
1037 	switch(entry) {
1038 	#define P(x)	case x: return #x;
1039 	P(QTOP_EQ)
1040 	P(QTOP_CQ)
1041 	P(QTOP_WQ)
1042 	P(QTOP_RQ)
1043 	P(QTOP_MQ)
1044 	P(QTOP_THREAD_START)
1045 	P(QTOP_THREAD_END)
1046 	P(QTOP_LAST)
1047 	#undef P
1048 	}
1049 	return "unknown";
1050 }
1051 #endif
1052 
1053 /**
1054  * @brief Declare token types
1055  */
1056 typedef enum {
1057 	TOK_LPAREN = 1,
1058 	TOK_RPAREN,
1059 	TOK_COLON,
1060 	TOK_EQUALS,
1061 	TOK_QUEUE,
1062 	TOK_ATTR_NAME,
1063 	TOK_NUMBER,
1064 	TOK_NUMBER_VALUE,
1065 	TOK_NUMBER_LIST,
1066 } tok_type_e;
1067 
1068 /**
1069  * @brief Declare token sub-types
1070  */
1071 typedef enum {
1072 	TOK_SUB_EQ = 100,
1073 	TOK_SUB_CQ,
1074 	TOK_SUB_RQ,
1075 	TOK_SUB_MQ,
1076 	TOK_SUB_WQ,
1077 	TOK_SUB_LEN,
1078 	TOK_SUB_CLASS,
1079 	TOK_SUB_ULP,
1080 	TOK_SUB_FILTER,
1081 } tok_subtype_e;
1082 
1083 /**
1084  * @brief convert queue subtype to QTOP entry
1085  *
1086  * @param q queue subtype
1087  *
1088  * @return QTOP entry or 0
1089  */
1090 static ocs_hw_qtop_entry_e
1091 subtype2qtop(tok_subtype_e q)
1092 {
1093 	switch(q) {
1094 	case TOK_SUB_EQ:	return QTOP_EQ;
1095 	case TOK_SUB_CQ:	return QTOP_CQ;
1096 	case TOK_SUB_RQ:	return QTOP_RQ;
1097 	case TOK_SUB_MQ:	return QTOP_MQ;
1098 	case TOK_SUB_WQ:	return QTOP_WQ;
1099 	default:
1100 		break;
1101 	}
1102 	return 0;
1103 }
1104 
1105 /**
1106  * @brief Declare token object
1107  */
1108 typedef struct {
1109 	tok_type_e type;
1110 	tok_subtype_e subtype;
1111 	char string[TOKEN_LEN];
1112 } tok_t;
1113 
1114 /**
1115  * @brief Declare token array object
1116  */
1117 typedef struct {
1118 	tok_t *tokens;			/* Pointer to array of tokens */
1119 	uint32_t alloc_count;		/* Number of tokens in the array */
1120 	uint32_t inuse_count;		/* Number of tokens posted to array */
1121 	uint32_t iter_idx;		/* Iterator index */
1122 } tokarray_t;
1123 
1124 /**
1125  * @brief Declare token match structure
1126  */
1127 typedef struct {
1128 	char *s;
1129 	tok_type_e type;
1130 	tok_subtype_e subtype;
1131 } tokmatch_t;
1132 
1133 /**
1134  * @brief test if character is ID start character
1135  *
1136  * @param c character to test
1137  *
1138  * @return TRUE if character is an ID start character
1139  */
1140 static int32_t
1141 idstart(int c)
1142 {
1143 	return	isalpha(c) || (c == '_') || (c == '$');
1144 }
1145 
1146 /**
1147  * @brief test if character is an ID character
1148  *
1149  * @param c character to test
1150  *
1151  * @return TRUE if character is an ID character
1152  */
1153 static int32_t
1154 idchar(int c)
1155 {
1156 	return idstart(c) || ocs_isdigit(c);
1157 }
1158 
1159 /**
1160  * @brief Declare single character matches
1161  */
1162 static tokmatch_t cmatches[] = {
1163 	{"(", TOK_LPAREN},
1164 	{")", TOK_RPAREN},
1165 	{":", TOK_COLON},
1166 	{"=", TOK_EQUALS},
1167 };
1168 
1169 /**
1170  * @brief Declare identifier match strings
1171  */
1172 static tokmatch_t smatches[] = {
1173 	{"eq", TOK_QUEUE, TOK_SUB_EQ},
1174 	{"cq", TOK_QUEUE, TOK_SUB_CQ},
1175 	{"rq", TOK_QUEUE, TOK_SUB_RQ},
1176 	{"mq", TOK_QUEUE, TOK_SUB_MQ},
1177 	{"wq", TOK_QUEUE, TOK_SUB_WQ},
1178 	{"len", TOK_ATTR_NAME, TOK_SUB_LEN},
1179 	{"class", TOK_ATTR_NAME, TOK_SUB_CLASS},
1180 	{"ulp", TOK_ATTR_NAME, TOK_SUB_ULP},
1181 	{"filter", TOK_ATTR_NAME, TOK_SUB_FILTER},
1182 };
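/*
 * Illustrative example (not taken from the driver's default configuration):
 * the topology fragment "cq:len=1024 rq:filter=0,1" tokenizes to
 *
 *   TOK_QUEUE/TOK_SUB_CQ "cq", TOK_COLON, TOK_ATTR_NAME/TOK_SUB_LEN "len",
 *   TOK_EQUALS, TOK_NUMBER "1024",
 *   TOK_QUEUE/TOK_SUB_RQ "rq", TOK_COLON, TOK_ATTR_NAME/TOK_SUB_FILTER "filter",
 *   TOK_EQUALS, TOK_NUMBER_LIST "0,1"
 *
 * Comma-separated digits become TOK_NUMBER_LIST; identifiers beginning with
 * '$' become TOK_NUMBER_VALUE and are resolved by tok_getnumber().
 */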
1183 
1184 /**
1185  * @brief Scan string and return next token
1186  *
1187  * The string is scanned and the next token is returned
1188  *
1189  * @param s input string to scan
1190  * @param tok pointer to place scanned token
1191  *
1192  * @return pointer to input string following scanned token, or NULL
1193  */
1194 static const char *
1195 tokenize(const char *s, tok_t *tok)
1196 {
1197 	uint32_t i;
1198 
1199 	memset(tok, 0, sizeof(*tok));
1200 
1201 	/* Skip over whitespace */
1202 	while (*s && ocs_isspace(*s)) {
1203 		s++;
1204 	}
1205 
1206 	/* Return if nothing left in this string */
1207 	if (*s == 0) {
1208 		return NULL;
1209 	}
1210 
1211 	/* Look for single character matches */
1212 	for (i = 0; i < ARRAY_SIZE(cmatches); i++) {
1213 		if (cmatches[i].s[0] == *s) {
1214 			tok->type = cmatches[i].type;
1215 			tok->subtype = cmatches[i].subtype;
1216 			tok->string[0] = *s++;
1217 			return s;
1218 		}
1219 	}
1220 
1221 	/* Scan for a hex number or decimal */
1222 	if ((s[0] == '0') && ((s[1] == 'x') || (s[1] == 'X'))) {
1223 		char *p = tok->string;
1224 
1225 		tok->type = TOK_NUMBER;
1226 
1227 		*p++ = *s++;
1228 		*p++ = *s++;
1229 		while ((*s == ',') || ocs_isxdigit(*s)) {
1230 			if ((p - tok->string) < (int32_t)sizeof(tok->string)) {
1231 				*p++ = *s;
1232 			}
1233 			if (*s == ',') {
1234 				tok->type = TOK_NUMBER_LIST;
1235 			}
1236 			s++;
1237 		}
1238 		*p = 0;
1239 		return s;
1240 	} else if (ocs_isdigit(*s)) {
1241 		char *p = tok->string;
1242 
1243 		tok->type = TOK_NUMBER;
1244 		while ((*s == ',') || ocs_isdigit(*s)) {
1245 			if ((p - tok->string) < (int32_t)sizeof(tok->string)) {
1246 				*p++ = *s;
1247 			}
1248 			if (*s == ',') {
1249 				tok->type = TOK_NUMBER_LIST;
1250 			}
1251 			s++;
1252 		}
1253 		*p = 0;
1254 		return s;
1255 	}
1256 
1257 	/* Scan for an ID */
1258 	if (idstart(*s)) {
1259 		char *p = tok->string;
1260 
1261 		for (*p++ = *s++; idchar(*s); s++) {
1262 			if ((p - tok->string) < TOKEN_LEN) {
1263 				*p++ = *s;
1264 			}
1265 		}
1266 
1267 		/* See if this is a $ number value */
1268 		if (tok->string[0] == '$') {
1269 			tok->type = TOK_NUMBER_VALUE;
1270 		} else {
1271 			/* Look for a string match */
1272 			for (i = 0; i < ARRAY_SIZE(smatches); i++) {
1273 				if (strcmp(smatches[i].s, tok->string) == 0) {
1274 					tok->type = smatches[i].type;
1275 					tok->subtype = smatches[i].subtype;
1276 					return s;
1277 				}
1278 			}
1279 		}
1280 	}
1281 	return s;
1282 }
1283 
1284 /**
1285  * @brief convert token type to string
1286  *
1287  * @param type token type
1288  *
1289  * @return string, or "unknown"
1290  */
1291 static const char *
1292 token_type2s(tok_type_e type)
1293 {
1294 	switch(type) {
1295 	#define P(x)	case x: return #x;
1296 	P(TOK_LPAREN)
1297 	P(TOK_RPAREN)
1298 	P(TOK_COLON)
1299 	P(TOK_EQUALS)
1300 	P(TOK_QUEUE)
1301 	P(TOK_ATTR_NAME)
1302 	P(TOK_NUMBER)
1303 	P(TOK_NUMBER_VALUE)
1304 	P(TOK_NUMBER_LIST)
1305 	#undef P
1306 	}
1307 	return "unknown";
1308 }
1309 
1310 /**
1311  * @brief convert token sub-type to string
1312  *
1313  * @param subtype token sub-type
1314  *
1315  * @return string, or "unknown"
1316  */
1317 static const char *
1318 token_subtype2s(tok_subtype_e subtype)
1319 {
1320 	switch(subtype) {
1321 	#define P(x)	case x: return #x;
1322 	P(TOK_SUB_EQ)
1323 	P(TOK_SUB_CQ)
1324 	P(TOK_SUB_RQ)
1325 	P(TOK_SUB_MQ)
1326 	P(TOK_SUB_WQ)
1327 	P(TOK_SUB_LEN)
1328 	P(TOK_SUB_CLASS)
1329 	P(TOK_SUB_ULP)
1330 	P(TOK_SUB_FILTER)
1331 	#undef P
1332 	}
1333 	return "";
1334 }
1335 
1336 /**
1337  * @brief Generate syntax error message
1338  *
1339  * When a syntax error is found, the input tokens are dumped up to and including
1340  * the token that failed, as indicated by the current iterator index.
1341  *
1342  * @param hw pointer to HW object
1343  * @param tokarray pointer to token array object
1344  *
1345  * @return none
1346  */
1347 static void
1348 tok_syntax(ocs_hw_t *hw, tokarray_t *tokarray)
1349 {
1350 	uint32_t i;
1351 	tok_t *tok;
1352 
1353 	ocs_log_test(hw->os, "Syntax error:\n");
1354 
1355 	for (i = 0, tok = tokarray->tokens; (i <= tokarray->inuse_count); i++, tok++) {
1356 		ocs_log_test(hw->os, "%s [%2d]    %-16s %-16s %s\n", (i == tokarray->iter_idx) ? ">>>" : "   ", i,
1357 			token_type2s(tok->type), token_subtype2s(tok->subtype), tok->string);
1358 	}
1359 }
1360 
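/*
 * "$" substitutions recognized by tok_getnumber() below, all derived from the
 * running configuration:
 *
 *   $ncpu         number of CPUs
 *   $ncpu1        number of CPUs - 1
 *   $nwq          hw->config.n_wq
 *   $maxmrq       MIN(number of CPUs, OCS_HW_MAX_MRQS)
 *   $nulp         hw->ulp_max - hw->ulp_start + 1
 *   $rpt0..$rpt3  current repeat-loop indices, innermost first
 *
 * Anything else is parsed with ocs_strtoul().
 */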
1361 /**
1362  * @brief parse a number
1363  *
1364  * Parses tokens of type TOK_NUMBER and TOK_NUMBER_VALUE, returning a numeric value
1365  *
1366  * @param hw pointer to HW object
1367  * @param qtop pointer to QTOP object
1368  * @param tok pointer to token to parse
1369  *
1370  * @return numeric value
1371  */
1372 static uint32_t
1373 tok_getnumber(ocs_hw_t *hw, ocs_hw_qtop_t *qtop, tok_t *tok)
1374 {
1375 	uint32_t rval = 0;
1376 	uint32_t num_cpus = ocs_get_num_cpus();
1377 
1378 	switch(tok->type) {
1379 	case TOK_NUMBER_VALUE:
1380 		if (ocs_strcmp(tok->string, "$ncpu") == 0) {
1381 			rval = num_cpus;
1382 		} else if (ocs_strcmp(tok->string, "$ncpu1") == 0) {
1383 			rval = num_cpus - 1;
1384 		} else if (ocs_strcmp(tok->string, "$nwq") == 0) {
1385 			if (hw != NULL) {
1386 				rval = hw->config.n_wq;
1387 			}
1388 		} else if (ocs_strcmp(tok->string, "$maxmrq") == 0) {
1389 			rval = MIN(num_cpus, OCS_HW_MAX_MRQS);
1390 		} else if (ocs_strcmp(tok->string, "$nulp") == 0) {
1391 			rval = hw->ulp_max - hw->ulp_start + 1;
1392 		} else if ((qtop->rptcount_idx > 0) && ocs_strcmp(tok->string, "$rpt0") == 0) {
1393 			rval = qtop->rptcount[qtop->rptcount_idx-1];
1394 		} else if ((qtop->rptcount_idx > 1) && ocs_strcmp(tok->string, "$rpt1") == 0) {
1395 			rval = qtop->rptcount[qtop->rptcount_idx-2];
1396 		} else if ((qtop->rptcount_idx > 2) && ocs_strcmp(tok->string, "$rpt2") == 0) {
1397 			rval = qtop->rptcount[qtop->rptcount_idx-3];
1398 		} else if ((qtop->rptcount_idx > 3) && ocs_strcmp(tok->string, "$rpt3") == 0) {
1399 			rval = qtop->rptcount[qtop->rptcount_idx-4];
1400 		} else {
1401 			rval = ocs_strtoul(tok->string, 0, 0);
1402 		}
1403 		break;
1404 	case TOK_NUMBER:
1405 		rval = ocs_strtoul(tok->string, 0, 0);
1406 		break;
1407 	default:
1408 		break;
1409 	}
1410 	return rval;
1411 }
1412 
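/*
 * Grammar accepted by parse_topology() below (informal summary derived from
 * the code rather than from a separate specification):
 *
 *   queue entry:   eq | cq | rq | mq | wq, optionally followed by attribute
 *                  settings ":len=<n>", ":class=<n>", ":ulp=<n>" or
 *                  ":filter=<n>[,<n>...]" (the filter list is folded into a
 *                  bitmask, one bit per listed filter id)
 *   default entry: <attr>:<queue>=<n>, e.g. "len:cq=2048", which records a
 *                  set_default entry and changes the default for subsequent
 *                  queues of that type
 *   repeat:        <n>( ... ), where <n> may be a number or a "$" value; the
 *                  parenthesized sub-topology is parsed <n> times and the
 *                  iteration index is available inside as $rpt0
 *
 * For example, "$ncpu(eq cq wq)" would emit one EQ/CQ/WQ triple per CPU.
 */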
1413 /**
1414  * @brief parse an array of tokens
1415  *
1416  * The tokens are semantically parsed, to generate QTOP entries.
1417  *
1418  * @param hw pointer to HW object
1419  * @param tokarray array of tokens
1420  * @param qtop output QTOP object
1421  *
1422  * @return returns 0 for success, a negative error code value for failure.
1423  */
1424 static int32_t
1425 parse_topology(ocs_hw_t *hw, tokarray_t *tokarray, ocs_hw_qtop_t *qtop)
1426 {
1427 	ocs_hw_qtop_entry_t *qt = qtop->entries + qtop->inuse_count;
1428 	tok_t *tok;
1429 
1430 	for (; (tokarray->iter_idx < tokarray->inuse_count) &&
1431 	     ((tok = &tokarray->tokens[tokarray->iter_idx]) != NULL); ) {
1432 		if (qtop->inuse_count >= qtop->alloc_count) {
1433 			return -1;
1434 		}
1435 
1436 		qt = qtop->entries + qtop->inuse_count;
1437 
1438 		switch (tok[0].type)
1439 		{
1440 		case TOK_QUEUE:
1441 			qt->entry = subtype2qtop(tok[0].subtype);
1442 			qt->set_default = FALSE;
1443 			qt->len = 0;
1444 			qt->class = 0;
1445 			qtop->inuse_count++;
1446 
1447 			tokarray->iter_idx++;		/* Advance current token index */
1448 
1449 			/* Parse for queue attributes, possibly multiple instances */
1450 			while ((tokarray->iter_idx + 4) <= tokarray->inuse_count) {
1451 				tok = &tokarray->tokens[tokarray->iter_idx];
1452 				if(	(tok[0].type == TOK_COLON) &&
1453 					(tok[1].type == TOK_ATTR_NAME) &&
1454 					(tok[2].type == TOK_EQUALS) &&
1455 					((tok[3].type == TOK_NUMBER) ||
1456 					 (tok[3].type == TOK_NUMBER_VALUE) ||
1457 					 (tok[3].type == TOK_NUMBER_LIST))) {
1458 					switch (tok[1].subtype) {
1459 					case TOK_SUB_LEN:
1460 						qt->len = tok_getnumber(hw, qtop, &tok[3]);
1461 						break;
1462 
1463 					case TOK_SUB_CLASS:
1464 						qt->class = tok_getnumber(hw, qtop, &tok[3]);
1465 						break;
1466 
1467 					case TOK_SUB_ULP:
1468 						qt->ulp = tok_getnumber(hw, qtop, &tok[3]);
1469 						break;
1470 
1471 					case TOK_SUB_FILTER:
1472 						if (tok[3].type == TOK_NUMBER_LIST) {
1473 							uint32_t mask = 0;
1474 							char *p = tok[3].string;
1475 
1476 							while ((p != NULL) && *p) {
1477 								uint32_t v;
1478 
1479 								v = ocs_strtoul(p, 0, 0);
1480 								if (v < 32) {
1481 									mask |= (1U << v);
1482 								}
1483 
1484 								p = ocs_strchr(p, ',');
1485 								if (p != NULL) {
1486 									p++;
1487 								}
1488 							}
1489 							qt->filter_mask = mask;
1490 						} else {
1491 							qt->filter_mask = (1U << tok_getnumber(hw, qtop, &tok[3]));
1492 						}
1493 						break;
1494 					default:
1495 						break;
1496 					}
1497 					/* Advance current token index */
1498 					tokarray->iter_idx += 4;
1499 				} else {
1500 					break;
1501 				}
1502 			}
1503 			qtop->entry_counts[qt->entry]++;
1504 			break;
1505 
1506 		case TOK_ATTR_NAME:
1507 			if (	((tokarray->iter_idx + 5) <= tokarray->inuse_count) &&
1508 				(tok[1].type == TOK_COLON) &&
1509 				(tok[2].type == TOK_QUEUE) &&
1510 				(tok[3].type == TOK_EQUALS) &&
1511 				((tok[4].type == TOK_NUMBER) || (tok[4].type == TOK_NUMBER_VALUE))) {
1512 				qt->entry = subtype2qtop(tok[2].subtype);
1513 				qt->set_default = TRUE;
1514 				switch(tok[0].subtype) {
1515 				case TOK_SUB_LEN:
1516 					qt->len = tok_getnumber(hw, qtop, &tok[4]);
1517 					break;
1518 				case TOK_SUB_CLASS:
1519 					qt->class = tok_getnumber(hw, qtop, &tok[4]);
1520 					break;
1521 				case TOK_SUB_ULP:
1522 					qt->ulp = tok_getnumber(hw, qtop, &tok[4]);
1523 					break;
1524 				default:
1525 					break;
1526 				}
1527 				qtop->inuse_count++;
1528 				tokarray->iter_idx += 5;
1529 			} else {
1530 				tok_syntax(hw, tokarray);
1531 				return -1;
1532 			}
1533 			break;
1534 
1535 		case TOK_NUMBER:
1536 		case TOK_NUMBER_VALUE: {
1537 			uint32_t rpt_count = 1;
1538 			uint32_t i;
1539 
1540 			rpt_count = tok_getnumber(hw, qtop, tok);
1541 
1542 			if (tok[1].type == TOK_LPAREN) {
1543 				uint32_t iter_idx_save;
1544 
1545 				tokarray->iter_idx += 2;
1546 
1547 				/* save token array iteration index */
1548 				iter_idx_save = tokarray->iter_idx;
1549 
1550 				for (i = 0; i < rpt_count; i++) {
1551 					uint32_t rptcount_idx = qtop->rptcount_idx;
1552 
1553 					if (qtop->rptcount_idx < ARRAY_SIZE(qtop->rptcount)) {
1554 						qtop->rptcount[qtop->rptcount_idx++] = i;
1555 					}
1556 
1557 					/* restore token array iteration index */
1558 					tokarray->iter_idx = iter_idx_save;
1559 
1560 					/* parse, append to qtop */
1561 					parse_topology(hw, tokarray, qtop);
1562 
1563 					qtop->rptcount_idx = rptcount_idx;
1564 				}
1565 			}
1566 			break;
1567 		}
1568 
1569 		case TOK_RPAREN:
1570 			tokarray->iter_idx++;
1571 			return 0;
1572 
1573 		default:
1574 			tok_syntax(hw, tokarray);
1575 			return -1;
1576 		}
1577 	}
1578 	return 0;
1579 }
1580 
1581 /**
1582  * @brief Parse queue topology string
1583  *
1584  * The queue topology object is allocated, and filled with the results of parsing the
1585  * passed in queue topology string
1586  *
1587  * @param hw pointer to HW object
1588  * @param qtop_string input queue topology string
1589  *
1590  * @return pointer to allocated QTOP object, or NULL if there was an error
1591  */
1592 ocs_hw_qtop_t *
1593 ocs_hw_qtop_parse(ocs_hw_t *hw, const char *qtop_string)
1594 {
1595 	ocs_hw_qtop_t *qtop;
1596 	tokarray_t tokarray;
1597 	const char *s;
1598 #if HW_QTOP_DEBUG
1599 	uint32_t i;
1600 	ocs_hw_qtop_entry_t *qt;
1601 #endif
1602 
1603 	ocs_log_debug(hw->os, "queue topology: %s\n", qtop_string);
1604 
1605 	/* Allocate a token array */
1606 	tokarray.tokens = ocs_malloc(hw->os, MAX_TOKENS * sizeof(*tokarray.tokens), OCS_M_ZERO | OCS_M_NOWAIT);
1607 	if (tokarray.tokens == NULL) {
1608 		return NULL;
1609 	}
1610 	tokarray.alloc_count = MAX_TOKENS;
1611 	tokarray.inuse_count = 0;
1612 	tokarray.iter_idx = 0;
1613 
1614 	/* Parse the tokens */
1615 	for (s = qtop_string; (tokarray.inuse_count < tokarray.alloc_count) &&
1616 	     ((s = tokenize(s, &tokarray.tokens[tokarray.inuse_count]))) != NULL; ) {
1617 		tokarray.inuse_count++;
1618 	}
1619 
1620 	/* Allocate a queue topology structure */
1621 	qtop = ocs_malloc(hw->os, sizeof(*qtop), OCS_M_ZERO | OCS_M_NOWAIT);
1622 	if (qtop == NULL) {
1623 		ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1624 		ocs_log_err(hw->os, "malloc qtop failed\n");
1625 		return NULL;
1626 	}
1627 	qtop->os = hw->os;
1628 
1629 	/* Allocate queue topology entries */
1630 	qtop->entries = ocs_malloc(hw->os, OCS_HW_MAX_QTOP_ENTRIES*sizeof(*qtop->entries), OCS_M_ZERO | OCS_M_NOWAIT);
1631 	if (qtop->entries == NULL) {
1632 		ocs_log_err(hw->os, "malloc qtop entries failed\n");
1633 		ocs_free(hw->os, qtop, sizeof(*qtop));
1634 		ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1635 		return NULL;
1636 	}
1637 	qtop->alloc_count = OCS_HW_MAX_QTOP_ENTRIES;
1638 	qtop->inuse_count = 0;
1639 
1640 	/* Parse the tokens */
1641 	parse_topology(hw, &tokarray, qtop);
1642 #if HW_QTOP_DEBUG
1643 	for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
1644 		ocs_log_debug(hw->os, "entry %s set_df %d len %4d class %d ulp %d\n", qtopentry2s(qt->entry), qt->set_default, qt->len,
1645 		       qt->class, qt->ulp);
1646 	}
1647 #endif
1648 
1649 	/* Free the tokens array */
1650 	ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1651 
1652 	return qtop;
1653 }
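/*
 * Minimal usage sketch (illustrative only; the real call sites and the
 * driver's default topology string live elsewhere in the driver, e.g. in
 * ocs_hw.c, and may differ):
 *
 *	hw->qtop = ocs_hw_qtop_parse(hw, "eq cq mq eq cq rq eq cq wq");
 *	if (hw->qtop == NULL)
 *		return OCS_HW_RTN_NO_MEMORY;
 *	if (ocs_hw_init_queues(hw, hw->qtop) != OCS_HW_RTN_SUCCESS)
 *		return OCS_HW_RTN_ERROR;
 *
 * The qtop object is typically retained for the life of the HW object (it is
 * consulted by ocs_hw_qtop_eq_count()) and released with ocs_hw_qtop_free()
 * at teardown, after hw_queue_teardown().
 */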
1654 
1655 /**
1656  * @brief free queue topology object
1657  *
1658  * @param qtop pointer to QTOP object
1659  *
1660  * @return none
1661  */
1662 void
1663 ocs_hw_qtop_free(ocs_hw_qtop_t *qtop)
1664 {
1665 	if (qtop != NULL) {
1666 		if (qtop->entries != NULL) {
1667 			ocs_free(qtop->os, qtop->entries, qtop->alloc_count*sizeof(*qtop->entries));
1668 		}
1669 		ocs_free(qtop->os, qtop, sizeof(*qtop));
1670 	}
1671 }
1672 
1673 /* Uncomment this to turn on RQ debug */
1674 // #define ENABLE_DEBUG_RQBUF
1675 
1676 static int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id);
1677 static ocs_hw_sequence_t * ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex);
1678 static int32_t ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
1679 static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
1680 
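/*
 * RQ pair mode: each hw_rq_t created above owns two SLI receive queues, a
 * header RQ (rq->hdr) and a payload RQ (rq->data), that are posted and
 * consumed in lock-step. A single RQE completion therefore carries one buffer
 * index that is valid for both queues, and ocs_hw_rqpair_get() (defined later
 * in this file) is expected to return the sequence holding the header and
 * payload buffers posted at that index.
 */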
1681 /**
1682  * @brief Process receive queue completions for RQ Pair mode.
1683  *
1684  * @par Description
1685  * RQ completions are processed. In RQ pair mode, a single header and single payload
1686  * buffer are received, and passed to the function that has registered for unsolicited
1687  * callbacks.
1688  *
1689  * @param hw Hardware context.
1690  * @param cq Pointer to HW completion queue.
1691  * @param cqe Completion queue entry.
1692  *
1693  * @return Returns 0 for success, or a negative error code value for failure.
1694  */
1695 
1696 int32_t
1697 ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1698 {
1699 	uint16_t rq_id;
1700 	uint32_t index;
1701 	int32_t rqindex;
1702 	int32_t	 rq_status;
1703 	uint32_t h_len;
1704 	uint32_t p_len;
1705 	ocs_hw_sequence_t *seq;
1706 
1707 	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
1708 	if (0 != rq_status) {
1709 		switch (rq_status) {
1710 		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
1711 		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
1712 			/* just get RQ buffer then return to chip */
1713 			rqindex = ocs_hw_rqpair_find(hw, rq_id);
1714 			if (rqindex < 0) {
1715 				ocs_log_test(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
1716 					     rq_status, rq_id);
1717 				break;
1718 			}
1719 
1720 			/* get RQ buffer */
1721 			seq = ocs_hw_rqpair_get(hw, rqindex, index);
1722 
1723 			/* return to chip */
1724 			if (ocs_hw_rqpair_sequence_free(hw, seq)) {
1725 				ocs_log_test(hw->os, "status=%#x, failed to return buffers to RQ\n",
1726 					     rq_status);
1727 				break;
1728 			}
1729 			break;
1730 		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
1731 		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
1732 			/* since RQ buffers were not consumed, cannot return them to chip */
1733 			ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
1734 			/* fall through */
1735 		default:
1736 			break;
1737 		}
1738 		return -1;
1739 	}
1740 
1741 	rqindex = ocs_hw_rqpair_find(hw, rq_id);
1742 	if (rqindex < 0) {
1743 		ocs_log_test(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
1744 		return -1;
1745 	}
1746 
1747 	OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
1748 		 rq->payload_use_count++;})
1749 
1750 	seq = ocs_hw_rqpair_get(hw, rqindex, index);
1751 	ocs_hw_assert(seq != NULL);
1752 
1753 	seq->hw = hw;
1754 	seq->auto_xrdy = 0;
1755 	seq->out_of_xris = 0;
1756 	seq->xri = 0;
1757 	seq->hio = NULL;
1758 
1759 	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
1760 	seq->header->dma.len = h_len;
1761 	seq->payload->dma.len = p_len;
1762 	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
1763 	seq->hw_priv = cq->eq;
1764 
1765 	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1766 	if (hw->config.bounce) {
1767 		fc_header_t *hdr = seq->header->dma.virt;
1768 		uint32_t s_id = fc_be24toh(hdr->s_id);
1769 		uint32_t d_id = fc_be24toh(hdr->d_id);
1770 		uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
1771 		if (hw->callback.bounce != NULL) {
1772 			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
1773 		}
1774 	} else {
1775 		hw->callback.unsolicited(hw->args.unsolicited, seq);
1776 	}
1777 
1778 	return 0;
1779 }
1780 
1781 /**
1782  * @brief Process receive queue completions for RQ Pair mode - Auto xfer rdy
1783  *
1784  * @par Description
1785  * RQ completions are processed. In RQ pair mode, a single header and single payload
1786  * buffer are received, and passed to the function that has registered for unsolicited
1787  * callbacks.
1788  *
1789  * @param hw Hardware context.
1790  * @param cq Pointer to HW completion queue.
1791  * @param cqe Completion queue entry.
1792  *
1793  * @return Returns 0 for success, or a negative error code value for failure.
1794  */
1795 
1796 int32_t
1797 ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1798 {
1799 	/* Seems silly to call a SLI function to decode - use the structure directly for performance */
1800 	sli4_fc_optimized_write_cmd_cqe_t *opt_wr = (sli4_fc_optimized_write_cmd_cqe_t*)cqe;
1801 	uint16_t rq_id;
1802 	uint32_t index;
1803 	int32_t rqindex;
1804 	int32_t	 rq_status;
1805 	uint32_t h_len;
1806 	uint32_t p_len;
1807 	ocs_hw_sequence_t *seq;
1808 	uint8_t axr_lock_taken = 0;
1809 #if defined(OCS_DISC_SPIN_DELAY)
1810 	uint32_t 	delay = 0;
1811 	char 		prop_buf[32];
1812 #endif
1813 
1814 	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
1815 	if (0 != rq_status) {
1816 		switch (rq_status) {
1817 		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
1818 		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
1819 			/* just get RQ buffer then return to chip */
1820 			rqindex = ocs_hw_rqpair_find(hw, rq_id);
1821 			if (rqindex < 0) {
1822 				ocs_log_err(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
1823 					    rq_status, rq_id);
1824 				break;
1825 			}
1826 
1827 			/* get RQ buffer */
1828 			seq = ocs_hw_rqpair_get(hw, rqindex, index);
1829 
1830 			/* return to chip */
1831 			if (ocs_hw_rqpair_sequence_free(hw, seq)) {
1832 				ocs_log_err(hw->os, "status=%#x, failed to return buffers to RQ\n",
1833 					    rq_status);
1834 				break;
1835 			}
1836 			break;
1837 		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
1838 		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
1839 			/* since RQ buffers were not consumed, cannot return them to chip */
1840 			ocs_log_debug(hw->os, "Warning: RCQE status=%#x, \n", rq_status);
1841 			/* fall through */
1842 		default:
1843 			break;
1844 		}
1845 		return -1;
1846 	}
1847 
1848 	rqindex = ocs_hw_rqpair_find(hw, rq_id);
1849 	if (rqindex < 0) {
1850 		ocs_log_err(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
1851 		return -1;
1852 	}
1853 
1854 	OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
1855 		 rq->payload_use_count++;})
1856 
1857 	seq = ocs_hw_rqpair_get(hw, rqindex, index);
1858 	ocs_hw_assert(seq != NULL);
1859 
1860 	seq->hw = hw;
1861 	seq->auto_xrdy = opt_wr->agxr;
1862 	seq->out_of_xris = opt_wr->oox;
1863 	seq->xri = opt_wr->xri;
1864 	seq->hio = NULL;
1865 
1866 	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
1867 	seq->header->dma.len = h_len;
1868 	seq->payload->dma.len = p_len;
1869 	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
1870 	seq->hw_priv = cq->eq;
1871 
1872 	if (seq->auto_xrdy) {
1873 		fc_header_t *fc_hdr = seq->header->dma.virt;
1874 
1875 		seq->hio = ocs_hw_io_lookup(hw, seq->xri);
1876 		ocs_lock(&seq->hio->axr_lock);
1877 		axr_lock_taken = 1;
1878 
1879 		/* save the FCFI, src_id, dest_id and ox_id because we need them for the sequence object when the data arrives. */
1880 		seq->hio->axr_buf->fcfi = seq->fcfi;
1881 		seq->hio->axr_buf->hdr.ox_id = fc_hdr->ox_id;
1882 		seq->hio->axr_buf->hdr.s_id = fc_hdr->s_id;
1883 		seq->hio->axr_buf->hdr.d_id = fc_hdr->d_id;
1884 		seq->hio->axr_buf->cmd_cqe = 1;
1885 
1886 		/*
1887 		 * Since auto xfer rdy is used for this IO, then clear the sequence
1888 		 * initiative bit in the header so that the upper layers wait for the
1889 		 * data. This should flow exactly like the first burst case.
1890 		 */
1891 		fc_hdr->f_ctl &= fc_htobe24(~FC_FCTL_SEQUENCE_INITIATIVE);
1892 
1893 		/* If AXR CMD CQE came before previous TRSP CQE of same XRI */
1894 		if (seq->hio->type == OCS_HW_IO_TARGET_RSP) {
1895 			seq->hio->axr_buf->call_axr_cmd = 1;
1896 			seq->hio->axr_buf->cmd_seq = seq;
1897 			goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd;
1898 		}
1899 	}
1900 
1901 	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1902 	if (hw->config.bounce) {
1903 		fc_header_t *hdr = seq->header->dma.virt;
1904 		uint32_t s_id = fc_be24toh(hdr->s_id);
1905 		uint32_t d_id = fc_be24toh(hdr->d_id);
1906 		uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
1907 		if (hw->callback.bounce != NULL) {
1908 			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
1909 		}
1910 	} else {
1911 		hw->callback.unsolicited(hw->args.unsolicited, seq);
1912 	}
1913 
1914 	if (seq->auto_xrdy) {
1915 		/* If the data CQE arrived out of order (before the cmd CQE) in the AXR case */
1916 		if(seq->hio->axr_buf->data_cqe == 1) {
1917 #if defined(OCS_DISC_SPIN_DELAY)
1918 			if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
1919 				delay = ocs_strtoul(prop_buf, 0, 0);
1920 				ocs_udelay(delay);
1921 			}
1922 #endif
1923 			/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1924 			if (hw->config.bounce) {
1925 				fc_header_t *hdr = seq->header->dma.virt;
1926 				uint32_t s_id = fc_be24toh(hdr->s_id);
1927 				uint32_t d_id = fc_be24toh(hdr->d_id);
1928 				uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
1929 				if (hw->callback.bounce != NULL) {
1930 					(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &seq->hio->axr_buf->seq, s_id, d_id, ox_id);
1931 				}
1932 			} else {
1933 				hw->callback.unsolicited(hw->args.unsolicited, &seq->hio->axr_buf->seq);
1934 			}
1935 		}
1936 	}
1937 
1938 exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd:
1939 	if(axr_lock_taken) {
1940 		ocs_unlock(&seq->hio->axr_lock);
1941 	}
1942 	return 0;
1943 }
1944 
1945 /**
1946  * @brief Process CQ completions for Auto xfer rdy data phases.
1947  *
1948  * @par Description
1949  * The data is DMA'd into the data buffer posted to the SGL prior to the XRI
1950  * being assigned to an IO. When the completion is received, all of the data
1951  * is in the single buffer.
1952  *
1953  * @param hw Hardware context.
1954  * @param cq Pointer to HW completion queue.
1955  * @param cqe Completion queue entry.
1956  *
1957  * @return Returns 0 for success, or a negative error code value for failure.
1958  */
1959 
1960 int32_t
1961 ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1962 {
1963 	/* Seems silly to call a SLI function to decode - use the structure directly for performance */
1964 	sli4_fc_optimized_write_data_cqe_t *opt_wr = (sli4_fc_optimized_write_data_cqe_t*)cqe;
1965 	ocs_hw_sequence_t *seq;
1966 	ocs_hw_io_t *io;
1967 	ocs_hw_auto_xfer_rdy_buffer_t *buf;
1968 #if defined(OCS_DISC_SPIN_DELAY)
1969 	uint32_t 	delay = 0;
1970 	char 		prop_buf[32];
1971 #endif
1972 	/* Look up the IO */
1973 	io = ocs_hw_io_lookup(hw, opt_wr->xri);
1974 	ocs_lock(&io->axr_lock);
1975 	buf = io->axr_buf;
1976 	buf->data_cqe = 1;
1977 	seq = &buf->seq;
1978 	seq->hw = hw;
1979 	seq->auto_xrdy = 1;
1980 	seq->out_of_xris = 0;
1981 	seq->xri = opt_wr->xri;
1982 	seq->hio = io;
1983 	seq->header = &buf->header;
1984 	seq->payload = &buf->payload;
1985 
1986 	seq->header->dma.len = sizeof(fc_header_t);
1987 	seq->payload->dma.len = opt_wr->total_data_placed;
1988 	seq->fcfi = buf->fcfi;
1989 	seq->hw_priv = cq->eq;
1990 
1991 	if (opt_wr->status == SLI4_FC_WCQE_STATUS_SUCCESS) {
1992 		seq->status = OCS_HW_UNSOL_SUCCESS;
1993 	} else if (opt_wr->status == SLI4_FC_WCQE_STATUS_REMOTE_STOP) {
1994 		seq->status = OCS_HW_UNSOL_ABTS_RCVD;
1995 	} else {
1996 		seq->status = OCS_HW_UNSOL_ERROR;
1997 	}
1998 
1999 	/* If AXR data CQE came before previous TRSP CQE of same XRI */
2000 	if(io->type == OCS_HW_IO_TARGET_RSP) {
2001 		io->axr_buf->call_axr_data = 1;
2002 		goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
2003 	}
2004 
2005 	if(!buf->cmd_cqe) {
2006 		/* if data cqe came before cmd cqe, return here, cmd cqe will handle */
2007 		goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
2008 	}
2009 #if defined(OCS_DISC_SPIN_DELAY)
2010 	if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
2011 		delay = ocs_strtoul(prop_buf, 0, 0);
2012 		ocs_udelay(delay);
2013 	}
2014 #endif
2015 
2016 	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
2017 	if (hw->config.bounce) {
2018 		fc_header_t *hdr = seq->header->dma.virt;
2019 		uint32_t s_id = fc_be24toh(hdr->s_id);
2020 		uint32_t d_id = fc_be24toh(hdr->d_id);
2021 		uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
2022 		if (hw->callback.bounce != NULL) {
2023 			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
2024 		}
2025 	} else {
2026 		hw->callback.unsolicited(hw->args.unsolicited, seq);
2027 	}
2028 
2029 exit_ocs_hw_rqpair_process_auto_xfr_rdy_data:
2030 	ocs_unlock(&io->axr_lock);
2031 	return 0;
2032 }
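
/*
 * Ordering notes for the two optimized write handlers above:
 *
 *  - cmd CQE first, then data CQE: the cmd handler delivers the received
 *    command sequence; the data handler later delivers the synthesized data
 *    sequence (buf->seq) because buf->cmd_cqe is already set.
 *  - data CQE first, then cmd CQE: the data handler only records
 *    buf->data_cqe and returns; the cmd handler then delivers the command
 *    sequence followed by the deferred data sequence.
 *  - either CQE arriving while a target response (OCS_HW_IO_TARGET_RSP) is
 *    still outstanding on the same XRI: delivery is deferred through
 *    buf->call_axr_cmd / buf->call_axr_data and left for the response
 *    completion path to replay.
 */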
2033 
2034 /**
2035  * @brief Return pointer to RQ buffer entry.
2036  *
2037  * @par Description
2038  * Returns a pointer to the RQ buffer entry given by @c rqindex and @c bufindex.
2039  *
2040  * @param hw Hardware context.
2041  * @param rqindex Index of the RQ that is being processed.
2042  * @param bufindex Index into the RQ that is being processed.
2043  *
2044  * @return Pointer to the sequence structure, or NULL otherwise.
2045  */
2046 static ocs_hw_sequence_t *
2047 ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex)
2048 {
2049 	sli4_queue_t *rq_hdr = &hw->rq[rqindex];
2050 	sli4_queue_t *rq_payload = &hw->rq[rqindex+1];
2051 	ocs_hw_sequence_t *seq = NULL;
2052 	hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
2053 
2054 #if defined(ENABLE_DEBUG_RQBUF)
2055 	uint64_t rqbuf_debug_value = 0xdead0000 | ((rq->id & 0xf) << 12) | (bufindex & 0xfff);
2056 #endif
2057 
2058 	if (bufindex >= rq_hdr->length) {
2059 		ocs_log_err(hw->os, "RQ index %d bufindex %d exceeds ring length %d for id %d\n",
2060 			    rqindex, bufindex, rq_hdr->length, rq_hdr->id);
2061 		return NULL;
2062 	}
2063 
2064 	sli_queue_lock(rq_hdr);
2065 	sli_queue_lock(rq_payload);
2066 
2067 #if defined(ENABLE_DEBUG_RQBUF)
2068 	/* Put a debug value into the rq, to track which entries are still valid */
2069 	_sli_queue_poke(&hw->sli, rq_hdr, bufindex, (uint8_t *)&rqbuf_debug_value);
2070 	_sli_queue_poke(&hw->sli, rq_payload, bufindex, (uint8_t *)&rqbuf_debug_value);
2071 #endif
2072 
2073 	seq = rq->rq_tracker[bufindex];
2074 	rq->rq_tracker[bufindex] = NULL;
2075 
2076 	if (seq == NULL ) {
2077 		ocs_log_err(hw->os, "RQ buffer NULL, rqindex %d, bufindex %d, current q index = %d\n",
2078 			    rqindex, bufindex, rq_hdr->index);
2079 	}
2080 
2081 	sli_queue_unlock(rq_payload);
2082 	sli_queue_unlock(rq_hdr);
2083 	return seq;
2084 }
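
/*
 * A minimal usage sketch of the lookup/get/free cycle, mirroring the RQ
 * error path in ocs_hw_rqpair_process_auto_xfr_rdy_cmd() above. The function
 * name example_requeue_rq_buffer is hypothetical and exists only to
 * illustrate the pattern: resolve the RQ id to an index, pull the tracked
 * sequence for the completed entry, and hand the buffers straight back to
 * the chip.
 *
 *	static void
 *	example_requeue_rq_buffer(ocs_hw_t *hw, uint16_t rq_id, uint32_t index)
 *	{
 *		int32_t rqindex = ocs_hw_rqpair_find(hw, rq_id);
 *		ocs_hw_sequence_t *seq;
 *
 *		if (rqindex < 0)
 *			return;
 *
 *		seq = ocs_hw_rqpair_get(hw, rqindex, (uint16_t)index);
 *		if (seq != NULL)
 *			ocs_hw_rqpair_sequence_free(hw, seq);
 *	}
 */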
2085 
2086 /**
2087  * @brief Posts an RQ buffer to a queue and updates the verification structures.
2088  *
2089  * @param hw Hardware context.
2090  * @param seq Pointer to sequence object.
2091  *
2092  * @return Returns 0 on success, or a non-zero value otherwise.
2093  */
2094 static int32_t
2095 ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2096 {
2097 	sli4_queue_t *rq_hdr = &hw->rq[seq->header->rqindex];
2098 	sli4_queue_t *rq_payload = &hw->rq[seq->payload->rqindex];
2099 	uint32_t hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
2100 	hw_rq_t *rq = hw->hw_rq[hw_rq_index];
2101 	uint32_t     phys_hdr[2];
2102 	uint32_t     phys_payload[2];
2103 	int32_t      qindex_hdr;
2104 	int32_t      qindex_payload;
2105 
2106 	/* Update the RQ verification lookup tables */
2107 	phys_hdr[0] = ocs_addr32_hi(seq->header->dma.phys);
2108 	phys_hdr[1] = ocs_addr32_lo(seq->header->dma.phys);
2109 	phys_payload[0] = ocs_addr32_hi(seq->payload->dma.phys);
2110 	phys_payload[1] = ocs_addr32_lo(seq->payload->dma.phys);
2111 
2112 	sli_queue_lock(rq_hdr);
2113 	sli_queue_lock(rq_payload);
2114 
2115 	/*
2116 	 * Note: The header must be posted last for buffer pair mode because
2117 	 *       posting on the header queue posts the payload queue as well.
2118 	 *       We do not ring the payload queue independently in RQ pair mode.
2119 	 */
2120 	qindex_payload = _sli_queue_write(&hw->sli, rq_payload, (void *)phys_payload);
2121 	qindex_hdr = _sli_queue_write(&hw->sli, rq_hdr, (void *)phys_hdr);
2122 	if (qindex_hdr < 0 ||
2123 	    qindex_payload < 0) {
2124 		ocs_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
2125 		sli_queue_unlock(rq_payload);
2126 		sli_queue_unlock(rq_hdr);
2127 		return OCS_HW_RTN_ERROR;
2128 	}
2129 
2130 	/* ensure the indexes are the same */
2131 	ocs_hw_assert(qindex_hdr == qindex_payload);
2132 
2133 	/* Update the lookup table */
2134 	if (rq->rq_tracker[qindex_hdr] == NULL) {
2135 		rq->rq_tracker[qindex_hdr] = seq;
2136 	} else {
2137 		ocs_log_test(hw->os, "expected rq_tracker[%d][%d] buffer to be NULL\n",
2138 			     hw_rq_index, qindex_hdr);
2139 	}
2140 
2141 	sli_queue_unlock(rq_payload);
2142 	sli_queue_unlock(rq_hdr);
2143 	return OCS_HW_RTN_SUCCESS;
2144 }
2145 
2146 /**
2147  * @brief Return RQ buffers (while in RQ pair mode).
2148  *
2149  * @par Description
2150  * The header and payload buffers are returned to the Receive Queue.
2151  *
2152  * @param hw Hardware context.
2153  * @param seq Header/payload sequence buffers.
2154  *
2155  * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code value on failure.
2156  */
2157 
2158 ocs_hw_rtn_e
2159 ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2160 {
2161 	ocs_hw_rtn_e   rc = OCS_HW_RTN_SUCCESS;
2162 
2163 	/* Check for auto xfer rdy dummy buffers and call the proper release function. */
2164 	if (seq->header->rqindex == OCS_HW_RQ_INDEX_DUMMY_HDR) {
2165 		return ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(hw, seq);
2166 	}
2167 
2168 	/*
2169 	 * Post the data buffer first, because in RQ pair mode, ringing the
2170 	 * doorbell of the header ring will post the data buffer as well.
2171 	 */
2172 	if (ocs_hw_rqpair_put(hw, seq)) {
2173 		ocs_log_err(hw->os, "error writing buffers\n");
2174 		return OCS_HW_RTN_ERROR;
2175 	}
2176 
2177 	return rc;
2178 }
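
/*
 * Note: once ocs_hw_rqpair_sequence_free() returns successfully, the header
 * and payload DMA buffers have been reposted to the chip and the sequence is
 * registered in the RQ tracker again, so the caller must not touch the
 * sequence afterwards.
 */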
2179 
2180 /**
2181  * @brief Find the RQ index of RQ_ID.
2182  *
2183  * @param hw Hardware context.
2184  * @param rq_id RQ ID to find.
2185  *
2186  * @return Returns the RQ index, or -1 if not found
2187  */
2188 static inline int32_t
2189 ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id)
2190 {
2191 	return ocs_hw_queue_hash_find(hw->rq_hash, rq_id);
2192 }
2193 
2194 /**
2195  * @ingroup devInitShutdown
2196  * @brief Allocate auto xfer rdy buffers.
2197  *
2198  * @par Description
2199  * Allocates the auto xfer rdy buffers and places them on the free list.
2200  *
2201  * @param hw Hardware context allocated by the caller.
2202  * @param num_buffers Number of buffers to allocate.
2203  *
2204  * @return Returns 0 on success, or a non-zero value on failure.
2205  */
2206 ocs_hw_rtn_e
2207 ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers)
2208 {
2209 	ocs_hw_auto_xfer_rdy_buffer_t *buf;
2210 	uint32_t i;
2211 
2212 	hw->auto_xfer_rdy_buf_pool = ocs_pool_alloc(hw->os, sizeof(ocs_hw_auto_xfer_rdy_buffer_t), num_buffers, FALSE);
2213 	if (hw->auto_xfer_rdy_buf_pool == NULL) {
2214 		ocs_log_err(hw->os, "Failure to allocate auto xfer ready buffer pool\n");
2215 		return OCS_HW_RTN_NO_MEMORY;
2216 	}
2217 
2218 	for (i = 0; i < num_buffers; i++) {
2219 		/* allocate the wrapper object */
2220 		buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
2221 		ocs_hw_assert(buf != NULL);
2222 
2223 		/* allocate the auto xfer ready buffer */
2224 		if (ocs_dma_alloc(hw->os, &buf->payload.dma, hw->config.auto_xfer_rdy_size, OCS_MIN_DMA_ALIGNMENT)) {
2225 			ocs_log_err(hw->os, "DMA allocation failed\n");
2226 			ocs_free(hw->os, buf, sizeof(*buf));
2227 			return OCS_HW_RTN_NO_MEMORY;
2228 		}
2229 
2230 		/* build a fake data header in big endian */
2231 		buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
2232 		buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
2233 		buf->hdr.type = FC_TYPE_FCP;
2234 		buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
2235 					    FC_FCTL_FIRST_SEQUENCE |
2236 					    FC_FCTL_LAST_SEQUENCE |
2237 					    FC_FCTL_END_SEQUENCE |
2238 					    FC_FCTL_SEQUENCE_INITIATIVE);
2239 
2240 		/* build the fake header DMA object */
2241 		buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
2242 		buf->header.dma.virt = &buf->hdr;
2243 		buf->header.dma.alloc = buf;
2244 		buf->header.dma.size = sizeof(buf->hdr);
2245 		buf->header.dma.len = sizeof(buf->hdr);
2246 
2247 		buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
2248 	}
2249 	return OCS_HW_RTN_SUCCESS;
2250 }
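
/*
 * The synthetic FC header built above is what lets an auto xfer rdy payload
 * be consumed like an ordinary received sequence: the data-phase CQE handler
 * points seq->header at this dummy header (rqindex OCS_HW_RQ_INDEX_DUMMY_HDR)
 * and seq->payload at the pre-posted DMA buffer, so the unsolicited path is
 * unaware that the data arrived via auto xfer rdy, while
 * ocs_hw_rqpair_sequence_free() can still tell it apart from a real RQ
 * buffer and return it to this pool instead of the RQ.
 */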
2251 
2252 /**
2253  * @ingroup devInitShutdown
2254  * @brief Post Auto xfer rdy buffers to the XRIs posted with DNRX.
2255  *
2256  * @par Description
2257  * When new buffers are freed, check existing XRIs waiting for buffers.
2258  *
2259  * @param hw Hardware context allocated by the caller.
2260  */
2261 static void
2262 ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(ocs_hw_t *hw)
2263 {
2264 	ocs_hw_io_t *io;
2265 	int32_t rc;
2266 
2267 	ocs_lock(&hw->io_lock);
2268 
2269 	while (!ocs_list_empty(&hw->io_port_dnrx)) {
2270 		io = ocs_list_remove_head(&hw->io_port_dnrx);
2271 		rc = ocs_hw_reque_xri(hw, io);
2272 		if(rc) {
2273 			break;
2274 		}
2275 	}
2276 
2277 	ocs_unlock(&hw->io_lock);
2278 }
2279 
2280 /**
2281  * @brief Called when the POST_SGL_PAGE command completes.
2282  *
2283  * @par Description
2284  * Free the mailbox command buffer.
2285  *
2286  * @param hw Hardware context.
2287  * @param status Status field from the mbox completion.
2288  * @param mqe Mailbox response structure.
2289  * @param arg Pointer to a callback function that signals the caller that the command is done.
2290  *
2291  * @return Returns 0.
2292  */
2293 static int32_t
2294 ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
2295 {
2296 	if (status != 0) {
2297 		ocs_log_debug(hw->os, "Status 0x%x\n", status);
2298 	}
2299 
2300 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2301 	return 0;
2302 }
2303 
2304 /**
2305  * @brief Prepares an XRI to move to the chip.
2306  *
2307  * @par Description
2308  * Puts the data SGL into the SGL list for the IO object and possibly registers
2309  * an SGL list for the XRI. Since both the POST_XRI and POST_SGL_PAGES commands are
2310  * mailbox commands, we don't need to wait for completion before proceeding.
2311  *
2312  * @param hw Hardware context allocated by the caller.
2313  * @param io Pointer to the IO object.
2314  *
2315  * @return Returns OCS_HW_RTN_SUCCESS for success, or an error code value for failure.
2316  */
2317 ocs_hw_rtn_e
2318 ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io)
2319 {
2320 	/* We only need to preregister the SGL if it has not yet been done. */
2321 	if (!sli_get_sgl_preregister(&hw->sli)) {
2322 		uint8_t	*post_sgl;
2323 		ocs_dma_t *psgls = &io->def_sgl;
2324 		ocs_dma_t **sgls = &psgls;
2325 
2326 		/* non-local buffer required for mailbox queue */
2327 		post_sgl = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2328 		if (post_sgl == NULL) {
2329 			ocs_log_err(hw->os, "no buffer for command\n");
2330 			return OCS_HW_RTN_NO_MEMORY;
2331 		}
2332 		if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, post_sgl, SLI4_BMBX_SIZE,
2333 						io->indicator, 1, sgls, NULL, NULL)) {
2334 			if (ocs_hw_command(hw, post_sgl, OCS_CMD_NOWAIT,
2335 					    ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb, NULL)) {
2336 				ocs_free(hw->os, post_sgl, SLI4_BMBX_SIZE);
2337 				ocs_log_err(hw->os, "SGL post failed\n");
2338 				return OCS_HW_RTN_ERROR;
2339 			}
2340 		}
2341 	}
2342 
2343 	ocs_lock(&hw->io_lock);
2344 	if (ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 0) != 0) { /* DNRX set - no buffer */
2345 		ocs_unlock(&hw->io_lock);
2346 		return OCS_HW_RTN_ERROR;
2347 	}
2348 	ocs_unlock(&hw->io_lock);
2349 	return OCS_HW_RTN_SUCCESS;
2350 }
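
/*
 * A minimal sketch of the intended move_to_port/move_to_host pairing around
 * port ownership of an auto xfer rdy XRI. The function name
 * example_axr_xri_cycle is hypothetical; the real transitions are driven by
 * the IO and XRI management code outside this file, with the IO normally
 * completing between the two calls.
 *
 *	static ocs_hw_rtn_e
 *	example_axr_xri_cycle(ocs_hw_t *hw, ocs_hw_io_t *io)
 *	{
 *		ocs_hw_rtn_e rc;
 *
 *		rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io);
 *		if (rc != OCS_HW_RTN_SUCCESS)
 *			return rc;
 *
 *		ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
 *		return OCS_HW_RTN_SUCCESS;
 *	}
 */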
2351 
2352 /**
2353  * @brief Prepares an XRI to move back to the host.
2354  *
2355  * @par Description
2356  * Releases any attached buffer back to the pool.
2357  *
2358  * @param hw Hardware context allocated by the caller.
2359  * @param io Pointer to the IO object.
2360  */
2361 void
2362 ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io)
2363 {
2364 	if (io->axr_buf != NULL) {
2365 		ocs_lock(&hw->io_lock);
2366 			/* check  list and remove if there */
2367 			if (ocs_list_on_list(&io->dnrx_link)) {
2368 				ocs_list_remove(&hw->io_port_dnrx, io);
2369 				io->auto_xfer_rdy_dnrx = 0;
2370 
2371 				/* release the count for waiting for a buffer */
2372 				ocs_hw_io_free(hw, io);
2373 			}
2374 
2375 			ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
2376 			io->axr_buf = NULL;
2377 		ocs_unlock(&hw->io_lock);
2378 
2379 		ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
2380 	}
2381 	return;
2382 }
2383 
2384 /**
2385  * @brief Posts an auto xfer rdy buffer to an IO.
2386  *
2387  * @par Description
2388  * Puts the data SGL into the SGL list for the IO object.
2389  * @n
2390  * @b Note: io_lock must be held.
2391  *
2392  * @param hw Hardware context allocated by the caller.
2393  * @param io Pointer to the IO object.
2394  *
2395  * @return Returns the value of DNRX bit in the TRSP and ABORT WQEs.
2396  */
2397 uint8_t
2398 ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf)
2399 {
2400 	ocs_hw_auto_xfer_rdy_buffer_t *buf;
2401 	sli4_sge_t	*data;
2402 
2403 	if(!reuse_buf) {
2404 		buf = ocs_pool_get(hw->auto_xfer_rdy_buf_pool);
2405 		io->axr_buf = buf;
2406 	}
2407 
2408 	data = io->def_sgl.virt;
2409 	data[0].sge_type = SLI4_SGE_TYPE_SKIP;
2410 	data[0].last = 0;
2411 
2412 	/*
2413 	 * Note: if we are doing DIF assists, then the SGE[1] must contain the
2414 	 * DI_SEED SGE. The host is responsible for programming:
2415 	 *   SGE Type (Word 2, bits 30:27)
2416 	 *   Replacement App Tag (Word 2 bits 15:0)
2417 	 *   App Tag (Word 3 bits 15:0)
2418 	 *   New Ref Tag (Word 3 bit 23)
2419 	 *   Metadata Enable (Word 3 bit 20)
2420 	 *   Auto-Increment RefTag (Word 3 bit 19)
2421 	 *   Block Size (Word 3 bits 18:16)
2422 	 * The following fields are managed by the SLI Port:
2423 	 *    Ref Tag Compare (Word 0)
2424  *    Replacement Ref Tag (Word 1) - is not the LBA
2425 	 *    NA (Word 2 bit 25)
2426 	 *    Opcode RX (Word 3 bits 27:24)
2427 	 *    Checksum Enable (Word 3 bit 22)
2428 	 *    RefTag Enable (Word 3 bit 21)
2429 	 *
2430 	 * The first two SGLs are cleared by ocs_hw_io_init_sges(), so assume everything is cleared.
2431 	 */
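	/*
	 * Resulting SGL layout for the auto xfer rdy data phase:
	 *   SGE[0] SKIP, SGE[1] DISEED (when a protection type is configured)
	 *   or SKIP, SGE[2] DATA pointing at the pre-posted payload buffer
	 *   (marked last), SGE[3] SKIP.
	 */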
2432 	if (hw->config.auto_xfer_rdy_p_type) {
2433 		sli4_diseed_sge_t *diseed = (sli4_diseed_sge_t*)&data[1];
2434 
2435 		diseed->sge_type = SLI4_SGE_TYPE_DISEED;
2436 		diseed->repl_app_tag = hw->config.auto_xfer_rdy_app_tag_value;
2437 		diseed->app_tag_cmp = hw->config.auto_xfer_rdy_app_tag_value;
2438 		diseed->check_app_tag = hw->config.auto_xfer_rdy_app_tag_valid;
2439 		diseed->auto_incr_ref_tag = TRUE; /* Always the LBA */
2440 		diseed->dif_blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
2441 	} else {
2442 		data[1].sge_type = SLI4_SGE_TYPE_SKIP;
2443 		data[1].last = 0;
2444 	}
2445 
2446 	data[2].sge_type = SLI4_SGE_TYPE_DATA;
2447 	data[2].buffer_address_high = ocs_addr32_hi(io->axr_buf->payload.dma.phys);
2448 	data[2].buffer_address_low  = ocs_addr32_lo(io->axr_buf->payload.dma.phys);
2449 	data[2].buffer_length = io->axr_buf->payload.dma.size;
2450 	data[2].last = TRUE;
2451 	data[3].sge_type = SLI4_SGE_TYPE_SKIP;
2452 
2453 	return 0;
2454 }
2455 
2456 /**
2457  * @brief Return auto xfer ready buffers (while in RQ pair mode).
2458  *
2459  * @par Description
2460  * The header and payload buffers are returned to the auto xfer rdy pool.
2461  *
2462  * @param hw Hardware context.
2463  * @param seq Header/payload sequence buffers.
2464  *
2465  * @return Returns OCS_HW_RTN_SUCCESS for success, an error code value for failure.
2466  */
2467 
2468 static ocs_hw_rtn_e
2469 ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2470 {
2471 	ocs_hw_auto_xfer_rdy_buffer_t *buf = seq->header->dma.alloc;
2472 
2473 	buf->data_cqe = 0;
2474 	buf->cmd_cqe = 0;
2475 	buf->fcfi = 0;
2476 	buf->call_axr_cmd = 0;
2477 	buf->call_axr_data = 0;
2478 
2479 	/* build a fake data header in big endian */
2480 	buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
2481 	buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
2482 	buf->hdr.type = FC_TYPE_FCP;
2483 	buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
2484 					FC_FCTL_FIRST_SEQUENCE |
2485 					FC_FCTL_LAST_SEQUENCE |
2486 					FC_FCTL_END_SEQUENCE |
2487 					FC_FCTL_SEQUENCE_INITIATIVE);
2488 
2489 	/* build the fake header DMA object */
2490 	buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
2491 	buf->header.dma.virt = &buf->hdr;
2492 	buf->header.dma.alloc = buf;
2493 	buf->header.dma.size = sizeof(buf->hdr);
2494 	buf->header.dma.len = sizeof(buf->hdr);
2495 	buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
2496 
2497 	ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
2498 
2499 	return OCS_HW_RTN_SUCCESS;
2500 }
2501 
2502 /**
2503  * @ingroup devInitShutdown
2504  * @brief Free auto xfer rdy buffers.
2505  *
2506  * @par Description
2507  * Frees the auto xfer rdy buffers.
2508  *
2509  * @param hw Hardware context allocated by the caller.
2512  */
2513 static void
2514 ocs_hw_rqpair_auto_xfer_rdy_buffer_free(ocs_hw_t *hw)
2515 {
2516 	ocs_hw_auto_xfer_rdy_buffer_t *buf;
2517 	uint32_t i;
2518 
2519 	if (hw->auto_xfer_rdy_buf_pool != NULL) {
2520 		ocs_lock(&hw->io_lock);
2521 			for (i = 0; i < ocs_pool_get_count(hw->auto_xfer_rdy_buf_pool); i++) {
2522 				buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
2523 				if (buf != NULL) {
2524 					ocs_dma_free(hw->os, &buf->payload.dma);
2525 				}
2526 			}
2527 		ocs_unlock(&hw->io_lock);
2528 
2529 		ocs_pool_free(hw->auto_xfer_rdy_buf_pool);
2530 		hw->auto_xfer_rdy_buf_pool = NULL;
2531 	}
2532 }
2533 
2534 /**
2535  * @ingroup devInitShutdown
2536  * @brief Configure the rq_pair function from ocs_hw_init().
2537  *
2538  * @par Description
2539  * Allocates the auto xfer rdy buffers and posts the initial XRIs for this feature.
2540  *
2541  * @param hw Hardware context allocated by the caller.
2542  *
2543  * @return Returns 0 on success, or a non-zero value on failure.
2544  */
2545 ocs_hw_rtn_e
2546 ocs_hw_rqpair_init(ocs_hw_t *hw)
2547 {
2548 	ocs_hw_rtn_e	rc;
2549 	uint32_t xris_posted;
2550 
2551 	ocs_log_debug(hw->os, "RQ Pair mode\n");
2552 
2553 	/*
2554 	 * If we get this far, the auto XFR_RDY feature was enabled successfully, otherwise ocs_hw_init() would
2555 	 * return with an error. So allocate the buffers based on the initial XRI pool required to support this
2556 	 * feature.
2557 	 */
2558 	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
2559 	    hw->config.auto_xfer_rdy_size > 0) {
2560 		if (hw->auto_xfer_rdy_buf_pool == NULL) {
2561 			/*
2562 			 * Allocate one more buffer than XRIs so that when all the XRIs are in use, we still have
2563 			 * one to post back for the case where the response phase is started in the context of
2564 			 * the data completion.
2565 			 */
2566 			rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(hw, hw->config.auto_xfer_rdy_xri_cnt + 1);
2567 			if (rc != OCS_HW_RTN_SUCCESS) {
2568 				return rc;
2569 			}
2570 		} else {
2571 			ocs_pool_reset(hw->auto_xfer_rdy_buf_pool);
2572 		}
2573 
2574 		/* Post the auto XFR_RDY XRIs */
2575 		xris_posted = ocs_hw_xri_move_to_port_owned(hw, hw->config.auto_xfer_rdy_xri_cnt);
2576 		if (xris_posted != hw->config.auto_xfer_rdy_xri_cnt) {
2577 			ocs_log_err(hw->os, "post_xri failed, only posted %d XRIs\n", xris_posted);
2578 			return OCS_HW_RTN_ERROR;
2579 		}
2580 	}
2581 
2582 	return 0;
2583 }
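
/*
 * Sizing note for ocs_hw_rqpair_init(): with, for example,
 * hw->config.auto_xfer_rdy_xri_cnt == 32, 33 buffers are allocated while
 * only 32 XRIs are moved to port ownership, so a spare buffer is always
 * available when a response phase starts from inside a data completion.
 */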
2584 
2585 /**
2586  * @ingroup devInitShutdown
2587  * @brief Tear down the rq_pair function from ocs_hw_teardown().
2588  *
2589  * @par Description
2590  * Frees the auto xfer rdy buffers.
2591  *
2592  * @param hw Hardware context allocated by the caller.
2593  */
2594 void
2595 ocs_hw_rqpair_teardown(ocs_hw_t *hw)
2596 {
2597 	/* We need to free any auto xfer ready buffers */
2598 	ocs_hw_rqpair_auto_xfer_rdy_buffer_free(hw);
2599 }
2600