xref: /freebsd/sys/dev/qlnx/qlnxe/ecore_spq.c (revision e3514747256465c52c3b2aedc9795f52c0d3efe9)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 /*
28  * File : ecore_spq.c
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 
34 #include "bcm_osal.h"
35 #include "reg_addr.h"
36 #include "ecore_gtt_reg_addr.h"
37 #include "ecore_hsi_common.h"
38 #include "ecore.h"
39 #include "ecore_sp_api.h"
40 #include "ecore_spq.h"
41 #include "ecore_iro.h"
42 #include "ecore_init_fw_funcs.h"
43 #include "ecore_cxt.h"
44 #include "ecore_int.h"
45 #include "ecore_dev_api.h"
46 #include "ecore_mcp.h"
47 #ifdef CONFIG_ECORE_ROCE
48 #include "ecore_roce.h"
49 #endif
50 #include "ecore_hw.h"
51 #include "ecore_sriov.h"
52 #ifdef CONFIG_ECORE_ISCSI
53 #include "ecore_iscsi.h"
54 #include "ecore_ooo.h"
55 #endif
56 
57 /***************************************************************************
58  * Structures & Definitions
59  ***************************************************************************/
60 
61 #define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
62 
63 #define SPQ_BLOCK_DELAY_MAX_ITER	(10)
64 #define SPQ_BLOCK_DELAY_US		(10)
65 #define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
66 #define SPQ_BLOCK_SLEEP_MS		(5)
67 
68 #ifndef REMOVE_DBG
69 /***************************************************************************
70  * Debug [iSCSI] tool
71  ***************************************************************************/
72 static void ecore_iscsi_eq_dump(struct ecore_hwfn *p_hwfn,
73 				struct event_ring_entry *p_eqe)
74 {
75 	if (p_eqe->opcode >= MAX_ISCSI_EQE_OPCODE) {
76 		DP_NOTICE(p_hwfn, false, "Unknown iSCSI EQ opcode: %x\n",
77 			  p_eqe->opcode);
78 	}
79 
80 	switch (p_eqe->opcode) {
81 	case ISCSI_EVENT_TYPE_INIT_FUNC:
82 	case ISCSI_EVENT_TYPE_DESTROY_FUNC:
83 		/* NOPE */
84 		break;
85 	case ISCSI_EVENT_TYPE_OFFLOAD_CONN:
86 	case ISCSI_EVENT_TYPE_TERMINATE_CONN:
87 		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
88 			   "iSCSI EQE: Port %x, Op %x, echo %x, FWret %x, CID %x, ConnID %x, ERR %x\n",
89 			   p_hwfn->port_id, p_eqe->opcode,
90 			   OSAL_LE16_TO_CPU(p_eqe->echo),
91 			   p_eqe->fw_return_code,
92 			   OSAL_LE32_TO_CPU(p_eqe->data.iscsi_info.cid),
93 			   OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.conn_id),
94 			   p_eqe->data.iscsi_info.error_code);
95 		break;
96 	case ISCSI_EVENT_TYPE_UPDATE_CONN:
97 	case ISCSI_EVENT_TYPE_CLEAR_SQ:
98 	case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
99 	case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
100 	case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
101 	case ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD:
102 	case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
103 	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
104 	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
105 	case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
106 	case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
107 	case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
108 	case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
109 	default:
110 		/* NOPE */
111 		break;
112 	}
113 }
114 #endif
115 
116 /***************************************************************************
117  * Blocking Implementation (BLOCK/EBLOCK mode)
118  ***************************************************************************/
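/* Completion callback used for BLOCK/EBLOCK ramrods: it records the FW
 * return code in the caller-supplied ecore_spq_comp_done cookie and raises
 * its 'done' flag, with a write barrier so the thread polling in
 * __ecore_spq_block() observes the update.
 */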
119 static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
120 					void                  *cookie,
121 					union event_ring_data *data,
122 					u8                    fw_return_code)
123 {
124 	struct ecore_spq_comp_done *comp_done;
125 
126 	comp_done = (struct ecore_spq_comp_done *)cookie;
127 
128 	comp_done->done = 0x1;
129 	comp_done->fw_return_code = fw_return_code;
130 
131 	/* make update visible to waiting thread */
132 	OSAL_SMP_WMB(p_hwfn->p_dev);
133 }
134 
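/* Poll the completion cookie filled by ecore_spq_blocking_cb() until it is
 * marked done, either busy-waiting (SPQ_BLOCK_DELAY_MAX_ITER x
 * SPQ_BLOCK_DELAY_US) or sleeping (SPQ_BLOCK_SLEEP_MAX_ITER x
 * SPQ_BLOCK_SLEEP_MS) between iterations.  Returns ECORE_TIMEOUT if the
 * ramrod does not complete within the budget.
 */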
135 static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
136 					      struct ecore_spq_entry *p_ent,
137 					      u8 *p_fw_ret,
138 					      bool sleep_between_iter)
139 {
140 	struct ecore_spq_comp_done *comp_done;
141 	u32 iter_cnt;
142 
143 	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
144 	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
145 				      : SPQ_BLOCK_DELAY_MAX_ITER;
146 
147 	while (iter_cnt--) {
148 		OSAL_POLL_MODE_DPC(p_hwfn);
149 		OSAL_SMP_RMB(p_hwfn->p_dev);
150 		if (comp_done->done == 1) {
151 			if (p_fw_ret)
152 				*p_fw_ret = comp_done->fw_return_code;
153 			return ECORE_SUCCESS;
154 		}
155 
156 		if (sleep_between_iter) {
157 			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
158 		} else {
159 			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
160 		}
161 	}
162 
163 	return ECORE_TIMEOUT;
164 }
165 
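/* Wait for a blocking ramrod to complete: an optional quick busy-poll, then
 * a sleeping poll; if the ramrod is still stuck, request an MCP drain and
 * poll once more before reporting a ramrod failure.
 */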
166 static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
167 					    struct ecore_spq_entry *p_ent,
168 					    u8 *p_fw_ret, bool skip_quick_poll)
169 {
170 	struct ecore_spq_comp_done *comp_done;
171 	enum _ecore_status_t rc;
172 
173 	/* A relatively short polling period w/o sleeping, to allow the FW to
174 	 * complete the ramrod and thus possibly to avoid the following sleeps.
175 	 */
176 	if (!skip_quick_poll) {
177 		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
178 		if (rc == ECORE_SUCCESS)
179 			return ECORE_SUCCESS;
180 	}
181 
182 	/* Move to polling with a sleeping period between iterations */
183 	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
184 	if (rc == ECORE_SUCCESS)
185 		return ECORE_SUCCESS;
186 
187 	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
188 	rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
189 	if (rc != ECORE_SUCCESS) {
190 		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
191 		goto err;
192 	}
193 
194 	/* Retry after drain */
195 	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
196 	if (rc == ECORE_SUCCESS)
197 		return ECORE_SUCCESS;
198 
199 	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
200 	if (comp_done->done == 1) {
201 		if (p_fw_ret)
202 			*p_fw_ret = comp_done->fw_return_code;
203 		return ECORE_SUCCESS;
204 	}
205 err:
206 	DP_NOTICE(p_hwfn, true,
207 		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
208 		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
209 		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
210 		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
211 
212 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
213 
214 	return ECORE_BUSY;
215 }
216 
217 /***************************************************************************
218  * SPQ entries inner API
219  ***************************************************************************/
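/* Prepare an entry for posting: attach the blocking completion callback for
 * EBLOCK/BLOCK mode (CB mode keeps the callback supplied by the caller) and
 * log the ramrod header.
 */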
220 static enum _ecore_status_t ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn,
221 						 struct ecore_spq_entry *p_ent)
222 {
223 	p_ent->flags = 0;
224 
225 	switch (p_ent->comp_mode) {
226 	case ECORE_SPQ_MODE_EBLOCK:
227 	case ECORE_SPQ_MODE_BLOCK:
228 		p_ent->comp_cb.function = ecore_spq_blocking_cb;
229 		break;
230 	case ECORE_SPQ_MODE_CB:
231 		break;
232 	default:
233 		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
234 			  p_ent->comp_mode);
235 		return ECORE_INVAL;
236 	}
237 
238 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
239 		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
240 		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
241 		   p_ent->elem.hdr.protocol_id,
242 		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
243 		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
244 			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
245 			   "MODE_CB"));
246 
247 	return ECORE_SUCCESS;
248 }
249 
250 /***************************************************************************
251  * HSI access
252  ***************************************************************************/
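/* Program the SPQ's core connection context: zero it, enable the XCM
 * doorbell/consolidation flags, and fill in the QM physical queue and the
 * SPQ and CONSQ chain base addresses.
 */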
253 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
254 				    struct ecore_spq  *p_spq)
255 {
256 	struct ecore_cxt_info cxt_info;
257 	struct core_conn_context *p_cxt;
258 	enum _ecore_status_t rc;
259 	u16 physical_q;
260 
261 	cxt_info.iid = p_spq->cid;
262 
263 	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
264 
265 	if (rc < 0) {
266 		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
267 			  p_spq->cid);
268 		return;
269 	}
270 
271 	p_cxt = cxt_info.p_cxt;
272 
273 	/* @@@TBD we zero the context until we have ilt_reset implemented. */
274 	OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
275 
276 	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
277 		SET_FIELD(p_cxt->xstorm_ag_context.flags10,
278 			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
279 		SET_FIELD(p_cxt->xstorm_ag_context.flags1,
280 			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
281 		/*SET_FIELD(p_cxt->xstorm_ag_context.flags10,
282 			  E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);*/
283 		SET_FIELD(p_cxt->xstorm_ag_context.flags9,
284 			  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
285 	} else { /* E5 */
286 		ECORE_E5_MISSING_CODE;
287 	}
288 
289 	/* CDU validation - FIXME currently disabled */
290 
291 	/* QM physical queue */
292 	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
293 	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
294 
295 	p_cxt->xstorm_st_context.spq_base_lo =
296 		DMA_LO_LE(p_spq->chain.p_phys_addr);
297 	p_cxt->xstorm_st_context.spq_base_hi =
298 		DMA_HI_LE(p_spq->chain.p_phys_addr);
299 
300 	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
301 		       p_hwfn->p_consq->chain.p_phys_addr);
302 }
303 
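/* Produce the next element of the SPQ chain, copy the prepared ramrod into
 * it and ring the XCM doorbell with the new producer value.
 */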
304 static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn		*p_hwfn,
305 					      struct ecore_spq		*p_spq,
306 					      struct ecore_spq_entry	*p_ent)
307 {
308 	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
309 	u16 echo = ecore_chain_get_prod_idx(p_chain);
310 	struct slow_path_element *elem;
311 	struct core_db_data db;
312 
313 	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
314 	elem = ecore_chain_produce(p_chain);
315 	if (!elem) {
316 		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
317 		return ECORE_INVAL;
318 	}
319 
320 	*elem = p_ent->elem; /* struct assignment */
321 
322 	/* send a doorbell on the slow hwfn session */
323 	OSAL_MEMSET(&db, 0, sizeof(db));
324 	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
325 	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
326 	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL, DQ_XCM_CORE_SPQ_PROD_CMD);
327 	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
328 	db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
329 
330 	/* make sure the SPQE is updated before the doorbell */
331 	OSAL_WMB(p_hwfn->p_dev);
332 
333 	DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
334 
335 	/* make sure the doorbell is rung */
336 	OSAL_WMB(p_hwfn->p_dev);
337 
338 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
339 		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
340 		   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
341 		   db.agg_flags, ecore_chain_get_prod_idx(p_chain));
342 
343 	return ECORE_SUCCESS;
344 }
345 
346 /***************************************************************************
347  * Asynchronous events
348  ***************************************************************************/
349 
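/* Dispatch an asynchronous EQ entry to the protocol-specific handler
 * (RoCE, iWARP, SR-IOV/common or iSCSI, depending on what is compiled in).
 */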
350 static enum _ecore_status_t
351 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
352 			     struct event_ring_entry *p_eqe)
353 {
354 	switch (p_eqe->protocol_id) {
355 #ifdef CONFIG_ECORE_ROCE
356 	case PROTOCOLID_ROCE:
357 	{
358 		ecore_roce_async_event(p_hwfn,
359 				       p_eqe->opcode,
360 				       &p_eqe->data.rdma_data);
361 		return ECORE_SUCCESS;
362 	}
363 #ifdef CONFIG_ECORE_IWARP
364 	case PROTOCOLID_IWARP:
365 	{
366 		ecore_iwarp_async_event(p_hwfn,
367 					p_eqe->opcode,
368 					&p_eqe->data.rdma_data.async_handle,
369 					p_eqe->fw_return_code);
370 		return ECORE_SUCCESS;
371 	}
372 #endif
373 #endif
374 	case PROTOCOLID_COMMON:
375 		return ecore_sriov_eqe_event(p_hwfn,
376 					     p_eqe->opcode,
377 					     p_eqe->echo,
378 					     &p_eqe->data);
379 #ifdef CONFIG_ECORE_ISCSI
380 	case PROTOCOLID_ISCSI:
381 		if (p_hwfn->p_iscsi_info->event_cb != OSAL_NULL) {
382 			struct ecore_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
383 
384 			return p_iscsi->event_cb(p_iscsi->event_context,
385 						 p_eqe->opcode, &p_eqe->data);
386 		} else {
387 			DP_NOTICE(p_hwfn,
388 				 false, "iSCSI async completion is not set\n");
389 			return ECORE_NOTIMPL;
390 		}
391 #endif
392 	default:
393 		DP_NOTICE(p_hwfn,
394 			 true, "Unknown Async completion for protocol: %d\n",
395 			 p_eqe->protocol_id);
396 		return ECORE_INVAL;
397 	}
398 }
399 
400 /***************************************************************************
401  * EQ API
402  ***************************************************************************/
403 void ecore_eq_prod_update(struct ecore_hwfn	*p_hwfn,
404 			  u16			prod)
405 {
406 	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
407 		USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
408 
409 	REG_WR16(p_hwfn, addr, prod);
410 
411 	/* keep prod updates ordered */
412 	OSAL_MMIOWB(p_hwfn->p_dev);
413 }
414 
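/* Slowpath status-block callback: consume event ring entries from the
 * driver's consumer up to a snapshot of the FW consumer, dispatching
 * asynchronous events and completing ramrods, and then report the new
 * chain index back to the FW.
 */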
415 enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn	*p_hwfn,
416 					 void                   *cookie)
417 
418 {
419 	struct ecore_eq    *p_eq    = cookie;
420 	struct ecore_chain *p_chain = &p_eq->chain;
421 	enum _ecore_status_t rc = ECORE_SUCCESS;
422 
423 	/* take a snapshot of the FW consumer */
424 	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
425 
426 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
427 
428 	/* Need to guarantee that the fw_cons index we use points to a usable
429 	 * element (to comply with our chain), so our macros would comply
430 	 */
431 	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
432 	    ecore_chain_get_usable_per_page(p_chain)) {
433 		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
434 	}
435 
436 	/* Complete current segment of eq entries */
437 	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
438 		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
439 		if (!p_eqe) {
440 			rc = ECORE_INVAL;
441 			break;
442 		}
443 
444 		DP_VERBOSE(p_hwfn,
445 			   ECORE_MSG_SPQ,
446 			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
447 			   p_eqe->opcode,	     /* Event Opcode */
448 			   p_eqe->protocol_id,	     /* Event Protocol ID */
449 			   p_eqe->reserved0,	     /* Reserved */
450 			   OSAL_LE16_TO_CPU(p_eqe->echo),/* Echo value from
451 							ramrod data on the host
452 						      */
453 			   p_eqe->fw_return_code,    /* FW return code for SP
454 							ramrods
455 						      */
456 			   p_eqe->flags);
457 #ifndef REMOVE_DBG
458 		if (p_eqe->protocol_id == PROTOCOLID_ISCSI)
459 			ecore_iscsi_eq_dump(p_hwfn, p_eqe);
460 #endif
461 
462 		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
463 			if (ecore_async_event_completion(p_hwfn, p_eqe))
464 				rc = ECORE_INVAL;
465 		} else if (ecore_spq_completion(p_hwfn,
466 						p_eqe->echo,
467 						p_eqe->fw_return_code,
468 						&p_eqe->data)) {
469 			rc = ECORE_INVAL;
470 		}
471 
472 		ecore_chain_recycle_consumed(p_chain);
473 	}
474 
475 	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
476 
477 	return rc;
478 }
479 
480 enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
481 {
482 	struct ecore_eq	*p_eq;
483 
484 	/* Allocate EQ struct */
485 	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
486 	if (!p_eq) {
487 		DP_NOTICE(p_hwfn, true,
488 			  "Failed to allocate `struct ecore_eq'\n");
489 		return ECORE_NOMEM;
490 	}
491 
492 	/* Allocate and initialize EQ chain*/
493 	if (ecore_chain_alloc(p_hwfn->p_dev,
494 			      ECORE_CHAIN_USE_TO_PRODUCE,
495 			      ECORE_CHAIN_MODE_PBL,
496 			      ECORE_CHAIN_CNT_TYPE_U16,
497 			      num_elem,
498 			      sizeof(union event_ring_element),
499 			      &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
500 		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
501 		goto eq_allocate_fail;
502 	}
503 
504 	/* register EQ completion on the SP SB */
505 	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
506 			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
507 
508 	p_hwfn->p_eq = p_eq;
509 	return ECORE_SUCCESS;
510 
511 eq_allocate_fail:
512 	OSAL_FREE(p_hwfn->p_dev, p_eq);
513 	return ECORE_NOMEM;
514 }
515 
516 void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
517 {
518 	ecore_chain_reset(&p_hwfn->p_eq->chain);
519 }
520 
521 void ecore_eq_free(struct ecore_hwfn *p_hwfn)
522 {
523 	if (!p_hwfn->p_eq)
524 		return;
525 
526 	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
527 
528 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
529 	p_hwfn->p_eq = OSAL_NULL;
530 }
531 
532 /***************************************************************************
533 * CQE API - manipulate EQ functionality
534 ***************************************************************************/
535 static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
536 						 struct eth_slow_path_rx_cqe *cqe,
537 						 enum protocol_type protocol)
538 {
539 	if (IS_VF(p_hwfn->p_dev))
540 		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
541 
542 	/* @@@tmp - it's possible we'll eventually want to handle some
543 	 * actual commands that can arrive here, but for now this is only
544 	 * used to complete the ramrod using the echo value on the cqe
545 	 */
546 	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
547 }
548 
549 enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
550 					      struct eth_slow_path_rx_cqe *cqe)
551 {
552 	enum _ecore_status_t rc;
553 
554 	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
555 	if (rc) {
556 		DP_NOTICE(p_hwfn, true,
557 			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
558 			  cqe->ramrod_cmd_id);
559 	}
560 
561 	return rc;
562 }
563 
564 /***************************************************************************
565  * Slow hwfn Queue (spq)
566  ***************************************************************************/
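/* (Re)initialize an already-allocated SPQ: reset the queue lists, rebuild
 * the free pool from the pre-allocated entry array, clear the statistics
 * and completion bitmap, acquire the SPQ CID and program the HW context.
 */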
567 void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
568 {
569 	struct ecore_spq *p_spq = p_hwfn->p_spq;
570 	struct ecore_spq_entry *p_virt = OSAL_NULL;
571 	dma_addr_t p_phys = 0;
572 	u32 i, capacity;
573 
574 	OSAL_LIST_INIT(&p_spq->pending);
575 	OSAL_LIST_INIT(&p_spq->completion_pending);
576 	OSAL_LIST_INIT(&p_spq->free_pool);
577 	OSAL_LIST_INIT(&p_spq->unlimited_pending);
578 	OSAL_SPIN_LOCK_INIT(&p_spq->lock);
579 
580 	/* SPQ empty pool */
581 	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
582 	p_virt = p_spq->p_virt;
583 
584 	capacity = ecore_chain_get_capacity(&p_spq->chain);
585 	for (i = 0; i < capacity; i++) {
586 		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
587 
588 		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
589 
590 		p_virt++;
591 		p_phys += sizeof(struct ecore_spq_entry);
592 	}
593 
594 	/* Statistics */
595 	p_spq->normal_count		= 0;
596 	p_spq->comp_count		= 0;
597 	p_spq->comp_sent_count		= 0;
598 	p_spq->unlimited_pending_count	= 0;
599 
600 	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
601 		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
602 	p_spq->comp_bitmap_idx = 0;
603 
604 	/* SPQ cid, cannot fail */
605 	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
606 	ecore_spq_hw_initialize(p_hwfn, p_spq);
607 
608 	/* reset the chain itself */
609 	ecore_chain_reset(&p_spq->chain);
610 }
611 
612 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
613 {
614 	struct ecore_spq_entry *p_virt = OSAL_NULL;
615 	struct ecore_spq *p_spq = OSAL_NULL;
616 	dma_addr_t p_phys = 0;
617 	u32 capacity;
618 
619 	/* SPQ struct */
620 	p_spq =
621 	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
622 	if (!p_spq) {
623 		DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_spq'\n");
624 		return ECORE_NOMEM;
625 	}
626 
627 	/* SPQ ring  */
628 	if (ecore_chain_alloc(p_hwfn->p_dev,
629 			      ECORE_CHAIN_USE_TO_PRODUCE,
630 			      ECORE_CHAIN_MODE_SINGLE,
631 			      ECORE_CHAIN_CNT_TYPE_U16,
632 			      0, /* N/A when the mode is SINGLE */
633 			      sizeof(struct slow_path_element),
634 			      &p_spq->chain, OSAL_NULL)) {
635 		DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
636 		goto spq_allocate_fail;
637 	}
638 
639 	/* allocate and fill the SPQ elements (incl. ramrod data list) */
640 	capacity = ecore_chain_get_capacity(&p_spq->chain);
641 	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
642 					 capacity *
643 					 sizeof(struct ecore_spq_entry));
644 	if (!p_virt) {
645 		goto spq_allocate_fail;
646 	}
647 
648 	p_spq->p_virt = p_virt;
649 	p_spq->p_phys = p_phys;
650 
651 	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
652 
653 	p_hwfn->p_spq = p_spq;
654 	return ECORE_SUCCESS;
655 
656 spq_allocate_fail:
657 	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
658 	OSAL_FREE(p_hwfn->p_dev, p_spq);
659 	return ECORE_NOMEM;
660 }
661 
662 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
663 {
664 	struct ecore_spq *p_spq = p_hwfn->p_spq;
665 	u32 capacity;
666 
667 	if (!p_spq)
668 		return;
669 
670 	if (p_spq->p_virt) {
671 		capacity = ecore_chain_get_capacity(&p_spq->chain);
672 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
673 				       p_spq->p_virt,
674 				       p_spq->p_phys,
675 				       capacity *
676 				       sizeof(struct ecore_spq_entry));
677 	}
678 
679 	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
680 	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
681 
682 	OSAL_FREE(p_hwfn->p_dev, p_spq);
683 	p_hwfn->p_spq = OSAL_NULL;
684 }
685 
686 enum _ecore_status_t ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
687 					 struct ecore_spq_entry **pp_ent)
688 {
689 	struct ecore_spq *p_spq = p_hwfn->p_spq;
690 	struct ecore_spq_entry *p_ent = OSAL_NULL;
691 	enum _ecore_status_t rc = ECORE_SUCCESS;
692 
693 	OSAL_SPIN_LOCK(&p_spq->lock);
694 
695 	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
696 
697 		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
698 		if (!p_ent) {
699 			DP_NOTICE(p_hwfn, true, "Failed to allocate an SPQ entry for a pending ramrod\n");
700 			rc = ECORE_NOMEM;
701 			goto out_unlock;
702 		}
703 		p_ent->queue = &p_spq->unlimited_pending;
704 	} else {
705 		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
706 					      struct ecore_spq_entry,
707 					      list);
708 		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
709 		p_ent->queue = &p_spq->pending;
710 	}
711 
712 	*pp_ent = p_ent;
713 
714 out_unlock:
715 	OSAL_SPIN_UNLOCK(&p_spq->lock);
716 	return rc;
717 }
718 
719 /* Locked variant; Should be called while the SPQ lock is taken */
720 static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
721 			      struct ecore_spq_entry *p_ent)
722 {
723 	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
724 }
725 
726 void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
727 			    struct ecore_spq_entry *p_ent)
728 {
729 	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
730 	__ecore_spq_return_entry(p_hwfn, p_ent);
731 	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
732 }
733 
734 /**
735  * @brief ecore_spq_add_entry - adds a new entry to the pending
736  *        list. Should be called while the SPQ lock is held.
737  *
738  * Adds an entry to the pending list if there is room (an empty
739  * element is available in the free_pool), or else places the
740  * entry in the unlimited_pending pool.
741  *
742  * @param p_hwfn
743  * @param p_ent
744  * @param priority
745  *
746  * @return enum _ecore_status_t
747  */
748 static enum _ecore_status_t ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
749 						struct ecore_spq_entry *p_ent,
750 						enum spq_priority priority)
751 {
752 	struct ecore_spq	*p_spq	= p_hwfn->p_spq;
753 
754 	if (p_ent->queue == &p_spq->unlimited_pending) {
755 		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
756 
757 			OSAL_LIST_PUSH_TAIL(&p_ent->list,
758 					    &p_spq->unlimited_pending);
759 			p_spq->unlimited_pending_count++;
760 
761 			return ECORE_SUCCESS;
762 
763 		} else {
764 			struct ecore_spq_entry *p_en2;
765 
766 			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
767 						     struct ecore_spq_entry,
768 						     list);
769 			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
770 
771 			/* Copy the ring element physical pointer to the new
772 			 * entry, since we are about to overwrite the entire ring
773 			 * entry and don't want to lose the pointer.
774 			 */
775 			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
776 
777 			*p_en2 = *p_ent;
778 
779 			/* EBLOCK is responsible for freeing the allocated p_ent */
780 			if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
781 				OSAL_FREE(p_hwfn->p_dev, p_ent);
782 
783 			p_ent = p_en2;
784 		}
785 	}
786 
787 	/* entry is to be placed in 'pending' queue */
788 	switch (priority) {
789 	case ECORE_SPQ_PRIORITY_NORMAL:
790 		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
791 		p_spq->normal_count++;
792 		break;
793 	case ECORE_SPQ_PRIORITY_HIGH:
794 		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
795 		p_spq->high_count++;
796 		break;
797 	default:
798 		return ECORE_INVAL;
799 	}
800 
801 	return ECORE_SUCCESS;
802 }
803 
804 /***************************************************************************
805  * Accessor
806  ***************************************************************************/
807 
808 u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
809 {
810 	if (!p_hwfn->p_spq) {
811 		return 0xffffffff;	/* illegal */
812 	}
813 	return p_hwfn->p_spq->cid;
814 }
815 
816 /***************************************************************************
817  * Posting new Ramrods
818  ***************************************************************************/
819 
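/* Post entries from 'head' to the HW while the SPQ chain has more than
 * 'keep_reserve' free elements; each posted entry is moved to the
 * completion_pending list until its EQ completion arrives.
 */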
820 static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
821 						osal_list_t	  *head,
822 						u32		  keep_reserve)
823 {
824 	struct ecore_spq	*p_spq = p_hwfn->p_spq;
825 	enum _ecore_status_t	rc;
826 
827 	/* TODO - implementation might be wasteful; will always keep room
828 	 * for an additional high priority ramrod (even if one is already
829 	 * pending in the FW)
830 	 */
831 	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
832 	       !OSAL_LIST_IS_EMPTY(head)) {
833 		struct ecore_spq_entry  *p_ent =
834 		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
835 		if (p_ent != OSAL_NULL) {
836 #if defined(_NTDDK_)
837 #pragma warning(suppress : 6011 28182)
838 #endif
839 			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
840 			OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
841 			p_spq->comp_sent_count++;
842 
843 			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
844 			if (rc) {
845 				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
846 						       &p_spq->completion_pending);
847 				__ecore_spq_return_entry(p_hwfn, p_ent);
848 				return rc;
849 			}
850 		}
851 	}
852 
853 	return ECORE_SUCCESS;
854 }
855 
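/* Move as many unlimited_pending entries as the free pool allows onto the
 * pending list, then post the pending list while preserving the
 * high-priority reserve.
 */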
856 static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
857 {
858 	struct ecore_spq *p_spq = p_hwfn->p_spq;
859 	struct ecore_spq_entry *p_ent = OSAL_NULL;
860 
861 	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool))
862 	{
863 		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
864 			break;
865 
866 		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
867 					      struct ecore_spq_entry,
868 					      list);
869 		if (!p_ent)
870 			return ECORE_INVAL;
871 
872 #if defined(_NTDDK_)
873 #pragma warning(suppress : 6011)
874 #endif
875 		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
876 
877 		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
878 	}
879 
880 	return ecore_spq_post_list(p_hwfn, &p_spq->pending,
881 				   SPQ_HIGH_PRI_RESERVE_DEFAULT);
882 }
883 
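/* Post a prepared ramrod.  The entry is queued under the SPQ lock; for
 * ECORE_SPQ_MODE_EBLOCK this call also blocks until the EQ completion
 * arrives and then releases the entry.
 *
 * Illustrative caller sketch (an assumption for illustration only -- real
 * callers build the ramrod data through the slowpath commands layer and may
 * differ):
 *
 *	struct ecore_spq_entry *p_ent = OSAL_NULL;
 *	u8 fw_ret;
 *
 *	if (ecore_spq_get_entry(p_hwfn, &p_ent) != ECORE_SUCCESS)
 *		return ECORE_NOMEM;
 *	(fill p_ent->elem.hdr, the ramrod data, comp_mode and priority)
 *	rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 */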
884 enum _ecore_status_t ecore_spq_post(struct ecore_hwfn		*p_hwfn,
885 				    struct ecore_spq_entry	*p_ent,
886 				    u8                          *fw_return_code)
887 {
888 	enum _ecore_status_t	rc = ECORE_SUCCESS;
889 	struct ecore_spq	*p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
890 	bool			b_ret_ent = true;
891 
892 	if (!p_hwfn)
893 		return ECORE_INVAL;
894 
895 	if (!p_ent) {
896 		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
897 		return ECORE_INVAL;
898 	}
899 
900 	if (p_hwfn->p_dev->recov_in_prog) {
901 		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
902 			   "Recovery is in progress -> skip spq post [cmd %02x protocol %02x]\n",
903 			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
904 		/* Return success to let the flows complete successfully
905 		 * w/o any error handling.
906 		 */
907 		return ECORE_SUCCESS;
908 	}
909 
910 	OSAL_SPIN_LOCK(&p_spq->lock);
911 
912 	/* Complete the entry */
913 	rc = ecore_spq_fill_entry(p_hwfn, p_ent);
914 
915 	/* Check return value after LOCK is taken for cleaner error flow */
916 	if (rc)
917 		goto spq_post_fail;
918 
919 	/* Add the request to the pending queue */
920 	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
921 	if (rc)
922 		goto spq_post_fail;
923 
924 	rc = ecore_spq_pend_post(p_hwfn);
925 	if (rc) {
926 		/* Since it's possible that the pending post failed for a different
927 		 * entry [although unlikely], the failed entry was already
928 		 * dealt with; No need to return it here.
929 		 */
930 		b_ret_ent = false;
931 		goto spq_post_fail;
932 	}
933 
934 	OSAL_SPIN_UNLOCK(&p_spq->lock);
935 
936 	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
937 		/* For entries in ECORE BLOCK mode, the completion code cannot
938 		 * perform the necessary cleanup - if it did, we couldn't
939 		 * access p_ent here to see whether it's successful or not.
940 		 * Thus, after gaining the answer - perform the cleanup here.
941 		 */
942 		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
943 				     p_ent->queue == &p_spq->unlimited_pending);
944 
945 		if (p_ent->queue == &p_spq->unlimited_pending) {
946 			/* This is an allocated p_ent which does not need to
947 			 * return to pool.
948 			 */
949 			OSAL_FREE(p_hwfn->p_dev, p_ent);
950 
951 			/* TBD: handle error flow and remove p_ent from
952 			 * completion pending
953 			 */
954 			return rc;
955 		}
956 
957 		if (rc)
958 			goto spq_post_fail2;
959 
960 		/* return to pool */
961 		ecore_spq_return_entry(p_hwfn, p_ent);
962 	}
963 	return rc;
964 
965 spq_post_fail2:
966 	OSAL_SPIN_LOCK(&p_spq->lock);
967 	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
968 	ecore_chain_return_produced(&p_spq->chain);
969 
970 spq_post_fail:
971 	/* return to the free pool */
972 	if (b_ret_ent)
973 		__ecore_spq_return_entry(p_hwfn, p_ent);
974 	OSAL_SPIN_UNLOCK(&p_spq->lock);
975 
976 	return rc;
977 }
978 
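/* Complete a ramrod from its EQ entry: find the matching echo on the
 * completion_pending list, return chain elements for in-order completions
 * via the completion bitmap, invoke the entry's callback and then try to
 * post any ramrods that are still pending.
 */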
979 enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
980 					  __le16 echo,
981 					  u8 fw_return_code,
982 					  union event_ring_data	*p_data)
983 {
984 	struct ecore_spq	*p_spq;
985 	struct ecore_spq_entry	*p_ent = OSAL_NULL;
986 	struct ecore_spq_entry	*tmp;
987 	struct ecore_spq_entry	*found = OSAL_NULL;
988 	enum _ecore_status_t	rc;
989 
990 	if (!p_hwfn) {
991 		return ECORE_INVAL;
992 	}
993 
994 	p_spq = p_hwfn->p_spq;
995 	if (!p_spq) {
996 		return ECORE_INVAL;
997 	}
998 
999 	OSAL_SPIN_LOCK(&p_spq->lock);
1000 	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
1001 				      tmp,
1002 				      &p_spq->completion_pending,
1003 				      list,
1004 				      struct ecore_spq_entry) {
1005 
1006 		if (p_ent->elem.hdr.echo == echo) {
1007 			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
1008 					       &p_spq->completion_pending);
1009 
1010 			/* Avoid overwriting SPQ entries when getting
1011 			 * out-of-order completions, by marking the completions
1012 			 * in a bitmap and increasing the chain consumer only
1013 			 * for the first successive completed entries.
1014 			 */
1015 			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
1016 			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
1017 						      p_spq->comp_bitmap_idx)) {
1018 				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
1019 							p_spq->comp_bitmap_idx);
1020 				p_spq->comp_bitmap_idx++;
1021 				ecore_chain_return_produced(&p_spq->chain);
1022 			}
1023 
1024 			p_spq->comp_count++;
1025 			found = p_ent;
1026 			break;
1027 		}
1028 
1029 		/* This is debug and should be relatively uncommon - depends
1030 		 * on scenarios which have multiple per-PF sent ramrods.
1031 		 */
1032 		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
1033 			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
1034 			   OSAL_LE16_TO_CPU(echo),
1035 			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
1036 	}
1037 
1038 	/* Release lock before callback, as callback may post
1039 	 * an additional ramrod.
1040 	 */
1041 	OSAL_SPIN_UNLOCK(&p_spq->lock);
1042 
1043 	if (!found) {
1044 		DP_NOTICE(p_hwfn, true,
1045 			  "Failed to find an entry this EQE [echo %04x] completes\n",
1046 			  OSAL_LE16_TO_CPU(echo));
1047 		return ECORE_EXISTS;
1048 	}
1049 
1050 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
1051 		   "Complete EQE [echo %04x]: func %p cookie %p\n",
1052 		   OSAL_LE16_TO_CPU(echo),
1053 		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
1054 	if (found->comp_cb.function)
1055 		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
1056 					fw_return_code);
1057 	else
1058 		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Got a completion without a callback function\n");
1059 
1060 	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
1061 	    (found->queue == &p_spq->unlimited_pending))
1062 		/* EBLOCK  is responsible for returning its own entry into the
1063 		 * free list, unless it originally added the entry into the
1064 		 * unlimited pending list.
1065 		 */
1066 		ecore_spq_return_entry(p_hwfn, found);
1067 
1068 	/* Attempt to post pending requests */
1069 	OSAL_SPIN_LOCK(&p_spq->lock);
1070 	rc = ecore_spq_pend_post(p_hwfn);
1071 	OSAL_SPIN_UNLOCK(&p_spq->lock);
1072 
1073 	return rc;
1074 }
1075 
1076 enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
1077 {
1078 	struct ecore_consq *p_consq;
1079 
1080 	/* Allocate ConsQ struct */
1081 	p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
1082 	if (!p_consq) {
1083 		DP_NOTICE(p_hwfn, true,
1084 			  "Failed to allocate `struct ecore_consq'\n");
1085 		return ECORE_NOMEM;
1086 	}
1087 
1088 	/* Allocate and initialize ConsQ chain */
1089 	if (ecore_chain_alloc(p_hwfn->p_dev,
1090 			      ECORE_CHAIN_USE_TO_PRODUCE,
1091 			      ECORE_CHAIN_MODE_PBL,
1092 			      ECORE_CHAIN_CNT_TYPE_U16,
1093 			      ECORE_CHAIN_PAGE_SIZE/0x80,
1094 			      0x80,
1095 			      &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
1096 		DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain\n");
1097 		goto consq_allocate_fail;
1098 	}
1099 
1100 	p_hwfn->p_consq = p_consq;
1101 	return ECORE_SUCCESS;
1102 
1103 consq_allocate_fail:
1104 	OSAL_FREE(p_hwfn->p_dev, p_consq);
1105 	return ECORE_NOMEM;
1106 }
1107 
1108 void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
1109 {
1110 	ecore_chain_reset(&p_hwfn->p_consq->chain);
1111 }
1112 
1113 void ecore_consq_free(struct ecore_hwfn *p_hwfn)
1114 {
1115 	if (!p_hwfn->p_consq)
1116 		return;
1117 
1118 	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
1119 
1120 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
1121 	p_hwfn->p_consq = OSAL_NULL;
1122 }
1123 
1124