1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, v.1, (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * The contents of this file are subject to the terms of the Common Development
25 * and Distribution License, v.1, (the "License").
26
27 * You may not use this file except in compliance with the License.
28
29 * You can obtain a copy of the License at available
30 * at http://opensource.org/licenses/CDDL-1.0
31
32 * See the License for the specific language governing permissions and
33 * limitations under the License.
34 */
35
36 #include "bcm_osal.h"
37 #include "reg_addr.h"
38 #include "ecore_gtt_reg_addr.h"
39 #include "ecore_hsi_common.h"
40 #include "ecore.h"
41 #include "ecore_sp_api.h"
42 #include "ecore_spq.h"
43 #include "ecore_iro.h"
44 #include "ecore_init_fw_funcs.h"
45 #include "ecore_cxt.h"
46 #include "ecore_int.h"
47 #include "ecore_dev_api.h"
48 #include "ecore_mcp.h"
49 #ifdef CONFIG_ECORE_ROCE
50 #include "ecore_roce.h"
51 #endif
52 #include "ecore_hw.h"
53 #include "ecore_sriov.h"
54 #ifdef CONFIG_ECORE_ISCSI
55 #include "ecore_iscsi.h"
56 #include "ecore_ooo.h"
57 #endif
58
59 /***************************************************************************
60 * Structures & Definitions
61 ***************************************************************************/
62
63 #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
64
65 #define SPQ_BLOCK_DELAY_MAX_ITER (10)
66 #define SPQ_BLOCK_DELAY_US (10)
67 #define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
68 #define SPQ_BLOCK_SLEEP_MS (5)
69
70 #ifndef REMOVE_DBG
71 /***************************************************************************
72 * Debug [iSCSI] tool
73 ***************************************************************************/
/* Dump an iSCSI event-ring entry to the debug log.  Out-of-range opcodes
 * are flagged with a notice; only connection offload/terminate events
 * carry per-connection data worth printing, every other known opcode is
 * deliberately ignored.
 */
static void ecore_iscsi_eq_dump(struct ecore_hwfn *p_hwfn,
				struct event_ring_entry *p_eqe)
{
	/* Flag unknown opcodes, then still fall into the switch (which
	 * will land in the default no-op case for them).
	 */
	if (p_eqe->opcode >= MAX_ISCSI_EQE_OPCODE) {
		DP_NOTICE(p_hwfn, false, "Unknown iSCSI EQ: %x\n",
			  p_eqe->opcode);
	}

	switch (p_eqe->opcode) {
	case ISCSI_EVENT_TYPE_INIT_FUNC:
	case ISCSI_EVENT_TYPE_DESTROY_FUNC:
		/* NOPE */
		break;
	case ISCSI_EVENT_TYPE_OFFLOAD_CONN:
	case ISCSI_EVENT_TYPE_TERMINATE_CONN:
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
			   "iSCSI EQE: Port %x, Op %x, echo %x, FWret %x, CID %x, ConnID %x, ERR %x\n",
			   p_hwfn->port_id, p_eqe->opcode,
			   OSAL_LE16_TO_CPU(p_eqe->echo),
			   p_eqe->fw_return_code,
			   OSAL_LE32_TO_CPU(p_eqe->data.iscsi_info.cid),
			   OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.conn_id),
			   p_eqe->data.iscsi_info.error_code);
		break;
	case ISCSI_EVENT_TYPE_UPDATE_CONN:
	case ISCSI_EVENT_TYPE_CLEAR_SQ:
	case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
	case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
	case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
	case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
	case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
	case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
	case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
	default:
		/* NOPE */
		break;
	}
}
116 #endif
117
118 /***************************************************************************
119 * Blocking Imp. (BLOCK/EBLOCK mode)
120 ***************************************************************************/
/* Completion callback installed for BLOCK/EBLOCK ramrods.  Records the
 * FW return code and raises the 'done' flag that __ecore_spq_block()
 * polls on; the write barrier publishes both stores to the waiter.
 */
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
				  void *cookie,
				  union event_ring_data *data,
				  u8 fw_return_code)
{
	struct ecore_spq_comp_done *p_done = cookie;

	p_done->fw_return_code = fw_return_code;
	p_done->done = 0x1;

	/* make update visible to waiting thread */
	OSAL_SMP_WMB(p_hwfn->p_dev);
}
136
/* Poll the completion flag of a blocking ramrod for a bounded number of
 * iterations, either busy-waiting (short delays) or sleeping between
 * polls.  Returns ECORE_SUCCESS once the callback has fired, or
 * ECORE_TIMEOUT when the iteration budget is exhausted.
 */
static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq_entry *p_ent,
					      u8 *p_fw_ret,
					      bool sleep_between_iter)
{
	struct ecore_spq_comp_done *p_done;
	u32 attempts, i;

	p_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	attempts = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	for (i = 0; i < attempts; i++) {
		OSAL_POLL_MODE_DPC(p_hwfn);
		/* pairs with the write barrier in ecore_spq_blocking_cb */
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (p_done->done == 1) {
			if (p_fw_ret != OSAL_NULL)
				*p_fw_ret = p_done->fw_return_code;
			return ECORE_SUCCESS;
		}

		if (sleep_between_iter)
			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
		else
			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
	}

	return ECORE_TIMEOUT;
}
166
/* Wait for a blocking-mode ramrod to complete.  Optionally starts with a
 * short busy-wait poll, then falls back to sleeping polls; if the ramrod
 * is still outstanding, the MCP is asked to drain the SPQ and polling is
 * retried once more before declaring the ramrod stuck and notifying the
 * HW-error path.  Returns ECORE_SUCCESS, or ECORE_BUSY if stuck.
 */
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
					    struct ecore_spq_entry *p_ent,
					    u8 *p_fw_ret, bool skip_quick_poll)
{
	struct ecore_spq_comp_done *comp_done;
	enum _ecore_status_t rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (rc == ECORE_SUCCESS)
			return ECORE_SUCCESS;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	/* The drain itself may have completed the ramrod without the poll
	 * loop observing it - check the completion flag one last time.
	 */
	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return ECORE_SUCCESS;
	}
err:
	DP_NOTICE(p_hwfn, true,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

	return ECORE_BUSY;
}
217
218 /***************************************************************************
219 * SPQ entries inner API
220 ***************************************************************************/
ecore_spq_fill_entry(struct ecore_hwfn * p_hwfn,struct ecore_spq_entry * p_ent)221 static enum _ecore_status_t ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn,
222 struct ecore_spq_entry *p_ent)
223 {
224 p_ent->flags = 0;
225
226 switch (p_ent->comp_mode) {
227 case ECORE_SPQ_MODE_EBLOCK:
228 case ECORE_SPQ_MODE_BLOCK:
229 p_ent->comp_cb.function = ecore_spq_blocking_cb;
230 break;
231 case ECORE_SPQ_MODE_CB:
232 break;
233 default:
234 DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
235 p_ent->comp_mode);
236 return ECORE_INVAL;
237 }
238
239 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
240 "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
241 p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
242 p_ent->elem.hdr.protocol_id,
243 p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
244 D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
245 ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
246 "MODE_CB"));
247
248 return ECORE_SUCCESS;
249 }
250
251 /***************************************************************************
252 * HSI access
253 ***************************************************************************/
ecore_spq_hw_initialize(struct ecore_hwfn * p_hwfn,struct ecore_spq * p_spq)254 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
255 struct ecore_spq *p_spq)
256 {
257 struct ecore_cxt_info cxt_info;
258 struct core_conn_context *p_cxt;
259 enum _ecore_status_t rc;
260 u16 physical_q;
261
262 cxt_info.iid = p_spq->cid;
263
264 rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
265
266 if (rc < 0) {
267 DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
268 p_spq->cid);
269 return;
270 }
271
272 p_cxt = cxt_info.p_cxt;
273
274 /* @@@TBD we zero the context until we have ilt_reset implemented. */
275 OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
276
277 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
278 SET_FIELD(p_cxt->xstorm_ag_context.flags10,
279 E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
280 SET_FIELD(p_cxt->xstorm_ag_context.flags1,
281 E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
282 /*SET_FIELD(p_cxt->xstorm_ag_context.flags10,
283 E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);*/
284 SET_FIELD(p_cxt->xstorm_ag_context.flags9,
285 E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
286 } else { /* E5 */
287 ECORE_E5_MISSING_CODE;
288 }
289
290 /* CDU validation - FIXME currently disabled */
291
292 /* QM physical queue */
293 physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
294 p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
295
296 p_cxt->xstorm_st_context.spq_base_lo =
297 DMA_LO_LE(p_spq->chain.p_phys_addr);
298 p_cxt->xstorm_st_context.spq_base_hi =
299 DMA_HI_LE(p_spq->chain.p_phys_addr);
300
301 DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
302 p_hwfn->p_consq->chain.p_phys_addr);
303 }
304
/* Copy a single SPQ entry into the next ring element and ring the XCM
 * doorbell to hand it to firmware.  The entry's echo field is stamped
 * with the producer index so the completion can be matched later.
 * Must be called with the SPQ lock held.
 */
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq *p_spq,
					      struct ecore_spq_entry *p_ent)
{
	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = ecore_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	/* echo = producer index before this element is produced */
	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
	elem = ecore_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
		return ECORE_INVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	OSAL_MEMSET(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL, DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	/* producer index after the produce above (i.e. echo + 1) */
	db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	OSAL_WMB(p_hwfn->p_dev);

	DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure doorbell is rang */
	OSAL_WMB(p_hwfn->p_dev);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
		   db.agg_flags, ecore_chain_get_prod_idx(p_chain));

	return ECORE_SUCCESS;
}
346
347 /***************************************************************************
348 * Asynchronous events
349 ***************************************************************************/
350
/* Dispatch an asynchronous event-ring entry to its protocol-specific
 * handler: RoCE/iWARP (when compiled in), SRIOV for common events, or
 * the iSCSI callback registered by the upper layer.  Unknown protocols
 * are rejected with ECORE_INVAL.
 */
static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
			     struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
#ifdef CONFIG_ECORE_ROCE
	case PROTOCOLID_ROCE:
	{
		ecore_roce_async_event(p_hwfn,
				       p_eqe->opcode,
				       &p_eqe->data.rdma_data);
		return ECORE_SUCCESS;
	}
#ifdef CONFIG_ECORE_IWARP
	case PROTOCOLID_IWARP:
	{
		ecore_iwarp_async_event(p_hwfn,
					p_eqe->opcode,
					&p_eqe->data.rdma_data.async_handle,
					p_eqe->fw_return_code);
		return ECORE_SUCCESS;
	}
#endif
#endif
	case PROTOCOLID_COMMON:
		return ecore_sriov_eqe_event(p_hwfn,
					     p_eqe->opcode,
					     p_eqe->echo,
					     &p_eqe->data);
#ifdef CONFIG_ECORE_ISCSI
	case PROTOCOLID_ISCSI:
		/* iSCSI events are forwarded to the callback the storage
		 * layer registered, if any.
		 */
		if (p_hwfn->p_iscsi_info->event_cb != OSAL_NULL) {
			struct ecore_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;

			return p_iscsi->event_cb(p_iscsi->event_context,
						 p_eqe->opcode, &p_eqe->data);
		} else {
			DP_NOTICE(p_hwfn,
				  false, "iSCSI async completion is not set\n");
			return ECORE_NOTIMPL;
		}
#endif
	default:
		DP_NOTICE(p_hwfn,
			  true, "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return ECORE_INVAL;
	}
}
400
401 /***************************************************************************
402 * EQ API
403 ***************************************************************************/
/* Write the EQ producer value into the USTORM RAM slot for this PF so
 * firmware knows how far the driver has consumed the event ring.
 */
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn,
			  u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_MMIOWB(p_hwfn->p_dev);
}
415
ecore_eq_completion(struct ecore_hwfn * p_hwfn,void * cookie)416 enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
417 void *cookie)
418
419 {
420 struct ecore_eq *p_eq = cookie;
421 struct ecore_chain *p_chain = &p_eq->chain;
422 enum _ecore_status_t rc = 0;
423
424 /* take a snapshot of the FW consumer */
425 u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
426
427 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
428
429 /* Need to guarantee the fw_cons index we use points to a usuable
430 * element (to comply with our chain), so our macros would comply
431 */
432 if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
433 ecore_chain_get_usable_per_page(p_chain)) {
434 fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
435 }
436
437 /* Complete current segment of eq entries */
438 while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
439 struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
440 if (!p_eqe) {
441 rc = ECORE_INVAL;
442 break;
443 }
444
445 DP_VERBOSE(p_hwfn,
446 ECORE_MSG_SPQ,
447 "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
448 p_eqe->opcode, /* Event Opcode */
449 p_eqe->protocol_id, /* Event Protocol ID */
450 p_eqe->reserved0, /* Reserved */
451 OSAL_LE16_TO_CPU(p_eqe->echo),/* Echo value from
452 ramrod data on the host
453 */
454 p_eqe->fw_return_code, /* FW return code for SP
455 ramrods
456 */
457 p_eqe->flags);
458 #ifndef REMOVE_DBG
459 if (p_eqe->protocol_id == PROTOCOLID_ISCSI)
460 ecore_iscsi_eq_dump(p_hwfn, p_eqe);
461 #endif
462
463 if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
464 if (ecore_async_event_completion(p_hwfn, p_eqe))
465 rc = ECORE_INVAL;
466 } else if (ecore_spq_completion(p_hwfn,
467 p_eqe->echo,
468 p_eqe->fw_return_code,
469 &p_eqe->data)) {
470 rc = ECORE_INVAL;
471 }
472
473 ecore_chain_recycle_consumed(p_chain);
474 }
475
476 ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
477
478 return rc;
479 }
480
/* Allocate the event queue: the tracking structure, a PBL-mode chain of
 * num_elem event-ring elements, and registration of the EQ completion
 * handler on the slow-path status block.  On success p_hwfn->p_eq is
 * set; ECORE_NOMEM is returned on any allocation failure.
 */
enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
	struct ecore_eq *p_eq;
	enum _ecore_status_t rc;

	/* Allocate the EQ tracking structure itself */
	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
	if (p_eq == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_eq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize the EQ chain */
	rc = ecore_chain_alloc(p_hwfn->p_dev,
			       ECORE_CHAIN_USE_TO_PRODUCE,
			       ECORE_CHAIN_MODE_PBL,
			       ECORE_CHAIN_CNT_TYPE_U16,
			       num_elem,
			       sizeof(union event_ring_element),
			       &p_eq->chain, OSAL_NULL);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
		OSAL_FREE(p_hwfn->p_dev, p_eq);
		return ECORE_NOMEM;
	}

	/* register EQ completion on the SP SB */
	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return ECORE_SUCCESS;
}
516
/* Reset the EQ chain indices back to their initial state. */
void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_eq->chain);
}
521
/* Release the EQ chain and tracking structure; safe to call when the EQ
 * was never allocated.
 */
void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
	p_hwfn->p_eq = OSAL_NULL;
}
532
/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
/* Handle a slow-path Rx CQE.  VFs forward it to the PF via the OSAL
 * hook; on the PF it is used solely to complete the matching ramrod by
 * the echo value carried in the CQE.
 */
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
						 struct eth_slow_path_rx_cqe *cqe,
						 enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->p_dev))
		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}
549
ecore_eth_cqe_completion(struct ecore_hwfn * p_hwfn,struct eth_slow_path_rx_cqe * cqe)550 enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
551 struct eth_slow_path_rx_cqe *cqe)
552 {
553 enum _ecore_status_t rc;
554
555 rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
556 if (rc) {
557 DP_NOTICE(p_hwfn, true,
558 "Failed to handle RXQ CQE [cmd 0x%02x]\n",
559 cqe->ramrod_cmd_id);
560 }
561
562 return rc;
563 }
564
565 /***************************************************************************
566 * Slow hwfn Queue (spq)
567 ***************************************************************************/
/* (Re)initialize a previously-allocated SPQ: reset its lists, link every
 * pre-allocated entry's data pointer to the matching ramrod slot and
 * push it onto the free pool, clear statistics and the out-of-order
 * completion bitmap, acquire the SPQ CID, program the HW context, and
 * reset the ring itself.
 */
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	OSAL_LIST_INIT(&p_spq->pending);
	OSAL_LIST_INIT(&p_spq->completion_pending);
	OSAL_LIST_INIT(&p_spq->free_pool);
	OSAL_LIST_INIT(&p_spq->unlimited_pending);
	OSAL_SPIN_LOCK_INIT(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct ecore_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = ecore_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		/* each entry points at the DMA address of its own ramrod */
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct ecore_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	ecore_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	ecore_chain_reset(&p_spq->chain);
}
612
/* Allocate the slow-path queue: the tracking structure, a single-page
 * ring chain, and one DMA-coherent array of SPQ entries (including the
 * ramrod data) sized to the ring capacity.  On success p_hwfn->p_spq is
 * set; ECORE_NOMEM is returned on any allocation failure.
 */
enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
	if (!p_spq) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_spq'\n");
		return ECORE_NOMEM;
	}

	/* SPQ ring */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_SINGLE,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      0, /* N/A when the mode is SINGLE */
			      sizeof(struct slow_path_element),
			      &p_spq->chain, OSAL_NULL)) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = ecore_chain_get_capacity(&p_spq->chain);
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
					 capacity *
					 sizeof(struct ecore_spq_entry));
	if (!p_virt) {
		goto spq_allocate_fail;
	}

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;

	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);

	p_hwfn->p_spq = p_spq;
	return ECORE_SUCCESS;

spq_allocate_fail:
	/* chain free is a no-op if the chain was never allocated */
	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	return ECORE_NOMEM;
}
662
ecore_spq_free(struct ecore_hwfn * p_hwfn)663 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
664 {
665 struct ecore_spq *p_spq = p_hwfn->p_spq;
666 u32 capacity;
667
668 if (!p_spq)
669 return;
670
671 if (p_spq->p_virt) {
672 capacity = ecore_chain_get_capacity(&p_spq->chain);
673 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
674 p_spq->p_virt,
675 p_spq->p_phys,
676 capacity *
677 sizeof(struct ecore_spq_entry));
678 }
679
680 ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
681 OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
682
683 OSAL_FREE(p_hwfn->p_dev, p_spq);
684 p_hwfn->p_spq = OSAL_NULL;
685 }
686
/* Hand out an SPQ entry to a caller about to post a ramrod.  Entries
 * come from the pre-allocated free pool when available; when the pool is
 * exhausted a fresh entry is allocated and tagged for the
 * unlimited_pending queue (ecore_spq_add_entry uses the 'queue' field
 * to tell the two apart, and allocated entries are freed on completion).
 */
enum _ecore_status_t ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
					 struct ecore_spq_entry **pp_ent)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_SPIN_LOCK(&p_spq->lock);

	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
		/* GFP_ATOMIC: may be called from non-sleepable context */
		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
		if (!p_ent) {
			DP_NOTICE(p_hwfn, true, "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = ECORE_NOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
					      struct ecore_spq_entry,
					      list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	OSAL_SPIN_UNLOCK(&p_spq->lock);
	return rc;
}
719
720 /* Locked variant; Should be called while the SPQ lock is taken */
/* Return an entry to the free pool.
 * Locked variant; Should be called while the SPQ lock is taken.
 */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
				     struct ecore_spq_entry *p_ent)
{
	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
726
/* Return an entry to the free pool, taking the SPQ lock around the
 * list manipulation.
 */
void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			    struct ecore_spq_entry *p_ent)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}
734
/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
						struct ecore_spq_entry *p_ent,
						enum spq_priority priority)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
			/* No ring element free - park the entry until
			 * ecore_spq_pend_post can move it.
			 */
			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return ECORE_SUCCESS;

		} else {
			struct ecore_spq_entry *p_en2;

			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
						      struct ecore_spq_entry,
						      list);
			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
				OSAL_FREE(p_hwfn->p_dev, p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case ECORE_SPQ_PRIORITY_NORMAL:
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case ECORE_SPQ_PRIORITY_HIGH:
		/* high-priority entries jump the queue */
		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
804
805 /***************************************************************************
806 * Accessor
807 ***************************************************************************/
808
ecore_spq_get_cid(struct ecore_hwfn * p_hwfn)809 u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
810 {
811 if (!p_hwfn->p_spq) {
812 return 0xffffffff; /* illegal */
813 }
814 return p_hwfn->p_spq->cid;
815 }
816
817 /***************************************************************************
818 * Posting new Ramrods
819 ***************************************************************************/
820
/* Post entries from the given list to hardware for as long as the ring
 * has more than keep_reserve free elements, moving each posted entry to
 * the completion_pending list.  On a HW-post failure the entry is
 * returned to the free pool and the error propagated.  Must be called
 * with the SPQ lock held.
 */
static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
						osal_list_t *head,
						u32 keep_reserve)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	enum _ecore_status_t rc;

	/* TODO - implementation might be wasteful; will always keep room
	 * for an additional high priority ramrod (even if one is already
	 * pending FW)
	 */
	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !OSAL_LIST_IS_EMPTY(head)) {
		struct ecore_spq_entry *p_ent =
		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
		if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
			OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
			p_spq->comp_sent_count++;

			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
			if (rc) {
				/* undo the move above before bailing out */
				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
						       &p_spq->completion_pending);
				__ecore_spq_return_entry(p_hwfn, p_ent);
				return rc;
			}
		}
	}

	return ECORE_SUCCESS;
}
856
/* Drain as much of the unlimited_pending overflow queue into the
 * pending queue as the free pool allows, then post the pending queue to
 * hardware (keeping the high-priority reserve).  Must be called with
 * the SPQ lock held.
 */
static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool))
	{
		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
			break;

		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
					      struct ecore_spq_entry,
					      list);
		if (!p_ent)
			return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

		/* re-queues onto 'pending' via the free-pool swap path */
		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return ecore_spq_post_list(p_hwfn, &p_spq->pending,
				   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
884
/* Post a ramrod to the slow-path queue.  Fills and enqueues the entry
 * under the SPQ lock, triggers posting of the pending queue, and - for
 * EBLOCK mode - waits for the completion and returns the entry to the
 * pool (or frees it, if it came from the unlimited_pending allocation
 * path).  During recovery the post is skipped and success returned so
 * callers' flows complete cleanly.
 */
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq_entry *p_ent,
				    u8 *fw_return_code)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
		return ECORE_INVAL;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Recovery is in progress -> skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
		/* Return success to let the flows to be completed successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);

	/* Complete the entry */
	rc = ecore_spq_fill_entry(p_hwfn, p_ent);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = ecore_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
		/* For entries in ECORE BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer - perform the cleanup here.
		 */
		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
				     p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			OSAL_FREE(p_hwfn->p_dev, p_ent);

			/* TBD: handle error flow and remove p_ent from
			 * completion pending
			 */
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		ecore_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	/* blocking wait failed - pull the entry back off the in-flight
	 * list and un-produce its ring element
	 */
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
	ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}
979
/* Complete a previously-posted slowpath ramrod identified by its EQE echo.
 *
 * Looks up the pending SPQ entry whose echo matches the EQE, releases its
 * slot on the SPQ chain (handling out-of-order completions via the
 * completion bitmap), invokes the entry's completion callback, and then
 * tries to post any ramrods still waiting on the pending list.
 *
 * @param p_hwfn	 HW-function the EQE arrived on
 * @param echo		 echo field from the EQE header, LE16 as received
 *			 from FW; matched verbatim against pending entries
 * @param fw_return_code FW status forwarded to the completion callback
 * @param p_data	 EQE payload forwarded to the completion callback
 *
 * @return ECORE_INVAL if p_hwfn/p_spq is missing, ECORE_EXISTS if no
 *	   pending entry matches the echo, otherwise the result of
 *	   re-posting pending ramrods.
 */
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
					  __le16 echo,
					  u8 fw_return_code,
					  union event_ring_data *p_data)
{
	struct ecore_spq *p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_spq_entry *tmp;
	struct ecore_spq_entry *found = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!p_hwfn) {
		return ECORE_INVAL;
	}

	p_spq = p_hwfn->p_spq;
	if (!p_spq) {
		return ECORE_INVAL;
	}

	/* The pending list and the completion bitmap are both protected by
	 * the SPQ lock.
	 */
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
				      tmp,
				      &p_spq->completion_pending,
				      list,
				      struct ecore_spq_entry) {

		/* Both sides are LE16, so compare without byte-swapping */
		if (p_ent->elem.hdr.echo == echo) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
						      p_spq->comp_bitmap_idx)) {
				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
							p_spq->comp_bitmap_idx);
				p_spq->comp_bitmap_idx++;
				ecore_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is debug and should be relatively uncommon - depends
		 * on scenarios which have mutliple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   OSAL_LE16_TO_CPU(echo),
			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  OSAL_LE16_TO_CPU(echo));
		return ECORE_EXISTS;
	}

	/* NOTE: p_ent aliases `found` here - the loop above only exits with
	 * p_ent valid when `found` was set (the !found case returned).
	 */
	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
		   OSAL_LE16_TO_CPU(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Got a completion without a callback function\n");

	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		ecore_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	OSAL_SPIN_LOCK(&p_spq->lock);
	rc = ecore_spq_pend_post(p_hwfn);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}
1076
ecore_consq_alloc(struct ecore_hwfn * p_hwfn)1077 enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
1078 {
1079 struct ecore_consq *p_consq;
1080
1081 /* Allocate ConsQ struct */
1082 p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
1083 if (!p_consq) {
1084 DP_NOTICE(p_hwfn, true,
1085 "Failed to allocate `struct ecore_consq'\n");
1086 return ECORE_NOMEM;
1087 }
1088
1089 /* Allocate and initialize EQ chain*/
1090 if (ecore_chain_alloc(p_hwfn->p_dev,
1091 ECORE_CHAIN_USE_TO_PRODUCE,
1092 ECORE_CHAIN_MODE_PBL,
1093 ECORE_CHAIN_CNT_TYPE_U16,
1094 ECORE_CHAIN_PAGE_SIZE/0x80,
1095 0x80,
1096 &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
1097 DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
1098 goto consq_allocate_fail;
1099 }
1100
1101 p_hwfn->p_consq = p_consq;
1102 return ECORE_SUCCESS;
1103
1104 consq_allocate_fail:
1105 OSAL_FREE(p_hwfn->p_dev, p_consq);
1106 return ECORE_NOMEM;
1107 }
1108
ecore_consq_setup(struct ecore_hwfn * p_hwfn)1109 void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
1110 {
1111 ecore_chain_reset(&p_hwfn->p_consq->chain);
1112 }
1113
ecore_consq_free(struct ecore_hwfn * p_hwfn)1114 void ecore_consq_free(struct ecore_hwfn *p_hwfn)
1115 {
1116 if (!p_hwfn->p_consq)
1117 return;
1118
1119 ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
1120
1121 OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
1122 p_hwfn->p_consq = OSAL_NULL;
1123 }
1124
1125