/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_roce.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

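/* Number of chain elements held back for high-priority ramrods; see the
 * keep_reserve argument of qed_spq_post_list().
 */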
#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

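/* Bounds for polling on a blocking ramrod completion: a short busy-wait
 * pass (10 iterations x 10us, ~100us) is tried before falling back to a
 * sleeping poll (1000 iterations x 5ms, i.e. up to ~5 seconds).
 */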
#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

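/* The smp_store_release() in qed_spq_blocking_cb() pairs with the
 * smp_load_acquire() below: the waiter may read fw_return_code only
 * after it has observed done == 1.
 */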
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we received a completion update */
		if (smp_load_acquire(&comp_done->done) == 1) {
			/* Read updated FW return value */
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

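/* Wait for a blocking ramrod to complete: an optional quick busy-wait
 * poll, then a sleeping poll, and - if the ramrod is still outstanding -
 * an MCP drain request followed by one final sleeping poll.
 */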
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (smp_load_acquire(&comp_done->done) == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	DP_NOTICE(p_hwfn,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  le32_to_cpu(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id,
		  p_ent->elem.hdr.protocol_id,
		  le16_to_cpu(p_ent->elem.hdr.echo));

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
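/* Completion modes: EBLOCK and BLOCK entries complete through
 * qed_spq_blocking_cb(), which flags a qed_spq_comp_done that a waiter
 * polls on; CB entries invoke a caller-supplied callback from EQ
 * context instead.
 */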
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
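/* Program the SPQ connection context: enable the relevant XSTORM
 * aggregation flows, assign the LB TC physical queue, and point the
 * context at the SPQ chain and CONSQ base addresses.
 */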
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16				pq;
	struct qed_cxt_info		cxt_info;
	struct core_conn_context	*p_cxt;
	union qed_qm_pq_params		pq_params;
	int				rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

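/* Copy one SPQ element into the chain and ring the XCM doorbell. The
 * current producer index is latched into hdr.echo before producing, so
 * the eventual EQ completion can be matched back to this entry.
 */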
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element	*elem;
	struct core_db_data		db;

	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_ROCE:
		qed_async_roce_event(p_hwfn, p_eqe);
		return 0;
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	case PROTOCOLID_ISCSI:
		if (!IS_ENABLED(CONFIG_QED_ISCSI))
			return -EINVAL;
		if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
			u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);

			qed_ooo_release_connection_isles(p_hwfn,
							 p_hwfn->p_ooo_info,
							 cid);
			return 0;
		}

		if (p_hwfn->p_iscsi_info->event_cb) {
			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;

			return p_iscsi->event_cb(p_iscsi->event_context,
						 p_eqe->opcode, &p_eqe->data);
		} else {
			DP_NOTICE(p_hwfn,
				  "iSCSI async completion is not set\n");
			return -EINVAL;
		}
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

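/* EQ service routine, invoked from the slowpath status block: walk the
 * chain up to a snapshot of the FW consumer, dispatching each EQE either
 * to the async handler or to qed_spq_completion().
 */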
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete the current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return NULL;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
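/* The SPQ keeps four lists: free_pool (unused pre-allocated entries),
 * pending (queued but not yet posted to the chain), completion_pending
 * (posted, awaiting an EQ completion) and unlimited_pending (overflow
 * entries kzalloc'ed when the free pool is empty).
 */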
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt	= p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated
			 * p_ent.
			 */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;      /* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

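/* Post a ramrod: queue the entry, flush the pending list into the chain
 * and, for EBLOCK entries, wait here for the completion before returning
 * the entry to the free pool (or freeing it, if it came from
 * unlimited_pending).
 *
 * A minimal EBLOCK-mode caller might look like the sketch below; it
 * assumes an already-acquired CID, elides error handling, and callers
 * normally fill the entry via qed_sp_init_request() rather than by hand:
 *
 *	struct qed_sp_init_data init_data = { 0 };
 *	struct qed_spq_entry *p_ent = NULL;
 *	u8 fw_ret;
 *
 *	init_data.cid = cid;
 *	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 *	rc = qed_sp_init_request(p_hwfn, &p_ent, cmd, protocol, &init_data);
 *	rc = qed_spq_post(p_hwfn, p_ent, &fw_ret);
 */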
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

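/* Match an EQ completion back to its SPQ entry by echo. Completions may
 * arrive out of order, so completed positions are marked in a bitmap and
 * chain producer credits are returned only for the longest contiguous
 * run of completions starting at comp_bitmap_idx.
 */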
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq		*p_spq;
	struct qed_spq_entry	*p_ent = NULL;
	struct qed_spq_entry	*tmp;
	struct qed_spq_entry	*found = NULL;
	int			rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release the lock before the callback, as the callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return NULL;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain))
		goto consq_allocate_fail;

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}