xref: /linux/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: RDMA Controller HW interface
37  */
38 #include <linux/interrupt.h>
39 #include <linux/spinlock.h>
40 #include <linux/pci.h>
41 #include <linux/prefetch.h>
42 #include "roce_hsi.h"
43 #include "qplib_res.h"
44 #include "qplib_rcfw.h"
45 static void bnxt_qplib_service_creq(unsigned long data);
46 
47 /* Hardware communication channel */
48 int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
49 {
50 	u16 cbit;
51 	int rc;
52 
53 	cookie &= RCFW_MAX_COOKIE_VALUE;
54 	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
55 	if (!test_bit(cbit, rcfw->cmdq_bitmap))
56 		dev_warn(&rcfw->pdev->dev,
57 			 "QPLIB: CMD bit %d for cookie 0x%x is not set?",
58 			 cbit, cookie);
59 
60 	rc = wait_event_timeout(rcfw->waitq,
61 				!test_bit(cbit, rcfw->cmdq_bitmap),
62 				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
63 	if (!rc) {
64 		dev_warn(&rcfw->pdev->dev,
65 			 "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
66 			 RCFW_CMD_WAIT_TIME_MS, cookie);
67 	}
68 
69 	return rc;
70 };
71 
72 int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
73 {
74 	u32 count = -1;
75 	u16 cbit;
76 
77 	cookie &= RCFW_MAX_COOKIE_VALUE;
78 	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
79 	if (!test_bit(cbit, rcfw->cmdq_bitmap))
80 		goto done;
81 	do {
82 		bnxt_qplib_service_creq((unsigned long)rcfw);
83 	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
84 done:
85 	return count;
86 };
87 
/* Post @req on the RCFW command queue (CMDQ) and ring the firmware
 * doorbell.  Allocates a cookie for the command, marks it outstanding in
 * cmdq_bitmap, optionally reserves a response buffer (CRSB) slot, copies
 * the request into one or more 16-byte CMDQ elements, and reserves a CRSQ
 * slot where the completion path will deposit the CREQ event.
 *
 * @crsbe: out-param; when non-NULL and req->resp_size != 0, receives a
 *         pointer to the reserved response buffer slot.
 * @is_block: when set, tags the cookie so the completion path skips the
 *            waitq wakeup (caller polls via bnxt_qplib_rcfw_block_for_resp).
 *
 * Returns a pointer to the CRSQ element's qp_event area (filled in later
 * by the completion path), or NULL on failure.
 */
void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				   struct cmdq_base *req, void **crsbe,
				   u8 is_block)
{
	struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_hwq *crsb = &rcfw->crsb;
	struct bnxt_qplib_crsqe *crsqe = NULL;
	struct bnxt_qplib_crsbe **crsb_ptr;
	u32 sw_prod, cmdq_prod;
	u8 retry_cnt = 0xFF;	/* retries for transient full-queue conditions */
	dma_addr_t dma_addr;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	int pg, idx;
	u8 *preq;

retry:
	/* Only QUERY_FUNC and INITIALIZE_FW are legal before the FW has
	 * been initialized.
	 */
	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return NULL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return NULL;
	}

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	/* Ring-occupancy check: free slots = max - (prod - cons) mod size */
	if (req->cmd_size > cmdq->max_elements -
	    ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
	     (cmdq->max_elements - 1))) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
		spin_unlock_irqrestore(&cmdq->lock, flags);

		/* Drop the lock and retry; give up after 255 attempts */
		if (!retry_cnt--)
			return NULL;
		goto retry;
	}

	retry_cnt = 0xFF;

	/* Cookie = low bits of the global sequence number; cbit indexes the
	 * outstanding-command bitmap.
	 */
	cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;
	req->cookie = cpu_to_le16(cookie);
	if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) {
		/* Slot still owned by an older outstanding command */
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW MAX outstanding cmd reached!");
		atomic_dec(&rcfw->seq_num);
		spin_unlock_irqrestore(&cmdq->lock, flags);

		if (!retry_cnt--)
			return NULL;
		goto retry;
	}
	/* Reserve a resp buffer slot if requested */
	if (req->resp_size && crsbe) {
		spin_lock(&crsb->lock);
		sw_prod = HWQ_CMP(crsb->prod, crsb);
		crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr;
		*crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)]
					  [get_crsb_idx(sw_prod)];
		/* Hand the slot's DMA address to the FW in the request */
		bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
		req->resp_addr = cpu_to_le64(dma_addr);
		crsb->prod++;
		spin_unlock(&crsb->lock);

		/* resp_size is expressed in CMDQE (16-byte) units */
		req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
				  BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
	}
	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	/* Scatter the request into consecutive 16-byte CMDQ elements */
	do {
		pg = 0;
		idx = 0;

		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: RCFW request failed with no cmdqe!");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
	} while (size > 0);

	cmdq_prod = cmdq->prod;
	if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
		/* The very first doorbell write is required to set this flag
		 * which prompts the FW to reset its internal pointers
		 */
		cmdq_prod |= FIRMWARE_FIRST_FLAG;
		rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
	}
	/* Reserve the CRSQ slot the completion path will fill for us */
	sw_prod = HWQ_CMP(crsq->prod, crsq);
	crsqe = &crsq->crsq[sw_prod];
	memset(crsqe, 0, sizeof(*crsqe));
	crsq->prod++;
	crsqe->req_size = req->cmd_size;

	/* ring CMDQ DB */
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* Return the CREQ response pointer */
	return crsqe ? &crsqe->qp_event : NULL;
}
219 
220 /* Completions */
221 static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
222 					 struct creq_func_event *func_event)
223 {
224 	switch (func_event->event) {
225 	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
226 		break;
227 	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
228 		break;
229 	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
230 		break;
231 	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
232 		break;
233 	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
234 		break;
235 	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
236 		break;
237 	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
238 		break;
239 	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
240 		/* SRQ ctx error, call srq_handler??
241 		 * But there's no SRQ handle!
242 		 */
243 		break;
244 	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
245 		break;
246 	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
247 		break;
248 	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
249 		break;
250 	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
251 		break;
252 	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
253 		break;
254 	default:
255 		return -EINVAL;
256 	}
257 	return 0;
258 }
259 
/* Handle a QP-type CREQ event.  Anything other than an error
 * notification is treated as a command response: the event is copied into
 * the pre-reserved CRSQ slot, the command's outstanding bit is cleared,
 * the CMDQ consumer index is advanced past the command, and any sleeper
 * on rcfw->waitq is woken (unless the command was issued in blocking/
 * polling mode).
 *
 * Always returns 0.
 */
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	u16 cbit, cookie, blocked = 0;
	unsigned long flags;
	u32 sw_cons;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: Received QP error notification");
		break;
	default:
		/* Command Response */
		/* cmdq->lock serializes against the send path, which
		 * reserves CRSQ slots in the same order it posts commands.
		 */
		spin_lock_irqsave(&cmdq->lock, flags);
		sw_cons = HWQ_CMP(crsq->cons, crsq);
		crsqe = &crsq->crsq[sw_cons];
		crsq->cons++;
		/* Deposit the response where send_message's caller polls */
		memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));

		cookie = le16_to_cpu(crsqe->qp_event.cookie);
		/* Blocking commands are polled, not woken */
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: CMD bit %d was not requested", cbit);

		/* Retire the command's CMDQ elements */
		cmdq->cons += crsqe->req_size;
		spin_unlock_irqrestore(&cmdq->lock, flags);
		if (!blocked)
			wake_up(&rcfw->waitq);
		break;
	}
	return 0;
}
299 
300 /* SP - CREQ Completion handlers */
/* SP - CREQ Completion handlers */
/* Tasklet body (also called directly by the blocking poll path): drain
 * the completion queue (CREQ), dispatching each entry by type, then
 * advance the consumer index and re-arm the CREQ doorbell if anything
 * was consumed.
 *
 * @data: the struct bnxt_qplib_rcfw *, cast through unsigned long for
 *        the tasklet API.
 */
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type;

	/* Service the CREQ until empty */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (1) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		/* Valid-bit check (toggles on ring wrap): stop at the first
		 * entry HW has not yet written.
		 */
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			if (!bnxt_qplib_process_qp_event
			    (rcfw, (struct creq_qp_event *)creqe))
				rcfw->creq_qp_event_processed++;
			else {
				dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
				dev_warn(&rcfw->pdev->dev,
					 "QPLIB: type = 0x%x not handled",
					 type);
			}
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn
				(&rcfw->pdev->dev, "QPLIB:aeqe:%#x Not handled",
				 type);
			break;
		default:
			dev_warn(&rcfw->pdev->dev, "QPLIB: creqe with ");
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: op_event = 0x%x not handled", type);
			break;
		}
		raw_cons++;
	}
	/* Only touch the doorbell if we actually consumed entries */
	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
			      creq->max_elements);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}
357 
358 static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
359 {
360 	struct bnxt_qplib_rcfw *rcfw = dev_instance;
361 	struct bnxt_qplib_hwq *creq = &rcfw->creq;
362 	struct creq_base **creq_ptr;
363 	u32 sw_cons;
364 
365 	/* Prefetch the CREQ element */
366 	sw_cons = HWQ_CMP(creq->cons, creq);
367 	creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
368 	prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
369 
370 	tasklet_schedule(&rcfw->worker);
371 
372 	return IRQ_HANDLED;
373 }
374 
375 /* RCFW */
376 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
377 {
378 	struct creq_deinitialize_fw_resp *resp;
379 	struct cmdq_deinitialize_fw req;
380 	u16 cmd_flags = 0;
381 
382 	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
383 	resp = (struct creq_deinitialize_fw_resp *)
384 			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
385 						     NULL, 0);
386 	if (!resp)
387 		return -EINVAL;
388 
389 	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
390 		return -ETIMEDOUT;
391 
392 	if (resp->status ||
393 	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
394 		return -EFAULT;
395 
396 	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
397 	return 0;
398 }
399 
400 static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
401 {
402 	return (pbl->pg_size == ROCE_PG_SIZE_4K ?
403 				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
404 		pbl->pg_size == ROCE_PG_SIZE_8K ?
405 				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
406 		pbl->pg_size == ROCE_PG_SIZE_64K ?
407 				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
408 		pbl->pg_size == ROCE_PG_SIZE_2M ?
409 				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
410 		pbl->pg_size == ROCE_PG_SIZE_8M ?
411 				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
412 		pbl->pg_size == ROCE_PG_SIZE_1G ?
413 				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
414 				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
415 }
416 
417 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
418 			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
419 {
420 	struct creq_initialize_fw_resp *resp;
421 	struct cmdq_initialize_fw req;
422 	u16 cmd_flags = 0, level;
423 
424 	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
425 
426 	/*
427 	 * VFs need not setup the HW context area, PF
428 	 * shall setup this area for VF. Skipping the
429 	 * HW programming
430 	 */
431 	if (is_virtfn)
432 		goto skip_ctx_setup;
433 
434 	level = ctx->qpc_tbl.level;
435 	req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
436 				__get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
437 	level = ctx->mrw_tbl.level;
438 	req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
439 				__get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
440 	level = ctx->srqc_tbl.level;
441 	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
442 				__get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
443 	level = ctx->cq_tbl.level;
444 	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
445 				__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
446 	level = ctx->srqc_tbl.level;
447 	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
448 				__get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
449 	level = ctx->cq_tbl.level;
450 	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
451 				__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
452 	level = ctx->tim_tbl.level;
453 	req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
454 				  __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
455 	level = ctx->tqm_pde_level;
456 	req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
457 				  __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);
458 
459 	req.qpc_page_dir =
460 		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
461 	req.mrw_page_dir =
462 		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
463 	req.srq_page_dir =
464 		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
465 	req.cq_page_dir =
466 		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
467 	req.tim_page_dir =
468 		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
469 	req.tqm_page_dir =
470 		cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);
471 
472 	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
473 	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
474 	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
475 	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
476 
477 	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
478 	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
479 	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
480 	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
481 	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
482 
483 skip_ctx_setup:
484 	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
485 	resp = (struct creq_initialize_fw_resp *)
486 			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
487 						     NULL, 0);
488 	if (!resp) {
489 		dev_err(&rcfw->pdev->dev,
490 			"QPLIB: RCFW: INITIALIZE_FW send failed");
491 		return -EINVAL;
492 	}
493 	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
494 		/* Cmd timed out */
495 		dev_err(&rcfw->pdev->dev,
496 			"QPLIB: RCFW: INITIALIZE_FW timed out");
497 		return -ETIMEDOUT;
498 	}
499 	if (resp->status ||
500 	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
501 		dev_err(&rcfw->pdev->dev,
502 			"QPLIB: RCFW: INITIALIZE_FW failed");
503 		return -EINVAL;
504 	}
505 	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
506 	return 0;
507 }
508 
/* Release all HW-channel queue memory allocated by
 * bnxt_qplib_alloc_rcfw_channel() (CRSB, CRSQ, CMDQ, CREQ), then drop
 * the pdev reference.  Also used as the error-unwind path of the
 * allocator, so every call below must tolerate a partially-allocated
 * rcfw.
 */
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb);
	kfree(rcfw->crsq.crsq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);

	rcfw->pdev = NULL;
}
518 
/* Allocate the HW communication channel queues: the completion queue
 * (CREQ), the command queue (CMDQ), the software response-tracking array
 * (CRSQ, one entry per CMDQ element), and the response buffer ring
 * (CRSB).  On any failure, everything allocated so far is freed.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw)
{
	rcfw->pdev = pdev;
	rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
				      &rcfw->creq.max_elements,
				      BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_L2_CMPL)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CREQ allocation failed");
		goto fail;
	}
	rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
				      &rcfw->cmdq.max_elements,
				      BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CMDQ allocation failed");
		goto fail;
	}

	/* CRSQ mirrors the CMDQ: one tracking entry per command element */
	rcfw->crsq.max_elements = rcfw->cmdq.max_elements;
	rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements,
				  sizeof(*rcfw->crsq.crsq), GFP_KERNEL);
	if (!rcfw->crsq.crsq)
		goto fail;

	rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
				      &rcfw->crsb.max_elements,
				      BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CRSB allocation failed");
		goto fail;
	}
	return 0;

fail:
	/* Safe on partial allocation: frees only what was set up */
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}
563 
/* Quiesce and tear down the HW communication channel: stop the CREQ
 * tasklet, release the IRQ, unmap both BAR regions, and free the
 * outstanding-command bitmap.  Warns if any command was still pending.
 *
 * NOTE(review): the IRQ is freed after tasklet_kill(); an interrupt
 * landing in that window could re-schedule the killed tasklet — confirm
 * callers have silenced the device's CREQ doorbell/MSI-X first.
 */
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	unsigned long indx;

	/* Make sure the HW channel is stopped! */
	synchronize_irq(rcfw->vector);
	tasklet_disable(&rcfw->worker);
	tasklet_kill(&rcfw->worker);

	if (rcfw->requested) {
		free_irq(rcfw->vector, rcfw);
		rcfw->requested = false;
	}
	if (rcfw->cmdq_bar_reg_iomem)
		iounmap(rcfw->cmdq_bar_reg_iomem);
	rcfw->cmdq_bar_reg_iomem = NULL;

	if (rcfw->creq_bar_reg_iomem)
		iounmap(rcfw->creq_bar_reg_iomem);
	rcfw->creq_bar_reg_iomem = NULL;

	/* find_first_bit() == bmap_size means the bitmap is empty, i.e.
	 * no command is still awaiting a response.
	 */
	indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
	if (indx != rcfw->bmap_size)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
	kfree(rcfw->cmdq_bitmap);
	rcfw->bmap_size = 0;

	rcfw->aeq_handler = NULL;
	rcfw->vector = 0;
}
595 
596 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
597 				   struct bnxt_qplib_rcfw *rcfw,
598 				   int msix_vector,
599 				   int cp_bar_reg_off, int virt_fn,
600 				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
601 						      struct creq_func_event *))
602 {
603 	resource_size_t res_base;
604 	struct cmdq_init init;
605 	u16 bmap_size;
606 	int rc;
607 
608 	/* General */
609 	atomic_set(&rcfw->seq_num, 0);
610 	rcfw->flags = FIRMWARE_FIRST_FLAG;
611 	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
612 				  sizeof(unsigned long));
613 	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
614 	if (!rcfw->cmdq_bitmap)
615 		return -ENOMEM;
616 	rcfw->bmap_size = bmap_size;
617 
618 	/* CMDQ */
619 	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
620 	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
621 	if (!res_base)
622 		return -ENOMEM;
623 
624 	rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
625 					      RCFW_COMM_BASE_OFFSET,
626 					      RCFW_COMM_SIZE);
627 	if (!rcfw->cmdq_bar_reg_iomem) {
628 		dev_err(&rcfw->pdev->dev,
629 			"QPLIB: CMDQ BAR region %d mapping failed",
630 			rcfw->cmdq_bar_reg);
631 		return -ENOMEM;
632 	}
633 
634 	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
635 					RCFW_PF_COMM_PROD_OFFSET;
636 
637 	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
638 
639 	/* CRSQ */
640 	rcfw->crsq.prod = 0;
641 	rcfw->crsq.cons = 0;
642 
643 	/* CREQ */
644 	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
645 	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
646 	if (!res_base)
647 		dev_err(&rcfw->pdev->dev,
648 			"QPLIB: CREQ BAR region %d resc start is 0!",
649 			rcfw->creq_bar_reg);
650 	rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
651 						   4);
652 	if (!rcfw->creq_bar_reg_iomem) {
653 		dev_err(&rcfw->pdev->dev,
654 			"QPLIB: CREQ BAR region %d mapping failed",
655 			rcfw->creq_bar_reg);
656 		return -ENOMEM;
657 	}
658 	rcfw->creq_qp_event_processed = 0;
659 	rcfw->creq_func_event_processed = 0;
660 
661 	rcfw->vector = msix_vector;
662 	if (aeq_handler)
663 		rcfw->aeq_handler = aeq_handler;
664 
665 	tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
666 		     (unsigned long)rcfw);
667 
668 	rcfw->requested = false;
669 	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
670 			 "bnxt_qplib_creq", rcfw);
671 	if (rc) {
672 		dev_err(&rcfw->pdev->dev,
673 			"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
674 		bnxt_qplib_disable_rcfw_channel(rcfw);
675 		return rc;
676 	}
677 	rcfw->requested = true;
678 
679 	init_waitqueue_head(&rcfw->waitq);
680 
681 	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
682 
683 	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
684 	init.cmdq_size_cmdq_lvl = cpu_to_le16(
685 		((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
686 		 CMDQ_INIT_CMDQ_SIZE_MASK) |
687 		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
688 		 CMDQ_INIT_CMDQ_LVL_MASK));
689 	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);
690 
691 	/* Write to the Bono mailbox register */
692 	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
693 	return 0;
694 }
695