xref: /linux/drivers/infiniband/hw/bng_re/bng_fw.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2025 Broadcom.
3 #include <linux/pci.h>
4 
5 #include "bng_roce_hsi.h"
6 #include "bng_res.h"
7 #include "bng_fw.h"
8 #include "bng_sp.h"
9 
10 /**
11  * bng_re_map_rc  -  map return type based on opcode
12  * @opcode:  roce slow path opcode
13  *
14  * case #1
15  * Firmware initiated error recovery is a safe state machine and
16  * driver can consider all the underlying rdma resources are free.
17  * In this state, it is safe to return success for opcodes related to
18  * destroying rdma resources (like destroy qp, destroy cq etc.).
19  *
20  * case #2
 * If the driver detects a potential firmware stall, the state machine is
 * not safe and the driver cannot assume that all the underlying rdma
 * resources have been freed.
24  * In this state, it is not safe to return success for opcodes related to
25  * destroying rdma resources (like destroy qp, destroy cq etc.).
26  *
27  * Scope of this helper function is only for case #1.
28  *
29  * Returns:
30  * 0 to communicate success to caller.
31  * Non zero error code to communicate failure to caller.
32  */
static int bng_re_map_rc(u8 opcode)
{
	/* Opcodes that tear down RDMA resources. During firmware initiated
	 * error recovery (case #1 in the kerneldoc above) these are safe to
	 * report as successful; everything else must fail.
	 */
	static const u8 destroy_opcodes[] = {
		CMDQ_BASE_OPCODE_DESTROY_QP,
		CMDQ_BASE_OPCODE_DESTROY_SRQ,
		CMDQ_BASE_OPCODE_DESTROY_CQ,
		CMDQ_BASE_OPCODE_DEALLOCATE_KEY,
		CMDQ_BASE_OPCODE_DEREGISTER_MR,
		CMDQ_BASE_OPCODE_DELETE_GID,
		CMDQ_BASE_OPCODE_DESTROY_QP1,
		CMDQ_BASE_OPCODE_DESTROY_AH,
		CMDQ_BASE_OPCODE_DEINITIALIZE_FW,
		CMDQ_BASE_OPCODE_MODIFY_ROCE_CC,
		CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE,
	};
	size_t nr_opcodes = sizeof(destroy_opcodes) / sizeof(destroy_opcodes[0]);
	size_t i;

	for (i = 0; i < nr_opcodes; i++) {
		if (opcode == destroy_opcodes[i])
			return 0;
	}

	return -ETIMEDOUT;
}
52 
bng_re_free_rcfw_channel(struct bng_re_rcfw * rcfw)53 void bng_re_free_rcfw_channel(struct bng_re_rcfw *rcfw)
54 {
55 	kfree(rcfw->crsqe_tbl);
56 	bng_re_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
57 	bng_re_free_hwq(rcfw->res, &rcfw->creq.hwq);
58 	rcfw->pdev = NULL;
59 }
60 
/**
 * bng_re_alloc_fw_channel - allocate the RCFW communication channel
 * @res:  device resources context
 * @rcfw: rcfw channel instance to populate
 *
 * Allocates the CREQ hardware queue (completion side), the CMDQ hardware
 * queue (command side) and the command-response tracking table, one entry
 * per CMDQ element.
 *
 * Returns: 0 on success, -ENOMEM if any allocation fails (the specific
 * hwq allocation error code is not propagated).
 */
int bng_re_alloc_fw_channel(struct bng_re_res *res,
			    struct bng_re_rcfw *rcfw)
{
	struct bng_re_hwq_attr hwq_attr = {};
	struct bng_re_sg_info sginfo = {};
	struct bng_re_cmdq_ctx *cmdq;
	struct bng_re_creq_ctx *creq;

	rcfw->pdev = res->pdev;
	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;
	rcfw->res = res;

	/* Both queues are described by page-sized scatter/gather entries. */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;

	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = rcfw->res;
	hwq_attr.depth = BNG_FW_CREQE_MAX_CNT;
	hwq_attr.stride = BNG_FW_CREQE_UNITS;
	hwq_attr.type = BNG_HWQ_TYPE_QUEUE;

	if (bng_re_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CREQ allocation failed\n");
		goto fail;
	}

	rcfw->cmdq_depth = BNG_FW_CMDQE_MAX_CNT;

	/* The CMDQ page size is derived from the queue depth; reuse the
	 * sginfo/hwq_attr set up for the CREQ and override the fields that
	 * differ.
	 */
	sginfo.pgsize = bng_fw_cmdqe_page_size(rcfw->cmdq_depth);
	/* Mask off bit 31 of the depth — presumably reserved as a flag in
	 * the depth encoding. TODO confirm against the hwq layer.
	 */
	hwq_attr.depth = rcfw->cmdq_depth & 0x7FFFFFFF;
	hwq_attr.stride = BNG_FW_CMDQE_UNITS;
	hwq_attr.type = BNG_HWQ_TYPE_CTX;
	if (bng_re_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CMDQ allocation failed\n");
		goto fail;
	}

	/* One zeroed tracking entry per CMDQ element; entries are indexed
	 * by the command cookie.
	 */
	rcfw->crsqe_tbl = kzalloc_objs(*rcfw->crsqe_tbl, cmdq->hwq.max_elements);
	if (!rcfw->crsqe_tbl)
		goto fail;

	/* Protects the crsqe table against concurrent cookie lookups. */
	spin_lock_init(&rcfw->tbl_lock);

	rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
	return 0;

fail:
	/* Frees whatever was allocated so far; the helpers tolerate
	 * not-yet-allocated resources.
	 */
	bng_re_free_rcfw_channel(rcfw);
	return -ENOMEM;
}
114 
/**
 * bng_re_process_qp_event - handle a QP-type CREQ entry
 * @rcfw:     rcfw channel instance of rdev
 * @qp_event: the CREQ entry, either an async QP error notification or a
 *            command response
 * @num_wait: incremented by the number of waiters that should be woken
 *
 * Called from the CREQ service tasklet with creq->hwq.lock held.
 * Command responses are matched to their submitter via the cookie and the
 * crsqe table, the response payload is copied out for a still-waiting
 * submitter, and the CMDQ consumer index is advanced.
 *
 * Returns: 0 (rc is currently never set to a non-zero value).
 */
static int bng_re_process_qp_event(struct bng_re_rcfw *rcfw,
				   struct creq_qp_event *qp_event,
				   u32 *num_wait)
{
	struct bng_re_hwq *hwq = &rcfw->cmdq.hwq;
	struct bng_re_crsqe *crsqe;
	u32 req_size;
	u16 cookie;
	bool is_waiter_alive;
	struct pci_dev *pdev;
	u32 wait_cmds = 0;
	int rc = 0;

	pdev = rcfw->pdev;
	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		/* Async error from firmware; only logged for now. */
		dev_err(&pdev->dev, "Received QP error notification\n");
		break;
	default:
		/*
		 * Command Response
		 * cmdq->lock needs to be acquired to synchronize
		 * the command send and completion reaping. This function
		 * is always called with creq->lock held. Using
		 * the nested variant of spin_lock.
		 *
		 */

		spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING);
		/* The cookie indexes the crsqe entry filled in by
		 * __send_message().
		 */
		cookie = le16_to_cpu(qp_event->cookie);
		cookie &= BNG_FW_MAX_COOKIE_VALUE;
		crsqe = &rcfw->crsqe_tbl[cookie];

		/* A late completion on a channel already declared stalled is
		 * only reported, not processed.
		 */
		if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
				       &rcfw->cmdq.flags),
		    "Unreponsive rcfw channel detected.!!")) {
			dev_info(&pdev->dev,
				 "rcfw timedout: cookie = %#x, free_slots = %d",
				 cookie, crsqe->free_slots);
			spin_unlock(&hwq->lock);
			return rc;
		}

		/* Copy the response out only if the submitter is still
		 * waiting for it.
		 */
		if (crsqe->is_waiter_alive) {
			if (crsqe->resp) {
				memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
				/* Insert write memory barrier to ensure that
				 * response data is copied before clearing the
				 * flags
				 */
				smp_wmb();
			}
		}

		/* Counted unconditionally; abandoned waiters are woken too. */
		wait_cmds++;

		req_size = crsqe->req_size;
		is_waiter_alive = crsqe->is_waiter_alive;

		/* Release the crsqe entry for reuse. */
		crsqe->req_size = 0;
		if (!is_waiter_alive)
			crsqe->resp = NULL;

		crsqe->is_in_used = false;

		/* Reclaim the CMDQ slots this command had consumed. */
		hwq->cons += req_size;

		spin_unlock(&hwq->lock);
	}
	*num_wait += wait_cmds;
	return rc;
}
187 
188 /* function events */
bng_re_process_func_event(struct bng_re_rcfw * rcfw,struct creq_func_event * func_event)189 static int bng_re_process_func_event(struct bng_re_rcfw *rcfw,
190 				     struct creq_func_event *func_event)
191 {
192 	switch (func_event->event) {
193 	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
194 	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
195 	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
196 	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
197 	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
198 	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
199 	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
200 	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
201 	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
202 	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
203 	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
204 	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
205 	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
206 		break;
207 	default:
208 		return -EINVAL;
209 	}
210 
211 	return 0;
212 }
213 
214 /* CREQ Completion handlers */
/* CREQ Completion handlers */
/**
 * bng_re_service_creq - CREQ service tasklet body
 * @t: tasklet handle embedded in the creq context
 *
 * Polls the CREQ for valid entries up to a fixed budget, dispatching QP
 * events (command responses) and function events, then rings the CREQ
 * doorbell to re-arm it and wakes any command submitters whose responses
 * arrived. Also invoked directly by __wait_for_resp() as a poll fallback.
 */
static void bng_re_service_creq(struct tasklet_struct *t)
{
	struct bng_re_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet);
	struct bng_re_creq_ctx *creq = &rcfw->creq;
	u32 type, budget = BNG_FW_CREQ_ENTRY_POLL_BUDGET;
	struct bng_re_hwq *hwq = &creq->hwq;
	struct creq_base *creqe;
	u32 num_wakeup = 0;
	u32 hw_polled = 0;

	/* Service the CREQ until budget is over */
	spin_lock_bh(&hwq->lock);
	while (budget > 0) {
		creqe = bng_re_get_qe(hwq, hwq->cons, NULL);
		if (!BNG_FW_CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bng_re_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe,
				 &num_wakeup);
			creq->stats.creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bng_re_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				creq->stats.creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "aeqe:%#x Not handled\n", type);
			break;
		default:
			/* HWRM async events are expected here and silently
			 * ignored; anything else is logged.
			 */
			if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
				dev_warn(&rcfw->pdev->dev,
					 "creqe with event 0x%x not handled\n",
					 type);
			break;
		}
		budget--;
		hw_polled++;
		bng_re_hwq_incr_cons(hwq->max_elements, &hwq->cons,
				     1, &creq->creq_db.dbinfo.flags);
	}

	/* Acknowledge the consumed entries and re-arm the CREQ. */
	if (hw_polled)
		bng_re_ring_nq_db(&creq->creq_db.dbinfo,
				  rcfw->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
	/* Wake exactly as many submitters as responses were reaped. */
	if (num_wakeup)
		wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
}
272 
/**
 * __send_message_basic_sanity - validate channel state before submission
 * @rcfw:   rcfw channel instance of rdev
 * @msg:    command message being submitted (state checks only; not read)
 * @opcode: the command opcode
 *
 * Returns: 0 when the command may be submitted, -ETIMEDOUT on a stalled
 * channel, -EINVAL on a duplicate INITIALIZE_FW, -EOPNOTSUPP for any
 * non-bootstrap opcode issued before firmware initialization.
 */
static int __send_message_basic_sanity(struct bng_re_rcfw *rcfw,
				       struct bng_re_cmdqmsg *msg,
				       u8 opcode)
{
	struct bng_re_cmdq_ctx *cmdq = &rcfw->cmdq;
	bool fw_initialized;

	/* A stalled channel cannot take any command at all. */
	if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
		return -ETIMEDOUT;

	fw_initialized = test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags);

	/* Firmware must not be initialized twice. */
	if (fw_initialized && opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "RCFW already initialized!");
		return -EINVAL;
	}

	/* Before initialization only the bootstrap opcodes are allowed. */
	if (!fw_initialized &&
	    opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	    opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	    opcode != CMDQ_BASE_OPCODE_QUERY_VERSION) {
		dev_err(&rcfw->pdev->dev,
			"RCFW not initialized, reject opcode 0x%x",
			opcode);
		return -EOPNOTSUPP;
	}

	return 0;
}
302 
/**
 * __send_message - copy a command into the CMDQ and ring the doorbell
 * @rcfw:   rcfw channel instance of rdev
 * @msg:    command message (request, optional response buffer, side buffer)
 * @opcode: the command opcode, recorded in the tracking entry
 *
 * Reserves CMDQ slots under the hwq lock, records the command in the
 * crsqe table keyed by cookie, copies the request into consecutive
 * 16-byte CMDQ elements, and notifies firmware via the mailbox doorbell.
 *
 * Returns: 0 on success, -EAGAIN if the CMDQ lacks free slots.
 */
static int __send_message(struct bng_re_rcfw *rcfw,
			  struct bng_re_cmdqmsg *msg, u8 opcode)
{
	u32 bsize, free_slots, required_slots;
	struct bng_re_cmdq_ctx *cmdq;
	struct bng_re_crsqe *crsqe;
	struct bng_fw_cmdqe *cmdqe;
	struct bng_re_hwq *hwq;
	u32 sw_prod, cmdq_prod;
	struct pci_dev *pdev;
	u16 cookie;
	u8 *preq;

	cmdq = &rcfw->cmdq;
	hwq = &cmdq->hwq;
	pdev = rcfw->pdev;

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_bh(&hwq->lock);
	required_slots = bng_re_get_cmd_slots(msg->req);
	free_slots = HWQ_FREE_SLOTS(hwq);
	/* The cookie is derived from the running sequence number and
	 * indexes the crsqe tracking table.
	 */
	cookie = cmdq->seq_num & BNG_FW_MAX_COOKIE_VALUE;
	crsqe = &rcfw->crsqe_tbl[cookie];

	if (required_slots >= free_slots) {
		dev_info_ratelimited(&pdev->dev,
				     "CMDQ is full req/free %d/%d!",
				     required_slots, free_slots);
		spin_unlock_bh(&hwq->lock);
		return -EAGAIN;
	}
	/* Stamp the cookie into the request so the completion can be
	 * matched back in bng_re_process_qp_event().
	 */
	__set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));

	bsize = bng_re_set_cmd_slots(msg->req);
	crsqe->free_slots = free_slots;
	crsqe->resp = (struct creq_qp_event *)msg->resp;
	crsqe->is_waiter_alive = true;
	crsqe->is_in_used = true;
	crsqe->opcode = opcode;

	crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
	/* If the command returns a large response, point firmware at the
	 * caller-supplied DMA side buffer.
	 */
	if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) {
		struct bng_re_rcfw_sbuf *sbuf = msg->sb;

		__set_cmdq_base_resp_addr(msg->req, msg->req_sz,
					  cpu_to_le64(sbuf->dma_addr));
		__set_cmdq_base_resp_size(msg->req, msg->req_sz,
					  ALIGN(sbuf->size,
						BNG_FW_CMDQE_UNITS) /
						BNG_FW_CMDQE_UNITS);
	}

	/* Copy the request into as many consecutive cmdqe as it needs. */
	preq = (u8 *)msg->req;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(hwq->prod, hwq);
		cmdqe = bng_re_get_qe(hwq, sw_prod, NULL);
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
		preq += min_t(u32, bsize, sizeof(*cmdqe));
		bsize -= min_t(u32, bsize, sizeof(*cmdqe));
		hwq->prod++;
	} while (bsize > 0);
	cmdq->seq_num++;

	cmdq_prod = hwq->prod & 0xFFFF;
	if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
	}
	/* ring CMDQ DB */
	/* Ensure the cmdqe writes are visible before the producer index
	 * and trigger reach the device.
	 */
	wmb();
	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
	writel(BNG_FW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
	spin_unlock_bh(&hwq->lock);
	/* The response is reaped from the CREQ by the service tasklet. */
	return 0;
}
389 
390 /**
391  * __wait_for_resp   -	Don't hold the cpu context and wait for response
392  * @rcfw:    rcfw channel instance of rdev
393  * @cookie:  cookie to track the command
394  *
395  * Wait for command completion in sleepable context.
396  *
397  * Returns:
398  * 0 if command is completed by firmware.
399  * Non zero error code for rest of the case.
400  */
static int __wait_for_resp(struct bng_re_rcfw *rcfw, u16 cookie)
{
	struct bng_re_cmdq_ctx *cmdq;
	struct bng_re_crsqe *crsqe;

	cmdq = &rcfw->cmdq;
	crsqe = &rcfw->crsqe_tbl[cookie];

	do {
		/* Sleep until the completion tasklet clears is_in_used or
		 * the per-command timeout elapses.
		 */
		wait_event_timeout(cmdq->waitq,
				   !crsqe->is_in_used,
				   secs_to_jiffies(rcfw->max_timeout));

		if (!crsqe->is_in_used)
			return 0;

		/* Timed out waiting for the wakeup: poll the CREQ directly
		 * in case the interrupt/tasklet path was missed.
		 */
		bng_re_service_creq(&rcfw->creq.creq_tasklet);

		if (!crsqe->is_in_used)
			return 0;
		/* NOTE(review): this loop retries forever, so the function
		 * currently never returns non-zero — confirm whether a
		 * device-detach/stall bailout is intended here.
		 */
	} while (true);
}
423 
424 /**
425  * bng_re_rcfw_send_message   -	interface to send
426  * and complete rcfw command.
427  * @rcfw:   rcfw channel instance of rdev
428  * @msg:    message to send
429  *
430  * This function does not account shadow queue depth. It will send
431  * all the command unconditionally as long as send queue is not full.
432  *
433  * Returns:
434  * 0 if command completed by firmware.
435  * Non zero if the command is not completed by firmware.
436  */
int bng_re_rcfw_send_message(struct bng_re_rcfw *rcfw,
			     struct bng_re_cmdqmsg *msg)
{
	/* The response event is written in place into msg->resp by the
	 * completion path (see bng_re_process_qp_event()).
	 */
	struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
	struct bng_re_crsqe *crsqe;
	u16 cookie;
	int rc;
	u8 opcode;

	opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);

	rc = __send_message_basic_sanity(rcfw, msg, opcode);
	/* NOTE(review): __send_message_basic_sanity() never returns -ENXIO
	 * today, so the bng_re_map_rc() branch looks unreachable — confirm
	 * against planned device-detach handling.
	 */
	if (rc)
		return rc == -ENXIO ? bng_re_map_rc(opcode) : rc;

	rc = __send_message(rcfw, msg, opcode);
	if (rc)
		return rc;

	/* Read back the cookie that __send_message() stamped into the
	 * request.
	 */
	cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req, msg->req_sz))
				& BNG_FW_MAX_COOKIE_VALUE;

	rc = __wait_for_resp(rcfw, cookie);

	/* NOTE(review): __wait_for_resp() currently loops until completion
	 * and never fails, so this timeout/stall path is dead code today —
	 * confirm the intended failure semantics.
	 */
	if (rc) {
		spin_lock_bh(&rcfw->cmdq.hwq.lock);
		/* Mark the waiter gone so a late completion is discarded. */
		crsqe = &rcfw->crsqe_tbl[cookie];
		crsqe->is_waiter_alive = false;
		if (rc == -ENODEV)
			set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
		spin_unlock_bh(&rcfw->cmdq.hwq.lock);
		return -ETIMEDOUT;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
			cookie, opcode, evnt->status);
		rc = -EIO;
	}

	return rc;
}
480 
bng_re_map_cmdq_mbox(struct bng_re_rcfw * rcfw)481 static int bng_re_map_cmdq_mbox(struct bng_re_rcfw *rcfw)
482 {
483 	struct bng_re_cmdq_mbox *mbox;
484 	resource_size_t bar_reg;
485 	struct pci_dev *pdev;
486 
487 	pdev = rcfw->pdev;
488 	mbox = &rcfw->cmdq.cmdq_mbox;
489 
490 	mbox->reg.bar_id = BNG_FW_COMM_PCI_BAR_REGION;
491 	mbox->reg.len = BNG_FW_COMM_SIZE;
492 	mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
493 	if (!mbox->reg.bar_base) {
494 		dev_err(&pdev->dev,
495 			"CMDQ BAR region %d resc start is 0!\n",
496 			mbox->reg.bar_id);
497 		return -ENOMEM;
498 	}
499 
500 	bar_reg = mbox->reg.bar_base + BNG_FW_COMM_BASE_OFFSET;
501 	mbox->reg.len = BNG_FW_COMM_SIZE;
502 	mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
503 	if (!mbox->reg.bar_reg) {
504 		dev_err(&pdev->dev,
505 			"CMDQ BAR region %d mapping failed\n",
506 			mbox->reg.bar_id);
507 		return -ENOMEM;
508 	}
509 
510 	mbox->prod = (void  __iomem *)(mbox->reg.bar_reg +
511 			BNG_FW_PF_VF_COMM_PROD_OFFSET);
512 	mbox->db = (void __iomem *)(mbox->reg.bar_reg + BNG_FW_COMM_TRIG_OFFSET);
513 	return 0;
514 }
515 
bng_re_creq_irq(int irq,void * dev_instance)516 static irqreturn_t bng_re_creq_irq(int irq, void *dev_instance)
517 {
518 	struct bng_re_rcfw *rcfw = dev_instance;
519 	struct bng_re_creq_ctx *creq;
520 	struct bng_re_hwq *hwq;
521 	u32 sw_cons;
522 
523 	creq = &rcfw->creq;
524 	hwq = &creq->hwq;
525 	/* Prefetch the CREQ element */
526 	sw_cons = HWQ_CMP(hwq->cons, hwq);
527 	bng_re_get_qe(hwq, sw_cons, NULL);
528 
529 	tasklet_schedule(&creq->creq_tasklet);
530 	return IRQ_HANDLED;
531 }
532 
/**
 * bng_re_rcfw_start_irq - set up the CREQ tasklet and MSI-X handler
 * @rcfw:        rcfw channel instance of rdev
 * @msix_vector: MSI-X vector to request
 * @need_init:   true to set up the tasklet for the first time, false to
 *               re-enable a previously disabled one
 *
 * Returns: 0 on success, -EFAULT if the handler is already registered,
 * -ENOMEM on allocation failure, or the request_irq() error code.
 */
int bng_re_rcfw_start_irq(struct bng_re_rcfw *rcfw, int msix_vector,
			  bool need_init)
{
	struct bng_re_creq_ctx *creq;
	struct bng_re_res *res;
	int rc;

	creq = &rcfw->creq;
	res = rcfw->res;

	if (creq->irq_handler_avail)
		return -EFAULT;

	creq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&creq->creq_tasklet, bng_re_service_creq);
	else
		tasklet_enable(&creq->creq_tasklet);

	creq->irq_name = kasprintf(GFP_KERNEL, "bng_re-creq@pci:%s",
				   pci_name(res->pdev));
	if (!creq->irq_name) {
		/* Keep the tasklet state balanced on failure, matching the
		 * request_irq() error path below.
		 */
		tasklet_disable(&creq->creq_tasklet);
		return -ENOMEM;
	}
	rc = request_irq(creq->msix_vec, bng_re_creq_irq, 0,
			 creq->irq_name, rcfw);
	if (rc) {
		kfree(creq->irq_name);
		creq->irq_name = NULL;
		tasklet_disable(&creq->creq_tasklet);
		return rc;
	}
	creq->irq_handler_avail = true;

	/* Arm the CREQ doorbell so firmware starts raising completions. */
	bng_re_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
	atomic_inc(&rcfw->rcfw_intr_enabled);

	return 0;
}
571 
/**
 * bng_re_map_creq_db - ioremap the CREQ doorbell register
 * @rcfw:     rcfw channel instance of rdev
 * @reg_offt: doorbell register offset within the BAR
 *
 * Returns: 0 on success, -ENOMEM if the mapping fails.
 */
static int bng_re_map_creq_db(struct bng_re_rcfw *rcfw, u32 reg_offt)
{
	struct bng_re_creq_db *creq_db;
	resource_size_t bar_reg;
	struct pci_dev *pdev;

	pdev = rcfw->pdev;
	creq_db = &rcfw->creq.creq_db;

	creq_db->dbinfo.flags = 0;
	creq_db->reg.bar_id = BNG_FW_COMM_CONS_PCI_BAR_REGION;
	creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
	/* Check the resource start address (bar_base), not the BAR index:
	 * the previous test of bar_id could never catch a missing BAR.
	 */
	if (!creq_db->reg.bar_base)
		dev_err(&pdev->dev,
			"CREQ BAR region %d resc start is 0!",
			creq_db->reg.bar_id);

	bar_reg = creq_db->reg.bar_base + reg_offt;

	creq_db->reg.len = BNG_FW_CREQ_DB_LEN;
	creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len);
	if (!creq_db->reg.bar_reg) {
		dev_err(&pdev->dev,
			"CREQ BAR region %d mapping failed",
			creq_db->reg.bar_id);
		return -ENOMEM;
	}
	creq_db->dbinfo.db = creq_db->reg.bar_reg;
	creq_db->dbinfo.hwq = &rcfw->creq.hwq;
	creq_db->dbinfo.xid = rcfw->creq.ring_id;
	return 0;
}
604 
/**
 * bng_re_rcfw_stop_irq - tear down the CREQ interrupt and tasklet
 * @rcfw: rcfw channel instance of rdev
 * @kill: true to also kill the tasklet (full teardown); false leaves it
 *        disabled so bng_re_rcfw_start_irq() can re-enable it later
 *
 * Safe to call when no handler is registered; it returns early then.
 */
void bng_re_rcfw_stop_irq(struct bng_re_rcfw *rcfw, bool kill)
{
	struct bng_re_creq_ctx *creq;

	creq = &rcfw->creq;

	if (!creq->irq_handler_avail)
		return;

	creq->irq_handler_avail = false;
	/* Mask h/w interrupts */
	bng_re_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
	/* Sync with last running IRQ-handler */
	synchronize_irq(creq->msix_vec);
	free_irq(creq->msix_vec, rcfw);
	kfree(creq->irq_name);
	creq->irq_name = NULL;
	atomic_set(&rcfw->rcfw_intr_enabled, 0);
	/* tasklet_kill() waits for a running tasklet to finish; the
	 * unconditional disable afterwards balances the enable done in
	 * bng_re_rcfw_start_irq().
	 */
	if (kill)
		tasklet_kill(&creq->creq_tasklet);
	tasklet_disable(&creq->creq_tasklet);
}
627 
bng_re_disable_rcfw_channel(struct bng_re_rcfw * rcfw)628 void bng_re_disable_rcfw_channel(struct bng_re_rcfw *rcfw)
629 {
630 	struct bng_re_creq_ctx *creq;
631 	struct bng_re_cmdq_ctx *cmdq;
632 
633 	creq = &rcfw->creq;
634 	cmdq = &rcfw->cmdq;
635 	/* Make sure the HW channel is stopped! */
636 	bng_re_rcfw_stop_irq(rcfw, true);
637 
638 	iounmap(cmdq->cmdq_mbox.reg.bar_reg);
639 	iounmap(creq->creq_db.reg.bar_reg);
640 
641 	cmdq->cmdq_mbox.reg.bar_reg = NULL;
642 	creq->creq_db.reg.bar_reg = NULL;
643 	creq->msix_vec = 0;
644 }
645 
/**
 * bng_re_start_rcfw - tell firmware where the CMDQ lives
 * @rcfw: rcfw channel instance of rdev
 *
 * Builds the cmdq_init descriptor (CMDQ PBL base address, encoded queue
 * size/level and the CREQ ring id) and writes it word-by-word into the
 * mapped mailbox register window.
 */
static void bng_re_start_rcfw(struct bng_re_rcfw *rcfw)
{
	struct bng_re_cmdq_ctx *cmdq;
	struct bng_re_creq_ctx *creq;
	struct bng_re_cmdq_mbox *mbox;
	struct cmdq_init init = {0};

	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;
	mbox = &cmdq->cmdq_mbox;

	/* Physical base of the CMDQ page-buffer list (level 0). */
	init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[BNG_PBL_LVL_0].pg_map_arr[0]);
	/* Pack queue depth and PBL indirection level into one 16-bit field. */
	init.cmdq_size_cmdq_lvl =
			cpu_to_le16(((rcfw->cmdq_depth <<
				      CMDQ_INIT_CMDQ_SIZE_SFT) &
				    CMDQ_INIT_CMDQ_SIZE_MASK) |
				    ((cmdq->hwq.level <<
				      CMDQ_INIT_CMDQ_LVL_SFT) &
				    CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(creq->ring_id);
	/* Write to the mailbox register */
	/* sizeof(init) / 4 = number of 32-bit words copied to the device. */
	__iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
}
669 
bng_re_enable_fw_channel(struct bng_re_rcfw * rcfw,int msix_vector,int cp_bar_reg_off)670 int bng_re_enable_fw_channel(struct bng_re_rcfw *rcfw,
671 			     int msix_vector,
672 			     int cp_bar_reg_off)
673 {
674 	struct bng_re_cmdq_ctx *cmdq;
675 	int rc;
676 
677 	cmdq = &rcfw->cmdq;
678 
679 	/* Assign defaults */
680 	cmdq->seq_num = 0;
681 	set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
682 	init_waitqueue_head(&cmdq->waitq);
683 
684 	rc = bng_re_map_cmdq_mbox(rcfw);
685 	if (rc)
686 		return rc;
687 
688 	rc = bng_re_map_creq_db(rcfw, cp_bar_reg_off);
689 	if (rc)
690 		return rc;
691 
692 	rc = bng_re_rcfw_start_irq(rcfw, msix_vector, true);
693 	if (rc) {
694 		dev_err(&rcfw->pdev->dev,
695 			"Failed to request IRQ for CREQ rc = 0x%x\n", rc);
696 		bng_re_disable_rcfw_channel(rcfw);
697 		return rc;
698 	}
699 
700 	bng_re_start_rcfw(rcfw);
701 	return 0;
702 }
703 
bng_re_deinit_rcfw(struct bng_re_rcfw * rcfw)704 int bng_re_deinit_rcfw(struct bng_re_rcfw *rcfw)
705 {
706 	struct creq_deinitialize_fw_resp resp = {};
707 	struct cmdq_deinitialize_fw req = {};
708 	struct bng_re_cmdqmsg msg = {};
709 	int rc;
710 
711 	bng_re_rcfw_cmd_prep((struct cmdq_base *)&req,
712 			     CMDQ_BASE_OPCODE_DEINITIALIZE_FW,
713 			     sizeof(req));
714 	bng_re_fill_cmdqmsg(&msg, &req, &resp, NULL,
715 			    sizeof(req), sizeof(resp), 0);
716 	rc = bng_re_rcfw_send_message(rcfw, &msg);
717 	if (rc)
718 		return rc;
719 
720 	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
721 	return 0;
722 }
_is_hw_retx_supported(u16 dev_cap_flags)723 static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
724 {
725 	return dev_cap_flags &
726 		(CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
727 		 CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
728 }
729 
730 #define BNG_RE_HW_RETX(a) _is_hw_retx_supported((a))
_is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)731 static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
732 {
733 	return dev_cap_ext_flags2 &
734 	       CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
735 }
736 
/**
 * bng_re_init_rcfw - issue INITIALIZE_FW to firmware
 * @rcfw:      rcfw channel instance of rdev
 * @stats_ctx: statistics context whose firmware id is passed to firmware
 *
 * Advertises doorbell page size and the optional capabilities (HW
 * retransmission, optimized modify-QP) the device supports, then marks
 * the channel initialized on success.
 *
 * Returns: 0 on success, or the send-message error code.
 */
int bng_re_init_rcfw(struct bng_re_rcfw *rcfw,
		     struct bng_re_stats *stats_ctx)
{
	struct creq_initialize_fw_resp resp = {};
	struct cmdq_initialize_fw req = {};
	struct bng_re_cmdqmsg msg = {};
	int rc;
	u16 flags = 0;

	bng_re_rcfw_cmd_prep((struct cmdq_base *)&req,
			     CMDQ_BASE_OPCODE_INITIALIZE_FW,
			     sizeof(req));
	/* Supply (log-base-2-of-host-page-size - base-page-shift)
	 * to bono to adjust the doorbell page sizes.
	 */
	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
					   BNG_FW_DBR_BASE_PAGE_SHIFT);
	/* Opt into capabilities the device reported via QUERY_FUNC. */
	if (BNG_RE_HW_RETX(rcfw->res->dattr->dev_cap_flags))
		flags |= CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED;
	if (_is_optimize_modify_qp_supported(rcfw->res->dattr->dev_cap_flags2))
		flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED;
	req.flags |= cpu_to_le16(flags);
	req.stat_ctx_id = cpu_to_le32(stats_ctx->fw_id);
	bng_re_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bng_re_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	/* From here on, non-bootstrap opcodes pass the sanity check. */
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
	return 0;
}
767