// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.
#include <linux/pci.h>

#include "roce_hsi.h"
#include "bng_res.h"
#include "bng_fw.h"
#include "bng_sp.h"

/**
 * bng_re_map_rc  -  map return type based on opcode
 * @opcode:  roce slow path opcode
 *
 * case #1
 * Firmware-initiated error recovery is a safe state machine, and the
 * driver can consider all the underlying rdma resources to be freed.
 * In this state, it is safe to return success for opcodes related to
 * destroying rdma resources (like destroy qp, destroy cq etc.).
 *
 * case #2
 * If the driver detects a potential firmware stall, the state machine
 * is not safe, and the driver cannot assume that all the underlying
 * rdma resources have been freed.
 * In this state, it is not safe to return success for opcodes related to
 * destroying rdma resources (like destroy qp, destroy cq etc.).
 *
 * Scope of this helper function is only for case #1.
 *
 * Returns:
 * 0 to communicate success to the caller.
 * A non-zero error code to communicate failure to the caller.
 */
static int bng_re_map_rc(u8 opcode)
{
	switch (opcode) {
	case CMDQ_BASE_OPCODE_DESTROY_QP:
	case CMDQ_BASE_OPCODE_DESTROY_SRQ:
	case CMDQ_BASE_OPCODE_DESTROY_CQ:
	case CMDQ_BASE_OPCODE_DEALLOCATE_KEY:
	case CMDQ_BASE_OPCODE_DEREGISTER_MR:
	case CMDQ_BASE_OPCODE_DELETE_GID:
	case CMDQ_BASE_OPCODE_DESTROY_QP1:
	case CMDQ_BASE_OPCODE_DESTROY_AH:
	case CMDQ_BASE_OPCODE_DEINITIALIZE_FW:
	case CMDQ_BASE_OPCODE_MODIFY_ROCE_CC:
	case CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE:
		return 0;
	default:
		return -ETIMEDOUT;
	}
}

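/*
 * Illustrative sketch (not compiled into the driver): the mapping in
 * action during firmware-initiated error recovery. Destroy-class opcodes
 * report success while anything else keeps failing, matching case #1
 * above. CMDQ_BASE_OPCODE_CREATE_QP is assumed to exist in roce_hsi.h.
 */
#if 0
	WARN_ON(bng_re_map_rc(CMDQ_BASE_OPCODE_DESTROY_QP) != 0);
	WARN_ON(bng_re_map_rc(CMDQ_BASE_OPCODE_CREATE_QP) != -ETIMEDOUT);
#endif
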
void bng_re_free_rcfw_channel(struct bng_re_rcfw *rcfw)
{
	kfree(rcfw->crsqe_tbl);
	bng_re_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
	bng_re_free_hwq(rcfw->res, &rcfw->creq.hwq);
	rcfw->pdev = NULL;
}

int bng_re_alloc_fw_channel(struct bng_re_res *res,
			    struct bng_re_rcfw *rcfw)
{
	struct bng_re_hwq_attr hwq_attr = {};
	struct bng_re_sg_info sginfo = {};
	struct bng_re_cmdq_ctx *cmdq;
	struct bng_re_creq_ctx *creq;

	rcfw->pdev = res->pdev;
	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;
	rcfw->res = res;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;

	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = rcfw->res;
	hwq_attr.depth = BNG_FW_CREQE_MAX_CNT;
	hwq_attr.stride = BNG_FW_CREQE_UNITS;
	hwq_attr.type = BNG_HWQ_TYPE_QUEUE;

	if (bng_re_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CREQ allocation failed\n");
		goto fail;
	}

	rcfw->cmdq_depth = BNG_FW_CMDQE_MAX_CNT;

	sginfo.pgsize = bng_fw_cmdqe_page_size(rcfw->cmdq_depth);
	hwq_attr.depth = rcfw->cmdq_depth & 0x7FFFFFFF;
	hwq_attr.stride = BNG_FW_CMDQE_UNITS;
	hwq_attr.type = BNG_HWQ_TYPE_CTX;
	if (bng_re_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CMDQ allocation failed\n");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	spin_lock_init(&rcfw->tbl_lock);

	rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
	return 0;

fail:
	bng_re_free_rcfw_channel(rcfw);
	return -ENOMEM;
}

static int bng_re_process_qp_event(struct bng_re_rcfw *rcfw,
				   struct creq_qp_event *qp_event,
				   u32 *num_wait)
{
	struct bng_re_hwq *hwq = &rcfw->cmdq.hwq;
	struct bng_re_crsqe *crsqe;
	u32 req_size;
	u16 cookie;
	bool is_waiter_alive;
	struct pci_dev *pdev;
	u32 wait_cmds = 0;
	int rc = 0;

	pdev = rcfw->pdev;
	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		dev_err(&pdev->dev, "Received QP error notification\n");
		break;
	default:
		/*
		 * Command Response
		 * cmdq->lock needs to be acquired to synchronize
		 * the command send and completion reaping. This function
		 * is always called with creq->lock held. Using
		 * the nested variant of spin_lock.
		 */

		spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING);
		cookie = le16_to_cpu(qp_event->cookie);
		cookie &= BNG_FW_MAX_COOKIE_VALUE;
		crsqe = &rcfw->crsqe_tbl[cookie];

		if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
				       &rcfw->cmdq.flags),
		    "Unresponsive rcfw channel detected!")) {
			dev_info(&pdev->dev,
				 "rcfw timed out: cookie = %#x, free_slots = %d",
				 cookie, crsqe->free_slots);
			spin_unlock(&hwq->lock);
			return rc;
		}

		if (crsqe->is_waiter_alive) {
			if (crsqe->resp) {
				memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
				/* Insert write memory barrier to ensure that
				 * response data is copied before clearing the
				 * flags
				 */
				smp_wmb();
			}
		}

		wait_cmds++;

		req_size = crsqe->req_size;
		is_waiter_alive = crsqe->is_waiter_alive;

		crsqe->req_size = 0;
		if (!is_waiter_alive)
			crsqe->resp = NULL;

		crsqe->is_in_used = false;

		hwq->cons += req_size;

		spin_unlock(&hwq->lock);
	}
	*num_wait += wait_cmds;
	return rc;
}

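/*
 * Illustrative sketch (not compiled): the lock nesting used above. The
 * CREQ service path already holds the CREQ's hwq->lock (taken with
 * spin_lock_bh() in bng_re_service_creq()) when it reaps a command
 * completion, so the CMDQ's hwq->lock, which is the same lock class, is
 * taken with SINGLE_DEPTH_NESTING to keep lockdep happy.
 */
#if 0
	spin_lock_bh(&creq->hwq.lock);
	spin_lock_nested(&cmdq->hwq.lock, SINGLE_DEPTH_NESTING);
	/* ... reap one command completion ... */
	spin_unlock(&cmdq->hwq.lock);
	spin_unlock_bh(&creq->hwq.lock);
#endif
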
/* function events */
static int bng_re_process_func_event(struct bng_re_rcfw *rcfw,
				     struct creq_func_event *func_event)
{
	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* CREQ Completion handlers */
static void bng_re_service_creq(struct tasklet_struct *t)
{
	struct bng_re_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet);
	struct bng_re_creq_ctx *creq = &rcfw->creq;
	u32 type, budget = BNG_FW_CREQ_ENTRY_POLL_BUDGET;
	struct bng_re_hwq *hwq = &creq->hwq;
	struct creq_base *creqe;
	u32 num_wakeup = 0;
	u32 hw_polled = 0;

	/* Service the CREQ until budget is over */
	spin_lock_bh(&hwq->lock);
	while (budget > 0) {
		creqe = bng_re_get_qe(hwq, hwq->cons, NULL);
		if (!BNG_FW_CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bng_re_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe,
				 &num_wakeup);
			creq->stats.creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bng_re_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				creq->stats.creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "aeqe:%#x Not handled\n", type);
			break;
		default:
			if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
				dev_warn(&rcfw->pdev->dev,
					 "creqe with event 0x%x not handled\n",
					 type);
			break;
		}
		budget--;
		hw_polled++;
		bng_re_hwq_incr_cons(hwq->max_elements, &hwq->cons,
				     1, &creq->creq_db.dbinfo.flags);
	}

	if (hw_polled)
		bng_re_ring_nq_db(&creq->creq_db.dbinfo,
				  rcfw->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
	if (num_wakeup)
		wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
}

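/*
 * Illustrative sketch (not compiled): the general shape of a toggle-valid
 * test such as BNG_FW_CREQ_CMP_VALID(). The consumer keeps an epoch bit in
 * dbinfo.flags that bng_re_hwq_incr_cons() flips on every ring wrap; an
 * entry is fresh only while its hardware valid bit matches the current
 * epoch. The names below are assumptions modelled on this scheme, not the
 * macro's actual definition.
 */
#if 0
#define EXAMPLE_CREQ_CMP_VALID(hdr, flags)				\
	(!!((hdr)->v & CREQ_BASE_V) ==					\
	 !!((flags) & EXAMPLE_FLAG_EPOCH_CONS))
#endif
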
static int __send_message_basic_sanity(struct bng_re_rcfw *rcfw,
				       struct bng_re_cmdqmsg *msg,
				       u8 opcode)
{
	struct bng_re_cmdq_ctx *cmdq;

	cmdq = &rcfw->cmdq;

	if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
		return -ETIMEDOUT;

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "RCFW already initialized!");
		return -EINVAL;
	}

	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&rcfw->pdev->dev,
			"RCFW not initialized, reject opcode 0x%x",
			opcode);
		return -EOPNOTSUPP;
	}

	return 0;
}

static int __send_message(struct bng_re_rcfw *rcfw,
			  struct bng_re_cmdqmsg *msg, u8 opcode)
{
	u32 bsize, free_slots, required_slots;
	struct bng_re_cmdq_ctx *cmdq;
	struct bng_re_crsqe *crsqe;
	struct bng_fw_cmdqe *cmdqe;
	struct bng_re_hwq *hwq;
	u32 sw_prod, cmdq_prod;
	struct pci_dev *pdev;
	u16 cookie;
	u8 *preq;

	cmdq = &rcfw->cmdq;
	hwq = &cmdq->hwq;
	pdev = rcfw->pdev;

	/* The CMDQ is in 16-byte units; each request can consume one or
	 * more cmdqe
	 */
	spin_lock_bh(&hwq->lock);
	required_slots = bng_re_get_cmd_slots(msg->req);
	free_slots = HWQ_FREE_SLOTS(hwq);
	cookie = cmdq->seq_num & BNG_FW_MAX_COOKIE_VALUE;
	crsqe = &rcfw->crsqe_tbl[cookie];

	if (required_slots >= free_slots) {
		dev_info_ratelimited(&pdev->dev,
				     "CMDQ is full req/free %d/%d!",
				     required_slots, free_slots);
		spin_unlock_bh(&hwq->lock);
		return -EAGAIN;
	}
	__set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));

	bsize = bng_re_set_cmd_slots(msg->req);
	crsqe->free_slots = free_slots;
	crsqe->resp = (struct creq_qp_event *)msg->resp;
	crsqe->is_waiter_alive = true;
	crsqe->is_in_used = true;
	crsqe->opcode = opcode;

	crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
	if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) {
		struct bng_re_rcfw_sbuf *sbuf = msg->sb;

		__set_cmdq_base_resp_addr(msg->req, msg->req_sz,
					  cpu_to_le64(sbuf->dma_addr));
		__set_cmdq_base_resp_size(msg->req, msg->req_sz,
					  ALIGN(sbuf->size,
						BNG_FW_CMDQE_UNITS) /
						BNG_FW_CMDQE_UNITS);
	}

	preq = (u8 *)msg->req;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(hwq->prod, hwq);
		cmdqe = bng_re_get_qe(hwq, sw_prod, NULL);
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
		preq += min_t(u32, bsize, sizeof(*cmdqe));
		bsize -= min_t(u32, bsize, sizeof(*cmdqe));
		hwq->prod++;
	} while (bsize > 0);
	cmdq->seq_num++;

	cmdq_prod = hwq->prod & 0xFFFF;
	if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
	}
	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
	writel(BNG_FW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
	spin_unlock_bh(&hwq->lock);
	/* The response is reaped later from the CREQ */
	return 0;
}

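/*
 * Illustrative sketch (not compiled): slot consumption in the copy loop
 * above. Assuming the 16-byte cmdqe unit noted in the comment, a 64-byte
 * request occupies four consecutive slots; the completion path later
 * advances hwq->cons by the same req_size.
 */
#if 0
	u32 bsize = 64;		/* total request size in bytes */
	u32 slots = 0;

	do {
		bsize -= min_t(u32, bsize, sizeof(struct bng_fw_cmdqe));
		slots++;
	} while (bsize > 0);
	/* slots == 4 for a 64-byte request and a 16-byte cmdqe */
#endif
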
/**
 * __wait_for_resp   -	wait for the response without holding the CPU
 * @rcfw:    rcfw channel instance of rdev
 * @cookie:  cookie to track the command
 *
 * Wait for command completion in sleepable context.
 *
 * Returns:
 * 0 if the command is completed by firmware.
 * A non-zero error code otherwise.
 */
static int __wait_for_resp(struct bng_re_rcfw *rcfw, u16 cookie)
{
	struct bng_re_cmdq_ctx *cmdq;
	struct bng_re_crsqe *crsqe;

	cmdq = &rcfw->cmdq;
	crsqe = &rcfw->crsqe_tbl[cookie];

	do {
		wait_event_timeout(cmdq->waitq,
				   !crsqe->is_in_used,
				   secs_to_jiffies(rcfw->max_timeout));

		if (!crsqe->is_in_used)
			return 0;

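		/*
		 * Timed out without seeing the completion: poll the CREQ
		 * directly in case the event arrived but the interrupt or
		 * tasklet was missed.
		 */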
		bng_re_service_creq(&rcfw->creq.creq_tasklet);

		if (!crsqe->is_in_used)
			return 0;
	} while (true);
}

/**
 * bng_re_rcfw_send_message   -	interface to send
 * and complete an rcfw command.
 * @rcfw:   rcfw channel instance of rdev
 * @msg:    message to send
 *
 * This function does not account for the shadow queue depth. It will
 * send every command unconditionally as long as the send queue is not
 * full.
 *
 * Returns:
 * 0 if the command is completed by firmware.
 * Non-zero if the command is not completed by firmware.
 */
int bng_re_rcfw_send_message(struct bng_re_rcfw *rcfw,
			     struct bng_re_cmdqmsg *msg)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
	struct bng_re_crsqe *crsqe;
	u16 cookie;
	int rc;
	u8 opcode;

	opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);

	rc = __send_message_basic_sanity(rcfw, msg, opcode);
	if (rc)
		return rc == -ENXIO ? bng_re_map_rc(opcode) : rc;

	rc = __send_message(rcfw, msg, opcode);
	if (rc)
		return rc;

	cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req, msg->req_sz))
				& BNG_FW_MAX_COOKIE_VALUE;

	rc = __wait_for_resp(rcfw, cookie);

	if (rc) {
		spin_lock_bh(&rcfw->cmdq.hwq.lock);
		crsqe = &rcfw->crsqe_tbl[cookie];
		crsqe->is_waiter_alive = false;
		if (rc == -ENODEV)
			set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
		spin_unlock_bh(&rcfw->cmdq.hwq.lock);
		return -ETIMEDOUT;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
			cookie, opcode, evnt->status);
		rc = -EIO;
	}

	return rc;
}

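/*
 * Illustrative sketch (not compiled): issuing a slow-path command that
 * returns data through a side buffer. The query opcode, request/response
 * structures and the sbuf's ->sb member are assumptions modelled on the
 * surrounding code (see bng_re_deinit_rcfw() below for a real,
 * buffer-less caller); only bng_re_rcfw_cmd_prep(), bng_re_fill_cmdqmsg()
 * and bng_re_rcfw_send_message() are taken from this file.
 */
#if 0
static int bng_re_example_query(struct bng_re_rcfw *rcfw)
{
	struct creq_query_func_resp resp = {};
	struct bng_re_rcfw_sbuf sbuf = {};
	struct cmdq_query_func req = {};
	struct bng_re_cmdqmsg msg = {};
	int rc;

	bng_re_rcfw_cmd_prep((struct cmdq_base *)&req,
			     CMDQ_BASE_OPCODE_QUERY_FUNC, sizeof(req));

	/* DMA-able buffer the FW writes the extended response into */
	sbuf.size = ALIGN(sizeof(struct creq_query_func_resp_sb),
			  BNG_FW_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;

	bng_re_fill_cmdqmsg(&msg, &req, &resp, &sbuf,
			    sizeof(req), sizeof(resp), 0);
	rc = bng_re_rcfw_send_message(rcfw, &msg);

	dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb,
			  sbuf.dma_addr);
	return rc;
}
#endif
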
static int bng_re_map_cmdq_mbox(struct bng_re_rcfw *rcfw)
{
	struct bng_re_cmdq_mbox *mbox;
	resource_size_t bar_reg;
	struct pci_dev *pdev;

	pdev = rcfw->pdev;
	mbox = &rcfw->cmdq.cmdq_mbox;

	mbox->reg.bar_id = BNG_FW_COMM_PCI_BAR_REGION;
	mbox->reg.len = BNG_FW_COMM_SIZE;
	mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
	if (!mbox->reg.bar_base) {
		dev_err(&pdev->dev,
			"CMDQ BAR region %d resc start is 0!\n",
			mbox->reg.bar_id);
		return -ENOMEM;
	}

	bar_reg = mbox->reg.bar_base + BNG_FW_COMM_BASE_OFFSET;
	mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
	if (!mbox->reg.bar_reg) {
		dev_err(&pdev->dev,
			"CMDQ BAR region %d mapping failed\n",
			mbox->reg.bar_id);
		return -ENOMEM;
	}

	mbox->prod = (void __iomem *)(mbox->reg.bar_reg +
			BNG_FW_PF_VF_COMM_PROD_OFFSET);
	mbox->db = (void __iomem *)(mbox->reg.bar_reg + BNG_FW_COMM_TRIG_OFFSET);
	return 0;
}

static irqreturn_t bng_re_creq_irq(int irq, void *dev_instance)
{
	struct bng_re_rcfw *rcfw = dev_instance;
	struct bng_re_creq_ctx *creq;
	struct bng_re_hwq *hwq;
	u32 sw_cons;

	creq = &rcfw->creq;
	hwq = &creq->hwq;
	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	bng_re_get_qe(hwq, sw_cons, NULL);

	tasklet_schedule(&creq->creq_tasklet);
	return IRQ_HANDLED;
}

int bng_re_rcfw_start_irq(struct bng_re_rcfw *rcfw, int msix_vector,
			  bool need_init)
{
	struct bng_re_creq_ctx *creq;
	struct bng_re_res *res;
	int rc;

	creq = &rcfw->creq;
	res = rcfw->res;

	if (creq->irq_handler_avail)
		return -EFAULT;

	creq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&creq->creq_tasklet, bng_re_service_creq);
	else
		tasklet_enable(&creq->creq_tasklet);

	creq->irq_name = kasprintf(GFP_KERNEL, "bng_re-creq@pci:%s",
				   pci_name(res->pdev));
	if (!creq->irq_name)
		return -ENOMEM;
	rc = request_irq(creq->msix_vec, bng_re_creq_irq, 0,
			 creq->irq_name, rcfw);
	if (rc) {
		kfree(creq->irq_name);
		creq->irq_name = NULL;
		tasklet_disable(&creq->creq_tasklet);
		return rc;
	}
	creq->irq_handler_avail = true;

	bng_re_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
	atomic_inc(&rcfw->rcfw_intr_enabled);

	return 0;
}

static int bng_re_map_creq_db(struct bng_re_rcfw *rcfw, u32 reg_offt)
{
	struct bng_re_creq_db *creq_db;
	resource_size_t bar_reg;
	struct pci_dev *pdev;

	pdev = rcfw->pdev;
	creq_db = &rcfw->creq.creq_db;

	creq_db->dbinfo.flags = 0;
	creq_db->reg.bar_id = BNG_FW_COMM_CONS_PCI_BAR_REGION;
	creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
	if (!creq_db->reg.bar_base)
		dev_err(&pdev->dev,
			"CREQ BAR region %d resc start is 0!",
			creq_db->reg.bar_id);

	bar_reg = creq_db->reg.bar_base + reg_offt;

	creq_db->reg.len = BNG_FW_CREQ_DB_LEN;
	creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len);
	if (!creq_db->reg.bar_reg) {
		dev_err(&pdev->dev,
			"CREQ BAR region %d mapping failed",
			creq_db->reg.bar_id);
		return -ENOMEM;
	}
	creq_db->dbinfo.db = creq_db->reg.bar_reg;
	creq_db->dbinfo.hwq = &rcfw->creq.hwq;
	creq_db->dbinfo.xid = rcfw->creq.ring_id;
	return 0;
}

void bng_re_rcfw_stop_irq(struct bng_re_rcfw *rcfw, bool kill)
{
	struct bng_re_creq_ctx *creq;

	creq = &rcfw->creq;

	if (!creq->irq_handler_avail)
		return;

	creq->irq_handler_avail = false;
	/* Mask h/w interrupts */
	bng_re_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
	/* Sync with last running IRQ-handler */
	synchronize_irq(creq->msix_vec);
	free_irq(creq->msix_vec, rcfw);
	kfree(creq->irq_name);
	creq->irq_name = NULL;
	atomic_set(&rcfw->rcfw_intr_enabled, 0);
	if (kill)
		tasklet_kill(&creq->creq_tasklet);
	tasklet_disable(&creq->creq_tasklet);
}

void bng_re_disable_rcfw_channel(struct bng_re_rcfw *rcfw)
{
	struct bng_re_creq_ctx *creq;
	struct bng_re_cmdq_ctx *cmdq;

	creq = &rcfw->creq;
	cmdq = &rcfw->cmdq;
	/* Make sure the HW channel is stopped! */
	bng_re_rcfw_stop_irq(rcfw, true);

	iounmap(cmdq->cmdq_mbox.reg.bar_reg);
	iounmap(creq->creq_db.reg.bar_reg);

	cmdq->cmdq_mbox.reg.bar_reg = NULL;
	creq->creq_db.reg.bar_reg = NULL;
	creq->msix_vec = 0;
}

static void bng_re_start_rcfw(struct bng_re_rcfw *rcfw)
{
	struct bng_re_cmdq_ctx *cmdq;
	struct bng_re_creq_ctx *creq;
	struct bng_re_cmdq_mbox *mbox;
	struct cmdq_init init = {0};

	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;
	mbox = &cmdq->cmdq_mbox;

	init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[BNG_PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl =
			cpu_to_le16(((rcfw->cmdq_depth <<
				      CMDQ_INIT_CMDQ_SIZE_SFT) &
				    CMDQ_INIT_CMDQ_SIZE_MASK) |
				    ((cmdq->hwq.level <<
				      CMDQ_INIT_CMDQ_LVL_SFT) &
				    CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(creq->ring_id);
	/* Write to the mailbox register */
	__iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
}

int bng_re_enable_fw_channel(struct bng_re_rcfw *rcfw,
			     int msix_vector,
			     int cp_bar_reg_off)
{
	struct bng_re_cmdq_ctx *cmdq;
	int rc;

	cmdq = &rcfw->cmdq;

	/* Assign defaults */
	cmdq->seq_num = 0;
	set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
	init_waitqueue_head(&cmdq->waitq);

	rc = bng_re_map_cmdq_mbox(rcfw);
	if (rc)
		return rc;

	rc = bng_re_map_creq_db(rcfw, cp_bar_reg_off);
	if (rc)
		return rc;

	rc = bng_re_rcfw_start_irq(rcfw, msix_vector, true);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"Failed to request IRQ for CREQ rc = 0x%x\n", rc);
		bng_re_disable_rcfw_channel(rcfw);
		return rc;
	}

	bng_re_start_rcfw(rcfw);
	return 0;
}

int bng_re_deinit_rcfw(struct bng_re_rcfw *rcfw)
{
	struct creq_deinitialize_fw_resp resp = {};
	struct cmdq_deinitialize_fw req = {};
	struct bng_re_cmdqmsg msg = {};
	int rc;

	bng_re_rcfw_cmd_prep((struct cmdq_base *)&req,
			     CMDQ_BASE_OPCODE_DEINITIALIZE_FW,
			     sizeof(req));
	bng_re_fill_cmdqmsg(&msg, &req, &resp, NULL,
			    sizeof(req), sizeof(resp), 0);
	rc = bng_re_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
	return 0;
}

static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
{
	return dev_cap_flags &
		(CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
		 CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
}

#define BNG_RE_HW_RETX(a) _is_hw_retx_supported((a))

static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
{
	return dev_cap_ext_flags2 &
	       CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
}

int bng_re_init_rcfw(struct bng_re_rcfw *rcfw,
		     struct bng_re_stats *stats_ctx)
{
	struct creq_initialize_fw_resp resp = {};
	struct cmdq_initialize_fw req = {};
	struct bng_re_cmdqmsg msg = {};
	int rc;
	u16 flags = 0;

	bng_re_rcfw_cmd_prep((struct cmdq_base *)&req,
			     CMDQ_BASE_OPCODE_INITIALIZE_FW,
			     sizeof(req));
	/* Supply (log-base-2-of-host-page-size - base-page-shift)
	 * to bono to adjust the doorbell page sizes.
	 */
	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
					   BNG_FW_DBR_BASE_PAGE_SHIFT);
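	/*
	 * Worked example (illustrative; assumes a base page shift of 12):
	 * with 4 KiB host pages (PAGE_SHIFT == 12) this field is 0, while
	 * with 64 KiB host pages (PAGE_SHIFT == 16) it is 4, telling the
	 * FW to scale its doorbell pages by 2^4.
	 */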
	if (BNG_RE_HW_RETX(rcfw->res->dattr->dev_cap_flags))
		flags |= CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED;
	if (_is_optimize_modify_qp_supported(rcfw->res->dattr->dev_cap_flags2))
		flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED;
	req.flags |= cpu_to_le16(flags);
	req.stat_ctx_id = cpu_to_le32(stats_ctx->fw_id);
	bng_re_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bng_re_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
	return 0;
}
768