Lines Matching refs:rcfw
101 * @rcfw: rcfw channel instance of rdev
104 * If firmware has not responded to any rcfw command within
105 * rcfw->max_timeout, consider the firmware stalled.
111 static int bnxt_re_is_fw_stalled(struct bnxt_qplib_rcfw *rcfw,
117 crsqe = &rcfw->crsqe_tbl[cookie];
118 cmdq = &rcfw->cmdq;
121 (rcfw->max_timeout * HZ))) {
122 dev_warn_ratelimited(&rcfw->pdev->dev,
126 rcfw->max_timeout * 1000,
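The stall heuristic above keys off the time since firmware last answered anything, not just the current command. A minimal userspace sketch of the same idea, assuming a last_seen timestamp that the completion path refreshes on every firmware response (all names here are illustrative stand-ins, not the driver's):

    #include <stdio.h>
    #include <time.h>

    /* Illustrative stand-ins: last_seen mirrors cmdq->last_seen (jiffies
     * in the driver), max_timeout mirrors rcfw->max_timeout (seconds). */
    struct fw_channel {
        time_t last_seen;
        unsigned int max_timeout;
    };

    /* Declare the firmware stalled only once it has been silent on ALL
     * commands for longer than max_timeout, as bnxt_re_is_fw_stalled()
     * does; otherwise tell the caller to keep waiting. */
    static int fw_is_stalled(const struct fw_channel *ch)
    {
        if (time(NULL) - ch->last_seen > (time_t)ch->max_timeout) {
            fprintf(stderr, "firmware stalled: silent for over %u s\n",
                    ch->max_timeout);
            return -1;
        }
        return 0; /* not stalled yet: retry the wait */
    }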
136 * @rcfw: rcfw channel instance of rdev
145 static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
151 cmdq = &rcfw->cmdq;
152 crsqe = &rcfw->crsqe_tbl[cookie];
163 secs_to_jiffies(rcfw->max_timeout));
168 bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
173 ret = bnxt_re_is_fw_stalled(rcfw, cookie);
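In interrupt mode the sender sleeps and the CREQ handler wakes it; on each timeout the driver services the CREQ once more before escalating to the stall check. A hedged pthread sketch of that wait loop (the slot layout and names are assumptions, not the driver's types):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    /* Hypothetical analogue of one crsqe slot: the completion path sets
     * 'done' and signals, much as bnxt_qplib_service_creq() wakes waiters. */
    struct cmd_slot {
        pthread_mutex_t lock;
        pthread_cond_t wait;
        bool done;
    };

    /* Sleep up to max_timeout seconds per round; on timeout, re-check the
     * slot once more and hand control back for the stall check, mirroring
     * the shape of __wait_for_resp(). */
    static int wait_for_resp(struct cmd_slot *slot, unsigned int max_timeout)
    {
        struct timespec ts;
        int rc = 0;

        pthread_mutex_lock(&slot->lock);
        while (!slot->done) {
            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += max_timeout;
            if (pthread_cond_timedwait(&slot->wait, &slot->lock, &ts) ==
                ETIMEDOUT && !slot->done) {
                rc = -ETIMEDOUT; /* caller runs the stall check next */
                break;
            }
        }
        pthread_mutex_unlock(&slot->lock);
        return rc;
    }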
182 * @rcfw: rcfw channel instance of rdev
192 static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
194 struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
199 crsqe = &rcfw->crsqe_tbl[cookie];
209 bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
219 * @rcfw: rcfw channel instance of rdev
235 static void __send_message_no_waiter(struct bnxt_qplib_rcfw *rcfw,
238 struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
249 crsqe = &rcfw->crsqe_tbl[cookie];
276 atomic_inc(&rcfw->timeout_send);
283 static int __send_message(struct bnxt_qplib_rcfw *rcfw,
296 cmdq = &rcfw->cmdq;
298 pdev = rcfw->pdev;
307 crsqe = &rcfw->crsqe_tbl[cookie];
375 * __poll_for_resp - self-poll completion for rcfw command
376 * @rcfw: rcfw channel instance of rdev
387 static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
389 struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
395 crsqe = &rcfw->crsqe_tbl[cookie];
405 bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
409 (rcfw->max_timeout * 1000)) {
410 ret = bnxt_re_is_fw_stalled(rcfw, cookie);
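Poll mode has no interrupt to lean on, so the sender paces itself and drives completion processing directly, bailing out to the stall check once the overall deadline passes. A small sketch under those assumptions (the pacing value is invented):

    #include <errno.h>
    #include <stdbool.h>
    #include <time.h>
    #include <unistd.h>

    /* 'done' stands in for the command's completion flag; the real loop
     * calls bnxt_qplib_service_creq() itself on every iteration. */
    static int poll_for_resp(volatile bool *done, long max_timeout_ms)
    {
        struct timespec start, now;
        long elapsed_ms;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (!*done) {
            usleep(1000); /* stand-in for the driver's usleep_range() pacing */
            /* ... drive the completion queue here ... */
            clock_gettime(CLOCK_MONOTONIC, &now);
            elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                         (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed_ms > max_timeout_ms)
                return -ETIMEDOUT; /* fall back to the stall check */
        }
        return 0;
    }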
417 static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
423 cmdq = &rcfw->cmdq;
426 if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
434 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
442 dev_err(&rcfw->pdev->dev,
452 static void __destroy_timedout_ah(struct bnxt_qplib_rcfw *rcfw,
464 __send_message_no_waiter(rcfw, &msg);
465 dev_info_ratelimited(&rcfw->pdev->dev,
468 atomic_read(&rcfw->timeout_send));
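A CREATE_AH that timed out can still complete later, leaving an ownerless AH in firmware; the driver destroys it with a no-waiter send and counts such sends in timeout_send. A sketch of that fire-and-forget pattern (the opcode value and all names are hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int timeout_send; /* analogue of rcfw->timeout_send */

    /* Post a command without registering a waiter: nobody sleeps on the
     * cookie, and the CREQ handler consumes the completion silently,
     * decrementing timeout_send, as in __send_message_no_waiter(). */
    static void send_no_waiter(unsigned int opcode, unsigned int obj_id)
    {
        atomic_fetch_add(&timeout_send, 1);
        /* ... write the command and ring the doorbell here ... */
        (void)opcode; (void)obj_id;
    }

    /* The AH exists in firmware with no owner, so destroy it without
     * waiting, as __destroy_timedout_ah() does. */
    static void destroy_timedout_ah(unsigned int ah_id)
    {
        send_no_waiter(0xdead /* hypothetical DESTROY_AH opcode */, ah_id);
        fprintf(stderr, "timed-out AH %u queued for destroy, no-wait sends: %d\n",
                ah_id, atomic_load(&timeout_send));
    }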
473 * and complete rcfw command.
474 * @rcfw: rcfw channel instance of rdev
484 static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
495 rc = __send_message_basic_sanity(rcfw, msg, opcode);
499 rc = __send_message(rcfw, msg, opcode);
507 rc = __block_for_resp(rcfw, cookie);
508 else if (atomic_read(&rcfw->rcfw_intr_enabled))
509 rc = __wait_for_resp(rcfw, cookie);
511 rc = __poll_for_resp(rcfw, cookie);
514 spin_lock_bh(&rcfw->cmdq.hwq.lock);
515 crsqe = &rcfw->crsqe_tbl[cookie];
518 set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
519 spin_unlock_bh(&rcfw->cmdq.hwq.lock);
525 dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
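Putting the pieces together, the send path picks a completion strategy from the calling context and latches the stall flag once every strategy has given up. A standalone sketch of that dispatch, with the three strategies reduced to stubs (the names echo the sketches above, not driver symbols):

    #include <errno.h>
    #include <stdbool.h>

    /* Stubs standing in for the three completion strategies. */
    static int block_for_resp(void *slot)                { (void)slot; return 0; }
    static int wait_for_resp(void *slot, unsigned int t) { (void)slot; (void)t; return 0; }
    static int poll_for_resp(void *slot, long t_ms)      { (void)slot; (void)t_ms; return 0; }

    static bool stall_detected; /* FIRMWARE_STALL_DETECTED analogue */

    /* Block (busy-wait) when the caller cannot sleep, sleep on the CREQ
     * interrupt when it is enabled, otherwise self-poll (early init). */
    static int complete_cmd(void *slot, bool block, bool intr_enabled,
                            unsigned int max_timeout)
    {
        int rc;

        if (block)
            rc = block_for_resp(slot);
        else if (intr_enabled)
            rc = wait_for_resp(slot, max_timeout);
        else
            rc = poll_for_resp(slot, max_timeout * 1000L);

        if (rc == -ETIMEDOUT)
            stall_detected = true; /* no new commands until recovery */
        return rc;
    }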
535 * and complete rcfw command.
536 * @rcfw: rcfw channel instance of rdev
539 * The driver interacts with firmware through the rcfw channel/slow path in two ways.
540 * a. Blocking rcfw command send. In this path, the driver cannot hold
543 * b. Non-blocking rcfw command send. In this path, the driver can hold the
548 * (due to the size of rcfw messages, ~4K rcfw commands can actually be outstanding)
549 * is not optimal for rcfw command processing in firmware.
551 * Restrict to at most #RCFW_CMD_NON_BLOCKING_SHADOW_QD non-blocking rcfw commands.
558 int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
564 down(&rcfw->rcfw_inflight);
565 ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);
566 up(&rcfw->rcfw_inflight);
568 ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);
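The comment block above is the rationale for the semaphore: non-blocking senders are throttled to the shadow queue depth so firmware never sees the full ~4K backlog. A POSIX sketch of the throttle (the depth value is assumed for illustration; the kernel path uses down()/up() on rcfw_inflight):

    #include <semaphore.h>
    #include <stdbool.h>

    #define NON_BLOCKING_SHADOW_QD 64 /* assumed depth, for illustration */

    static sem_t inflight; /* analogue of rcfw->rcfw_inflight */

    static void channel_enable(void)
    {
        sem_init(&inflight, 0, NON_BLOCKING_SHADOW_QD);
    }

    /* Non-blocking senders take a slot before sending and give it back
     * after completion; the blocking path is not throttled. */
    static int send_message(bool block)
    {
        int rc;

        if (!block) {
            sem_wait(&inflight);  /* mirrors down(&rcfw->rcfw_inflight) */
            rc = 0;               /* send + wait for completion here */
            sem_post(&inflight);  /* mirrors up(&rcfw->rcfw_inflight) */
        } else {
            rc = 0;               /* send + busy-wait completion here */
        }
        return rc;
    }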
575 static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
614 rc = rcfw->creq.aeq_handler(rcfw, (void *)func_event, NULL);
618 static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
623 struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;
633 pdev = rcfw->pdev;
638 spin_lock(&rcfw->tbl_lock);
639 tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
640 qp = rcfw->qp_tbl[tbl_indx].qp_handle;
642 spin_unlock(&rcfw->tbl_lock);
646 rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
647 spin_unlock(&rcfw->tbl_lock);
668 crsqe = &rcfw->crsqe_tbl[cookie];
671 &rcfw->cmdq.flags),
672 "QPLIB: Unreponsive rcfw channel detected.!!")) {
674 "rcfw timedout: cookie = %#x, free_slots = %d",
681 atomic_dec(&rcfw->timeout_send);
721 __destroy_timedout_ah(rcfw,
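QP async events race with QP destroy, so the table lookup and the aeq handler both run under tbl_lock; a destroyed QP simply yields a NULL handle and the event is dropped. A pthread sketch of that pattern (the table size and index mapping are invented):

    #include <pthread.h>
    #include <stddef.h>

    /* Hypothetical qp table guarded by a lock, mirroring rcfw->tbl_lock:
     * the lookup and the handler run under the lock so a concurrent
     * destroy cannot free the QP between the two. */
    struct qp_entry { void *qp_handle; };

    static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct qp_entry qp_tbl[256];

    static int process_qp_event(unsigned int qp_id, int (*aeq_handler)(void *qp))
    {
        unsigned int idx = qp_id % 256; /* stand-in for map_qp_id_to_tbl_indx() */
        void *qp;
        int rc;

        pthread_mutex_lock(&tbl_lock);
        qp = qp_tbl[idx].qp_handle;
        if (!qp) { /* QP already destroyed: drop the event */
            pthread_mutex_unlock(&tbl_lock);
            return 0;
        }
        rc = aeq_handler(qp); /* called with the table lock held */
        pthread_mutex_unlock(&tbl_lock);
        return rc;
    }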
733 struct bnxt_qplib_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet);
734 struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
751 rcfw->cmdq.last_seen = jiffies;
757 (rcfw, (struct creq_qp_event *)creqe,
763 (rcfw, (struct creq_func_event *)creqe))
766 dev_warn(&rcfw->pdev->dev,
771 dev_warn(&rcfw->pdev->dev,
784 rcfw->res->cctx, true);
787 wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
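The tasklet drains the CREQ under a budget: every consumed element refreshes last_seen (which feeds the stall heuristic above), command completions are tallied so exactly that many waiters get woken, and the doorbell is re-armed at the end. A reduced sketch, with ring access abstracted behind a callback:

    #include <stdbool.h>
    #include <time.h>

    /* Illustrative drain loop; next_valid() stands in for reading the
     * next valid CREQ element off the hardware ring. */
    struct creq_entry { bool is_cmd_completion; };

    static time_t last_seen; /* analogue of rcfw->cmdq.last_seen */

    static int service_creq(int budget, bool (*next_valid)(struct creq_entry **e))
    {
        struct creq_entry *e;
        int num_wakeup = 0;

        while (budget-- && next_valid(&e)) {
            last_seen = time(NULL); /* any event proves firmware is alive */
            if (e->is_cmd_completion)
                num_wakeup++;       /* one finished command, one waiter */
            /* async/function events would be dispatched to handlers here */
        }
        /* re-arm the CREQ doorbell, then wake exactly num_wakeup waiters */
        return num_wakeup;
    }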
792 struct bnxt_qplib_rcfw *rcfw = dev_instance;
797 creq = &rcfw->creq;
809 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
821 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
825 clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
829 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
854 if (is_virtfn || bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
900 if (BNXT_RE_HW_RETX(rcfw->res->dattr->dev_cap_flags))
902 if (_is_optimize_modify_qp_supported(rcfw->res->dattr->dev_cap_flags2))
904 if (rcfw->res->en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)
909 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
912 set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
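Initialization advertises optional features to firmware only when the device capability bits allow them, as the fragments above show for HW retransmit, optimized modify-QP, and VF resource management. A trivial sketch of that flag assembly (the bit values are invented, not the HSI definitions):

    #include <stdbool.h>
    #include <stdint.h>

    #define INIT_FLAG_HW_RETX      (1u << 0) /* hypothetical bit */
    #define INIT_FLAG_OPTIMIZE_MQP (1u << 1) /* hypothetical bit */
    #define INIT_FLAG_VF_RES_MGMT  (1u << 2) /* hypothetical bit */

    /* Set a flag only when the corresponding capability is reported. */
    static uint16_t build_init_flags(bool hw_retx, bool opt_modify_qp,
                                     bool vf_res_mgmt)
    {
        uint16_t flags = 0;

        if (hw_retx)
            flags |= INIT_FLAG_HW_RETX;
        if (opt_modify_qp)
            flags |= INIT_FLAG_OPTIMIZE_MQP;
        if (vf_res_mgmt)
            flags |= INIT_FLAG_VF_RES_MGMT;
        return flags;
    }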
916 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
918 kfree(rcfw->crsqe_tbl);
919 bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
920 bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
921 rcfw->pdev = NULL;
925 struct bnxt_qplib_rcfw *rcfw,
933 rcfw->pdev = res->pdev;
934 cmdq = &rcfw->cmdq;
935 creq = &rcfw->creq;
936 rcfw->res = res;
942 hwq_attr.res = rcfw->res;
948 dev_err(&rcfw->pdev->dev,
953 rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT;
955 sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);
956 hwq_attr.depth = rcfw->cmdq_depth & 0x7FFFFFFF;
960 dev_err(&rcfw->pdev->dev,
965 rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
966 sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
967 if (!rcfw->crsqe_tbl)
970 spin_lock_init(&rcfw->tbl_lock);
972 rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
977 bnxt_qplib_free_rcfw_channel(rcfw);
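Channel allocation sizes the tracking table to the command queue depth, so a completion cookie can index its slot directly. A one-liner sketch of the kcalloc pattern (the struct fields are abbreviated):

    #include <stdlib.h>

    /* One tracking slot per command-queue element; the cookie carried in
     * each firmware response indexes this array directly. */
    struct crsqe { int req_size; void *resp; };

    static struct crsqe *alloc_crsqe_tbl(unsigned int cmdq_max_elements)
    {
        /* kcalloc analogue: zeroed array sized by the queue depth */
        return calloc(cmdq_max_elements, sizeof(struct crsqe));
    }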
981 void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
985 creq = &rcfw->creq;
992 bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
995 free_irq(creq->msix_vec, rcfw);
998 atomic_set(&rcfw->rcfw_intr_enabled, 0);
1004 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
1009 creq = &rcfw->creq;
1010 cmdq = &rcfw->cmdq;
1012 bnxt_qplib_rcfw_stop_irq(rcfw, true);
1023 int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
1030 creq = &rcfw->creq;
1031 res = rcfw->res;
1047 creq->irq_name, rcfw);
1057 atomic_inc(&rcfw->rcfw_intr_enabled);
1062 static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw)
1068 pdev = rcfw->pdev;
1069 mbox = &rcfw->cmdq.cmdq_mbox;
1097 static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
1103 pdev = rcfw->pdev;
1104 creq_db = &rcfw->creq.creq_db;
1125 creq_db->dbinfo.hwq = &rcfw->creq.hwq;
1126 creq_db->dbinfo.xid = rcfw->creq.ring_id;
1130 static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw)
1137 cmdq = &rcfw->cmdq;
1138 creq = &rcfw->creq;
1143 cpu_to_le16(((rcfw->cmdq_depth <<
1154 int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
1163 cmdq = &rcfw->cmdq;
1164 creq = &rcfw->creq;
1176 rc = bnxt_qplib_map_cmdq_mbox(rcfw);
1180 rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off);
1184 rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
1186 dev_err(&rcfw->pdev->dev,
1188 bnxt_qplib_disable_rcfw_channel(rcfw);
1192 sema_init(&rcfw->rcfw_inflight, RCFW_CMD_NON_BLOCKING_SHADOW_QD);
1193 bnxt_qplib_start_rcfw(rcfw);
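Bring-up follows a strict order: map the command mailbox, map the CREQ doorbell, request the IRQ (tearing the channel back down on failure), seed the inflight semaphore, and only then write the init record that tells firmware where the command queue lives. A hedged outline with each driver step reduced to a stub:

    #include <stdio.h>

    static int map_cmdq_mbox(void)  { return 0; }
    static int map_creq_db(void)    { return 0; }
    static int start_irq(void)      { return 0; }
    static void init_inflight(void) { } /* sema_init(..., shadow depth) */
    static void start_rcfw(void)    { } /* writes the CMDQ init mailbox record */

    static int enable_channel(void)
    {
        int rc;

        if ((rc = map_cmdq_mbox()))
            return rc;
        if ((rc = map_creq_db()))
            return rc;
        if ((rc = start_irq())) {
            fprintf(stderr, "failed to request IRQ\n");
            return rc; /* the driver also disables the channel here */
        }
        init_inflight();
        start_rcfw();  /* firmware may now be sent commands */
        return 0;
    }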