Lines matching refs: adap (Chelsio cxgb4 driver hardware support code)
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, in t4_read_indirect() argument
118 t4_write_reg(adap, addr_reg, start_idx); in t4_read_indirect()
119 *vals++ = t4_read_reg(adap, data_reg); in t4_read_indirect()
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, in t4_write_indirect() argument
141 t4_write_reg(adap, addr_reg, start_idx++); in t4_write_indirect()
142 t4_write_reg(adap, data_reg, *vals++); in t4_write_indirect()
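
The two helpers above, t4_read_indirect() and t4_write_indirect(), show the driver's basic indirect register access: write an index into an address register, then read or write the paired data register. Below is a minimal, self-contained sketch of that pattern; the regs[] array and read_reg()/write_reg() are stand-ins for the adapter's MMIO space and the kernel's t4_read_reg()/t4_write_reg(), not the real API.

#include <stdint.h>

static uint32_t regs[256];                          /* fake register file */

static uint32_t read_reg(uint32_t addr)             { return regs[addr]; }
static void write_reg(uint32_t addr, uint32_t val)  { regs[addr] = val; }

/* Read nregs values starting at start_idx: select each index through the
 * address register, then latch the value from the paired data register. */
void read_indirect(uint32_t addr_reg, uint32_t data_reg,
                   uint32_t *vals, unsigned int nregs, uint32_t start_idx)
{
        while (nregs--) {
                write_reg(addr_reg, start_idx++);
                *vals++ = read_reg(data_reg);
        }
}

/* Write nregs values starting at start_idx through the same register pair. */
void write_indirect(uint32_t addr_reg, uint32_t data_reg,
                    const uint32_t *vals, unsigned int nregs,
                    uint32_t start_idx)
{
        while (nregs--) {
                write_reg(addr_reg, start_idx++);
                write_reg(data_reg, *vals++);
        }
}
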
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) in t4_hw_pci_read_cfg4() argument
154 u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg); in t4_hw_pci_read_cfg4()
156 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in t4_hw_pci_read_cfg4()
161 if (is_t4(adap->params.chip)) in t4_hw_pci_read_cfg4()
164 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req); in t4_hw_pci_read_cfg4()
165 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A); in t4_hw_pci_read_cfg4()
172 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0); in t4_hw_pci_read_cfg4()
183 static void t4_report_fw_error(struct adapter *adap) in t4_report_fw_error() argument
197 pcie_fw = t4_read_reg(adap, PCIE_FW_A); in t4_report_fw_error()
199 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", in t4_report_fw_error()
201 adap->flags &= ~CXGB4_FW_OK; in t4_report_fw_error()
208 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, in get_mbox_rpl() argument
212 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); in get_mbox_rpl()
218 static void fw_asrt(struct adapter *adap, u32 mbox_addr) in fw_asrt() argument
222 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); in fw_asrt()
223 dev_alert(adap->pdev_dev, in fw_asrt()
282 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, in t4_wr_mbox_meat_timeout() argument
308 if (adap->pdev->error_state != pci_channel_io_normal) in t4_wr_mbox_meat_timeout()
322 spin_lock_bh(&adap->mbox_lock); in t4_wr_mbox_meat_timeout()
323 list_add_tail(&entry.list, &adap->mlist.list); in t4_wr_mbox_meat_timeout()
324 spin_unlock_bh(&adap->mbox_lock); in t4_wr_mbox_meat_timeout()
335 pcie_fw = t4_read_reg(adap, PCIE_FW_A); in t4_wr_mbox_meat_timeout()
337 spin_lock_bh(&adap->mbox_lock); in t4_wr_mbox_meat_timeout()
339 spin_unlock_bh(&adap->mbox_lock); in t4_wr_mbox_meat_timeout()
341 t4_record_mbox(adap, cmd, size, access, ret); in t4_wr_mbox_meat_timeout()
348 if (list_first_entry(&adap->mlist.list, struct mbox_list, in t4_wr_mbox_meat_timeout()
366 v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); in t4_wr_mbox_meat_timeout()
368 v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); in t4_wr_mbox_meat_timeout()
370 spin_lock_bh(&adap->mbox_lock); in t4_wr_mbox_meat_timeout()
372 spin_unlock_bh(&adap->mbox_lock); in t4_wr_mbox_meat_timeout()
374 t4_record_mbox(adap, cmd, size, access, ret); in t4_wr_mbox_meat_timeout()
379 t4_record_mbox(adap, cmd, size, access, 0); in t4_wr_mbox_meat_timeout()
381 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); in t4_wr_mbox_meat_timeout()
383 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW)); in t4_wr_mbox_meat_timeout()
384 t4_read_reg(adap, ctl_reg); /* flush write */ in t4_wr_mbox_meat_timeout()
390 !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) && in t4_wr_mbox_meat_timeout()
401 v = t4_read_reg(adap, ctl_reg); in t4_wr_mbox_meat_timeout()
404 t4_write_reg(adap, ctl_reg, 0); in t4_wr_mbox_meat_timeout()
408 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg); in t4_wr_mbox_meat_timeout()
412 fw_asrt(adap, data_reg); in t4_wr_mbox_meat_timeout()
418 t4_write_reg(adap, ctl_reg, 0); in t4_wr_mbox_meat_timeout()
421 t4_record_mbox(adap, cmd_rpl, in t4_wr_mbox_meat_timeout()
423 spin_lock_bh(&adap->mbox_lock); in t4_wr_mbox_meat_timeout()
425 spin_unlock_bh(&adap->mbox_lock); in t4_wr_mbox_meat_timeout()
431 t4_record_mbox(adap, cmd, size, access, ret); in t4_wr_mbox_meat_timeout()
432 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", in t4_wr_mbox_meat_timeout()
434 t4_report_fw_error(adap); in t4_wr_mbox_meat_timeout()
435 spin_lock_bh(&adap->mbox_lock); in t4_wr_mbox_meat_timeout()
437 spin_unlock_bh(&adap->mbox_lock); in t4_wr_mbox_meat_timeout()
438 t4_fatal_err(adap); in t4_wr_mbox_meat_timeout()
442 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, in t4_wr_mbox_meat() argument
445 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok, in t4_wr_mbox_meat()
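
t4_wr_mbox_meat_timeout() above drives the firmware mailbox: the command flits are copied into the mailbox data registers, ownership is handed to the firmware through the control register, and the driver then polls the control register until the firmware hands the mailbox back with a reply. The sketch below captures only that handshake; the mailbox struct, owner encodings, and retry budget are assumptions, not the real MBOWNER_*/MBMSGVALID register encodings.

#include <stdint.h>

#define MBOX_LEN   64                        /* mailbox payload, in bytes */

/* Hypothetical owner encodings; the real ones come from MBOWNER_V()/
 * MBOWNER_G() and MBMSGVALID_F in the register headers. */
enum { OWNER_NONE, OWNER_FW, OWNER_DRV };

struct fake_mbox {
        uint64_t data[MBOX_LEN / 8];         /* stands in for the data registers */
        unsigned int owner;                  /* stands in for the control register */
};

/* Issue one command: copy the flits in, hand the mailbox to the firmware,
 * poll until ownership returns, then copy the reply out and release. */
int mbox_cmd(struct fake_mbox *mb, const uint64_t *cmd, uint64_t *rpl)
{
        unsigned int i, tries;

        for (i = 0; i < MBOX_LEN / 8; i++)       /* t4_write_reg64() loop   */
                mb->data[i] = cmd[i];
        mb->owner = OWNER_FW;                    /* MBMSGVALID + owner = FW */

        for (tries = 0; tries < 1000; tries++) {
                if (mb->owner == OWNER_DRV) {    /* firmware posted a reply */
                        for (i = 0; i < MBOX_LEN / 8; i++)
                                rpl[i] = mb->data[i];    /* get_mbox_rpl()  */
                        mb->owner = OWNER_NONE;          /* release mailbox */
                        return 0;
                }
                /* the real code sleeps or spins between polls */
        }
        return -1;                               /* maps to -ETIMEDOUT */
}
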
449 static int t4_edc_err_read(struct adapter *adap, int idx) in t4_edc_err_read() argument
454 if (is_t4(adap->params.chip)) { in t4_edc_err_read()
455 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__); in t4_edc_err_read()
459 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx); in t4_edc_err_read()
466 CH_WARN(adap, in t4_edc_err_read()
469 t4_read_reg(adap, edc_ecc_err_addr_reg)); in t4_edc_err_read()
470 CH_WARN(adap, in t4_edc_err_read()
473 (unsigned long long)t4_read_reg64(adap, rdata_reg), in t4_edc_err_read()
474 (unsigned long long)t4_read_reg64(adap, rdata_reg + 8), in t4_edc_err_read()
475 (unsigned long long)t4_read_reg64(adap, rdata_reg + 16), in t4_edc_err_read()
476 (unsigned long long)t4_read_reg64(adap, rdata_reg + 24), in t4_edc_err_read()
477 (unsigned long long)t4_read_reg64(adap, rdata_reg + 32), in t4_edc_err_read()
478 (unsigned long long)t4_read_reg64(adap, rdata_reg + 40), in t4_edc_err_read()
479 (unsigned long long)t4_read_reg64(adap, rdata_reg + 48), in t4_edc_err_read()
480 (unsigned long long)t4_read_reg64(adap, rdata_reg + 56), in t4_edc_err_read()
481 (unsigned long long)t4_read_reg64(adap, rdata_reg + 64)); in t4_edc_err_read()
497 int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off, in t4_memory_rw_init() argument
509 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A)); in t4_memory_rw_init()
515 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, in t4_memory_rw_init()
528 mem_reg = t4_read_reg(adap, in t4_memory_rw_init()
537 if (is_t4(adap->params.chip)) in t4_memory_rw_init()
538 *mem_base -= adap->t4_bar0; in t4_memory_rw_init()
551 void t4_memory_update_win(struct adapter *adap, int win, u32 addr) in t4_memory_update_win() argument
553 t4_write_reg(adap, in t4_memory_update_win()
559 t4_read_reg(adap, in t4_memory_update_win()
573 void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf, in t4_memory_rw_residual() argument
585 t4_read_reg(adap, addr)); in t4_memory_rw_residual()
592 t4_write_reg(adap, addr, in t4_memory_rw_residual()
614 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, in t4_memory_rw() argument
636 ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base, in t4_memory_rw()
644 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf); in t4_memory_rw()
655 t4_memory_update_win(adap, win, pos | win_pf); in t4_memory_rw()
693 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap, in t4_memory_rw()
696 t4_write_reg(adap, mem_base + offset, in t4_memory_rw()
710 t4_memory_update_win(adap, win, pos | win_pf); in t4_memory_rw()
720 t4_memory_rw_residual(adap, resid, mem_base + offset, in t4_memory_rw()
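
t4_memory_rw() above reads and writes adapter memory (EDC/MC) through a sliding PCIe memory window: the window is positioned on an aligned chunk via t4_memory_update_win(), 32-bit words are copied through it, and the window is re-positioned whenever the offset runs off its end (the non-word-aligned tail goes through t4_memory_rw_residual()). A rough sketch of that loop, with a fake window and made-up helper names standing in for the MEM_WIN registers:

#include <stdint.h>
#include <stddef.h>

#define WIN_SIZE   4096u                     /* assumed window aperture size */

static uint32_t adapter_mem[1 << 16];        /* fake adapter memory, in words */
static uint32_t win_base;                    /* current window position */

static void set_window(uint32_t addr)        { win_base = addr; }
static uint32_t win_read32(uint32_t off)     { return adapter_mem[(win_base + off) / 4]; }

/* Read len bytes from adapter address addr; addr and len are assumed to be
 * 4-byte aligned here (the driver handles the residual tail separately). */
void mem_read(uint32_t addr, uint32_t *buf, size_t len)
{
        uint32_t pos = addr & ~(WIN_SIZE - 1);   /* align the window start */
        uint32_t offset = addr - pos;

        set_window(pos);
        while (len > 0) {
                if (offset == WIN_SIZE) {        /* ran off the window: slide it */
                        pos += WIN_SIZE;
                        offset = 0;
                        set_window(pos);
                }
                *buf++ = win_read32(offset);
                offset += 4;
                len -= 4;
        }
}
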
731 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg) in t4_read_pcie_cfg4() argument
750 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf)); in t4_read_pcie_cfg4()
756 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), in t4_read_pcie_cfg4()
764 t4_hw_pci_read_cfg4(adap, reg, &val); in t4_read_pcie_cfg4()
772 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask, in t4_get_window() argument
777 if (is_t4(adap->params.chip)) { in t4_get_window()
789 bar0 = t4_read_pcie_cfg4(adap, pci_base); in t4_get_window()
791 adap->t4_bar0 = bar0; in t4_get_window()
802 u32 t4_get_util_window(struct adapter *adap) in t4_get_util_window() argument
804 return t4_get_window(adap, PCI_BASE_ADDRESS_0, in t4_get_util_window()
812 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window) in t4_setup_memwin() argument
814 t4_write_reg(adap, in t4_setup_memwin()
818 t4_read_reg(adap, in t4_setup_memwin()
856 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) in t4_get_regs() argument
2643 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_get_regs()
2665 dev_err(adap->pdev_dev, in t4_get_regs()
2683 *bufp++ = t4_read_reg(adap, reg); in t4_get_regs()
3153 int t4_get_exprom_version(struct adapter *adap, u32 *vers) in t4_get_exprom_version() argument
3163 ret = t4_read_flash(adap, FLASH_EXP_ROM_START, in t4_get_exprom_version()
3361 int t4_check_fw_version(struct adapter *adap) in t4_check_fw_version() argument
3365 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_check_fw_version()
3367 ret = t4_get_fw_version(adap, &adap->params.fw_vers); in t4_check_fw_version()
3370 ret = t4_get_fw_version(adap, &adap->params.fw_vers); in t4_check_fw_version()
3375 major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers); in t4_check_fw_version()
3376 minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers); in t4_check_fw_version()
3377 micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers); in t4_check_fw_version()
3396 dev_err(adap->pdev_dev, "Unsupported chip type, %x\n", in t4_check_fw_version()
3397 adap->chip); in t4_check_fw_version()
3403 dev_err(adap->pdev_dev, in t4_check_fw_version()
3435 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, in should_install_fs_fw() argument
3453 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " in should_install_fs_fw()
3463 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, in t4_prep_fw() argument
3475 ret = t4_read_flash(adap, FLASH_FW_START, in t4_prep_fw()
3481 dev_err(adap->pdev_dev, in t4_prep_fw()
3501 should_install_fs_fw(adap, card_fw_usable, in t4_prep_fw()
3504 ret = t4_fw_upgrade(adap, adap->mbox, fw_data, in t4_prep_fw()
3507 dev_err(adap->pdev_dev, in t4_prep_fw()
3525 dev_err(adap->pdev_dev, "Cannot find a usable firmware: " in t4_prep_fw()
3541 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver); in t4_prep_fw()
3542 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); in t4_prep_fw()
3599 static bool t4_fw_matches_chip(const struct adapter *adap, in t4_fw_matches_chip() argument
3605 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) || in t4_fw_matches_chip()
3606 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) || in t4_fw_matches_chip()
3607 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6)) in t4_fw_matches_chip()
3610 dev_err(adap->pdev_dev, in t4_fw_matches_chip()
3612 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip)); in t4_fw_matches_chip()
3624 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) in t4_load_fw() argument
3632 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; in t4_load_fw()
3638 dev_err(adap->pdev_dev, "FW image has no data\n"); in t4_load_fw()
3642 dev_err(adap->pdev_dev, in t4_load_fw()
3647 dev_err(adap->pdev_dev, in t4_load_fw()
3652 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", in t4_load_fw()
3656 if (!t4_fw_matches_chip(adap, hdr)) in t4_load_fw()
3663 dev_err(adap->pdev_dev, in t4_load_fw()
3669 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); in t4_load_fw()
3680 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true); in t4_load_fw()
3688 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true); in t4_load_fw()
3693 ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver), in t4_load_fw()
3698 dev_err(adap->pdev_dev, "firmware download failed, error %d\n", in t4_load_fw()
3701 ret = t4_get_fw_version(adap, &adap->params.fw_vers); in t4_load_fw()
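
t4_load_fw() above illustrates the safe flash-update ordering visible in the listing: erase the target sectors, write the first page with the version word invalidated, write the remaining pages, then patch the real version in last, so an interrupted download is never mistaken for a valid image (the version is read back afterwards via t4_get_fw_version()). A simplified sketch of that ordering, with a byte-array flash and an assumed header layout standing in for struct fw_hdr and t4_write_flash():

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define PAGE_SZ   256u                       /* assumed flash page size */

static uint8_t flash[1 << 20];               /* fake flash array */

static void flash_write(uint32_t addr, const void *buf, unsigned int len)
{
        memcpy(&flash[addr], buf, len);      /* stands in for t4_write_flash() */
}

struct img_hdr {                             /* stand-in for struct fw_hdr */
        uint32_t magic;
        uint32_t version;
};

/* Write an image so a torn download never looks valid: the first page goes
 * out with the version invalidated, the body follows, the real version is
 * written last.  Assumes size is a non-zero multiple of PAGE_SZ. */
void load_image(uint32_t base, const uint8_t *img, unsigned int size)
{
        uint8_t first_page[PAGE_SZ];
        uint32_t bad_ver = 0xffffffff, real_ver;
        unsigned int off;

        memcpy(&real_ver, img + offsetof(struct img_hdr, version),
               sizeof(real_ver));

        memcpy(first_page, img, PAGE_SZ);
        memcpy(first_page + offsetof(struct img_hdr, version),
               &bad_ver, sizeof(bad_ver));              /* invalidate version */
        flash_write(base, first_page, PAGE_SZ);

        for (off = PAGE_SZ; off < size; off += PAGE_SZ) /* image body */
                flash_write(base + off, img + off, PAGE_SZ);

        flash_write(base + offsetof(struct img_hdr, version),
                    &real_ver, sizeof(real_ver));       /* real version last */
}
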
3713 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) in t4_phy_fw_ver() argument
3720 FW_PARAMS_PARAM_Y_V(adap->params.portvec) | in t4_phy_fw_ver()
3722 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in t4_phy_fw_ver()
3754 int t4_load_phy_fw(struct adapter *adap, int win, in t4_load_phy_fw() argument
3768 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); in t4_load_phy_fw()
3773 CH_WARN(adap, "PHY Firmware already up-to-date, " in t4_load_phy_fw()
3787 FW_PARAMS_PARAM_Y_V(adap->params.portvec) | in t4_load_phy_fw()
3790 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1, in t4_load_phy_fw()
3800 spin_lock_bh(&adap->win0_lock); in t4_load_phy_fw()
3801 ret = t4_memory_rw(adap, win, mtype, maddr, in t4_load_phy_fw()
3804 spin_unlock_bh(&adap->win0_lock); in t4_load_phy_fw()
3815 FW_PARAMS_PARAM_Y_V(adap->params.portvec) | in t4_load_phy_fw()
3817 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, in t4_load_phy_fw()
3826 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); in t4_load_phy_fw()
3831 CH_WARN(adap, "PHY Firmware did not update: " in t4_load_phy_fw()
3847 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) in t4_fwcache() argument
3855 FW_PARAMS_CMD_PFN_V(adap->pf) | in t4_fwcache()
3863 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); in t4_fwcache()
3866 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, in t4_cim_read_pif_la() argument
3873 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A); in t4_cim_read_pif_la()
3875 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F); in t4_cim_read_pif_la()
3877 val = t4_read_reg(adap, CIM_DEBUGSTS_A); in t4_cim_read_pif_la()
3887 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) | in t4_cim_read_pif_la()
3889 *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A); in t4_cim_read_pif_la()
3890 *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A); in t4_cim_read_pif_la()
3897 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg); in t4_cim_read_pif_la()
3900 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp) in t4_cim_read_ma_la() argument
3905 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A); in t4_cim_read_ma_la()
3907 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F); in t4_cim_read_ma_la()
3912 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) | in t4_cim_read_ma_la()
3914 *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A); in t4_cim_read_ma_la()
3915 *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A); in t4_cim_read_ma_la()
3918 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg); in t4_cim_read_ma_la()
3921 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) in t4_ulprx_read_la() argument
3928 t4_write_reg(adap, ULP_RX_LA_CTL_A, i); in t4_ulprx_read_la()
3929 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A); in t4_ulprx_read_la()
3930 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j); in t4_ulprx_read_la()
3932 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A); in t4_ulprx_read_la()
4242 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) in t4_restart_aneg() argument
4244 unsigned int fw_caps = adap->params.fw_caps_support; in t4_restart_aneg()
4260 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_restart_aneg()
4263 typedef void (*int_handler_t)(struct adapter *adap);
4716 static void le_intr_handler(struct adapter *adap) in le_intr_handler() argument
4718 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); in le_intr_handler()
4739 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, in le_intr_handler()
4742 t4_fatal_err(adap); in le_intr_handler()
4884 static void ma_intr_handler(struct adapter *adap) in ma_intr_handler() argument
4886 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A); in ma_intr_handler()
4889 dev_alert(adap->pdev_dev, in ma_intr_handler()
4891 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A)); in ma_intr_handler()
4892 if (is_t5(adap->params.chip)) in ma_intr_handler()
4893 dev_alert(adap->pdev_dev, in ma_intr_handler()
4895 t4_read_reg(adap, in ma_intr_handler()
4899 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A); in ma_intr_handler()
4900 dev_alert(adap->pdev_dev, "MA address wrap-around error by " in ma_intr_handler()
4905 t4_write_reg(adap, MA_INT_CAUSE_A, status); in ma_intr_handler()
4906 t4_fatal_err(adap); in ma_intr_handler()
4912 static void smb_intr_handler(struct adapter *adap) in smb_intr_handler() argument
4921 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info)) in smb_intr_handler()
4922 t4_fatal_err(adap); in smb_intr_handler()
4928 static void ncsi_intr_handler(struct adapter *adap) in ncsi_intr_handler() argument
4938 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info)) in ncsi_intr_handler()
4939 t4_fatal_err(adap); in ncsi_intr_handler()
4945 static void xgmac_intr_handler(struct adapter *adap, int port) in xgmac_intr_handler() argument
4949 if (is_t4(adap->params.chip)) in xgmac_intr_handler()
4954 v = t4_read_reg(adap, int_cause_reg); in xgmac_intr_handler()
4961 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", in xgmac_intr_handler()
4964 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", in xgmac_intr_handler()
4966 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v); in xgmac_intr_handler()
4967 t4_fatal_err(adap); in xgmac_intr_handler()
4973 static void pl_intr_handler(struct adapter *adap) in pl_intr_handler() argument
4981 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info)) in pl_intr_handler()
4982 t4_fatal_err(adap); in pl_intr_handler()
5119 unsigned int t4_chip_rss_size(struct adapter *adap) in t4_chip_rss_size() argument
5121 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in t4_chip_rss_size()
5249 static int rd_rss_row(struct adapter *adap, int row, u32 *val) in rd_rss_row() argument
5251 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row); in rd_rss_row()
5252 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1, in rd_rss_row()
5279 static unsigned int t4_use_ldst(struct adapter *adap) in t4_use_ldst() argument
5281 return (adap->flags & CXGB4_FW_OK) && !adap->use_bd; in t4_use_ldst()
5296 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals, in t4_tp_fw_ldst_rw() argument
5315 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, in t4_tp_fw_ldst_rw()
5340 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data, in t4_tp_indirect_rw() argument
5361 if (t4_use_ldst(adap)) in t4_tp_indirect_rw()
5362 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw, in t4_tp_indirect_rw()
5369 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs, in t4_tp_indirect_rw()
5372 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs, in t4_tp_indirect_rw()
5387 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs, in t4_tp_pio_read() argument
5390 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs, in t4_tp_pio_read()
5404 static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs, in t4_tp_pio_write() argument
5407 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs, in t4_tp_pio_write()
5421 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs, in t4_tp_tm_pio_read() argument
5424 t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff, in t4_tp_tm_pio_read()
5438 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, in t4_tp_mib_read() argument
5441 t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs, in t4_tp_mib_read()
5453 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok) in t4_read_rss_key() argument
5455 t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok); in t4_read_rss_key()
5469 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx, in t4_write_rss_key() argument
5473 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A); in t4_write_rss_key()
5479 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) && in t4_write_rss_key()
5483 t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok); in t4_write_rss_key()
5487 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A, in t4_write_rss_key()
5491 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A, in t4_write_rss_key()
5589 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, in t4_tp_get_tcp_stats() argument
5599 t4_tp_mib_read(adap, val, ARRAY_SIZE(val), in t4_tp_get_tcp_stats()
5607 t4_tp_mib_read(adap, val, ARRAY_SIZE(val), in t4_tp_get_tcp_stats()
5627 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st, in t4_tp_get_err_stats() argument
5630 int nchan = adap->params.arch.nchan; in t4_tp_get_err_stats()
5632 t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A, in t4_tp_get_err_stats()
5634 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A, in t4_tp_get_err_stats()
5636 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A, in t4_tp_get_err_stats()
5638 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan, in t4_tp_get_err_stats()
5640 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan, in t4_tp_get_err_stats()
5642 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A, in t4_tp_get_err_stats()
5644 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan, in t4_tp_get_err_stats()
5646 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan, in t4_tp_get_err_stats()
5648 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A, in t4_tp_get_err_stats()
5660 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st, in t4_tp_get_cpl_stats() argument
5663 int nchan = adap->params.arch.nchan; in t4_tp_get_cpl_stats()
5665 t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok); in t4_tp_get_cpl_stats()
5667 t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok); in t4_tp_get_cpl_stats()
5678 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st, in t4_tp_get_rdma_stats() argument
5681 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A, in t4_tp_get_rdma_stats()
5694 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, in t4_get_fcoe_stats() argument
5699 t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx, in t4_get_fcoe_stats()
5702 t4_tp_mib_read(adap, &st->frames_drop, 1, in t4_get_fcoe_stats()
5705 t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx, in t4_get_fcoe_stats()
5719 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st, in t4_get_usm_stats() argument
5724 t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok); in t4_get_usm_stats()
5738 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) in t4_read_mtu_tbl() argument
5744 t4_write_reg(adap, TP_MTU_TABLE_A, in t4_read_mtu_tbl()
5746 v = t4_read_reg(adap, TP_MTU_TABLE_A); in t4_read_mtu_tbl()
5761 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]) in t4_read_cong_tbl() argument
5767 t4_write_reg(adap, TP_CCTRL_TABLE_A, in t4_read_cong_tbl()
5769 incr[mtu][w] = (u16)t4_read_reg(adap, in t4_read_cong_tbl()
5783 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, in t4_tp_wr_bits_indirect() argument
5786 t4_write_reg(adap, TP_PIO_ADDR_A, addr); in t4_tp_wr_bits_indirect()
5787 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask; in t4_tp_wr_bits_indirect()
5788 t4_write_reg(adap, TP_PIO_DATA_A, val); in t4_tp_wr_bits_indirect()
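
t4_tp_wr_bits_indirect() above is the read-modify-write variant of the indirect access: only the bits selected by the mask are replaced, everything else in the register is preserved. A tiny sketch of that idiom, reusing the same kind of stubbed register helpers as the earlier examples:

#include <stdint.h>

static uint32_t regs[256];                          /* fake register file */

static uint32_t read_reg(uint32_t addr)             { return regs[addr]; }
static void write_reg(uint32_t addr, uint32_t val)  { regs[addr] = val; }

/* Update only the bits in mask of the register selected by addr, keeping
 * the remaining bits unchanged (cf. TP_PIO_ADDR_A/TP_PIO_DATA_A above). */
void rmw_indirect(uint32_t addr_reg, uint32_t data_reg,
                  uint32_t addr, uint32_t mask, uint32_t val)
{
        write_reg(addr_reg, addr);
        val |= read_reg(data_reg) & ~mask;
        write_reg(data_reg, val);
}
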
5850 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, in t4_load_mtus() argument
5867 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) | in t4_load_mtus()
5876 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) | in t4_load_mtus()
5891 static u64 chan_rate(struct adapter *adap, unsigned int bytes256) in chan_rate() argument
5893 u64 v = bytes256 * adap->params.vpd.cclk; in chan_rate()
5907 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) in t4_get_chan_txrate() argument
5911 v = t4_read_reg(adap, TP_TX_TRATE_A); in t4_get_chan_txrate()
5912 nic_rate[0] = chan_rate(adap, TNLRATE0_G(v)); in t4_get_chan_txrate()
5913 nic_rate[1] = chan_rate(adap, TNLRATE1_G(v)); in t4_get_chan_txrate()
5914 if (adap->params.arch.nchan == NCHAN) { in t4_get_chan_txrate()
5915 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v)); in t4_get_chan_txrate()
5916 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v)); in t4_get_chan_txrate()
5919 v = t4_read_reg(adap, TP_TX_ORATE_A); in t4_get_chan_txrate()
5920 ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v)); in t4_get_chan_txrate()
5921 ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v)); in t4_get_chan_txrate()
5922 if (adap->params.arch.nchan == NCHAN) { in t4_get_chan_txrate()
5923 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v)); in t4_get_chan_txrate()
5924 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v)); in t4_get_chan_txrate()
5939 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, in t4_set_trace_filter() argument
5946 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0); in t4_set_trace_filter()
5950 cfg = t4_read_reg(adap, MPS_TRC_CFG_A); in t4_set_trace_filter()
5967 if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 || in t4_set_trace_filter()
5973 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0); in t4_set_trace_filter()
5980 t4_write_reg(adap, data_reg, tp->data[i]); in t4_set_trace_filter()
5981 t4_write_reg(adap, mask_reg, ~tp->mask[i]); in t4_set_trace_filter()
5983 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst, in t4_set_trace_filter()
5986 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, in t4_set_trace_filter()
5988 (is_t4(adap->params.chip) ? in t4_set_trace_filter()
6005 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, in t4_get_trace_filter() argument
6012 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst); in t4_get_trace_filter()
6013 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst); in t4_get_trace_filter()
6015 if (is_t4(adap->params.chip)) { in t4_get_trace_filter()
6034 tp->mask[i] = ~t4_read_reg(adap, mask_reg); in t4_get_trace_filter()
6035 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; in t4_get_trace_filter()
6047 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) in t4_pmtx_get_stats() argument
6052 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) { in t4_pmtx_get_stats()
6053 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1); in t4_pmtx_get_stats()
6054 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A); in t4_pmtx_get_stats()
6055 if (is_t4(adap->params.chip)) { in t4_pmtx_get_stats()
6056 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A); in t4_pmtx_get_stats()
6058 t4_read_indirect(adap, PM_TX_DBG_CTRL_A, in t4_pmtx_get_stats()
6074 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) in t4_pmrx_get_stats() argument
6079 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) { in t4_pmrx_get_stats()
6080 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1); in t4_pmrx_get_stats()
6081 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A); in t4_pmrx_get_stats()
6082 if (is_t4(adap->params.chip)) { in t4_pmrx_get_stats()
6083 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A); in t4_pmrx_get_stats()
6085 t4_read_indirect(adap, PM_RX_DBG_CTRL_A, in t4_pmrx_get_stats()
6239 unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx) in t4_get_tp_ch_map() argument
6241 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_get_tp_ch_map()
6242 unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A)); in t4_get_tp_ch_map()
6245 dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n", in t4_get_tp_ch_map()
6272 dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n", in t4_get_tp_ch_map()
6322 void t4_get_port_stats_offset(struct adapter *adap, int idx, in t4_get_port_stats_offset() argument
6329 t4_get_port_stats(adap, idx, stats); in t4_get_port_stats_offset()
6344 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) in t4_get_port_stats() argument
6346 u32 bgmap = t4_get_mps_bg_map(adap, idx); in t4_get_port_stats()
6347 u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A); in t4_get_port_stats()
6350 t4_read_reg64(adap, \ in t4_get_port_stats()
6351 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ in t4_get_port_stats()
6353 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) in t4_get_port_stats()
6379 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { in t4_get_port_stats()
6413 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { in t4_get_port_stats()
6441 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) in t4_get_lb_stats() argument
6443 u32 bgmap = t4_get_mps_bg_map(adap, idx); in t4_get_lb_stats()
6446 t4_read_reg64(adap, \ in t4_get_lb_stats()
6447 (is_t4(adap->params.chip) ? \ in t4_get_lb_stats()
6450 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) in t4_get_lb_stats()
6509 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, in t4_fwaddrspace_write() argument
6525 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_fwaddrspace_write()
6539 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, in t4_mdio_rd() argument
6556 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); in t4_mdio_rd()
6573 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, in t4_mdio_wr() argument
6590 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_mdio_wr()
6767 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type) in t4_sge_ctxt_flush() argument
6783 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); in t4_sge_ctxt_flush()
6797 int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers, in t4_read_sge_dbqtimers() argument
6817 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in t4_read_sge_dbqtimers()
6839 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, in t4_fw_hello() argument
6867 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); in t4_fw_hello()
6871 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F) in t4_fw_hello()
6872 t4_report_fw_error(adap); in t4_fw_hello()
6921 pcie_fw = t4_read_reg(adap, PCIE_FW_A); in t4_fw_hello()
6965 int t4_fw_bye(struct adapter *adap, unsigned int mbox) in t4_fw_bye() argument
6971 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_fw_bye()
6982 int t4_early_init(struct adapter *adap, unsigned int mbox) in t4_early_init() argument
6988 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_early_init()
6999 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) in t4_fw_reset() argument
7006 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_fw_reset()
7025 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) in t4_fw_halt() argument
7040 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_fw_halt()
7057 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F); in t4_fw_halt()
7058 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, in t4_fw_halt()
7091 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) in t4_fw_restart() argument
7099 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0); in t4_fw_restart()
7109 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0); in t4_fw_restart()
7111 if (t4_fw_reset(adap, mbox, in t4_fw_restart()
7116 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F); in t4_fw_restart()
7121 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0); in t4_fw_restart()
7123 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F)) in t4_fw_restart()
7154 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, in t4_fw_upgrade() argument
7160 if (!t4_fw_matches_chip(adap, fw_hdr)) in t4_fw_upgrade()
7166 adap->flags &= ~CXGB4_FW_OK; in t4_fw_upgrade()
7168 ret = t4_fw_halt(adap, mbox, force); in t4_fw_upgrade()
7172 ret = t4_load_fw(adap, fw_data, size); in t4_fw_upgrade()
7185 (void)t4_load_cfg(adap, NULL, 0); in t4_fw_upgrade()
7196 ret = t4_fw_restart(adap, mbox, reset); in t4_fw_upgrade()
7203 (void)t4_init_devlog_params(adap); in t4_fw_upgrade()
7205 adap->flags |= CXGB4_FW_OK; in t4_fw_upgrade()
7218 int t4_fl_pkt_align(struct adapter *adap) in t4_fl_pkt_align() argument
7223 sge_control = t4_read_reg(adap, SGE_CONTROL_A); in t4_fl_pkt_align()
7237 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in t4_fl_pkt_align()
7245 if (!is_t4(adap->params.chip)) { in t4_fl_pkt_align()
7249 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); in t4_fl_pkt_align()
7272 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, in t4_fixup_host_params() argument
7281 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A, in t4_fixup_host_params()
7291 if (is_t4(adap->params.chip)) { in t4_fixup_host_params()
7292 t4_set_reg_field(adap, SGE_CONTROL_A, in t4_fixup_host_params()
7325 if (pci_is_pcie(adap->pdev)) { in t4_fixup_host_params()
7333 pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, in t4_fixup_host_params()
7364 if (is_t5(adap->params.chip)) in t4_fixup_host_params()
7369 t4_set_reg_field(adap, SGE_CONTROL_A, in t4_fixup_host_params()
7374 t4_set_reg_field(adap, SGE_CONTROL2_A, in t4_fixup_host_params()
7399 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size); in t4_fixup_host_params()
7400 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A, in t4_fixup_host_params()
7401 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1) in t4_fixup_host_params()
7403 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A, in t4_fixup_host_params()
7404 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1) in t4_fixup_host_params()
7407 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12)); in t4_fixup_host_params()
7420 int t4_fw_initialize(struct adapter *adap, unsigned int mbox) in t4_fw_initialize() argument
7426 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_fw_initialize()
7444 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_query_params_rw() argument
7469 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); in t4_query_params_rw()
7476 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_query_params() argument
7480 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0, in t4_query_params()
7484 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_query_params_ns() argument
7488 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0, in t4_query_params_ns()
7506 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, in t4_set_params_timeout() argument
7529 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout); in t4_set_params_timeout()
7545 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_set_params() argument
7549 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val, in t4_set_params()
7574 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_cfg_pfvf() argument
7598 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_cfg_pfvf()
7620 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, in t4_alloc_vi() argument
7635 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); in t4_alloc_vi()
7677 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_free_vi() argument
7691 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); in t4_free_vi()
7709 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, in t4_set_rxmode() argument
7748 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); in t4_set_rxmode()
7753 ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror), in t4_set_rxmode()
7770 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, in t4_free_encap_mac_filt() argument
7791 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); in t4_free_encap_mac_filt()
7810 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, in t4_free_raw_mac_filt() argument
7842 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); in t4_free_raw_mac_filt()
7861 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, in t4_alloc_encap_mac_filt() argument
7888 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); in t4_alloc_encap_mac_filt()
7909 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, in t4_alloc_raw_mac_filt() argument
7940 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); in t4_alloc_raw_mac_filt()
7972 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, in t4_alloc_mac_filt() argument
7979 unsigned int max_naddr = adap->params.arch.mps_tcam_size; in t4_alloc_mac_filt()
8016 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); in t4_alloc_mac_filt()
8057 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, in t4_free_mac_filt() argument
8064 unsigned int max_naddr = is_t4(adap->params.chip) ? in t4_free_mac_filt()
8098 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); in t4_free_mac_filt()
8138 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, in t4_change_mac() argument
8144 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size; in t4_change_mac()
8160 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); in t4_change_mac()
8166 if (adap->params.viid_smt_extn_support) { in t4_change_mac()
8175 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= in t4_change_mac()
8197 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, in t4_set_addr_hash() argument
8210 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); in t4_set_addr_hash()
8225 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, in t4_enable_vi_params() argument
8238 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); in t4_enable_vi_params()
8251 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, in t4_enable_vi() argument
8254 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); in t4_enable_vi()
8272 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox, in t4_enable_pi_params() argument
8276 int ret = t4_enable_vi_params(adap, mbox, pi->viid, in t4_enable_pi_params()
8280 t4_os_link_changed(adap, pi->port_id, in t4_enable_pi_params()
8294 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, in t4_identify_port() argument
8305 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_identify_port()
8323 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_iq_stop() argument
8338 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_iq_stop()
8354 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_iq_free() argument
8369 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_iq_free()
8382 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_eth_eq_free() argument
8394 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_eth_eq_free()
8407 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_ctrl_eq_free() argument
8419 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_ctrl_eq_free()
8432 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, in t4_ofld_eq_free() argument
8444 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); in t4_ofld_eq_free()
8843 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) in t4_handle_fw_rpl() argument
8863 for_each_port(adap, i) { in t4_handle_fw_rpl()
8864 pi = adap2pinfo(adap, i); in t4_handle_fw_rpl()
8871 dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", in t4_handle_fw_rpl()
8952 static int t4_get_flash_params(struct adapter *adap) in t4_get_flash_params() argument
8972 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID); in t4_get_flash_params()
8974 ret = sf1_read(adap, 3, 0, 1, &flashid); in t4_get_flash_params()
8975 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */ in t4_get_flash_params()
8983 adap->params.sf_size = supported_flash[part].size_mb; in t4_get_flash_params()
8984 adap->params.sf_nsec = in t4_get_flash_params()
8985 adap->params.sf_size / SF_SEC_SIZE; in t4_get_flash_params()
9089 dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n", in t4_get_flash_params()
9095 adap->params.sf_size = size; in t4_get_flash_params()
9096 adap->params.sf_nsec = size / SF_SEC_SIZE; in t4_get_flash_params()
9099 if (adap->params.sf_size < FLASH_MIN_SIZE) in t4_get_flash_params()
9100 dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n", in t4_get_flash_params()
9101 flashid, adap->params.sf_size, FLASH_MIN_SIZE); in t4_get_flash_params()
9324 int t4_init_devlog_params(struct adapter *adap) in t4_init_devlog_params() argument
9326 struct devlog_params *dparams = &adap->params.devlog; in t4_init_devlog_params()
9337 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG)); in t4_init_devlog_params()
9357 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), in t4_init_devlog_params()
9409 int t4_init_tp_params(struct adapter *adap, bool sleep_ok) in t4_init_tp_params() argument
9415 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A); in t4_init_tp_params()
9416 adap->params.tp.tre = TIMERRESOLUTION_G(v); in t4_init_tp_params()
9417 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v); in t4_init_tp_params()
9421 adap->params.tp.tx_modq[chan] = chan; in t4_init_tp_params()
9431 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in t4_init_tp_params()
9434 dev_info(adap->pdev_dev, in t4_init_tp_params()
9438 adap->params.tp.vlan_pri_map = in t4_init_tp_params()
9440 adap->params.tp.filter_mask = in t4_init_tp_params()
9443 dev_info(adap->pdev_dev, in t4_init_tp_params()
9451 t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1, in t4_init_tp_params()
9461 adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map; in t4_init_tp_params()
9464 t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1, in t4_init_tp_params()
9470 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { in t4_init_tp_params()
9471 v = t4_read_reg(adap, TP_OUT_CONFIG_A); in t4_init_tp_params()
9472 adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0; in t4_init_tp_params()
9479 adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F); in t4_init_tp_params()
9480 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F); in t4_init_tp_params()
9481 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F); in t4_init_tp_params()
9482 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F); in t4_init_tp_params()
9483 adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F); in t4_init_tp_params()
9484 adap->params.tp.protocol_shift = t4_filter_field_shift(adap, in t4_init_tp_params()
9486 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap, in t4_init_tp_params()
9488 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap, in t4_init_tp_params()
9490 adap->params.tp.matchtype_shift = t4_filter_field_shift(adap, in t4_init_tp_params()
9492 adap->params.tp.frag_shift = t4_filter_field_shift(adap, in t4_init_tp_params()
9498 if ((adap->params.tp.ingress_config & VNIC_F) == 0) in t4_init_tp_params()
9499 adap->params.tp.vnic_shift = -1; in t4_init_tp_params()
9501 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A); in t4_init_tp_params()
9502 adap->params.tp.hash_filter_mask = v; in t4_init_tp_params()
9503 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A); in t4_init_tp_params()
9504 adap->params.tp.hash_filter_mask |= ((u64)v << 32); in t4_init_tp_params()
9517 int t4_filter_field_shift(const struct adapter *adap, int filter_sel) in t4_filter_field_shift() argument
9519 unsigned int filter_mode = adap->params.tp.vlan_pri_map; in t4_filter_field_shift()
9563 int t4_init_rss_mode(struct adapter *adap, int mbox) in t4_init_rss_mode() argument
9570 for_each_port(adap, i) { in t4_init_rss_mode()
9571 struct port_info *p = adap2pinfo(adap, i); in t4_init_rss_mode()
9578 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); in t4_init_rss_mode()
9696 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) in t4_port_init() argument
9701 for_each_port(adap, i) { in t4_port_init()
9702 struct port_info *pi = adap2pinfo(adap, i); in t4_port_init()
9704 while ((adap->params.portvec & (1 << j)) == 0) in t4_port_init()
9711 eth_hw_addr_set(adap->port[i], addr); in t4_port_init()
9743 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres) in t4_read_cimq_cfg() argument
9746 int cim_num_obq = is_t4(adap->params.chip) ? in t4_read_cimq_cfg()
9750 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F | in t4_read_cimq_cfg()
9752 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A); in t4_read_cimq_cfg()
9759 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F | in t4_read_cimq_cfg()
9761 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A); in t4_read_cimq_cfg()
9779 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) in t4_read_cim_ibq() argument
9798 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) | in t4_read_cim_ibq()
9800 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0, in t4_read_cim_ibq()
9804 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A); in t4_read_cim_ibq()
9806 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0); in t4_read_cim_ibq()
9821 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) in t4_read_cim_obq() argument
9825 int cim_num_obq = is_t4(adap->params.chip) ? in t4_read_cim_obq()
9831 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F | in t4_read_cim_obq()
9833 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A); in t4_read_cim_obq()
9841 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) | in t4_read_cim_obq()
9843 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0, in t4_read_cim_obq()
9847 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A); in t4_read_cim_obq()
9849 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0); in t4_read_cim_obq()
9862 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, in t4_cim_read() argument
9867 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F) in t4_cim_read()
9871 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr); in t4_cim_read()
9872 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F, in t4_cim_read()
9875 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A); in t4_cim_read()
9889 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, in t4_cim_write() argument
9894 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F) in t4_cim_write()
9898 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++); in t4_cim_write()
9899 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F); in t4_cim_write()
9900 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F, in t4_cim_write()
9906 static int t4_cim_write1(struct adapter *adap, unsigned int addr, in t4_cim_write1() argument
9909 return t4_cim_write(adap, addr, 1, &val); in t4_cim_write1()
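
The CIM access paths above (t4_read_cim_ibq(), t4_read_cim_obq(), t4_cim_read()/t4_cim_write()) all lean on the same wait idiom via t4_wait_op_done(): kick off an operation, then re-read a status register until a busy bit clears or the attempt budget runs out. A generic sketch of that idiom, with stubbed register and delay helpers rather than the kernel's; the signature only mirrors the shape of t4_wait_op_done(), not its exact arguments:

#include <stdint.h>

static uint32_t fake_status;                 /* pretend status register */

static uint32_t read_reg(uint32_t addr)      { (void)addr; return fake_status; }
static void delay_us(unsigned int us)        { (void)us; /* spin or sleep */ }

/* Re-read reg until the bits in mask become zero (want_zero != 0) or
 * non-zero (want_zero == 0), giving up after attempts tries (attempts > 0). */
int wait_op_done(uint32_t reg, uint32_t mask, int want_zero,
                 int attempts, unsigned int delay)
{
        for (;;) {
                uint32_t val = read_reg(reg) & mask;

                if (!!val == !want_zero)
                        return 0;            /* condition met */
                if (--attempts <= 0)
                        return -1;           /* timed out */
                if (delay)
                        delay_us(delay);
        }
}
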
9922 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) in t4_cim_read_la() argument
9927 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg); in t4_cim_read_la()
9932 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0); in t4_cim_read_la()
9937 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val); in t4_cim_read_la()
9945 for (i = 0; i < adap->params.cim_la_size; i++) { in t4_cim_read_la()
9946 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, in t4_cim_read_la()
9950 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val); in t4_cim_read_la()
9957 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]); in t4_cim_read_la()
9964 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9) in t4_cim_read_la()
9973 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, in t4_cim_read_la()
9991 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr) in t4_tp_read_la() argument
9996 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff; in t4_tp_read_la()
9998 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, in t4_tp_read_la()
9999 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F)); in t4_tp_read_la()
10001 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A); in t4_tp_read_la()
10011 val |= adap->params.tp.la_mask; in t4_tp_read_la()
10014 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val); in t4_tp_read_la()
10015 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A); in t4_tp_read_la()
10024 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, in t4_tp_read_la()
10025 cfg | adap->params.tp.la_mask); in t4_tp_read_la()
10163 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) in t4_load_cfg() argument
10168 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; in t4_load_cfg()
10170 cfg_addr = t4_flash_cfg_addr(adap); in t4_load_cfg()
10178 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n", in t4_load_cfg()
10185 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, in t4_load_cfg()
10199 ret = t4_write_flash(adap, addr, n, cfg_data, true); in t4_load_cfg()
10209 dev_err(adap->pdev_dev, "config file %s failed %d\n", in t4_load_cfg()
10263 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]) in t4_read_pace_tbl() argument
10268 t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i); in t4_read_pace_tbl()
10269 v = t4_read_reg(adap, TP_PACE_TABLE_A); in t4_read_pace_tbl()
10270 pace_vals[i] = dack_ticks_to_usec(adap, v); in t4_read_pace_tbl()
10284 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, in t4_get_tx_sched() argument
10291 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); in t4_get_tx_sched()
10299 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */ in t4_get_tx_sched()
10305 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); in t4_get_tx_sched()
10309 *ipg = (10000 * v) / core_ticks_per_usec(adap); in t4_get_tx_sched()
10322 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, in t4_sge_ctxt_rd() argument
10340 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); in t4_sge_ctxt_rd()
10362 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, in t4_sge_ctxt_rd_bd() argument
10367 t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype)); in t4_sge_ctxt_rd_bd()
10368 ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1); in t4_sge_ctxt_rd_bd()
10371 *data++ = t4_read_reg(adap, i); in t4_sge_ctxt_rd_bd()
10418 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, in t4_i2c_rd() argument
10449 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd), in t4_i2c_rd()
10470 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, in t4_set_vlan_acl() argument
10482 FW_ACL_VLAN_CMD_PFN_V(adap->pf) | in t4_set_vlan_acl()
10494 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL); in t4_set_vlan_acl()
10577 int t4_load_boot(struct adapter *adap, u8 *boot_data, in t4_load_boot() argument
10580 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; in t4_load_boot()
10593 dev_err(adap->pdev_dev, "boot image encroaching on firmware region\n"); in t4_load_boot()
10609 dev_err(adap->pdev_dev, "boot image too small/large\n"); in t4_load_boot()
10614 dev_err(adap->pdev_dev, "Boot image missing signature\n"); in t4_load_boot()
10620 dev_err(adap->pdev_dev, "PCI header missing signature\n"); in t4_load_boot()
10626 dev_err(adap->pdev_dev, "Vendor ID missing signature\n"); in t4_load_boot()
10636 ret = t4_flash_erase_sectors(adap, boot_sector >> 16, in t4_load_boot()
10646 pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &device_id); in t4_load_boot()
10669 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, in t4_load_boot()
10675 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, in t4_load_boot()
10680 dev_err(adap->pdev_dev, "boot image load failed, error %d\n", in t4_load_boot()
10707 int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) in t4_load_bootcfg() argument
10709 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; in t4_load_bootcfg()
10715 cfg_addr = t4_flash_bootcfg_addr(adap); in t4_load_bootcfg()
10723 dev_err(adap->pdev_dev, "bootcfg file too large, max is %u bytes\n", in t4_load_bootcfg()
10730 dev_err(adap->pdev_dev, "Wrong bootcfg signature\n"); in t4_load_bootcfg()
10737 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, in t4_load_bootcfg()
10751 ret = t4_write_flash(adap, addr, n, cfg_data, false); in t4_load_bootcfg()
10763 ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data, in t4_load_bootcfg()
10771 dev_err(adap->pdev_dev, "boot config data %s failed %d\n", in t4_load_bootcfg()