1*14b24e2bSVaishali Kulkarni /* 2*14b24e2bSVaishali Kulkarni * CDDL HEADER START 3*14b24e2bSVaishali Kulkarni * 4*14b24e2bSVaishali Kulkarni * The contents of this file are subject to the terms of the 5*14b24e2bSVaishali Kulkarni * Common Development and Distribution License, v.1, (the "License"). 6*14b24e2bSVaishali Kulkarni * You may not use this file except in compliance with the License. 7*14b24e2bSVaishali Kulkarni * 8*14b24e2bSVaishali Kulkarni * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9*14b24e2bSVaishali Kulkarni * or http://opensource.org/licenses/CDDL-1.0. 10*14b24e2bSVaishali Kulkarni * See the License for the specific language governing permissions 11*14b24e2bSVaishali Kulkarni * and limitations under the License. 12*14b24e2bSVaishali Kulkarni * 13*14b24e2bSVaishali Kulkarni * When distributing Covered Code, include this CDDL HEADER in each 14*14b24e2bSVaishali Kulkarni * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15*14b24e2bSVaishali Kulkarni * If applicable, add the following below this CDDL HEADER, with the 16*14b24e2bSVaishali Kulkarni * fields enclosed by brackets "[]" replaced with your own identifying 17*14b24e2bSVaishali Kulkarni * information: Portions Copyright [yyyy] [name of copyright owner] 18*14b24e2bSVaishali Kulkarni * 19*14b24e2bSVaishali Kulkarni * CDDL HEADER END 20*14b24e2bSVaishali Kulkarni */ 21*14b24e2bSVaishali Kulkarni 22*14b24e2bSVaishali Kulkarni /* 23*14b24e2bSVaishali Kulkarni * Copyright 2014-2017 Cavium, Inc. 24*14b24e2bSVaishali Kulkarni * The contents of this file are subject to the terms of the Common Development 25*14b24e2bSVaishali Kulkarni * and Distribution License, v.1, (the "License"). 26*14b24e2bSVaishali Kulkarni 27*14b24e2bSVaishali Kulkarni * You may not use this file except in compliance with the License. 
28*14b24e2bSVaishali Kulkarni 29*14b24e2bSVaishali Kulkarni * You can obtain a copy of the License at available 30*14b24e2bSVaishali Kulkarni * at http://opensource.org/licenses/CDDL-1.0 31*14b24e2bSVaishali Kulkarni 32*14b24e2bSVaishali Kulkarni * See the License for the specific language governing permissions and 33*14b24e2bSVaishali Kulkarni * limitations under the License. 34*14b24e2bSVaishali Kulkarni */ 35*14b24e2bSVaishali Kulkarni 36*14b24e2bSVaishali Kulkarni #include "bcm_osal.h" 37*14b24e2bSVaishali Kulkarni #include "reg_addr.h" 38*14b24e2bSVaishali Kulkarni #include "ecore_gtt_reg_addr.h" 39*14b24e2bSVaishali Kulkarni #include "ecore.h" 40*14b24e2bSVaishali Kulkarni #include "ecore_chain.h" 41*14b24e2bSVaishali Kulkarni #include "ecore_status.h" 42*14b24e2bSVaishali Kulkarni #include "ecore_hw.h" 43*14b24e2bSVaishali Kulkarni #include "ecore_rt_defs.h" 44*14b24e2bSVaishali Kulkarni #include "ecore_init_ops.h" 45*14b24e2bSVaishali Kulkarni #include "ecore_int.h" 46*14b24e2bSVaishali Kulkarni #include "ecore_cxt.h" 47*14b24e2bSVaishali Kulkarni #include "ecore_spq.h" 48*14b24e2bSVaishali Kulkarni #include "ecore_init_fw_funcs.h" 49*14b24e2bSVaishali Kulkarni #include "ecore_sp_commands.h" 50*14b24e2bSVaishali Kulkarni #include "ecore_dev_api.h" 51*14b24e2bSVaishali Kulkarni #include "ecore_sriov.h" 52*14b24e2bSVaishali Kulkarni #include "ecore_vf.h" 53*14b24e2bSVaishali Kulkarni #include "ecore_ll2.h" 54*14b24e2bSVaishali Kulkarni #include "ecore_fcoe.h" 55*14b24e2bSVaishali Kulkarni #include "ecore_iscsi.h" 56*14b24e2bSVaishali Kulkarni #include "ecore_ooo.h" 57*14b24e2bSVaishali Kulkarni #include "ecore_mcp.h" 58*14b24e2bSVaishali Kulkarni #include "ecore_hw_defs.h" 59*14b24e2bSVaishali Kulkarni #include "mcp_public.h" 60*14b24e2bSVaishali Kulkarni #include "ecore_roce.h" 61*14b24e2bSVaishali Kulkarni #include "ecore_iro.h" 62*14b24e2bSVaishali Kulkarni #include "nvm_cfg.h" 63*14b24e2bSVaishali Kulkarni #include "ecore_dev_api.h" 64*14b24e2bSVaishali 
Kulkarni #include "ecore_dcbx.h" 65*14b24e2bSVaishali Kulkarni #include "pcics_reg_driver.h" 66*14b24e2bSVaishali Kulkarni #include "ecore_l2.h" 67*14b24e2bSVaishali Kulkarni 68*14b24e2bSVaishali Kulkarni /* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM 69*14b24e2bSVaishali Kulkarni * registers involved are not split and thus configuration is a race where 70*14b24e2bSVaishali Kulkarni * some of the PFs configuration might be lost. 71*14b24e2bSVaishali Kulkarni * Eventually, this needs to move into a MFW-covered HW-lock as arbitration 72*14b24e2bSVaishali Kulkarni * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where 73*14b24e2bSVaishali Kulkarni * there's more than a single compiled ecore component in system]. 74*14b24e2bSVaishali Kulkarni */ 75*14b24e2bSVaishali Kulkarni static osal_spinlock_t qm_lock; 76*14b24e2bSVaishali Kulkarni static bool qm_lock_init = false; 77*14b24e2bSVaishali Kulkarni 78*14b24e2bSVaishali Kulkarni /* Configurable */ 79*14b24e2bSVaishali Kulkarni #define ECORE_MIN_DPIS (4) /* The minimal number of DPIs required to 80*14b24e2bSVaishali Kulkarni * load the driver. The number was 81*14b24e2bSVaishali Kulkarni * arbitrarily set. 82*14b24e2bSVaishali Kulkarni */ 83*14b24e2bSVaishali Kulkarni 84*14b24e2bSVaishali Kulkarni /* Derived */ 85*14b24e2bSVaishali Kulkarni #define ECORE_MIN_PWM_REGION ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS)) 86*14b24e2bSVaishali Kulkarni 87*14b24e2bSVaishali Kulkarni enum BAR_ID { 88*14b24e2bSVaishali Kulkarni BAR_ID_0, /* used for GRC */ 89*14b24e2bSVaishali Kulkarni BAR_ID_1 /* Used for doorbells */ 90*14b24e2bSVaishali Kulkarni }; 91*14b24e2bSVaishali Kulkarni 92*14b24e2bSVaishali Kulkarni static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id) 93*14b24e2bSVaishali Kulkarni { 94*14b24e2bSVaishali Kulkarni u32 bar_reg = (bar_id == BAR_ID_0 ? 
95*14b24e2bSVaishali Kulkarni PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); 96*14b24e2bSVaishali Kulkarni u32 val; 97*14b24e2bSVaishali Kulkarni 98*14b24e2bSVaishali Kulkarni if (IS_VF(p_hwfn->p_dev)) { 99*14b24e2bSVaishali Kulkarni /* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be 100*14b24e2bSVaishali Kulkarni * read from actual register, but we're currently not using 101*14b24e2bSVaishali Kulkarni * it for actual doorbelling. 102*14b24e2bSVaishali Kulkarni */ 103*14b24e2bSVaishali Kulkarni return 1 << 17; 104*14b24e2bSVaishali Kulkarni } 105*14b24e2bSVaishali Kulkarni 106*14b24e2bSVaishali Kulkarni val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); 107*14b24e2bSVaishali Kulkarni if (val) 108*14b24e2bSVaishali Kulkarni return 1 << (val + 15); 109*14b24e2bSVaishali Kulkarni 110*14b24e2bSVaishali Kulkarni /* The above registers were updated in the past only in CMT mode. Since 111*14b24e2bSVaishali Kulkarni * they were found to be useful MFW started updating them from 8.7.7.0. 112*14b24e2bSVaishali Kulkarni * In older MFW versions they are set to 0 which means disabled. 113*14b24e2bSVaishali Kulkarni */ 114*14b24e2bSVaishali Kulkarni if (p_hwfn->p_dev->num_hwfns > 1) { 115*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 116*14b24e2bSVaishali Kulkarni "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); 117*14b24e2bSVaishali Kulkarni return BAR_ID_0 ? 256 * 1024 : 512 * 1024; 118*14b24e2bSVaishali Kulkarni } else { 119*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 120*14b24e2bSVaishali Kulkarni "BAR size not configured. 
Assuming BAR size of 512kB for GRC and 512kB for DB\n"); 121*14b24e2bSVaishali Kulkarni return 512 * 1024; 122*14b24e2bSVaishali Kulkarni } 123*14b24e2bSVaishali Kulkarni } 124*14b24e2bSVaishali Kulkarni 125*14b24e2bSVaishali Kulkarni void ecore_init_dp(struct ecore_dev *p_dev, 126*14b24e2bSVaishali Kulkarni u32 dp_module, 127*14b24e2bSVaishali Kulkarni u8 dp_level, 128*14b24e2bSVaishali Kulkarni void *dp_ctx) 129*14b24e2bSVaishali Kulkarni { 130*14b24e2bSVaishali Kulkarni u32 i; 131*14b24e2bSVaishali Kulkarni 132*14b24e2bSVaishali Kulkarni p_dev->dp_level = dp_level; 133*14b24e2bSVaishali Kulkarni p_dev->dp_module = dp_module; 134*14b24e2bSVaishali Kulkarni p_dev->dp_ctx = dp_ctx; 135*14b24e2bSVaishali Kulkarni for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 136*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 137*14b24e2bSVaishali Kulkarni 138*14b24e2bSVaishali Kulkarni p_hwfn->dp_level = dp_level; 139*14b24e2bSVaishali Kulkarni p_hwfn->dp_module = dp_module; 140*14b24e2bSVaishali Kulkarni p_hwfn->dp_ctx = dp_ctx; 141*14b24e2bSVaishali Kulkarni } 142*14b24e2bSVaishali Kulkarni } 143*14b24e2bSVaishali Kulkarni 144*14b24e2bSVaishali Kulkarni void ecore_init_struct(struct ecore_dev *p_dev) 145*14b24e2bSVaishali Kulkarni { 146*14b24e2bSVaishali Kulkarni u8 i; 147*14b24e2bSVaishali Kulkarni 148*14b24e2bSVaishali Kulkarni for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 149*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 150*14b24e2bSVaishali Kulkarni 151*14b24e2bSVaishali Kulkarni p_hwfn->p_dev = p_dev; 152*14b24e2bSVaishali Kulkarni p_hwfn->my_id = i; 153*14b24e2bSVaishali Kulkarni p_hwfn->b_active = false; 154*14b24e2bSVaishali Kulkarni 155*14b24e2bSVaishali Kulkarni OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex); 156*14b24e2bSVaishali Kulkarni OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex); 157*14b24e2bSVaishali Kulkarni } 158*14b24e2bSVaishali Kulkarni 159*14b24e2bSVaishali Kulkarni /* hwfn 0 is always active */ 
160*14b24e2bSVaishali Kulkarni p_dev->hwfns[0].b_active = true; 161*14b24e2bSVaishali Kulkarni 162*14b24e2bSVaishali Kulkarni /* set the default cache alignment to 128 (may be overridden later) */ 163*14b24e2bSVaishali Kulkarni p_dev->cache_shift = 7; 164*14b24e2bSVaishali Kulkarni } 165*14b24e2bSVaishali Kulkarni 166*14b24e2bSVaishali Kulkarni static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) 167*14b24e2bSVaishali Kulkarni { 168*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 169*14b24e2bSVaishali Kulkarni 170*14b24e2bSVaishali Kulkarni OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params); 171*14b24e2bSVaishali Kulkarni qm_info->qm_pq_params = OSAL_NULL; 172*14b24e2bSVaishali Kulkarni OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params); 173*14b24e2bSVaishali Kulkarni qm_info->qm_vport_params = OSAL_NULL; 174*14b24e2bSVaishali Kulkarni OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params); 175*14b24e2bSVaishali Kulkarni qm_info->qm_port_params = OSAL_NULL; 176*14b24e2bSVaishali Kulkarni OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data); 177*14b24e2bSVaishali Kulkarni qm_info->wfq_data = OSAL_NULL; 178*14b24e2bSVaishali Kulkarni } 179*14b24e2bSVaishali Kulkarni 180*14b24e2bSVaishali Kulkarni void ecore_resc_free(struct ecore_dev *p_dev) 181*14b24e2bSVaishali Kulkarni { 182*14b24e2bSVaishali Kulkarni int i; 183*14b24e2bSVaishali Kulkarni 184*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) { 185*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) 186*14b24e2bSVaishali Kulkarni ecore_l2_free(&p_dev->hwfns[i]); 187*14b24e2bSVaishali Kulkarni return; 188*14b24e2bSVaishali Kulkarni } 189*14b24e2bSVaishali Kulkarni 190*14b24e2bSVaishali Kulkarni OSAL_FREE(p_dev, p_dev->fw_data); 191*14b24e2bSVaishali Kulkarni p_dev->fw_data = OSAL_NULL; 192*14b24e2bSVaishali Kulkarni 193*14b24e2bSVaishali Kulkarni OSAL_FREE(p_dev, p_dev->reset_stats); 194*14b24e2bSVaishali Kulkarni p_dev->reset_stats = OSAL_NULL; 195*14b24e2bSVaishali Kulkarni 196*14b24e2bSVaishali 
Kulkarni for_each_hwfn(p_dev, i) { 197*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 198*14b24e2bSVaishali Kulkarni 199*14b24e2bSVaishali Kulkarni ecore_cxt_mngr_free(p_hwfn); 200*14b24e2bSVaishali Kulkarni ecore_qm_info_free(p_hwfn); 201*14b24e2bSVaishali Kulkarni ecore_spq_free(p_hwfn); 202*14b24e2bSVaishali Kulkarni ecore_eq_free(p_hwfn); 203*14b24e2bSVaishali Kulkarni ecore_consq_free(p_hwfn); 204*14b24e2bSVaishali Kulkarni ecore_int_free(p_hwfn); 205*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_LL2 206*14b24e2bSVaishali Kulkarni ecore_ll2_free(p_hwfn); 207*14b24e2bSVaishali Kulkarni #endif 208*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_FCOE 209*14b24e2bSVaishali Kulkarni if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 210*14b24e2bSVaishali Kulkarni ecore_fcoe_free(p_hwfn); 211*14b24e2bSVaishali Kulkarni #endif 212*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_ISCSI 213*14b24e2bSVaishali Kulkarni if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 214*14b24e2bSVaishali Kulkarni ecore_iscsi_free(p_hwfn); 215*14b24e2bSVaishali Kulkarni ecore_ooo_free(p_hwfn); 216*14b24e2bSVaishali Kulkarni } 217*14b24e2bSVaishali Kulkarni #endif 218*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_ROCE 219*14b24e2bSVaishali Kulkarni if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) 220*14b24e2bSVaishali Kulkarni ecore_rdma_info_free(p_hwfn); 221*14b24e2bSVaishali Kulkarni #endif 222*14b24e2bSVaishali Kulkarni ecore_iov_free(p_hwfn); 223*14b24e2bSVaishali Kulkarni ecore_l2_free(p_hwfn); 224*14b24e2bSVaishali Kulkarni ecore_dmae_info_free(p_hwfn); 225*14b24e2bSVaishali Kulkarni ecore_dcbx_info_free(p_hwfn); 226*14b24e2bSVaishali Kulkarni /* @@@TBD Flush work-queue ?*/ 227*14b24e2bSVaishali Kulkarni } 228*14b24e2bSVaishali Kulkarni } 229*14b24e2bSVaishali Kulkarni 230*14b24e2bSVaishali Kulkarni /******************** QM initialization *******************/ 231*14b24e2bSVaishali Kulkarni 232*14b24e2bSVaishali Kulkarni /* bitmaps for indicating active traffic 
classes. Special case for Arrowhead 4 port */ 233*14b24e2bSVaishali Kulkarni #define ACTIVE_TCS_BMAP 0x9f /* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */ 234*14b24e2bSVaishali Kulkarni #define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */ 235*14b24e2bSVaishali Kulkarni 236*14b24e2bSVaishali Kulkarni /* determines the physical queue flags for a given PF. */ 237*14b24e2bSVaishali Kulkarni static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) 238*14b24e2bSVaishali Kulkarni { 239*14b24e2bSVaishali Kulkarni u32 flags; 240*14b24e2bSVaishali Kulkarni 241*14b24e2bSVaishali Kulkarni /* common flags */ 242*14b24e2bSVaishali Kulkarni flags = PQ_FLAGS_LB; 243*14b24e2bSVaishali Kulkarni 244*14b24e2bSVaishali Kulkarni /* feature flags */ 245*14b24e2bSVaishali Kulkarni if (IS_ECORE_SRIOV(p_hwfn->p_dev)) 246*14b24e2bSVaishali Kulkarni flags |= PQ_FLAGS_VFS; 247*14b24e2bSVaishali Kulkarni if (IS_ECORE_DCQCN(p_hwfn)) 248*14b24e2bSVaishali Kulkarni flags |= PQ_FLAGS_RLS; 249*14b24e2bSVaishali Kulkarni 250*14b24e2bSVaishali Kulkarni /* protocol flags */ 251*14b24e2bSVaishali Kulkarni switch (p_hwfn->hw_info.personality) { 252*14b24e2bSVaishali Kulkarni case ECORE_PCI_ETH: 253*14b24e2bSVaishali Kulkarni flags |= PQ_FLAGS_MCOS; 254*14b24e2bSVaishali Kulkarni break; 255*14b24e2bSVaishali Kulkarni case ECORE_PCI_FCOE: 256*14b24e2bSVaishali Kulkarni flags |= PQ_FLAGS_OFLD; 257*14b24e2bSVaishali Kulkarni break; 258*14b24e2bSVaishali Kulkarni case ECORE_PCI_ISCSI: 259*14b24e2bSVaishali Kulkarni flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 260*14b24e2bSVaishali Kulkarni break; 261*14b24e2bSVaishali Kulkarni case ECORE_PCI_ETH_ROCE: 262*14b24e2bSVaishali Kulkarni flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; 263*14b24e2bSVaishali Kulkarni break; 264*14b24e2bSVaishali Kulkarni case ECORE_PCI_ETH_IWARP: 265*14b24e2bSVaishali Kulkarni flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | 
PQ_FLAGS_OFLD; 266*14b24e2bSVaishali Kulkarni break; 267*14b24e2bSVaishali Kulkarni default: 268*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality); 269*14b24e2bSVaishali Kulkarni return 0; 270*14b24e2bSVaishali Kulkarni } 271*14b24e2bSVaishali Kulkarni 272*14b24e2bSVaishali Kulkarni return flags; 273*14b24e2bSVaishali Kulkarni } 274*14b24e2bSVaishali Kulkarni 275*14b24e2bSVaishali Kulkarni 276*14b24e2bSVaishali Kulkarni /* Getters for resource amounts necessary for qm initialization */ 277*14b24e2bSVaishali Kulkarni u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn) 278*14b24e2bSVaishali Kulkarni { 279*14b24e2bSVaishali Kulkarni return p_hwfn->hw_info.num_hw_tc; 280*14b24e2bSVaishali Kulkarni } 281*14b24e2bSVaishali Kulkarni 282*14b24e2bSVaishali Kulkarni u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn) 283*14b24e2bSVaishali Kulkarni { 284*14b24e2bSVaishali Kulkarni return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0; 285*14b24e2bSVaishali Kulkarni } 286*14b24e2bSVaishali Kulkarni 287*14b24e2bSVaishali Kulkarni #define NUM_DEFAULT_RLS 1 288*14b24e2bSVaishali Kulkarni 289*14b24e2bSVaishali Kulkarni u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn) 290*14b24e2bSVaishali Kulkarni { 291*14b24e2bSVaishali Kulkarni u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 292*14b24e2bSVaishali Kulkarni 293*14b24e2bSVaishali Kulkarni /* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */ 294*14b24e2bSVaishali Kulkarni num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), 295*14b24e2bSVaishali Kulkarni (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_VPORT), 296*14b24e2bSVaishali Kulkarni ROCE_DCQCN_RP_MAX_QPS)); 297*14b24e2bSVaishali Kulkarni 298*14b24e2bSVaishali Kulkarni /* make sure after we reserve the default and VF rls we'll have something left */ 299*14b24e2bSVaishali Kulkarni if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) { 
300*14b24e2bSVaishali Kulkarni if (IS_ECORE_DCQCN(p_hwfn)) 301*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs); 302*14b24e2bSVaishali Kulkarni return 0; 303*14b24e2bSVaishali Kulkarni } 304*14b24e2bSVaishali Kulkarni 305*14b24e2bSVaishali Kulkarni /* subtract rls necessary for VFs and one default one for the PF */ 306*14b24e2bSVaishali Kulkarni num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 307*14b24e2bSVaishali Kulkarni 308*14b24e2bSVaishali Kulkarni return num_pf_rls; 309*14b24e2bSVaishali Kulkarni } 310*14b24e2bSVaishali Kulkarni 311*14b24e2bSVaishali Kulkarni u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn) 312*14b24e2bSVaishali Kulkarni { 313*14b24e2bSVaishali Kulkarni u32 pq_flags = ecore_get_pq_flags(p_hwfn); 314*14b24e2bSVaishali Kulkarni 315*14b24e2bSVaishali Kulkarni /* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */ 316*14b24e2bSVaishali Kulkarni return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 317*14b24e2bSVaishali Kulkarni (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1; 318*14b24e2bSVaishali Kulkarni } 319*14b24e2bSVaishali Kulkarni 320*14b24e2bSVaishali Kulkarni /* calc amount of PQs according to the requested flags */ 321*14b24e2bSVaishali Kulkarni u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn) 322*14b24e2bSVaishali Kulkarni { 323*14b24e2bSVaishali Kulkarni u32 pq_flags = ecore_get_pq_flags(p_hwfn); 324*14b24e2bSVaishali Kulkarni 325*14b24e2bSVaishali Kulkarni return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 326*14b24e2bSVaishali Kulkarni (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) + 327*14b24e2bSVaishali Kulkarni (!!(PQ_FLAGS_LB & pq_flags)) + 328*14b24e2bSVaishali Kulkarni (!!(PQ_FLAGS_OOO & pq_flags)) + 329*14b24e2bSVaishali Kulkarni (!!(PQ_FLAGS_ACK & pq_flags)) + 330*14b24e2bSVaishali Kulkarni 
(!!(PQ_FLAGS_OFLD & pq_flags)) + 331*14b24e2bSVaishali Kulkarni (!!(PQ_FLAGS_LLT & pq_flags)) + 332*14b24e2bSVaishali Kulkarni (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn); 333*14b24e2bSVaishali Kulkarni } 334*14b24e2bSVaishali Kulkarni 335*14b24e2bSVaishali Kulkarni /* initialize the top level QM params */ 336*14b24e2bSVaishali Kulkarni static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn) 337*14b24e2bSVaishali Kulkarni { 338*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 339*14b24e2bSVaishali Kulkarni bool four_port; 340*14b24e2bSVaishali Kulkarni 341*14b24e2bSVaishali Kulkarni /* pq and vport bases for this PF */ 342*14b24e2bSVaishali Kulkarni qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ); 343*14b24e2bSVaishali Kulkarni qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT); 344*14b24e2bSVaishali Kulkarni 345*14b24e2bSVaishali Kulkarni /* rate limiting and weighted fair queueing are always enabled */ 346*14b24e2bSVaishali Kulkarni qm_info->vport_rl_en = 1; 347*14b24e2bSVaishali Kulkarni qm_info->vport_wfq_en = 1; 348*14b24e2bSVaishali Kulkarni 349*14b24e2bSVaishali Kulkarni /* TC config is different for AH 4 port */ 350*14b24e2bSVaishali Kulkarni four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2; 351*14b24e2bSVaishali Kulkarni 352*14b24e2bSVaishali Kulkarni /* in AH 4 port we have fewer TCs per port */ 353*14b24e2bSVaishali Kulkarni qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS; 354*14b24e2bSVaishali Kulkarni 355*14b24e2bSVaishali Kulkarni /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */ 356*14b24e2bSVaishali Kulkarni if (!qm_info->ooo_tc) 357*14b24e2bSVaishali Kulkarni qm_info->ooo_tc = four_port ? 
DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC; 358*14b24e2bSVaishali Kulkarni } 359*14b24e2bSVaishali Kulkarni 360*14b24e2bSVaishali Kulkarni /* initialize qm vport params */ 361*14b24e2bSVaishali Kulkarni static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn) 362*14b24e2bSVaishali Kulkarni { 363*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 364*14b24e2bSVaishali Kulkarni u8 i; 365*14b24e2bSVaishali Kulkarni 366*14b24e2bSVaishali Kulkarni /* all vports participate in weighted fair queueing */ 367*14b24e2bSVaishali Kulkarni for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++) 368*14b24e2bSVaishali Kulkarni qm_info->qm_vport_params[i].vport_wfq = 1; 369*14b24e2bSVaishali Kulkarni } 370*14b24e2bSVaishali Kulkarni 371*14b24e2bSVaishali Kulkarni /* initialize qm port params */ 372*14b24e2bSVaishali Kulkarni static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn) 373*14b24e2bSVaishali Kulkarni { 374*14b24e2bSVaishali Kulkarni /* Initialize qm port parameters */ 375*14b24e2bSVaishali Kulkarni u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine; 376*14b24e2bSVaishali Kulkarni 377*14b24e2bSVaishali Kulkarni /* indicate how ooo and high pri traffic is dealt with */ 378*14b24e2bSVaishali Kulkarni active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 
379*14b24e2bSVaishali Kulkarni ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP; 380*14b24e2bSVaishali Kulkarni 381*14b24e2bSVaishali Kulkarni for (i = 0; i < num_ports; i++) { 382*14b24e2bSVaishali Kulkarni struct init_qm_port_params *p_qm_port = 383*14b24e2bSVaishali Kulkarni &p_hwfn->qm_info.qm_port_params[i]; 384*14b24e2bSVaishali Kulkarni 385*14b24e2bSVaishali Kulkarni p_qm_port->active = 1; 386*14b24e2bSVaishali Kulkarni p_qm_port->active_phys_tcs = active_phys_tcs; 387*14b24e2bSVaishali Kulkarni p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; 388*14b24e2bSVaishali Kulkarni p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 389*14b24e2bSVaishali Kulkarni } 390*14b24e2bSVaishali Kulkarni } 391*14b24e2bSVaishali Kulkarni 392*14b24e2bSVaishali Kulkarni /* Reset the params which must be reset for qm init. QM init may be called as 393*14b24e2bSVaishali Kulkarni * a result of flows other than driver load (e.g. dcbx renegotiation). Other 394*14b24e2bSVaishali Kulkarni * params may be affected by the init but would simply recalculate to the same 395*14b24e2bSVaishali Kulkarni * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 396*14b24e2bSVaishali Kulkarni * affected as these amounts stay the same. 
397*14b24e2bSVaishali Kulkarni */ 398*14b24e2bSVaishali Kulkarni static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn) 399*14b24e2bSVaishali Kulkarni { 400*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 401*14b24e2bSVaishali Kulkarni 402*14b24e2bSVaishali Kulkarni qm_info->num_pqs = 0; 403*14b24e2bSVaishali Kulkarni qm_info->num_vports = 0; 404*14b24e2bSVaishali Kulkarni qm_info->num_pf_rls = 0; 405*14b24e2bSVaishali Kulkarni qm_info->num_vf_pqs = 0; 406*14b24e2bSVaishali Kulkarni qm_info->first_vf_pq = 0; 407*14b24e2bSVaishali Kulkarni qm_info->first_mcos_pq = 0; 408*14b24e2bSVaishali Kulkarni qm_info->first_rl_pq = 0; 409*14b24e2bSVaishali Kulkarni } 410*14b24e2bSVaishali Kulkarni 411*14b24e2bSVaishali Kulkarni static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn) 412*14b24e2bSVaishali Kulkarni { 413*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 414*14b24e2bSVaishali Kulkarni 415*14b24e2bSVaishali Kulkarni qm_info->num_vports++; 416*14b24e2bSVaishali Kulkarni 417*14b24e2bSVaishali Kulkarni if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 418*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 419*14b24e2bSVaishali Kulkarni } 420*14b24e2bSVaishali Kulkarni 421*14b24e2bSVaishali Kulkarni /* initialize a single pq and manage qm_info resources accounting. 422*14b24e2bSVaishali Kulkarni * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF) 423*14b24e2bSVaishali Kulkarni * and whether a new vport is allocated to the pq or not (i.e. 
vport will be shared) 424*14b24e2bSVaishali Kulkarni */ 425*14b24e2bSVaishali Kulkarni 426*14b24e2bSVaishali Kulkarni /* flags for pq init */ 427*14b24e2bSVaishali Kulkarni #define PQ_INIT_SHARE_VPORT (1 << 0) 428*14b24e2bSVaishali Kulkarni #define PQ_INIT_PF_RL (1 << 1) 429*14b24e2bSVaishali Kulkarni #define PQ_INIT_VF_RL (1 << 2) 430*14b24e2bSVaishali Kulkarni 431*14b24e2bSVaishali Kulkarni /* defines for pq init */ 432*14b24e2bSVaishali Kulkarni #define PQ_INIT_DEFAULT_WRR_GROUP 1 433*14b24e2bSVaishali Kulkarni #define PQ_INIT_DEFAULT_TC 0 434*14b24e2bSVaishali Kulkarni #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 435*14b24e2bSVaishali Kulkarni 436*14b24e2bSVaishali Kulkarni static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, 437*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info, 438*14b24e2bSVaishali Kulkarni u8 tc, u32 pq_init_flags) 439*14b24e2bSVaishali Kulkarni { 440*14b24e2bSVaishali Kulkarni u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn); 441*14b24e2bSVaishali Kulkarni 442*14b24e2bSVaishali Kulkarni if (pq_idx > max_pq) 443*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "pq overflow! 
pq %d, max pq %d\n", pq_idx, max_pq); 444*14b24e2bSVaishali Kulkarni 445*14b24e2bSVaishali Kulkarni /* init pq params */ 446*14b24e2bSVaishali Kulkarni qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; 447*14b24e2bSVaishali Kulkarni qm_info->qm_pq_params[pq_idx].tc_id = tc; 448*14b24e2bSVaishali Kulkarni qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 449*14b24e2bSVaishali Kulkarni qm_info->qm_pq_params[pq_idx].rl_valid = 450*14b24e2bSVaishali Kulkarni (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); 451*14b24e2bSVaishali Kulkarni 452*14b24e2bSVaishali Kulkarni /* qm params accounting */ 453*14b24e2bSVaishali Kulkarni qm_info->num_pqs++; 454*14b24e2bSVaishali Kulkarni if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 455*14b24e2bSVaishali Kulkarni qm_info->num_vports++; 456*14b24e2bSVaishali Kulkarni 457*14b24e2bSVaishali Kulkarni if (pq_init_flags & PQ_INIT_PF_RL) 458*14b24e2bSVaishali Kulkarni qm_info->num_pf_rls++; 459*14b24e2bSVaishali Kulkarni 460*14b24e2bSVaishali Kulkarni if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 461*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 462*14b24e2bSVaishali Kulkarni 463*14b24e2bSVaishali Kulkarni if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) 464*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "rl overflow! 
qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn)); 465*14b24e2bSVaishali Kulkarni } 466*14b24e2bSVaishali Kulkarni 467*14b24e2bSVaishali Kulkarni /* get pq index according to PQ_FLAGS */ 468*14b24e2bSVaishali Kulkarni static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn, 469*14b24e2bSVaishali Kulkarni u32 pq_flags) 470*14b24e2bSVaishali Kulkarni { 471*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 472*14b24e2bSVaishali Kulkarni 473*14b24e2bSVaishali Kulkarni /* Can't have multiple flags set here */ 474*14b24e2bSVaishali Kulkarni if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 475*14b24e2bSVaishali Kulkarni goto err; 476*14b24e2bSVaishali Kulkarni 477*14b24e2bSVaishali Kulkarni switch (pq_flags) { 478*14b24e2bSVaishali Kulkarni case PQ_FLAGS_RLS: 479*14b24e2bSVaishali Kulkarni return &qm_info->first_rl_pq; 480*14b24e2bSVaishali Kulkarni case PQ_FLAGS_MCOS: 481*14b24e2bSVaishali Kulkarni return &qm_info->first_mcos_pq; 482*14b24e2bSVaishali Kulkarni case PQ_FLAGS_LB: 483*14b24e2bSVaishali Kulkarni return &qm_info->pure_lb_pq; 484*14b24e2bSVaishali Kulkarni case PQ_FLAGS_OOO: 485*14b24e2bSVaishali Kulkarni return &qm_info->ooo_pq; 486*14b24e2bSVaishali Kulkarni case PQ_FLAGS_ACK: 487*14b24e2bSVaishali Kulkarni return &qm_info->pure_ack_pq; 488*14b24e2bSVaishali Kulkarni case PQ_FLAGS_OFLD: 489*14b24e2bSVaishali Kulkarni return &qm_info->offload_pq; 490*14b24e2bSVaishali Kulkarni case PQ_FLAGS_LLT: 491*14b24e2bSVaishali Kulkarni return &qm_info->low_latency_pq; 492*14b24e2bSVaishali Kulkarni case PQ_FLAGS_VFS: 493*14b24e2bSVaishali Kulkarni return &qm_info->first_vf_pq; 494*14b24e2bSVaishali Kulkarni default: 495*14b24e2bSVaishali Kulkarni goto err; 496*14b24e2bSVaishali Kulkarni } 497*14b24e2bSVaishali Kulkarni 498*14b24e2bSVaishali Kulkarni err: 499*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 
500*14b24e2bSVaishali Kulkarni return OSAL_NULL; 501*14b24e2bSVaishali Kulkarni } 502*14b24e2bSVaishali Kulkarni 503*14b24e2bSVaishali Kulkarni /* save pq index in qm info */ 504*14b24e2bSVaishali Kulkarni static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn, 505*14b24e2bSVaishali Kulkarni u32 pq_flags, u16 pq_val) 506*14b24e2bSVaishali Kulkarni { 507*14b24e2bSVaishali Kulkarni u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 508*14b24e2bSVaishali Kulkarni 509*14b24e2bSVaishali Kulkarni *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 510*14b24e2bSVaishali Kulkarni } 511*14b24e2bSVaishali Kulkarni 512*14b24e2bSVaishali Kulkarni /* get tx pq index, with the PQ TX base already set (ready for context init) */ 513*14b24e2bSVaishali Kulkarni u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags) 514*14b24e2bSVaishali Kulkarni { 515*14b24e2bSVaishali Kulkarni u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 516*14b24e2bSVaishali Kulkarni 517*14b24e2bSVaishali Kulkarni return *base_pq_idx + CM_TX_PQ_BASE; 518*14b24e2bSVaishali Kulkarni } 519*14b24e2bSVaishali Kulkarni 520*14b24e2bSVaishali Kulkarni u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc) 521*14b24e2bSVaishali Kulkarni { 522*14b24e2bSVaishali Kulkarni u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn); 523*14b24e2bSVaishali Kulkarni 524*14b24e2bSVaishali Kulkarni if (tc > max_tc) 525*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 526*14b24e2bSVaishali Kulkarni 527*14b24e2bSVaishali Kulkarni return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 528*14b24e2bSVaishali Kulkarni } 529*14b24e2bSVaishali Kulkarni 530*14b24e2bSVaishali Kulkarni u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) 531*14b24e2bSVaishali Kulkarni { 532*14b24e2bSVaishali Kulkarni u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn); 533*14b24e2bSVaishali Kulkarni 534*14b24e2bSVaishali Kulkarni if (vf > max_vf) 
535*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 536*14b24e2bSVaishali Kulkarni 537*14b24e2bSVaishali Kulkarni return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 538*14b24e2bSVaishali Kulkarni } 539*14b24e2bSVaishali Kulkarni 540*14b24e2bSVaishali Kulkarni u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) 541*14b24e2bSVaishali Kulkarni { 542*14b24e2bSVaishali Kulkarni u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); 543*14b24e2bSVaishali Kulkarni 544*14b24e2bSVaishali Kulkarni if (rl > max_rl) 545*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); 546*14b24e2bSVaishali Kulkarni 547*14b24e2bSVaishali Kulkarni return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 548*14b24e2bSVaishali Kulkarni } 549*14b24e2bSVaishali Kulkarni 550*14b24e2bSVaishali Kulkarni /* Functions for creating specific types of pqs */ 551*14b24e2bSVaishali Kulkarni static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) 552*14b24e2bSVaishali Kulkarni { 553*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 554*14b24e2bSVaishali Kulkarni 555*14b24e2bSVaishali Kulkarni if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 556*14b24e2bSVaishali Kulkarni return; 557*14b24e2bSVaishali Kulkarni 558*14b24e2bSVaishali Kulkarni ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 559*14b24e2bSVaishali Kulkarni ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 560*14b24e2bSVaishali Kulkarni } 561*14b24e2bSVaishali Kulkarni 562*14b24e2bSVaishali Kulkarni static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn) 563*14b24e2bSVaishali Kulkarni { 564*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 565*14b24e2bSVaishali Kulkarni 566*14b24e2bSVaishali Kulkarni if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 567*14b24e2bSVaishali Kulkarni return; 568*14b24e2bSVaishali Kulkarni 569*14b24e2bSVaishali Kulkarni 
ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 570*14b24e2bSVaishali Kulkarni ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 571*14b24e2bSVaishali Kulkarni } 572*14b24e2bSVaishali Kulkarni 573*14b24e2bSVaishali Kulkarni static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn) 574*14b24e2bSVaishali Kulkarni { 575*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 576*14b24e2bSVaishali Kulkarni 577*14b24e2bSVaishali Kulkarni if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 578*14b24e2bSVaishali Kulkarni return; 579*14b24e2bSVaishali Kulkarni 580*14b24e2bSVaishali Kulkarni ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 581*14b24e2bSVaishali Kulkarni ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 582*14b24e2bSVaishali Kulkarni } 583*14b24e2bSVaishali Kulkarni 584*14b24e2bSVaishali Kulkarni static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn) 585*14b24e2bSVaishali Kulkarni { 586*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 587*14b24e2bSVaishali Kulkarni 588*14b24e2bSVaishali Kulkarni if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 589*14b24e2bSVaishali Kulkarni return; 590*14b24e2bSVaishali Kulkarni 591*14b24e2bSVaishali Kulkarni ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 592*14b24e2bSVaishali Kulkarni ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 593*14b24e2bSVaishali Kulkarni } 594*14b24e2bSVaishali Kulkarni 595*14b24e2bSVaishali Kulkarni static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn) 596*14b24e2bSVaishali Kulkarni { 597*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 598*14b24e2bSVaishali Kulkarni 599*14b24e2bSVaishali Kulkarni if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) 600*14b24e2bSVaishali Kulkarni return; 601*14b24e2bSVaishali Kulkarni 602*14b24e2bSVaishali Kulkarni ecore_init_qm_set_idx(p_hwfn, 
PQ_FLAGS_LLT, qm_info->num_pqs); 603*14b24e2bSVaishali Kulkarni ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 604*14b24e2bSVaishali Kulkarni } 605*14b24e2bSVaishali Kulkarni 606*14b24e2bSVaishali Kulkarni static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn) 607*14b24e2bSVaishali Kulkarni { 608*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 609*14b24e2bSVaishali Kulkarni u8 tc_idx; 610*14b24e2bSVaishali Kulkarni 611*14b24e2bSVaishali Kulkarni if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 612*14b24e2bSVaishali Kulkarni return; 613*14b24e2bSVaishali Kulkarni 614*14b24e2bSVaishali Kulkarni ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 615*14b24e2bSVaishali Kulkarni for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++) 616*14b24e2bSVaishali Kulkarni ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 617*14b24e2bSVaishali Kulkarni } 618*14b24e2bSVaishali Kulkarni 619*14b24e2bSVaishali Kulkarni static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn) 620*14b24e2bSVaishali Kulkarni { 621*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 622*14b24e2bSVaishali Kulkarni u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 623*14b24e2bSVaishali Kulkarni 624*14b24e2bSVaishali Kulkarni if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 625*14b24e2bSVaishali Kulkarni return; 626*14b24e2bSVaishali Kulkarni 627*14b24e2bSVaishali Kulkarni ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 628*14b24e2bSVaishali Kulkarni qm_info->num_vf_pqs = num_vfs; 629*14b24e2bSVaishali Kulkarni for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 630*14b24e2bSVaishali Kulkarni ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); 631*14b24e2bSVaishali Kulkarni } 632*14b24e2bSVaishali Kulkarni 633*14b24e2bSVaishali Kulkarni static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn) 634*14b24e2bSVaishali Kulkarni { 
635*14b24e2bSVaishali Kulkarni u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn); 636*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 637*14b24e2bSVaishali Kulkarni 638*14b24e2bSVaishali Kulkarni if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 639*14b24e2bSVaishali Kulkarni return; 640*14b24e2bSVaishali Kulkarni 641*14b24e2bSVaishali Kulkarni ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 642*14b24e2bSVaishali Kulkarni for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 643*14b24e2bSVaishali Kulkarni ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); 644*14b24e2bSVaishali Kulkarni } 645*14b24e2bSVaishali Kulkarni 646*14b24e2bSVaishali Kulkarni static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn) 647*14b24e2bSVaishali Kulkarni { 648*14b24e2bSVaishali Kulkarni /* rate limited pqs, must come first (FW assumption) */ 649*14b24e2bSVaishali Kulkarni ecore_init_qm_rl_pqs(p_hwfn); 650*14b24e2bSVaishali Kulkarni 651*14b24e2bSVaishali Kulkarni /* pqs for multi cos */ 652*14b24e2bSVaishali Kulkarni ecore_init_qm_mcos_pqs(p_hwfn); 653*14b24e2bSVaishali Kulkarni 654*14b24e2bSVaishali Kulkarni /* pure loopback pq */ 655*14b24e2bSVaishali Kulkarni ecore_init_qm_lb_pq(p_hwfn); 656*14b24e2bSVaishali Kulkarni 657*14b24e2bSVaishali Kulkarni /* out of order pq */ 658*14b24e2bSVaishali Kulkarni ecore_init_qm_ooo_pq(p_hwfn); 659*14b24e2bSVaishali Kulkarni 660*14b24e2bSVaishali Kulkarni /* pure ack pq */ 661*14b24e2bSVaishali Kulkarni ecore_init_qm_pure_ack_pq(p_hwfn); 662*14b24e2bSVaishali Kulkarni 663*14b24e2bSVaishali Kulkarni /* pq for offloaded protocol */ 664*14b24e2bSVaishali Kulkarni ecore_init_qm_offload_pq(p_hwfn); 665*14b24e2bSVaishali Kulkarni 666*14b24e2bSVaishali Kulkarni /* low latency pq */ 667*14b24e2bSVaishali Kulkarni ecore_init_qm_low_latency_pq(p_hwfn); 668*14b24e2bSVaishali Kulkarni 669*14b24e2bSVaishali Kulkarni /* done sharing vports */ 670*14b24e2bSVaishali Kulkarni 
ecore_init_qm_advance_vport(p_hwfn); 671*14b24e2bSVaishali Kulkarni 672*14b24e2bSVaishali Kulkarni /* pqs for vfs */ 673*14b24e2bSVaishali Kulkarni ecore_init_qm_vf_pqs(p_hwfn); 674*14b24e2bSVaishali Kulkarni } 675*14b24e2bSVaishali Kulkarni 676*14b24e2bSVaishali Kulkarni /* compare values of getters against resources amounts */ 677*14b24e2bSVaishali Kulkarni static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn) 678*14b24e2bSVaishali Kulkarni { 679*14b24e2bSVaishali Kulkarni if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) { 680*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 681*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 682*14b24e2bSVaishali Kulkarni } 683*14b24e2bSVaishali Kulkarni 684*14b24e2bSVaishali Kulkarni if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) { 685*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 686*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 687*14b24e2bSVaishali Kulkarni } 688*14b24e2bSVaishali Kulkarni 689*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 690*14b24e2bSVaishali Kulkarni } 691*14b24e2bSVaishali Kulkarni 692*14b24e2bSVaishali Kulkarni /* 693*14b24e2bSVaishali Kulkarni * Function for verbose printing of the qm initialization results 694*14b24e2bSVaishali Kulkarni */ 695*14b24e2bSVaishali Kulkarni static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) 696*14b24e2bSVaishali Kulkarni { 697*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 698*14b24e2bSVaishali Kulkarni struct init_qm_vport_params *vport; 699*14b24e2bSVaishali Kulkarni struct init_qm_port_params *port; 700*14b24e2bSVaishali Kulkarni struct init_qm_pq_params *pq; 701*14b24e2bSVaishali Kulkarni int i, tc; 702*14b24e2bSVaishali Kulkarni 703*14b24e2bSVaishali Kulkarni /* top level params */ 704*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "qm init top level params: 
start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 705*14b24e2bSVaishali Kulkarni qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->offload_pq, qm_info->pure_ack_pq); 706*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", 707*14b24e2bSVaishali Kulkarni qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port); 708*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 709*14b24e2bSVaishali Kulkarni qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn)); 710*14b24e2bSVaishali Kulkarni 711*14b24e2bSVaishali Kulkarni /* port table */ 712*14b24e2bSVaishali Kulkarni for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) { 713*14b24e2bSVaishali Kulkarni port = &(qm_info->qm_port_params[i]); 714*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", 715*14b24e2bSVaishali Kulkarni i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved); 716*14b24e2bSVaishali Kulkarni } 717*14b24e2bSVaishali Kulkarni 718*14b24e2bSVaishali Kulkarni /* vport table */ 719*14b24e2bSVaishali Kulkarni for (i = 0; i < qm_info->num_vports; i++) { 720*14b24e2bSVaishali Kulkarni vport = &(qm_info->qm_vport_params[i]); 721*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", 722*14b24e2bSVaishali Kulkarni qm_info->start_vport + i, vport->vport_rl, vport->vport_wfq); 723*14b24e2bSVaishali Kulkarni for (tc = 0; tc < NUM_OF_TCS; tc++) 
724*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]); 725*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n"); 726*14b24e2bSVaishali Kulkarni } 727*14b24e2bSVaishali Kulkarni 728*14b24e2bSVaishali Kulkarni /* pq table */ 729*14b24e2bSVaishali Kulkarni for (i = 0; i < qm_info->num_pqs; i++) { 730*14b24e2bSVaishali Kulkarni pq = &(qm_info->qm_pq_params[i]); 731*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", 732*14b24e2bSVaishali Kulkarni qm_info->start_pq + i, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid); 733*14b24e2bSVaishali Kulkarni } 734*14b24e2bSVaishali Kulkarni } 735*14b24e2bSVaishali Kulkarni 736*14b24e2bSVaishali Kulkarni static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn) 737*14b24e2bSVaishali Kulkarni { 738*14b24e2bSVaishali Kulkarni /* reset params required for init run */ 739*14b24e2bSVaishali Kulkarni ecore_init_qm_reset_params(p_hwfn); 740*14b24e2bSVaishali Kulkarni 741*14b24e2bSVaishali Kulkarni /* init QM top level params */ 742*14b24e2bSVaishali Kulkarni ecore_init_qm_params(p_hwfn); 743*14b24e2bSVaishali Kulkarni 744*14b24e2bSVaishali Kulkarni /* init QM port params */ 745*14b24e2bSVaishali Kulkarni ecore_init_qm_port_params(p_hwfn); 746*14b24e2bSVaishali Kulkarni 747*14b24e2bSVaishali Kulkarni /* init QM vport params */ 748*14b24e2bSVaishali Kulkarni ecore_init_qm_vport_params(p_hwfn); 749*14b24e2bSVaishali Kulkarni 750*14b24e2bSVaishali Kulkarni /* init QM physical queue params */ 751*14b24e2bSVaishali Kulkarni ecore_init_qm_pq_params(p_hwfn); 752*14b24e2bSVaishali Kulkarni 753*14b24e2bSVaishali Kulkarni /* display all that init */ 754*14b24e2bSVaishali Kulkarni ecore_dp_init_qm_params(p_hwfn); 755*14b24e2bSVaishali Kulkarni } 756*14b24e2bSVaishali Kulkarni 757*14b24e2bSVaishali Kulkarni /* This function reconfigures the QM pf on the fly. 
758*14b24e2bSVaishali Kulkarni * For this purpose we: 759*14b24e2bSVaishali Kulkarni * 1. reconfigure the QM database 760*14b24e2bSVaishali Kulkarni * 2. set new values to runtime array 761*14b24e2bSVaishali Kulkarni * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 762*14b24e2bSVaishali Kulkarni * 4. activate init tool in QM_PF stage 763*14b24e2bSVaishali Kulkarni * 5. send an sdm_qm_cmd through rbc interface to release the QM 764*14b24e2bSVaishali Kulkarni */ 765*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, 766*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt) 767*14b24e2bSVaishali Kulkarni { 768*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 769*14b24e2bSVaishali Kulkarni bool b_rc; 770*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 771*14b24e2bSVaishali Kulkarni 772*14b24e2bSVaishali Kulkarni /* initialize ecore's qm data structure */ 773*14b24e2bSVaishali Kulkarni ecore_init_qm_info(p_hwfn); 774*14b24e2bSVaishali Kulkarni 775*14b24e2bSVaishali Kulkarni /* stop PF's qm queues */ 776*14b24e2bSVaishali Kulkarni OSAL_SPIN_LOCK(&qm_lock); 777*14b24e2bSVaishali Kulkarni b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 778*14b24e2bSVaishali Kulkarni qm_info->start_pq, qm_info->num_pqs); 779*14b24e2bSVaishali Kulkarni OSAL_SPIN_UNLOCK(&qm_lock); 780*14b24e2bSVaishali Kulkarni if (!b_rc) 781*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 782*14b24e2bSVaishali Kulkarni 783*14b24e2bSVaishali Kulkarni /* clear the QM_PF runtime phase leftovers from previous init */ 784*14b24e2bSVaishali Kulkarni ecore_init_clear_rt_data(p_hwfn); 785*14b24e2bSVaishali Kulkarni 786*14b24e2bSVaishali Kulkarni /* prepare QM portion of runtime array */ 787*14b24e2bSVaishali Kulkarni ecore_qm_init_pf(p_hwfn); 788*14b24e2bSVaishali Kulkarni 789*14b24e2bSVaishali Kulkarni /* activate init tool on runtime array */ 790*14b24e2bSVaishali Kulkarni rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, 
p_hwfn->rel_pf_id, 791*14b24e2bSVaishali Kulkarni p_hwfn->hw_info.hw_mode); 792*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 793*14b24e2bSVaishali Kulkarni return rc; 794*14b24e2bSVaishali Kulkarni 795*14b24e2bSVaishali Kulkarni /* start PF's qm queues */ 796*14b24e2bSVaishali Kulkarni OSAL_SPIN_LOCK(&qm_lock); 797*14b24e2bSVaishali Kulkarni b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 798*14b24e2bSVaishali Kulkarni qm_info->start_pq, qm_info->num_pqs); 799*14b24e2bSVaishali Kulkarni OSAL_SPIN_UNLOCK(&qm_lock); 800*14b24e2bSVaishali Kulkarni if (!b_rc) 801*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 802*14b24e2bSVaishali Kulkarni 803*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 804*14b24e2bSVaishali Kulkarni } 805*14b24e2bSVaishali Kulkarni 806*14b24e2bSVaishali Kulkarni static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn) 807*14b24e2bSVaishali Kulkarni { 808*14b24e2bSVaishali Kulkarni struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 809*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 810*14b24e2bSVaishali Kulkarni 811*14b24e2bSVaishali Kulkarni rc = ecore_init_qm_sanity(p_hwfn); 812*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 813*14b24e2bSVaishali Kulkarni goto alloc_err; 814*14b24e2bSVaishali Kulkarni 815*14b24e2bSVaishali Kulkarni qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 816*14b24e2bSVaishali Kulkarni sizeof(struct init_qm_pq_params) * 817*14b24e2bSVaishali Kulkarni ecore_init_qm_get_num_pqs(p_hwfn)); 818*14b24e2bSVaishali Kulkarni if (!qm_info->qm_pq_params) 819*14b24e2bSVaishali Kulkarni goto alloc_err; 820*14b24e2bSVaishali Kulkarni 821*14b24e2bSVaishali Kulkarni qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 822*14b24e2bSVaishali Kulkarni sizeof(struct init_qm_vport_params) * 823*14b24e2bSVaishali Kulkarni ecore_init_qm_get_num_vports(p_hwfn)); 824*14b24e2bSVaishali Kulkarni if (!qm_info->qm_vport_params) 825*14b24e2bSVaishali Kulkarni goto alloc_err; 
826*14b24e2bSVaishali Kulkarni 827*14b24e2bSVaishali Kulkarni qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 828*14b24e2bSVaishali Kulkarni sizeof(struct init_qm_port_params) * 829*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine); 830*14b24e2bSVaishali Kulkarni if (!qm_info->qm_port_params) 831*14b24e2bSVaishali Kulkarni goto alloc_err; 832*14b24e2bSVaishali Kulkarni 833*14b24e2bSVaishali Kulkarni qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 834*14b24e2bSVaishali Kulkarni sizeof(struct ecore_wfq_data) * 835*14b24e2bSVaishali Kulkarni ecore_init_qm_get_num_vports(p_hwfn)); 836*14b24e2bSVaishali Kulkarni if (!qm_info->wfq_data) 837*14b24e2bSVaishali Kulkarni goto alloc_err; 838*14b24e2bSVaishali Kulkarni 839*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 840*14b24e2bSVaishali Kulkarni 841*14b24e2bSVaishali Kulkarni alloc_err: 842*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n"); 843*14b24e2bSVaishali Kulkarni ecore_qm_info_free(p_hwfn); 844*14b24e2bSVaishali Kulkarni return ECORE_NOMEM; 845*14b24e2bSVaishali Kulkarni } 846*14b24e2bSVaishali Kulkarni /******************** End QM initialization ***************/ 847*14b24e2bSVaishali Kulkarni 848*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) 849*14b24e2bSVaishali Kulkarni { 850*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc = ECORE_SUCCESS; 851*14b24e2bSVaishali Kulkarni u32 rdma_tasks, excess_tasks; 852*14b24e2bSVaishali Kulkarni u32 line_count; 853*14b24e2bSVaishali Kulkarni int i; 854*14b24e2bSVaishali Kulkarni 855*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) { 856*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 857*14b24e2bSVaishali Kulkarni rc = ecore_l2_alloc(&p_dev->hwfns[i]); 858*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 859*14b24e2bSVaishali Kulkarni return rc; 860*14b24e2bSVaishali Kulkarni } 861*14b24e2bSVaishali Kulkarni return rc; 
862*14b24e2bSVaishali Kulkarni } 863*14b24e2bSVaishali Kulkarni 864*14b24e2bSVaishali Kulkarni p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL, 865*14b24e2bSVaishali Kulkarni sizeof(*p_dev->fw_data)); 866*14b24e2bSVaishali Kulkarni if (!p_dev->fw_data) 867*14b24e2bSVaishali Kulkarni return ECORE_NOMEM; 868*14b24e2bSVaishali Kulkarni 869*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 870*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 871*14b24e2bSVaishali Kulkarni u32 n_eqes, num_cons; 872*14b24e2bSVaishali Kulkarni 873*14b24e2bSVaishali Kulkarni /* First allocate the context manager structure */ 874*14b24e2bSVaishali Kulkarni rc = ecore_cxt_mngr_alloc(p_hwfn); 875*14b24e2bSVaishali Kulkarni if (rc) 876*14b24e2bSVaishali Kulkarni goto alloc_err; 877*14b24e2bSVaishali Kulkarni 878*14b24e2bSVaishali Kulkarni /* Set the HW cid/tid numbers (in the contest manager) 879*14b24e2bSVaishali Kulkarni * Must be done prior to any further computations. 880*14b24e2bSVaishali Kulkarni */ 881*14b24e2bSVaishali Kulkarni rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); 882*14b24e2bSVaishali Kulkarni if (rc) 883*14b24e2bSVaishali Kulkarni goto alloc_err; 884*14b24e2bSVaishali Kulkarni 885*14b24e2bSVaishali Kulkarni rc = ecore_alloc_qm_data(p_hwfn); 886*14b24e2bSVaishali Kulkarni if (rc) 887*14b24e2bSVaishali Kulkarni goto alloc_err; 888*14b24e2bSVaishali Kulkarni 889*14b24e2bSVaishali Kulkarni /* init qm info */ 890*14b24e2bSVaishali Kulkarni ecore_init_qm_info(p_hwfn); 891*14b24e2bSVaishali Kulkarni 892*14b24e2bSVaishali Kulkarni /* Compute the ILT client partition */ 893*14b24e2bSVaishali Kulkarni rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 894*14b24e2bSVaishali Kulkarni if (rc) { 895*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, "too many ILT lines; re-computing with less lines\n"); 896*14b24e2bSVaishali Kulkarni /* In case there are not enough ILT lines we reduce the 897*14b24e2bSVaishali Kulkarni * number of RDMA tasks and 
re-compute. 898*14b24e2bSVaishali Kulkarni */ 899*14b24e2bSVaishali Kulkarni excess_tasks = ecore_cxt_cfg_ilt_compute_excess( 900*14b24e2bSVaishali Kulkarni p_hwfn, line_count); 901*14b24e2bSVaishali Kulkarni if (!excess_tasks) 902*14b24e2bSVaishali Kulkarni goto alloc_err; 903*14b24e2bSVaishali Kulkarni 904*14b24e2bSVaishali Kulkarni rdma_tasks = RDMA_MAX_TIDS - excess_tasks; 905*14b24e2bSVaishali Kulkarni rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks); 906*14b24e2bSVaishali Kulkarni if (rc) 907*14b24e2bSVaishali Kulkarni goto alloc_err; 908*14b24e2bSVaishali Kulkarni 909*14b24e2bSVaishali Kulkarni rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 910*14b24e2bSVaishali Kulkarni if (rc) { 911*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "failed ILT compute. Requested too many lines: %u\n", 912*14b24e2bSVaishali Kulkarni line_count); 913*14b24e2bSVaishali Kulkarni 914*14b24e2bSVaishali Kulkarni goto alloc_err; 915*14b24e2bSVaishali Kulkarni } 916*14b24e2bSVaishali Kulkarni } 917*14b24e2bSVaishali Kulkarni 918*14b24e2bSVaishali Kulkarni /* CID map / ILT shadow table / T2 919*14b24e2bSVaishali Kulkarni * The talbes sizes are determined by the computations above 920*14b24e2bSVaishali Kulkarni */ 921*14b24e2bSVaishali Kulkarni rc = ecore_cxt_tables_alloc(p_hwfn); 922*14b24e2bSVaishali Kulkarni if (rc) 923*14b24e2bSVaishali Kulkarni goto alloc_err; 924*14b24e2bSVaishali Kulkarni 925*14b24e2bSVaishali Kulkarni /* SPQ, must follow ILT because initializes SPQ context */ 926*14b24e2bSVaishali Kulkarni rc = ecore_spq_alloc(p_hwfn); 927*14b24e2bSVaishali Kulkarni if (rc) 928*14b24e2bSVaishali Kulkarni goto alloc_err; 929*14b24e2bSVaishali Kulkarni 930*14b24e2bSVaishali Kulkarni /* SP status block allocation */ 931*14b24e2bSVaishali Kulkarni p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, 932*14b24e2bSVaishali Kulkarni RESERVED_PTT_DPC); 933*14b24e2bSVaishali Kulkarni 934*14b24e2bSVaishali Kulkarni rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 935*14b24e2bSVaishali 
Kulkarni if (rc) 936*14b24e2bSVaishali Kulkarni goto alloc_err; 937*14b24e2bSVaishali Kulkarni 938*14b24e2bSVaishali Kulkarni rc = ecore_iov_alloc(p_hwfn); 939*14b24e2bSVaishali Kulkarni if (rc) 940*14b24e2bSVaishali Kulkarni goto alloc_err; 941*14b24e2bSVaishali Kulkarni 942*14b24e2bSVaishali Kulkarni /* EQ */ 943*14b24e2bSVaishali Kulkarni n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain); 944*14b24e2bSVaishali Kulkarni if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 945*14b24e2bSVaishali Kulkarni /* Calculate the EQ size 946*14b24e2bSVaishali Kulkarni * --------------------- 947*14b24e2bSVaishali Kulkarni * Each ICID may generate up to one event at a time i.e. 948*14b24e2bSVaishali Kulkarni * the event must be handled/cleared before a new one 949*14b24e2bSVaishali Kulkarni * can be generated. We calculate the sum of events per 950*14b24e2bSVaishali Kulkarni * protocol and create an EQ deep enough to handle the 951*14b24e2bSVaishali Kulkarni * worst case: 952*14b24e2bSVaishali Kulkarni * - Core - according to SPQ. 953*14b24e2bSVaishali Kulkarni * - RoCE - per QP there are a couple of ICIDs, one 954*14b24e2bSVaishali Kulkarni * responder and one requester, each can 955*14b24e2bSVaishali Kulkarni * generate an EQE => n_eqes_qp = 2 * n_qp. 956*14b24e2bSVaishali Kulkarni * Each CQ can generate an EQE. There are 2 CQs 957*14b24e2bSVaishali Kulkarni * per QP => n_eqes_cq = 2 * n_qp. 958*14b24e2bSVaishali Kulkarni * Hence the RoCE total is 4 * n_qp or 959*14b24e2bSVaishali Kulkarni * 2 * num_cons. 960*14b24e2bSVaishali Kulkarni * - ENet - There can be up to two events per VF. One 961*14b24e2bSVaishali Kulkarni * for VF-PF channel and another for VF FLR 962*14b24e2bSVaishali Kulkarni * initial cleanup. The number of VFs is 963*14b24e2bSVaishali Kulkarni * bounded by MAX_NUM_VFS_BB, and is much 964*14b24e2bSVaishali Kulkarni * smaller than RoCE's so we avoid exact 965*14b24e2bSVaishali Kulkarni * calculation. 
966*14b24e2bSVaishali Kulkarni */ 967*14b24e2bSVaishali Kulkarni if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) { 968*14b24e2bSVaishali Kulkarni num_cons = ecore_cxt_get_proto_cid_count( 969*14b24e2bSVaishali Kulkarni p_hwfn, PROTOCOLID_ROCE, OSAL_NULL); 970*14b24e2bSVaishali Kulkarni num_cons *= 2; 971*14b24e2bSVaishali Kulkarni } else { 972*14b24e2bSVaishali Kulkarni num_cons = ecore_cxt_get_proto_cid_count( 973*14b24e2bSVaishali Kulkarni p_hwfn, PROTOCOLID_IWARP, 974*14b24e2bSVaishali Kulkarni OSAL_NULL); 975*14b24e2bSVaishali Kulkarni } 976*14b24e2bSVaishali Kulkarni n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 977*14b24e2bSVaishali Kulkarni } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 978*14b24e2bSVaishali Kulkarni num_cons = ecore_cxt_get_proto_cid_count( 979*14b24e2bSVaishali Kulkarni p_hwfn, PROTOCOLID_ISCSI, OSAL_NULL); 980*14b24e2bSVaishali Kulkarni n_eqes += 2 * num_cons; 981*14b24e2bSVaishali Kulkarni } 982*14b24e2bSVaishali Kulkarni 983*14b24e2bSVaishali Kulkarni if (n_eqes > 0xFFFF) { 984*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, 985*14b24e2bSVaishali Kulkarni "Cannot allocate 0x%x EQ elements. 
The maximum of a u16 chain is 0x%x\n", 986*14b24e2bSVaishali Kulkarni n_eqes, 0xFFFF); 987*14b24e2bSVaishali Kulkarni goto alloc_no_mem; 988*14b24e2bSVaishali Kulkarni } 989*14b24e2bSVaishali Kulkarni 990*14b24e2bSVaishali Kulkarni rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes); 991*14b24e2bSVaishali Kulkarni if (rc) 992*14b24e2bSVaishali Kulkarni goto alloc_err; 993*14b24e2bSVaishali Kulkarni 994*14b24e2bSVaishali Kulkarni rc = ecore_consq_alloc(p_hwfn); 995*14b24e2bSVaishali Kulkarni if (rc) 996*14b24e2bSVaishali Kulkarni goto alloc_err; 997*14b24e2bSVaishali Kulkarni 998*14b24e2bSVaishali Kulkarni rc = ecore_l2_alloc(p_hwfn); 999*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1000*14b24e2bSVaishali Kulkarni goto alloc_err; 1001*14b24e2bSVaishali Kulkarni 1002*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_LL2 1003*14b24e2bSVaishali Kulkarni if (p_hwfn->using_ll2) { 1004*14b24e2bSVaishali Kulkarni rc = ecore_ll2_alloc(p_hwfn); 1005*14b24e2bSVaishali Kulkarni if (rc) 1006*14b24e2bSVaishali Kulkarni goto alloc_err; 1007*14b24e2bSVaishali Kulkarni } 1008*14b24e2bSVaishali Kulkarni #endif 1009*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_FCOE 1010*14b24e2bSVaishali Kulkarni if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { 1011*14b24e2bSVaishali Kulkarni rc = ecore_fcoe_alloc(p_hwfn); 1012*14b24e2bSVaishali Kulkarni if (rc) 1013*14b24e2bSVaishali Kulkarni goto alloc_err; 1014*14b24e2bSVaishali Kulkarni } 1015*14b24e2bSVaishali Kulkarni #endif 1016*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_ISCSI 1017*14b24e2bSVaishali Kulkarni if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1018*14b24e2bSVaishali Kulkarni rc = ecore_iscsi_alloc(p_hwfn); 1019*14b24e2bSVaishali Kulkarni if (rc) 1020*14b24e2bSVaishali Kulkarni goto alloc_err; 1021*14b24e2bSVaishali Kulkarni rc = ecore_ooo_alloc(p_hwfn); 1022*14b24e2bSVaishali Kulkarni if (rc) 1023*14b24e2bSVaishali Kulkarni goto alloc_err; 1024*14b24e2bSVaishali Kulkarni } 1025*14b24e2bSVaishali Kulkarni #endif 
1026*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_ROCE 1027*14b24e2bSVaishali Kulkarni if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 1028*14b24e2bSVaishali Kulkarni rc = ecore_rdma_info_alloc(p_hwfn); 1029*14b24e2bSVaishali Kulkarni if (rc) 1030*14b24e2bSVaishali Kulkarni goto alloc_err; 1031*14b24e2bSVaishali Kulkarni } 1032*14b24e2bSVaishali Kulkarni #endif 1033*14b24e2bSVaishali Kulkarni 1034*14b24e2bSVaishali Kulkarni /* DMA info initialization */ 1035*14b24e2bSVaishali Kulkarni rc = ecore_dmae_info_alloc(p_hwfn); 1036*14b24e2bSVaishali Kulkarni if (rc) { 1037*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 1038*14b24e2bSVaishali Kulkarni "Failed to allocate memory for dmae_info structure\n"); 1039*14b24e2bSVaishali Kulkarni goto alloc_err; 1040*14b24e2bSVaishali Kulkarni } 1041*14b24e2bSVaishali Kulkarni 1042*14b24e2bSVaishali Kulkarni /* DCBX initialization */ 1043*14b24e2bSVaishali Kulkarni rc = ecore_dcbx_info_alloc(p_hwfn); 1044*14b24e2bSVaishali Kulkarni if (rc) { 1045*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 1046*14b24e2bSVaishali Kulkarni "Failed to allocate memory for dcbx structure\n"); 1047*14b24e2bSVaishali Kulkarni goto alloc_err; 1048*14b24e2bSVaishali Kulkarni } 1049*14b24e2bSVaishali Kulkarni } 1050*14b24e2bSVaishali Kulkarni 1051*14b24e2bSVaishali Kulkarni p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, 1052*14b24e2bSVaishali Kulkarni sizeof(*p_dev->reset_stats)); 1053*14b24e2bSVaishali Kulkarni if (!p_dev->reset_stats) { 1054*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev, true, 1055*14b24e2bSVaishali Kulkarni "Failed to allocate reset statistics\n"); 1056*14b24e2bSVaishali Kulkarni goto alloc_no_mem; 1057*14b24e2bSVaishali Kulkarni } 1058*14b24e2bSVaishali Kulkarni 1059*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 1060*14b24e2bSVaishali Kulkarni 1061*14b24e2bSVaishali Kulkarni alloc_no_mem: 1062*14b24e2bSVaishali Kulkarni rc = ECORE_NOMEM; 1063*14b24e2bSVaishali Kulkarni alloc_err: 1064*14b24e2bSVaishali Kulkarni 
ecore_resc_free(p_dev); 1065*14b24e2bSVaishali Kulkarni return rc; 1066*14b24e2bSVaishali Kulkarni } 1067*14b24e2bSVaishali Kulkarni 1068*14b24e2bSVaishali Kulkarni void ecore_resc_setup(struct ecore_dev *p_dev) 1069*14b24e2bSVaishali Kulkarni { 1070*14b24e2bSVaishali Kulkarni int i; 1071*14b24e2bSVaishali Kulkarni 1072*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) { 1073*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) 1074*14b24e2bSVaishali Kulkarni ecore_l2_setup(&p_dev->hwfns[i]); 1075*14b24e2bSVaishali Kulkarni return; 1076*14b24e2bSVaishali Kulkarni } 1077*14b24e2bSVaishali Kulkarni 1078*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 1079*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1080*14b24e2bSVaishali Kulkarni 1081*14b24e2bSVaishali Kulkarni ecore_cxt_mngr_setup(p_hwfn); 1082*14b24e2bSVaishali Kulkarni ecore_spq_setup(p_hwfn); 1083*14b24e2bSVaishali Kulkarni ecore_eq_setup(p_hwfn); 1084*14b24e2bSVaishali Kulkarni ecore_consq_setup(p_hwfn); 1085*14b24e2bSVaishali Kulkarni 1086*14b24e2bSVaishali Kulkarni /* Read shadow of current MFW mailbox */ 1087*14b24e2bSVaishali Kulkarni ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 1088*14b24e2bSVaishali Kulkarni OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 1089*14b24e2bSVaishali Kulkarni p_hwfn->mcp_info->mfw_mb_cur, 1090*14b24e2bSVaishali Kulkarni p_hwfn->mcp_info->mfw_mb_length); 1091*14b24e2bSVaishali Kulkarni 1092*14b24e2bSVaishali Kulkarni ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt); 1093*14b24e2bSVaishali Kulkarni 1094*14b24e2bSVaishali Kulkarni ecore_l2_setup(p_hwfn); 1095*14b24e2bSVaishali Kulkarni ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt); 1096*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_LL2 1097*14b24e2bSVaishali Kulkarni if (p_hwfn->using_ll2) 1098*14b24e2bSVaishali Kulkarni ecore_ll2_setup(p_hwfn); 1099*14b24e2bSVaishali Kulkarni #endif 1100*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_FCOE 1101*14b24e2bSVaishali Kulkarni if (p_hwfn->hw_info.personality == 
ECORE_PCI_FCOE) 1102*14b24e2bSVaishali Kulkarni ecore_fcoe_setup(p_hwfn); 1103*14b24e2bSVaishali Kulkarni #endif 1104*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_ISCSI 1105*14b24e2bSVaishali Kulkarni if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1106*14b24e2bSVaishali Kulkarni ecore_iscsi_setup(p_hwfn); 1107*14b24e2bSVaishali Kulkarni ecore_ooo_setup(p_hwfn); 1108*14b24e2bSVaishali Kulkarni } 1109*14b24e2bSVaishali Kulkarni #endif 1110*14b24e2bSVaishali Kulkarni } 1111*14b24e2bSVaishali Kulkarni } 1112*14b24e2bSVaishali Kulkarni 1113*14b24e2bSVaishali Kulkarni #define FINAL_CLEANUP_POLL_CNT (100) 1114*14b24e2bSVaishali Kulkarni #define FINAL_CLEANUP_POLL_TIME (10) 1115*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, 1116*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 1117*14b24e2bSVaishali Kulkarni u16 id, bool is_vf) 1118*14b24e2bSVaishali Kulkarni { 1119*14b24e2bSVaishali Kulkarni u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 1120*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc = ECORE_TIMEOUT; 1121*14b24e2bSVaishali Kulkarni 1122*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 1123*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) || 1124*14b24e2bSVaishali Kulkarni CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1125*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n"); 1126*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 1127*14b24e2bSVaishali Kulkarni } 1128*14b24e2bSVaishali Kulkarni #endif 1129*14b24e2bSVaishali Kulkarni 1130*14b24e2bSVaishali Kulkarni addr = GTT_BAR0_MAP_REG_USDM_RAM + 1131*14b24e2bSVaishali Kulkarni USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 1132*14b24e2bSVaishali Kulkarni 1133*14b24e2bSVaishali Kulkarni if (is_vf) 1134*14b24e2bSVaishali Kulkarni id += 0x10; 1135*14b24e2bSVaishali Kulkarni 1136*14b24e2bSVaishali Kulkarni command |= X_FINAL_CLEANUP_AGG_INT << 1137*14b24e2bSVaishali Kulkarni 
SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 1138*14b24e2bSVaishali Kulkarni command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 1139*14b24e2bSVaishali Kulkarni command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 1140*14b24e2bSVaishali Kulkarni command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; 1141*14b24e2bSVaishali Kulkarni 1142*14b24e2bSVaishali Kulkarni /* Make sure notification is not set before initiating final cleanup */ 1143*14b24e2bSVaishali Kulkarni if (REG_RD(p_hwfn, addr)) { 1144*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 1145*14b24e2bSVaishali Kulkarni "Unexpected; Found final cleanup notification before initiating final cleanup\n"); 1146*14b24e2bSVaishali Kulkarni REG_WR(p_hwfn, addr, 0); 1147*14b24e2bSVaishali Kulkarni } 1148*14b24e2bSVaishali Kulkarni 1149*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1150*14b24e2bSVaishali Kulkarni "Sending final cleanup for PFVF[%d] [Command %08x\n]", 1151*14b24e2bSVaishali Kulkarni id, command); 1152*14b24e2bSVaishali Kulkarni 1153*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 1154*14b24e2bSVaishali Kulkarni 1155*14b24e2bSVaishali Kulkarni /* Poll until completion */ 1156*14b24e2bSVaishali Kulkarni while (!REG_RD(p_hwfn, addr) && count--) 1157*14b24e2bSVaishali Kulkarni OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME); 1158*14b24e2bSVaishali Kulkarni 1159*14b24e2bSVaishali Kulkarni if (REG_RD(p_hwfn, addr)) 1160*14b24e2bSVaishali Kulkarni rc = ECORE_SUCCESS; 1161*14b24e2bSVaishali Kulkarni else 1162*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "Failed to receive FW final cleanup notification\n"); 1163*14b24e2bSVaishali Kulkarni 1164*14b24e2bSVaishali Kulkarni /* Cleanup afterwards */ 1165*14b24e2bSVaishali Kulkarni REG_WR(p_hwfn, addr, 0); 1166*14b24e2bSVaishali Kulkarni 1167*14b24e2bSVaishali Kulkarni return rc; 1168*14b24e2bSVaishali Kulkarni } 1169*14b24e2bSVaishali Kulkarni 1170*14b24e2bSVaishali Kulkarni static enum 
_ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) 1171*14b24e2bSVaishali Kulkarni { 1172*14b24e2bSVaishali Kulkarni int hw_mode = 0; 1173*14b24e2bSVaishali Kulkarni 1174*14b24e2bSVaishali Kulkarni if (ECORE_IS_BB_B0(p_hwfn->p_dev)) { 1175*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_BB; 1176*14b24e2bSVaishali Kulkarni } else if (ECORE_IS_AH(p_hwfn->p_dev)) { 1177*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_K2; 1178*14b24e2bSVaishali Kulkarni } else { 1179*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n", 1180*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->type); 1181*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 1182*14b24e2bSVaishali Kulkarni } 1183*14b24e2bSVaishali Kulkarni 1184*14b24e2bSVaishali Kulkarni /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE*/ 1185*14b24e2bSVaishali Kulkarni switch (p_hwfn->p_dev->num_ports_in_engine) { 1186*14b24e2bSVaishali Kulkarni case 1: 1187*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 1188*14b24e2bSVaishali Kulkarni break; 1189*14b24e2bSVaishali Kulkarni case 2: 1190*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 1191*14b24e2bSVaishali Kulkarni break; 1192*14b24e2bSVaishali Kulkarni case 4: 1193*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 1194*14b24e2bSVaishali Kulkarni break; 1195*14b24e2bSVaishali Kulkarni default: 1196*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "num_ports_in_engine = %d not supported\n", 1197*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine); 1198*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 1199*14b24e2bSVaishali Kulkarni } 1200*14b24e2bSVaishali Kulkarni 1201*14b24e2bSVaishali Kulkarni switch (p_hwfn->p_dev->mf_mode) { 1202*14b24e2bSVaishali Kulkarni case ECORE_MF_DEFAULT: 1203*14b24e2bSVaishali Kulkarni case ECORE_MF_NPAR: 1204*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_MF_SI; 1205*14b24e2bSVaishali Kulkarni break; 1206*14b24e2bSVaishali Kulkarni case 
ECORE_MF_OVLAN: 1207*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_MF_SD; 1208*14b24e2bSVaishali Kulkarni break; 1209*14b24e2bSVaishali Kulkarni default: 1210*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "Unsupported MF mode, init as DEFAULT\n"); 1211*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_MF_SI; 1212*14b24e2bSVaishali Kulkarni } 1213*14b24e2bSVaishali Kulkarni 1214*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 1215*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1216*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1217*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_FPGA; 1218*14b24e2bSVaishali Kulkarni } else { 1219*14b24e2bSVaishali Kulkarni if (p_hwfn->p_dev->b_is_emul_full) 1220*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_EMUL_FULL; 1221*14b24e2bSVaishali Kulkarni else 1222*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_EMUL_REDUCED; 1223*14b24e2bSVaishali Kulkarni } 1224*14b24e2bSVaishali Kulkarni } else 1225*14b24e2bSVaishali Kulkarni #endif 1226*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_ASIC; 1227*14b24e2bSVaishali Kulkarni 1228*14b24e2bSVaishali Kulkarni if (p_hwfn->p_dev->num_hwfns > 1) 1229*14b24e2bSVaishali Kulkarni hw_mode |= 1 << MODE_100G; 1230*14b24e2bSVaishali Kulkarni 1231*14b24e2bSVaishali Kulkarni p_hwfn->hw_info.hw_mode = hw_mode; 1232*14b24e2bSVaishali Kulkarni 1233*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP), 1234*14b24e2bSVaishali Kulkarni "Configuring function for hw_mode: 0x%08x\n", 1235*14b24e2bSVaishali Kulkarni p_hwfn->hw_info.hw_mode); 1236*14b24e2bSVaishali Kulkarni 1237*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 1238*14b24e2bSVaishali Kulkarni } 1239*14b24e2bSVaishali Kulkarni 1240*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 1241*14b24e2bSVaishali Kulkarni /* MFW-replacement initializations for non-ASIC */ 1242*14b24e2bSVaishali Kulkarni static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn, 
					       struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 pl_hv = 1;
	int i;

	/* Emulation-only reset-domain value; E5 handling is not written yet */
	if (CHIP_REV_IS_EMUL(p_dev)) {
		if (ECORE_IS_AH(p_dev))
			pl_hv |= 0x600;
		else if (ECORE_IS_E5(p_dev))
			ECORE_E5_MISSING_CODE;
	}

	ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);

	if (CHIP_REV_IS_EMUL(p_dev) &&
	    (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev)))
		ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
			 0x3ffffff);

	/* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
	/* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
	if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
		ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);

	if (CHIP_REV_IS_EMUL(p_dev)) {
		if (ECORE_IS_AH(p_dev)) {
			/* 2 for 4-port, 1 for 2-port, 0 for 1-port */
			ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
				 (p_dev->num_ports_in_engine >> 1));

			/* NOTE(review): 0 enables, 3 disables? semantics of
			 * BLOCK_256B_EN not visible here — confirm against
			 * register documentation.
			 */
			ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
				 p_dev->num_ports_in_engine == 4 ? 0 : 3);
		} else if (ECORE_IS_E5(p_dev)) {
			ECORE_E5_MISSING_CODE;
		}
	}

	/* Poll on RBC: up to 100 iterations of 50us each (~5ms) for
	 * PSWRQ2 to report configuration done.
	 */
	ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
	for (i = 0; i < 100; i++) {
		OSAL_UDELAY(50);
		if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
			break;
	}
	/* Failure is logged but not fatal — init proceeds regardless */
	if (i == 100)
		DP_NOTICE(p_hwfn, true,
			  "RBC done failed to complete in PSWRQ2\n");

	return ECORE_SUCCESS;
}
#endif

/* Init run time data for all PFs and their VFs on an engine.
 * TBD - for VFs - Once we have parent PF info for each VF in
 * shmem available as CAU requires knowledge of parent PF for each VF.
 */
static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, igu_sb_id;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_igu_info *p_igu_info;
		struct ecore_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (igu_sb_id = 0;
		     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
		     igu_sb_id++) {
			p_block = &p_igu_info->entry[igu_sb_id];

			/* Only PF-owned IGU blocks get a CAU entry here */
			if (!p_block->is_pf)
				continue;

			ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
						p_block->function_id,
						0, 0);
			/* "* 2": each SB entry spans two RT offsets —
			 * presumably a 64-bit cau_sb_entry; confirm against
			 * the RT-array layout.
			 */
			STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
					 sb_entry);
		}
	}
}

/* Program PGLUE_B's cache-line-size RT register from the min of the OS
 * cache line size and the PSWRQ2 maximal write burst size (wr_mbs).
 */
static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 val, wr_mbs, cache_line_size;

	/* Decode PSWRQ2_REG_WR_MBS0: 0 -> 128B, 1 -> 256B, 2 -> 512B */
	val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
	switch (val) {
	case 0:
		wr_mbs = 128;
		break;
	case 1:
		wr_mbs = 256;
		break;
	case 2:
		wr_mbs = 512;
		break;
	default:
		DP_INFO(p_hwfn,
			"Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
			val);
		return;
	}

	cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs);
	switch (cache_line_size) {
	case 32:
		val = 0;
		break;
	case 64:
		val = 1;
		break;
	case 128:
		val = 2;
		break;
	case 256:
		val = 3;
		break;
	default:
		/* NOTE(review): unlike the wr_mbs switch above, this default
		 * does not return — STORE_RT_REG below still runs with the
		 * wr_mbs-derived 'val' from the first switch, despite the
		 * "Avoid configuring" message. Confirm this is intended.
		 */
		DP_INFO(p_hwfn,
			"Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
			cache_line_size);
	}

	if (OSAL_CACHE_LINE_SIZE > wr_mbs)
		DP_INFO(p_hwfn,
			"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
			OSAL_CACHE_LINE_SIZE, wr_mbs);

	STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
}

/* Engine-common (PHASE_ENGINE) hardware initialization: CAU RT data, GTT
 * windows, QM common RT init, context HW init, cache-line sizing, the
 * init-tool engine phase, and BB/VF workarounds (continued below).
 */
static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int hw_mode)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u8 vf_id, max_num_vfs;
	u16 num_pfs, pf_id;
	u32 concrete_fid;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	ecore_init_cau_rt_data(p_dev);

	/* Program GTT windows */
	ecore_gtt_init(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_dev)) {
		rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
		if (rc != ECORE_SUCCESS)
			return rc;
	}
#endif

	/* Enable PF rate limiting / WFQ only if MFW reported bandwidth caps */
	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	ecore_qm_common_rt_init(p_hwfn,
				p_dev->num_ports_in_engine,
				qm_info->max_phys_tcs_per_port,
				qm_info->pf_rl_en, qm_info->pf_wfq_en,
				qm_info->vport_rl_en, qm_info->vport_wfq_en,
				qm_info->qm_port_params);

	ecore_cxt_hw_init_common(p_hwfn);

	ecore_init_cache_line_size(p_hwfn, p_ptt);

	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID,
			    hw_mode);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* @@TBD MichalK - should add VALIDATE_VFID to init tool...
	 * need to decide with which value, maybe runtime
	 */
	ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	if (ECORE_IS_BB(p_dev)) {
		/* Workaround clears ROCE search for all functions to prevent
		 * involving a non-initialized function in processing ROCE
		 * packets.
		 */
		num_pfs = NUM_OF_ENG_PFS(p_dev);
		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
			ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
			ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
			ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		}
		/* pretend to original PF */
		ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	/* Workaround for avoiding CCFC execution error when getting packets
	 * with CRC errors, and allowing instead the invoking of the FW error
	 * handler.
	 * This is not done inside the init tool since it currently can't
	 * perform a pretending to VFs.
	 */
	max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
		concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
		ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
		ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
		ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
		ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
		ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
	}
	/* pretend to original PF */
	ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return rc;
}

#ifndef ASIC_ONLY
#define MISC_REG_RESET_REG_2_XMAC_BIT		(1<<4)
#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT	(1<<5)

#define PMEG_IF_BYTE_COUNT	8

/* Write a 64-bit value to a network-port register via the CNIG PMEG
 * indirect interface (BB emulation/FPGA path): program the command and
 * address registers, then push the data as two 32-bit halves.
 */
static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     u32 addr,
			     u64 data,
			     u8 reg_type,
			     u8 port)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
		   ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
		   (8 << PMEG_IF_BYTE_COUNT),
		   (reg_type << 25) | (addr << 8) | port,
		   (u32)((data >> 32) & 0xffffffff),
		   (u32)(data & 0xffffffff));

	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
		 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
		  0xffff00fe) |
		 (8 << PMEG_IF_BYTE_COUNT));
	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
		 (reg_type << 25) | (addr << 8) | port);
	/* Data is pushed low half first, then high half */
	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
		 data & 0xffffffff);
	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
		 (data >> 32) & 0xffffffff);
}

/* XLPORT/XLMAC indirect register addresses used by the BB emulation link */
#define XLPORT_MODE_REG			(0x20a)
#define XLPORT_MAC_CONTROL		(0x210)
#define XLPORT_FLOW_CONTROL_CONFIG	(0x207)
#define XLPORT_ENABLE_REG		(0x20b)

#define XLMAC_CTRL		(0x600)
#define XLMAC_MODE		(0x601)
#define XLMAC_RX_MAX_SIZE	(0x608)
#define XLMAC_TX_CTRL		(0x604)
#define XLMAC_PAUSE_CTRL	(0x60d)
#define XLMAC_PFC_CTRL		(0x60e)

/* Bring up an emulation link on a BB device by programming the XLPORT and
 * XLMAC blocks through the PMEG indirect interface.
 */
static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u8 loopback = 0, port = p_hwfn->port_id * 2;

	DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);

	/* XLPORT MAC MODE */ /* 0 Quad, 4 Single... */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG,
			 (0x4 << 4) | 0x4, 1, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
			 0x40, 0, port); /*XLMAC: SOFT RESET */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE,
			 0x40, 0, port); /*XLMAC: Port Speed >= 10Gbps */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE,
			 0x3fff, 0, port); /* XLMAC: Max Size */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
			 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
			 0, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL,
			 0x7c000, 0, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
			 0x30ffffc000ULL, 0, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2),
			 0, port); /* XLMAC: TX_EN, RX_EN */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
			 0, port); /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG,
			 1, 0, port); /* Enabled Parallel PFC interface */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG,
			 0xf, 1, port); /* XLPORT port enable */
}

/* Bring up an emulation link on an AH (K2) or E5 device by programming the
 * CNIG NIG-port configuration and the per-port NWM ETH MAC registers.
 */
static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u8 port = p_hwfn->port_id;
	/* Per-port MAC register block base */
	u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;

	DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);

	/* Enable the NIG port and map it to the same-numbered NWM port */
	ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
		 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
		 (port <<
		  CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
		 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
		 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);

	/* Max frame length 9018 (jumbo) */
	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
		 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
		 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
		 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
		 (0xA <<
		  ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
		 (8 <<
		  ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));

	/* NOTE(review): 0xa853 is an undocumented command-config magic value
	 * here — confirm bit meanings against the NWM MAC register spec.
	 */
	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
		 0xa853);
}

/* Dispatch emulation link bring-up to the chip-specific helper */
static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt)
{
	if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev))
		ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
	else /* BB */
		ecore_emul_link_init_bb(p_hwfn, p_ptt);

	return;
}

/* Non-ASIC XMAC bring-up for a BB port: reset the XMAC block, set port
 * modes, then configure max size, CRC append, and TX/RX enable.
 */
static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u8 port)
{
	/* Port 1's XMAC registers sit 0x800 above port 0's */
	int port_offset = port ?
			  0x800 : 0;
	u32 xmac_rxctrl = 0;

	/* Reset of XMAC */
	/* FIXME: move to common start */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
	OSAL_MSLEEP(1);
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */

	ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);

	/* Set the number of ports on the Warp Core to 10G */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);

	/* Soft reset of XMAC */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
	OSAL_MSLEEP(1);
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);

	/* FIXME: move to common end */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);

	/* Set Max packet size: initialize XMAC block register for port 0 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset,
		 0x2710);

	/* CRC append for Tx packets: init XMAC block register for port 1 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);

	/* Enable TX and RX: initialize XMAC block register for port 1 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
		 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
	/* Read-modify-write: preserve other RX_CTRL bits */
	xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
			       XMAC_REG_RX_CTRL_BB + port_offset);
	xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset,
		 xmac_rxctrl);
}
#endif

/* Derive the doorbell-page (DPI) size and count from the PWM region size
 * and the number of CPUs, store them on the hwfn, and program the DPI bit
 * shift register. Returns ECORE_NORESOURCES if fewer DPIs fit than the
 * configured minimum.
 */
static enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u32 pwm_region_size,
		       u32 n_cpus)
{
	u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
	u32 dpi_bit_shift, dpi_count;
	u32 min_dpis;

	/* Calculate DPI size
	 * ------------------
	 * The PWM region contains Doorbell Pages. The first is reserved for
	 * the kernel for, e.g, L2. The others are free to be used by non-
	 * trusted applications, typically from user space.
Each page, called a
	 * doorbell page is sectioned into windows that allow doorbells to be
	 * issued in parallel by the kernel/application. The size of such a
	 * window (a.k.a. WID) is 1kB.
	 * Summary:
	 *    1kB WID x N WIDS = DPI page size
	 *    DPI page size x N DPIs = PWM region size
	 * Notes:
	 * The size of the DPI page size must be in multiples of
	 * OSAL_PAGE_SIZE in order to ensure that two applications won't
	 * share the same page. It also must contain at least one WID per
	 * CPU to allow parallelism. It also must be a power of 2, since it
	 * is stored as a bit shift.
	 *
	 * The DPI page size is stored in a register as 'dpi_bit_shift' so
	 * that 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096
	 * containing 4 WIDs.
	 */
	dpi_page_size_1 = ECORE_WID_SIZE * n_cpus;
	dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE);
	dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2);
	dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size);
	/* Register encoding: log2 of the page size in 4kB units */
	dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);

	dpi_count = pwm_region_size / dpi_page_size;

	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
	min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis);

	/* Update hwfn */
	p_hwfn->dpi_size = dpi_page_size;
	p_hwfn->dpi_count = dpi_count;

	/* Update registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);

	/* Note: hwfn fields and the register are already updated above even
	 * on this failure path.
	 */
	if (dpi_count < min_dpis)
		return ECORE_NORESOURCES;

	return ECORE_SUCCESS;
}

/* RoCE EDPM (enhanced doorbell push mode) policy selector */
enum ECORE_ROCE_EDPM_MODE {
	ECORE_ROCE_EDPM_MODE_ENABLE = 0,
	ECORE_ROCE_EDPM_MODE_FORCE_ON = 1,
	ECORE_ROCE_EDPM_MODE_DISABLE = 2,
};

static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
			      struct
ecore_ptt *p_ptt) 1701*14b24e2bSVaishali Kulkarni { 1702*14b24e2bSVaishali Kulkarni u32 pwm_regsize, norm_regsize; 1703*14b24e2bSVaishali Kulkarni u32 non_pwm_conn, min_addr_reg1; 1704*14b24e2bSVaishali Kulkarni u32 db_bar_size, n_cpus = 1; 1705*14b24e2bSVaishali Kulkarni u32 roce_edpm_mode; 1706*14b24e2bSVaishali Kulkarni u32 pf_dems_shift; 1707*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc = ECORE_SUCCESS; 1708*14b24e2bSVaishali Kulkarni u8 cond; 1709*14b24e2bSVaishali Kulkarni 1710*14b24e2bSVaishali Kulkarni db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1); 1711*14b24e2bSVaishali Kulkarni if (p_hwfn->p_dev->num_hwfns > 1) 1712*14b24e2bSVaishali Kulkarni db_bar_size /= 2; 1713*14b24e2bSVaishali Kulkarni 1714*14b24e2bSVaishali Kulkarni /* Calculate doorbell regions 1715*14b24e2bSVaishali Kulkarni * ----------------------------------- 1716*14b24e2bSVaishali Kulkarni * The doorbell BAR is made of two regions. The first is called normal 1717*14b24e2bSVaishali Kulkarni * region and the second is called PWM region. In the normal region 1718*14b24e2bSVaishali Kulkarni * each ICID has its own set of addresses so that writing to that 1719*14b24e2bSVaishali Kulkarni * specific address identifies the ICID. In the Process Window Mode 1720*14b24e2bSVaishali Kulkarni * region the ICID is given in the data written to the doorbell. The 1721*14b24e2bSVaishali Kulkarni * above per PF register denotes the offset in the doorbell BAR in which 1722*14b24e2bSVaishali Kulkarni * the PWM region begins. 1723*14b24e2bSVaishali Kulkarni * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per 1724*14b24e2bSVaishali Kulkarni * non-PWM connection. The calculation below computes the total non-PWM 1725*14b24e2bSVaishali Kulkarni * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is 1726*14b24e2bSVaishali Kulkarni * in units of 4,096 bytes. 
1727*14b24e2bSVaishali Kulkarni */ 1728*14b24e2bSVaishali Kulkarni non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 1729*14b24e2bSVaishali Kulkarni ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 1730*14b24e2bSVaishali Kulkarni OSAL_NULL) + 1731*14b24e2bSVaishali Kulkarni ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 1732*14b24e2bSVaishali Kulkarni OSAL_NULL); 1733*14b24e2bSVaishali Kulkarni norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096); 1734*14b24e2bSVaishali Kulkarni min_addr_reg1 = norm_regsize / 4096; 1735*14b24e2bSVaishali Kulkarni pwm_regsize = db_bar_size - norm_regsize; 1736*14b24e2bSVaishali Kulkarni 1737*14b24e2bSVaishali Kulkarni /* Check that the normal and PWM sizes are valid */ 1738*14b24e2bSVaishali Kulkarni if (db_bar_size < norm_regsize) { 1739*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn->p_dev, "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", db_bar_size, norm_regsize); 1740*14b24e2bSVaishali Kulkarni return ECORE_NORESOURCES; 1741*14b24e2bSVaishali Kulkarni } 1742*14b24e2bSVaishali Kulkarni if (pwm_regsize < ECORE_MIN_PWM_REGION) { 1743*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn->p_dev, "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size, norm_regsize); 1744*14b24e2bSVaishali Kulkarni return ECORE_NORESOURCES; 1745*14b24e2bSVaishali Kulkarni } 1746*14b24e2bSVaishali Kulkarni 1747*14b24e2bSVaishali Kulkarni /* Calculate number of DPIs */ 1748*14b24e2bSVaishali Kulkarni roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; 1749*14b24e2bSVaishali Kulkarni if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) || 1750*14b24e2bSVaishali Kulkarni ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) { 1751*14b24e2bSVaishali Kulkarni /* Either EDPM is mandatory, or we are attempting to allocate a 1752*14b24e2bSVaishali Kulkarni * WID per CPU. 
1753*14b24e2bSVaishali Kulkarni */ 1754*14b24e2bSVaishali Kulkarni n_cpus = OSAL_NUM_ACTIVE_CPU(); 1755*14b24e2bSVaishali Kulkarni rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1756*14b24e2bSVaishali Kulkarni } 1757*14b24e2bSVaishali Kulkarni 1758*14b24e2bSVaishali Kulkarni cond = ((rc != ECORE_SUCCESS) && 1759*14b24e2bSVaishali Kulkarni (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) || 1760*14b24e2bSVaishali Kulkarni (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE); 1761*14b24e2bSVaishali Kulkarni if (cond || p_hwfn->dcbx_no_edpm) { 1762*14b24e2bSVaishali Kulkarni /* Either EDPM is disabled from user configuration, or it is 1763*14b24e2bSVaishali Kulkarni * disabled via DCBx, or it is not mandatory and we failed to 1764*14b24e2bSVaishali Kulkarni * allocated a WID per CPU. 1765*14b24e2bSVaishali Kulkarni */ 1766*14b24e2bSVaishali Kulkarni n_cpus = 1; 1767*14b24e2bSVaishali Kulkarni rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1768*14b24e2bSVaishali Kulkarni 1769*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_ROCE 1770*14b24e2bSVaishali Kulkarni /* If we entered this flow due to DCBX then the DPM register is 1771*14b24e2bSVaishali Kulkarni * already configured. 1772*14b24e2bSVaishali Kulkarni */ 1773*14b24e2bSVaishali Kulkarni if (cond) 1774*14b24e2bSVaishali Kulkarni ecore_rdma_dpm_bar(p_hwfn, p_ptt); 1775*14b24e2bSVaishali Kulkarni #endif 1776*14b24e2bSVaishali Kulkarni } 1777*14b24e2bSVaishali Kulkarni 1778*14b24e2bSVaishali Kulkarni p_hwfn->wid_count = (u16)n_cpus; 1779*14b24e2bSVaishali Kulkarni 1780*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", 1781*14b24e2bSVaishali Kulkarni norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count, 1782*14b24e2bSVaishali Kulkarni ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 
/*
 * ecore_hw_init_port() - run the per-port init phase.
 *
 * Executes the PHASE_PORT init-tool run for this hwfn's port.  On non-ASIC
 * builds it additionally brings up the link for FPGA (BB only) and emulation
 * platforms; on real silicon it returns right after the init run.
 *
 * Returns ECORE_SUCCESS or the failure code from ecore_init_run().
 */
static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt,
					       int hw_mode)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
			    hw_mode);
	if (rc != ECORE_SUCCESS)
		return rc;
#if 0
	/* FW 8.10.5.0 requires us to configure PF_VECTOR and DUALMODE in LLH.
	 * This would hopefully be moved to MFW.
	 * NOTE(review): disabled workaround kept for reference.
	 */
	if (IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) {
		u8 pf_id = 0;

		if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) ==
		    ECORE_SUCCESS) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
				   "PF[%08x] is first eth on engine\n",
				   pf_id);

			/* We should have configured BIT for ppfid, i.e., the
			 * relative function number in the port. But there's a
			 * bug in LLH in BB where the ppfid is actually engine
			 * based, so we need to take this into account.
			 */
			if (!ECORE_IS_BB(p_hwfn->p_dev))
				pf_id /= p_hwfn->p_dev->num_ports_in_engine;

			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
		}

		/* Take the protocol-based hit vector if there is a hit,
		 * otherwise take the other vector.
		 */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
	}
#endif
#ifndef ASIC_ONLY
	/* Real silicon needs no link emulation - done */
	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		if (ECORE_IS_AH(p_hwfn->p_dev))
			return ECORE_SUCCESS;
		else if (ECORE_IS_BB(p_hwfn->p_dev))
			ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
		else /* E5 */
			ECORE_E5_MISSING_CODE;
	} else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (p_hwfn->p_dev->num_hwfns > 1) {
			/* Activate OPTE in CMT */
			u32 val;

			val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
			val |= 0x10;
			ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
			ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
				 0x55555555);
		}

		ecore_emul_link_init(p_hwfn, p_ptt);
	} else {
		DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
	}
#endif

	return rc;
}
/*
 * ecore_hw_init_pf() - run the per-PF (function-level) init phase.
 *
 * Sequence (order matters - runtime registers must be staged before the
 * PHASE_PF init run consumes them):
 *   1. Seed QM WFQ/rate-limit from MFW-provided function info (if present).
 *   2. Stage CXT/IGU runtime values and MF-mode NIG classification.
 *   3. Issue a final-cleanup ramrod to purge any previous driver's state.
 *   4. Run the PHASE_PF and PHASE_QM_PF init-tool phases.
 *   5. Disable PCIe relaxed ordering (performance workaround).
 *   6. Partition the doorbell BAR.
 *   7. Optionally (b_hw_start) enable interrupts and send PF-start ramrod.
 *
 * Returns ECORE_SUCCESS, ECORE_IO if the PCIe capability is missing, or the
 * failure code of the first failing step.
 */
static enum _ecore_status_t ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     struct ecore_tunnel_info *p_tunn,
					     int hw_mode,
					     bool b_hw_start,
					     enum ecore_int_mode int_mode,
					     bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	u32 prs_reg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u16 ctrl;
	int pos;

	if (p_hwfn->mcp_info) {
		struct ecore_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}
	ecore_cxt_hw_init_pf(p_hwfn);

	ecore_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed (multi-function switch-dependent mode) */
	if (hw_mode & (1 << MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed (switch-independent mode) */
	if (hw_mode & (1 << MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration - @@@TBD - should we set 0 otherwise? */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
		     (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
		     (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* perform debug configuration when chip is out of reset */
	OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);

	/* Cleanup chip from previous driver if such remains exist */
	rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
	if (rc != ECORE_SUCCESS) {
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
		return rc;
	}

	/* PF Init sequence */
	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW */
	ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	/* PCI relaxed ordering causes a decrease in the performance on some
	 * systems. Till a root cause is found, disable this attribute in the
	 * PCI config space.
	 */
	pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
	if (!pos) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find the PCI Express Capability structure in the PCI config space\n");
		return ECORE_IO;
	}
	OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
	ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
	OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl);

	rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
	if (rc)
		return rc;
#if 0
	/* FW 8.10.5.0 requires us to configure MSG_INFO in PRS.
	 * This would hopefully be moved to MFW.
	 * NOTE(review): disabled code - 'val' would be used uninitialized
	 * when this PF is not the first ETH function; fix before enabling.
	 */
	if (IS_MF_SI(p_hwfn)) {
		u8 pf_id = 0;
		u32 val;

		if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) ==
		    ECORE_SUCCESS) {
			if (p_hwfn->rel_pf_id == pf_id) {
				DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
					   "PF[%d] is first ETH on engine\n",
					   pf_id);
				val = 1;
			}
			ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
		}
	}
#endif
	if (b_hw_start) {
		/* enable interrupts */
		rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* send function start command */
		rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
				       allow_npar_tx_switch);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "Function start ramrod failed\n");
		} else {
			prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
			DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				   "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);

			if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
			{
				ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
					 (1 << 2));
				ecore_wr(p_hwfn, p_ptt,
				    PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
				    0x100);
			}
			/* Dump the parser search registers for debugging */
			DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				   "PRS_REG_SEARCH registers after start PFn\n");
			prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
			DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				   "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
			prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
			DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				   "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
			prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
			DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				   "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
			prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
			DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				   "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
			prs_reg = ecore_rd(p_hwfn, p_ptt,
					   PRS_REG_SEARCH_TCP_FIRST_FRAG);
			DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				   "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
				   prs_reg);
			prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
			DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				   "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
		}
	}
	return rc;
}
2050*14b24e2bSVaishali Kulkarni } 2051*14b24e2bSVaishali Kulkarni 2052*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn, 2053*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 2054*14b24e2bSVaishali Kulkarni u8 enable) 2055*14b24e2bSVaishali Kulkarni { 2056*14b24e2bSVaishali Kulkarni u32 delay_idx = 0, val, set_val = enable ? 1 : 0; 2057*14b24e2bSVaishali Kulkarni 2058*14b24e2bSVaishali Kulkarni /* Change PF in PXP */ 2059*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, 2060*14b24e2bSVaishali Kulkarni PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 2061*14b24e2bSVaishali Kulkarni 2062*14b24e2bSVaishali Kulkarni /* wait until value is set - try for 1 second every 50us */ 2063*14b24e2bSVaishali Kulkarni for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 2064*14b24e2bSVaishali Kulkarni val = ecore_rd(p_hwfn, p_ptt, 2065*14b24e2bSVaishali Kulkarni PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 2066*14b24e2bSVaishali Kulkarni if (val == set_val) 2067*14b24e2bSVaishali Kulkarni break; 2068*14b24e2bSVaishali Kulkarni 2069*14b24e2bSVaishali Kulkarni OSAL_UDELAY(50); 2070*14b24e2bSVaishali Kulkarni } 2071*14b24e2bSVaishali Kulkarni 2072*14b24e2bSVaishali Kulkarni if (val != set_val) { 2073*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2074*14b24e2bSVaishali Kulkarni "PFID_ENABLE_MASTER wasn't changed after a second\n"); 2075*14b24e2bSVaishali Kulkarni return ECORE_UNKNOWN_ERROR; 2076*14b24e2bSVaishali Kulkarni } 2077*14b24e2bSVaishali Kulkarni 2078*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 2079*14b24e2bSVaishali Kulkarni } 2080*14b24e2bSVaishali Kulkarni 2081*14b24e2bSVaishali Kulkarni static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, 2082*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_main_ptt) 2083*14b24e2bSVaishali Kulkarni { 2084*14b24e2bSVaishali Kulkarni /* Read shadow of current MFW mailbox */ 2085*14b24e2bSVaishali Kulkarni ecore_mcp_read_mb(p_hwfn, p_main_ptt); 2086*14b24e2bSVaishali 
Kulkarni OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 2087*14b24e2bSVaishali Kulkarni p_hwfn->mcp_info->mfw_mb_cur, 2088*14b24e2bSVaishali Kulkarni p_hwfn->mcp_info->mfw_mb_length); 2089*14b24e2bSVaishali Kulkarni } 2090*14b24e2bSVaishali Kulkarni 2091*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, 2092*14b24e2bSVaishali Kulkarni struct ecore_hw_init_params *p_params) 2093*14b24e2bSVaishali Kulkarni { 2094*14b24e2bSVaishali Kulkarni if (p_params->p_tunn) { 2095*14b24e2bSVaishali Kulkarni ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 2096*14b24e2bSVaishali Kulkarni ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 2097*14b24e2bSVaishali Kulkarni } 2098*14b24e2bSVaishali Kulkarni 2099*14b24e2bSVaishali Kulkarni p_hwfn->b_int_enabled = 1; 2100*14b24e2bSVaishali Kulkarni 2101*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 2102*14b24e2bSVaishali Kulkarni } 2103*14b24e2bSVaishali Kulkarni 2104*14b24e2bSVaishali Kulkarni static void 2105*14b24e2bSVaishali Kulkarni ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, 2106*14b24e2bSVaishali Kulkarni struct ecore_drv_load_params *p_drv_load) 2107*14b24e2bSVaishali Kulkarni { 2108*14b24e2bSVaishali Kulkarni OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); 2109*14b24e2bSVaishali Kulkarni 2110*14b24e2bSVaishali Kulkarni if (p_drv_load != OSAL_NULL) { 2111*14b24e2bSVaishali Kulkarni p_load_req->drv_role = p_drv_load->is_crash_kernel ? 
2112*14b24e2bSVaishali Kulkarni ECORE_DRV_ROLE_KDUMP : 2113*14b24e2bSVaishali Kulkarni ECORE_DRV_ROLE_OS; 2114*14b24e2bSVaishali Kulkarni p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 2115*14b24e2bSVaishali Kulkarni p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 2116*14b24e2bSVaishali Kulkarni p_load_req->override_force_load = 2117*14b24e2bSVaishali Kulkarni p_drv_load->override_force_load; 2118*14b24e2bSVaishali Kulkarni } else { 2119*14b24e2bSVaishali Kulkarni p_load_req->drv_role = ECORE_DRV_ROLE_OS; 2120*14b24e2bSVaishali Kulkarni p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; 2121*14b24e2bSVaishali Kulkarni p_load_req->avoid_eng_reset = false; 2122*14b24e2bSVaishali Kulkarni p_load_req->override_force_load = 2123*14b24e2bSVaishali Kulkarni ECORE_OVERRIDE_FORCE_LOAD_NONE; 2124*14b24e2bSVaishali Kulkarni } 2125*14b24e2bSVaishali Kulkarni } 2126*14b24e2bSVaishali Kulkarni 2127*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 2128*14b24e2bSVaishali Kulkarni struct ecore_hw_init_params *p_params) 2129*14b24e2bSVaishali Kulkarni { 2130*14b24e2bSVaishali Kulkarni struct ecore_load_req_params load_req_params; 2131*14b24e2bSVaishali Kulkarni u32 load_code, param, drv_mb_param; 2132*14b24e2bSVaishali Kulkarni bool b_default_mtu = true; 2133*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn; 2134*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc; 2135*14b24e2bSVaishali Kulkarni int i; 2136*14b24e2bSVaishali Kulkarni 2137*14b24e2bSVaishali Kulkarni if ((p_params->int_mode == ECORE_INT_MODE_MSI) && (p_dev->num_hwfns > 1)) { 2138*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev, false, 2139*14b24e2bSVaishali Kulkarni "MSI mode is not supported for CMT devices\n"); 2140*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 2141*14b24e2bSVaishali Kulkarni } 2142*14b24e2bSVaishali Kulkarni 2143*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev)) { 2144*14b24e2bSVaishali Kulkarni rc = 
ecore_init_fw_data(p_dev, p_params->bin_fw_data); 2145*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2146*14b24e2bSVaishali Kulkarni return rc; 2147*14b24e2bSVaishali Kulkarni } 2148*14b24e2bSVaishali Kulkarni 2149*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 2150*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2151*14b24e2bSVaishali Kulkarni 2152*14b24e2bSVaishali Kulkarni /* If management didn't provide a default, set one of our own */ 2153*14b24e2bSVaishali Kulkarni if (!p_hwfn->hw_info.mtu) { 2154*14b24e2bSVaishali Kulkarni p_hwfn->hw_info.mtu = 1500; 2155*14b24e2bSVaishali Kulkarni b_default_mtu = false; 2156*14b24e2bSVaishali Kulkarni } 2157*14b24e2bSVaishali Kulkarni 2158*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) { 2159*14b24e2bSVaishali Kulkarni ecore_vf_start(p_hwfn, p_params); 2160*14b24e2bSVaishali Kulkarni continue; 2161*14b24e2bSVaishali Kulkarni } 2162*14b24e2bSVaishali Kulkarni 2163*14b24e2bSVaishali Kulkarni /* Enable DMAE in PXP */ 2164*14b24e2bSVaishali Kulkarni rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); 2165*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2166*14b24e2bSVaishali Kulkarni return rc; 2167*14b24e2bSVaishali Kulkarni 2168*14b24e2bSVaishali Kulkarni rc = ecore_calc_hw_mode(p_hwfn); 2169*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2170*14b24e2bSVaishali Kulkarni return rc; 2171*14b24e2bSVaishali Kulkarni 2172*14b24e2bSVaishali Kulkarni ecore_fill_load_req_params(&load_req_params, 2173*14b24e2bSVaishali Kulkarni p_params->p_drv_load_params); 2174*14b24e2bSVaishali Kulkarni rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 2175*14b24e2bSVaishali Kulkarni &load_req_params); 2176*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 2177*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2178*14b24e2bSVaishali Kulkarni "Failed sending a LOAD_REQ command\n"); 2179*14b24e2bSVaishali Kulkarni return rc; 2180*14b24e2bSVaishali Kulkarni } 2181*14b24e2bSVaishali Kulkarni 
2182*14b24e2bSVaishali Kulkarni load_code = load_req_params.load_code; 2183*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2184*14b24e2bSVaishali Kulkarni "Load request was sent. Load code: 0x%x\n", 2185*14b24e2bSVaishali Kulkarni load_code); 2186*14b24e2bSVaishali Kulkarni 2187*14b24e2bSVaishali Kulkarni ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); 2188*14b24e2bSVaishali Kulkarni 2189*14b24e2bSVaishali Kulkarni /* CQ75580: 2190*14b24e2bSVaishali Kulkarni * When comming back from hiberbate state, the registers from 2191*14b24e2bSVaishali Kulkarni * which shadow is read initially are not initialized. It turns 2192*14b24e2bSVaishali Kulkarni * out that these registers get initialized during the call to 2193*14b24e2bSVaishali Kulkarni * ecore_mcp_load_req request. So we need to reread them here 2194*14b24e2bSVaishali Kulkarni * to get the proper shadow register value. 2195*14b24e2bSVaishali Kulkarni * Note: This is a workaround for the missing MFW 2196*14b24e2bSVaishali Kulkarni * initialization. It may be removed once the implementation 2197*14b24e2bSVaishali Kulkarni * is done. 2198*14b24e2bSVaishali Kulkarni */ 2199*14b24e2bSVaishali Kulkarni ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 2200*14b24e2bSVaishali Kulkarni 2201*14b24e2bSVaishali Kulkarni /* Only relevant for recovery: 2202*14b24e2bSVaishali Kulkarni * Clear the indication after the LOAD_REQ command is responded 2203*14b24e2bSVaishali Kulkarni * by the MFW. 
2204*14b24e2bSVaishali Kulkarni */ 2205*14b24e2bSVaishali Kulkarni p_dev->recov_in_prog = false; 2206*14b24e2bSVaishali Kulkarni 2207*14b24e2bSVaishali Kulkarni p_hwfn->first_on_engine = (load_code == 2208*14b24e2bSVaishali Kulkarni FW_MSG_CODE_DRV_LOAD_ENGINE); 2209*14b24e2bSVaishali Kulkarni 2210*14b24e2bSVaishali Kulkarni if (!qm_lock_init) { 2211*14b24e2bSVaishali Kulkarni OSAL_SPIN_LOCK_INIT(&qm_lock); 2212*14b24e2bSVaishali Kulkarni qm_lock_init = true; 2213*14b24e2bSVaishali Kulkarni } 2214*14b24e2bSVaishali Kulkarni 2215*14b24e2bSVaishali Kulkarni switch (load_code) { 2216*14b24e2bSVaishali Kulkarni case FW_MSG_CODE_DRV_LOAD_ENGINE: 2217*14b24e2bSVaishali Kulkarni rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, 2218*14b24e2bSVaishali Kulkarni p_hwfn->hw_info.hw_mode); 2219*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2220*14b24e2bSVaishali Kulkarni break; 2221*14b24e2bSVaishali Kulkarni /* Fall into */ 2222*14b24e2bSVaishali Kulkarni case FW_MSG_CODE_DRV_LOAD_PORT: 2223*14b24e2bSVaishali Kulkarni rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, 2224*14b24e2bSVaishali Kulkarni p_hwfn->hw_info.hw_mode); 2225*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2226*14b24e2bSVaishali Kulkarni break; 2227*14b24e2bSVaishali Kulkarni /* Fall into */ 2228*14b24e2bSVaishali Kulkarni case FW_MSG_CODE_DRV_LOAD_FUNCTION: 2229*14b24e2bSVaishali Kulkarni rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, 2230*14b24e2bSVaishali Kulkarni p_params->p_tunn, 2231*14b24e2bSVaishali Kulkarni p_hwfn->hw_info.hw_mode, 2232*14b24e2bSVaishali Kulkarni p_params->b_hw_start, 2233*14b24e2bSVaishali Kulkarni p_params->int_mode, 2234*14b24e2bSVaishali Kulkarni p_params->allow_npar_tx_switch); 2235*14b24e2bSVaishali Kulkarni break; 2236*14b24e2bSVaishali Kulkarni default: 2237*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 2238*14b24e2bSVaishali Kulkarni "Unexpected load code [0x%08x]", load_code); 2239*14b24e2bSVaishali Kulkarni rc = ECORE_NOTIMPL; 
2240*14b24e2bSVaishali Kulkarni break; 2241*14b24e2bSVaishali Kulkarni } 2242*14b24e2bSVaishali Kulkarni 2243*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2244*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2245*14b24e2bSVaishali Kulkarni "init phase failed for loadcode 0x%x (rc %d)\n", 2246*14b24e2bSVaishali Kulkarni load_code, rc); 2247*14b24e2bSVaishali Kulkarni 2248*14b24e2bSVaishali Kulkarni /* ACK mfw regardless of success or failure of initialization */ 2249*14b24e2bSVaishali Kulkarni mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2250*14b24e2bSVaishali Kulkarni DRV_MSG_CODE_LOAD_DONE, 2251*14b24e2bSVaishali Kulkarni 0, &load_code, ¶m); 2252*14b24e2bSVaishali Kulkarni 2253*14b24e2bSVaishali Kulkarni /* Check the return value of the ecore_hw_init_*() function */ 2254*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2255*14b24e2bSVaishali Kulkarni return rc; 2256*14b24e2bSVaishali Kulkarni 2257*14b24e2bSVaishali Kulkarni /* Check the return value of the LOAD_DONE command */ 2258*14b24e2bSVaishali Kulkarni if (mfw_rc != ECORE_SUCCESS) { 2259*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2260*14b24e2bSVaishali Kulkarni "Failed sending a LOAD_DONE command\n"); 2261*14b24e2bSVaishali Kulkarni return mfw_rc; 2262*14b24e2bSVaishali Kulkarni } 2263*14b24e2bSVaishali Kulkarni 2264*14b24e2bSVaishali Kulkarni /* Check if there is a DID mismatch between nvm-cfg/efuse */ 2265*14b24e2bSVaishali Kulkarni if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) 2266*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 2267*14b24e2bSVaishali Kulkarni "warning: device configuration is not supported on this board type. 
The device may not function as expected.\n"); 2268*14b24e2bSVaishali Kulkarni 2269*14b24e2bSVaishali Kulkarni /* send DCBX attention request command */ 2270*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 2271*14b24e2bSVaishali Kulkarni "sending phony dcbx set command to trigger DCBx attention handling\n"); 2272*14b24e2bSVaishali Kulkarni mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2273*14b24e2bSVaishali Kulkarni DRV_MSG_CODE_SET_DCBX, 2274*14b24e2bSVaishali Kulkarni 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, 2275*14b24e2bSVaishali Kulkarni &load_code, ¶m); 2276*14b24e2bSVaishali Kulkarni if (mfw_rc != ECORE_SUCCESS) { 2277*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2278*14b24e2bSVaishali Kulkarni "Failed to send DCBX attention request\n"); 2279*14b24e2bSVaishali Kulkarni return mfw_rc; 2280*14b24e2bSVaishali Kulkarni } 2281*14b24e2bSVaishali Kulkarni 2282*14b24e2bSVaishali Kulkarni p_hwfn->hw_init_done = true; 2283*14b24e2bSVaishali Kulkarni } 2284*14b24e2bSVaishali Kulkarni 2285*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev)) { 2286*14b24e2bSVaishali Kulkarni p_hwfn = ECORE_LEADING_HWFN(p_dev); 2287*14b24e2bSVaishali Kulkarni drv_mb_param = STORM_FW_VERSION; 2288*14b24e2bSVaishali Kulkarni rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2289*14b24e2bSVaishali Kulkarni DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 2290*14b24e2bSVaishali Kulkarni drv_mb_param, &load_code, ¶m); 2291*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2292*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, "Failed to update firmware version\n"); 2293*14b24e2bSVaishali Kulkarni 2294*14b24e2bSVaishali Kulkarni if (!b_default_mtu) { 2295*14b24e2bSVaishali Kulkarni rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, 2296*14b24e2bSVaishali Kulkarni p_hwfn->hw_info.mtu); 2297*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2298*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, "Failed to update default mtu\n"); 2299*14b24e2bSVaishali Kulkarni } 2300*14b24e2bSVaishali Kulkarni 
2301*14b24e2bSVaishali Kulkarni rc = ecore_mcp_ov_update_driver_state(p_hwfn, 2302*14b24e2bSVaishali Kulkarni p_hwfn->p_main_ptt, 2303*14b24e2bSVaishali Kulkarni ECORE_OV_DRIVER_STATE_DISABLED); 2304*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2305*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, "Failed to update driver state\n"); 2306*14b24e2bSVaishali Kulkarni 2307*14b24e2bSVaishali Kulkarni rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, 2308*14b24e2bSVaishali Kulkarni ECORE_OV_ESWITCH_VEB); 2309*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2310*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); 2311*14b24e2bSVaishali Kulkarni } 2312*14b24e2bSVaishali Kulkarni 2313*14b24e2bSVaishali Kulkarni return rc; 2314*14b24e2bSVaishali Kulkarni } 2315*14b24e2bSVaishali Kulkarni 2316*14b24e2bSVaishali Kulkarni #define ECORE_HW_STOP_RETRY_LIMIT (10) 2317*14b24e2bSVaishali Kulkarni static void ecore_hw_timers_stop(struct ecore_dev *p_dev, 2318*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn, 2319*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt) 2320*14b24e2bSVaishali Kulkarni { 2321*14b24e2bSVaishali Kulkarni int i; 2322*14b24e2bSVaishali Kulkarni 2323*14b24e2bSVaishali Kulkarni /* close timers */ 2324*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 2325*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 2326*14b24e2bSVaishali Kulkarni for (i = 0; 2327*14b24e2bSVaishali Kulkarni i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog; 2328*14b24e2bSVaishali Kulkarni i++) { 2329*14b24e2bSVaishali Kulkarni if ((!ecore_rd(p_hwfn, p_ptt, 2330*14b24e2bSVaishali Kulkarni TM_REG_PF_SCAN_ACTIVE_CONN)) && 2331*14b24e2bSVaishali Kulkarni (!ecore_rd(p_hwfn, p_ptt, 2332*14b24e2bSVaishali Kulkarni TM_REG_PF_SCAN_ACTIVE_TASK))) 2333*14b24e2bSVaishali Kulkarni break; 2334*14b24e2bSVaishali Kulkarni 2335*14b24e2bSVaishali Kulkarni /* Dependent on number of connection/tasks, 
possibly 2336*14b24e2bSVaishali Kulkarni * 1ms sleep is required between polls 2337*14b24e2bSVaishali Kulkarni */ 2338*14b24e2bSVaishali Kulkarni OSAL_MSLEEP(1); 2339*14b24e2bSVaishali Kulkarni } 2340*14b24e2bSVaishali Kulkarni 2341*14b24e2bSVaishali Kulkarni if (i < ECORE_HW_STOP_RETRY_LIMIT) 2342*14b24e2bSVaishali Kulkarni return; 2343*14b24e2bSVaishali Kulkarni 2344*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2345*14b24e2bSVaishali Kulkarni "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 2346*14b24e2bSVaishali Kulkarni (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 2347*14b24e2bSVaishali Kulkarni (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 2348*14b24e2bSVaishali Kulkarni } 2349*14b24e2bSVaishali Kulkarni 2350*14b24e2bSVaishali Kulkarni void ecore_hw_timers_stop_all(struct ecore_dev *p_dev) 2351*14b24e2bSVaishali Kulkarni { 2352*14b24e2bSVaishali Kulkarni int j; 2353*14b24e2bSVaishali Kulkarni 2354*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, j) { 2355*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2356*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2357*14b24e2bSVaishali Kulkarni 2358*14b24e2bSVaishali Kulkarni ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2359*14b24e2bSVaishali Kulkarni } 2360*14b24e2bSVaishali Kulkarni } 2361*14b24e2bSVaishali Kulkarni 2362*14b24e2bSVaishali Kulkarni static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn, 2363*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 2364*14b24e2bSVaishali Kulkarni u32 addr, u32 expected_val) 2365*14b24e2bSVaishali Kulkarni { 2366*14b24e2bSVaishali Kulkarni u32 val = ecore_rd(p_hwfn, p_ptt, addr); 2367*14b24e2bSVaishali Kulkarni 2368*14b24e2bSVaishali Kulkarni if (val != expected_val) { 2369*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2370*14b24e2bSVaishali Kulkarni "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n", 2371*14b24e2bSVaishali Kulkarni 
addr, val, expected_val); 2372*14b24e2bSVaishali Kulkarni return ECORE_UNKNOWN_ERROR; 2373*14b24e2bSVaishali Kulkarni } 2374*14b24e2bSVaishali Kulkarni 2375*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 2376*14b24e2bSVaishali Kulkarni } 2377*14b24e2bSVaishali Kulkarni 2378*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) 2379*14b24e2bSVaishali Kulkarni { 2380*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn; 2381*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt; 2382*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc, rc2 = ECORE_SUCCESS; 2383*14b24e2bSVaishali Kulkarni int j; 2384*14b24e2bSVaishali Kulkarni 2385*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, j) { 2386*14b24e2bSVaishali Kulkarni p_hwfn = &p_dev->hwfns[j]; 2387*14b24e2bSVaishali Kulkarni p_ptt = p_hwfn->p_main_ptt; 2388*14b24e2bSVaishali Kulkarni 2389*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n"); 2390*14b24e2bSVaishali Kulkarni 2391*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) { 2392*14b24e2bSVaishali Kulkarni ecore_vf_pf_int_cleanup(p_hwfn); 2393*14b24e2bSVaishali Kulkarni rc = ecore_vf_pf_reset(p_hwfn); 2394*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 2395*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2396*14b24e2bSVaishali Kulkarni "ecore_vf_pf_reset failed. rc = %d.\n", 2397*14b24e2bSVaishali Kulkarni rc); 2398*14b24e2bSVaishali Kulkarni rc2 = ECORE_UNKNOWN_ERROR; 2399*14b24e2bSVaishali Kulkarni } 2400*14b24e2bSVaishali Kulkarni continue; 2401*14b24e2bSVaishali Kulkarni } 2402*14b24e2bSVaishali Kulkarni 2403*14b24e2bSVaishali Kulkarni /* mark the hw as uninitialized... 
*/ 2404*14b24e2bSVaishali Kulkarni p_hwfn->hw_init_done = false; 2405*14b24e2bSVaishali Kulkarni 2406*14b24e2bSVaishali Kulkarni /* Send unload command to MCP */ 2407*14b24e2bSVaishali Kulkarni if (!p_dev->recov_in_prog) { 2408*14b24e2bSVaishali Kulkarni rc = ecore_mcp_unload_req(p_hwfn, p_ptt); 2409*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 2410*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2411*14b24e2bSVaishali Kulkarni "Failed sending a UNLOAD_REQ command. rc = %d.\n", 2412*14b24e2bSVaishali Kulkarni rc); 2413*14b24e2bSVaishali Kulkarni rc2 = ECORE_UNKNOWN_ERROR; 2414*14b24e2bSVaishali Kulkarni } 2415*14b24e2bSVaishali Kulkarni } 2416*14b24e2bSVaishali Kulkarni 2417*14b24e2bSVaishali Kulkarni OSAL_DPC_SYNC(p_hwfn); 2418*14b24e2bSVaishali Kulkarni 2419*14b24e2bSVaishali Kulkarni /* After this point no MFW attentions are expected, e.g. prevent 2420*14b24e2bSVaishali Kulkarni * race between pf stop and dcbx pf update. 2421*14b24e2bSVaishali Kulkarni */ 2422*14b24e2bSVaishali Kulkarni 2423*14b24e2bSVaishali Kulkarni rc = ecore_sp_pf_stop(p_hwfn); 2424*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 2425*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2426*14b24e2bSVaishali Kulkarni "Failed to close PF against FW [rc = %d]. 
Continue to stop HW to prevent illegal host access by the device.\n", 2427*14b24e2bSVaishali Kulkarni rc); 2428*14b24e2bSVaishali Kulkarni rc2 = ECORE_UNKNOWN_ERROR; 2429*14b24e2bSVaishali Kulkarni } 2430*14b24e2bSVaishali Kulkarni 2431*14b24e2bSVaishali Kulkarni /* perform debug action after PF stop was sent */ 2432*14b24e2bSVaishali Kulkarni OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id); 2433*14b24e2bSVaishali Kulkarni 2434*14b24e2bSVaishali Kulkarni /* close NIG to BRB gate */ 2435*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, 2436*14b24e2bSVaishali Kulkarni NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2437*14b24e2bSVaishali Kulkarni 2438*14b24e2bSVaishali Kulkarni /* close parser */ 2439*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2440*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2441*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2442*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2443*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2444*14b24e2bSVaishali Kulkarni 2445*14b24e2bSVaishali Kulkarni /* @@@TBD - clean transmission queues (5.b) */ 2446*14b24e2bSVaishali Kulkarni /* @@@TBD - clean BTB (5.c) */ 2447*14b24e2bSVaishali Kulkarni 2448*14b24e2bSVaishali Kulkarni ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2449*14b24e2bSVaishali Kulkarni 2450*14b24e2bSVaishali Kulkarni /* @@@TBD - verify DMAE requests are done (8) */ 2451*14b24e2bSVaishali Kulkarni 2452*14b24e2bSVaishali Kulkarni /* Disable Attention Generation */ 2453*14b24e2bSVaishali Kulkarni ecore_int_igu_disable_int(p_hwfn, p_ptt); 2454*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 2455*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 2456*14b24e2bSVaishali Kulkarni ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); 2457*14b24e2bSVaishali Kulkarni rc = 
ecore_int_igu_reset_cam_default(p_hwfn, p_ptt); 2458*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 2459*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2460*14b24e2bSVaishali Kulkarni "Failed to return IGU CAM to default\n"); 2461*14b24e2bSVaishali Kulkarni rc2 = ECORE_UNKNOWN_ERROR; 2462*14b24e2bSVaishali Kulkarni } 2463*14b24e2bSVaishali Kulkarni 2464*14b24e2bSVaishali Kulkarni /* Need to wait 1ms to guarantee SBs are cleared */ 2465*14b24e2bSVaishali Kulkarni OSAL_MSLEEP(1); 2466*14b24e2bSVaishali Kulkarni 2467*14b24e2bSVaishali Kulkarni if (!p_dev->recov_in_prog) { 2468*14b24e2bSVaishali Kulkarni ecore_verify_reg_val(p_hwfn, p_ptt, 2469*14b24e2bSVaishali Kulkarni QM_REG_USG_CNT_PF_TX, 0); 2470*14b24e2bSVaishali Kulkarni ecore_verify_reg_val(p_hwfn, p_ptt, 2471*14b24e2bSVaishali Kulkarni QM_REG_USG_CNT_PF_OTHER, 0); 2472*14b24e2bSVaishali Kulkarni /* @@@TBD - assert on incorrect xCFC values (10.b) */ 2473*14b24e2bSVaishali Kulkarni } 2474*14b24e2bSVaishali Kulkarni 2475*14b24e2bSVaishali Kulkarni /* Disable PF in HW blocks */ 2476*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); 2477*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); 2478*14b24e2bSVaishali Kulkarni 2479*14b24e2bSVaishali Kulkarni if (!p_dev->recov_in_prog) { 2480*14b24e2bSVaishali Kulkarni ecore_mcp_unload_done(p_hwfn, p_ptt); 2481*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 2482*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2483*14b24e2bSVaishali Kulkarni "Failed sending a UNLOAD_DONE command. 
rc = %d.\n", 2484*14b24e2bSVaishali Kulkarni rc); 2485*14b24e2bSVaishali Kulkarni rc2 = ECORE_UNKNOWN_ERROR; 2486*14b24e2bSVaishali Kulkarni } 2487*14b24e2bSVaishali Kulkarni } 2488*14b24e2bSVaishali Kulkarni } /* hwfn loop */ 2489*14b24e2bSVaishali Kulkarni 2490*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev)) { 2491*14b24e2bSVaishali Kulkarni p_hwfn = ECORE_LEADING_HWFN(p_dev); 2492*14b24e2bSVaishali Kulkarni p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt; 2493*14b24e2bSVaishali Kulkarni 2494*14b24e2bSVaishali Kulkarni /* Disable DMAE in PXP - in CMT, this should only be done for 2495*14b24e2bSVaishali Kulkarni * first hw-function, and only after all transactions have 2496*14b24e2bSVaishali Kulkarni * stopped for all active hw-functions. 2497*14b24e2bSVaishali Kulkarni */ 2498*14b24e2bSVaishali Kulkarni rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false); 2499*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 2500*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2501*14b24e2bSVaishali Kulkarni "ecore_change_pci_hwfn failed. 
rc = %d.\n", 2502*14b24e2bSVaishali Kulkarni rc); 2503*14b24e2bSVaishali Kulkarni rc2 = ECORE_UNKNOWN_ERROR; 2504*14b24e2bSVaishali Kulkarni } 2505*14b24e2bSVaishali Kulkarni } 2506*14b24e2bSVaishali Kulkarni 2507*14b24e2bSVaishali Kulkarni return rc2; 2508*14b24e2bSVaishali Kulkarni } 2509*14b24e2bSVaishali Kulkarni 2510*14b24e2bSVaishali Kulkarni void ecore_hw_stop_fastpath(struct ecore_dev *p_dev) 2511*14b24e2bSVaishali Kulkarni { 2512*14b24e2bSVaishali Kulkarni int j; 2513*14b24e2bSVaishali Kulkarni 2514*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, j) { 2515*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2516*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2517*14b24e2bSVaishali Kulkarni 2518*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) { 2519*14b24e2bSVaishali Kulkarni ecore_vf_pf_int_cleanup(p_hwfn); 2520*14b24e2bSVaishali Kulkarni continue; 2521*14b24e2bSVaishali Kulkarni } 2522*14b24e2bSVaishali Kulkarni 2523*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n"); 2524*14b24e2bSVaishali Kulkarni 2525*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, 2526*14b24e2bSVaishali Kulkarni NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2527*14b24e2bSVaishali Kulkarni 2528*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2529*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2530*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2531*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2532*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2533*14b24e2bSVaishali Kulkarni 2534*14b24e2bSVaishali Kulkarni /* @@@TBD - clean transmission queues (5.b) */ 2535*14b24e2bSVaishali Kulkarni /* @@@TBD - clean BTB (5.c) */ 2536*14b24e2bSVaishali Kulkarni 2537*14b24e2bSVaishali Kulkarni /* @@@TBD - verify DMAE requests are done (8) */ 
2538*14b24e2bSVaishali Kulkarni 2539*14b24e2bSVaishali Kulkarni ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 2540*14b24e2bSVaishali Kulkarni /* Need to wait 1ms to guarantee SBs are cleared */ 2541*14b24e2bSVaishali Kulkarni OSAL_MSLEEP(1); 2542*14b24e2bSVaishali Kulkarni } 2543*14b24e2bSVaishali Kulkarni } 2544*14b24e2bSVaishali Kulkarni 2545*14b24e2bSVaishali Kulkarni void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn) 2546*14b24e2bSVaishali Kulkarni { 2547*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2548*14b24e2bSVaishali Kulkarni 2549*14b24e2bSVaishali Kulkarni if (IS_VF(p_hwfn->p_dev)) 2550*14b24e2bSVaishali Kulkarni return; 2551*14b24e2bSVaishali Kulkarni 2552*14b24e2bSVaishali Kulkarni /* If roce info is allocated it means roce is initialized and should 2553*14b24e2bSVaishali Kulkarni * be enabled in searcher. 2554*14b24e2bSVaishali Kulkarni */ 2555*14b24e2bSVaishali Kulkarni if (p_hwfn->p_rdma_info) { 2556*14b24e2bSVaishali Kulkarni if (p_hwfn->b_rdma_enabled_in_prs) 2557*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, 2558*14b24e2bSVaishali Kulkarni p_hwfn->rdma_prs_search_reg, 0x1); 2559*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1); 2560*14b24e2bSVaishali Kulkarni } 2561*14b24e2bSVaishali Kulkarni 2562*14b24e2bSVaishali Kulkarni /* Re-open incoming traffic */ 2563*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2564*14b24e2bSVaishali Kulkarni NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 2565*14b24e2bSVaishali Kulkarni } 2566*14b24e2bSVaishali Kulkarni /* TEMP macro to be removed when wol code revisted */ 2567*14b24e2bSVaishali Kulkarni #define ECORE_WOL_WR(_p_hwfn, _p_ptt, _offset, _val) ECORE_IS_BB(_p_hwfn->p_dev) ? 
\ 2568*14b24e2bSVaishali Kulkarni ecore_wr(_p_hwfn, _p_ptt, _offset, _val) : \ 2569*14b24e2bSVaishali Kulkarni ecore_mcp_wol_wr(_p_hwfn, _p_ptt, _offset, _val); 2570*14b24e2bSVaishali Kulkarni 2571*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev, 2572*14b24e2bSVaishali Kulkarni const bool b_enable, 2573*14b24e2bSVaishali Kulkarni u32 reg_idx, 2574*14b24e2bSVaishali Kulkarni u32 pattern_size, 2575*14b24e2bSVaishali Kulkarni u32 crc) 2576*14b24e2bSVaishali Kulkarni { 2577*14b24e2bSVaishali Kulkarni struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2578*14b24e2bSVaishali Kulkarni u32 reg_len = 0; 2579*14b24e2bSVaishali Kulkarni u32 reg_crc = 0; 2580*14b24e2bSVaishali Kulkarni 2581*14b24e2bSVaishali Kulkarni /* Get length and CRC register offsets */ 2582*14b24e2bSVaishali Kulkarni switch (reg_idx) 2583*14b24e2bSVaishali Kulkarni { 2584*14b24e2bSVaishali Kulkarni case 0: 2585*14b24e2bSVaishali Kulkarni reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_LEN_BB : 2586*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_0_LEN_K2_E5; 2587*14b24e2bSVaishali Kulkarni reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB : 2588*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_0_CRC_K2_E5; 2589*14b24e2bSVaishali Kulkarni break; 2590*14b24e2bSVaishali Kulkarni case 1: 2591*14b24e2bSVaishali Kulkarni reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB : 2592*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_1_LEN_K2_E5; 2593*14b24e2bSVaishali Kulkarni reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_CRC_BB : 2594*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_1_CRC_K2_E5; 2595*14b24e2bSVaishali Kulkarni break; 2596*14b24e2bSVaishali Kulkarni case 2: 2597*14b24e2bSVaishali Kulkarni reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB : 2598*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_2_LEN_K2_E5; 2599*14b24e2bSVaishali Kulkarni reg_crc = ECORE_IS_BB(p_dev) ? 
NIG_REG_ACPI_PAT_2_CRC_BB : 2600*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_2_CRC_K2_E5; 2601*14b24e2bSVaishali Kulkarni break; 2602*14b24e2bSVaishali Kulkarni case 3: 2603*14b24e2bSVaishali Kulkarni reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB : 2604*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_3_LEN_K2_E5; 2605*14b24e2bSVaishali Kulkarni reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB : 2606*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_3_CRC_K2_E5; 2607*14b24e2bSVaishali Kulkarni break; 2608*14b24e2bSVaishali Kulkarni case 4: 2609*14b24e2bSVaishali Kulkarni reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_LEN_BB : 2610*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_4_LEN_K2_E5; 2611*14b24e2bSVaishali Kulkarni reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB : 2612*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_4_CRC_K2_E5; 2613*14b24e2bSVaishali Kulkarni break; 2614*14b24e2bSVaishali Kulkarni case 5: 2615*14b24e2bSVaishali Kulkarni reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB : 2616*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_5_LEN_K2_E5; 2617*14b24e2bSVaishali Kulkarni reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB : 2618*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_5_CRC_K2_E5; 2619*14b24e2bSVaishali Kulkarni break; 2620*14b24e2bSVaishali Kulkarni case 6: 2621*14b24e2bSVaishali Kulkarni reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB : 2622*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_6_LEN_K2_E5; 2623*14b24e2bSVaishali Kulkarni reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB : 2624*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_6_CRC_K2_E5; 2625*14b24e2bSVaishali Kulkarni break; 2626*14b24e2bSVaishali Kulkarni case 7: 2627*14b24e2bSVaishali Kulkarni reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB : 2628*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_7_LEN_K2_E5; 2629*14b24e2bSVaishali Kulkarni reg_crc = ECORE_IS_BB(p_dev) ? 
NIG_REG_ACPI_PAT_7_CRC_BB : 2630*14b24e2bSVaishali Kulkarni WOL_REG_ACPI_PAT_7_CRC_K2_E5; 2631*14b24e2bSVaishali Kulkarni break; 2632*14b24e2bSVaishali Kulkarni default: 2633*14b24e2bSVaishali Kulkarni return ECORE_UNKNOWN_ERROR; 2634*14b24e2bSVaishali Kulkarni } 2635*14b24e2bSVaishali Kulkarni 2636*14b24e2bSVaishali Kulkarni /* Allign pattern size to 4 */ 2637*14b24e2bSVaishali Kulkarni while (pattern_size % 4) 2638*14b24e2bSVaishali Kulkarni { 2639*14b24e2bSVaishali Kulkarni pattern_size++; 2640*14b24e2bSVaishali Kulkarni } 2641*14b24e2bSVaishali Kulkarni /* write pattern length */ 2642*14b24e2bSVaishali Kulkarni ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, reg_len, pattern_size); 2643*14b24e2bSVaishali Kulkarni 2644*14b24e2bSVaishali Kulkarni /* write crc value*/ 2645*14b24e2bSVaishali Kulkarni ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, reg_crc, crc); 2646*14b24e2bSVaishali Kulkarni 2647*14b24e2bSVaishali Kulkarni DP_INFO(p_dev, 2648*14b24e2bSVaishali Kulkarni "ecore_set_nwuf_reg: idx[%d] reg_crc[0x%x=0x%08x] " 2649*14b24e2bSVaishali Kulkarni "reg_len[0x%x=0x%x]\n", 2650*14b24e2bSVaishali Kulkarni reg_idx, reg_crc, crc, reg_len, pattern_size); 2651*14b24e2bSVaishali Kulkarni 2652*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 2653*14b24e2bSVaishali Kulkarni } 2654*14b24e2bSVaishali Kulkarni 2655*14b24e2bSVaishali Kulkarni void ecore_wol_buffer_clear(struct ecore_dev *p_dev) 2656*14b24e2bSVaishali Kulkarni { 2657*14b24e2bSVaishali Kulkarni struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2658*14b24e2bSVaishali Kulkarni const u32 wake_buffer_clear_offset = 2659*14b24e2bSVaishali Kulkarni ECORE_IS_BB(p_dev) ? 
2660*14b24e2bSVaishali Kulkarni NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5; 2661*14b24e2bSVaishali Kulkarni 2662*14b24e2bSVaishali Kulkarni DP_INFO(p_dev, 2663*14b24e2bSVaishali Kulkarni "ecore_wol_buffer_clear: reset " 2664*14b24e2bSVaishali Kulkarni "REG_WAKE_BUFFER_CLEAR offset=0x%08x\n", 2665*14b24e2bSVaishali Kulkarni wake_buffer_clear_offset); 2666*14b24e2bSVaishali Kulkarni 2667*14b24e2bSVaishali Kulkarni ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, wake_buffer_clear_offset, 1); 2668*14b24e2bSVaishali Kulkarni ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, wake_buffer_clear_offset, 0); 2669*14b24e2bSVaishali Kulkarni } 2670*14b24e2bSVaishali Kulkarni 2671*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_get_wake_info(struct ecore_dev *p_dev, 2672*14b24e2bSVaishali Kulkarni struct ecore_wake_info *wake_info) 2673*14b24e2bSVaishali Kulkarni { 2674*14b24e2bSVaishali Kulkarni struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2675*14b24e2bSVaishali Kulkarni u32 *buf = OSAL_NULL; 2676*14b24e2bSVaishali Kulkarni u32 i = 0; 2677*14b24e2bSVaishali Kulkarni const u32 reg_wake_buffer_offest = 2678*14b24e2bSVaishali Kulkarni ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB : 2679*14b24e2bSVaishali Kulkarni WOL_REG_WAKE_BUFFER_K2_E5; 2680*14b24e2bSVaishali Kulkarni 2681*14b24e2bSVaishali Kulkarni wake_info->wk_info = ecore_rd(hwfn, hwfn->p_main_ptt, 2682*14b24e2bSVaishali Kulkarni ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB : 2683*14b24e2bSVaishali Kulkarni WOL_REG_WAKE_INFO_K2_E5); 2684*14b24e2bSVaishali Kulkarni wake_info->wk_details = ecore_rd(hwfn, hwfn->p_main_ptt, 2685*14b24e2bSVaishali Kulkarni ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_DETAILS_BB : 2686*14b24e2bSVaishali Kulkarni WOL_REG_WAKE_DETAILS_K2_E5); 2687*14b24e2bSVaishali Kulkarni wake_info->wk_pkt_len = ecore_rd(hwfn, hwfn->p_main_ptt, 2688*14b24e2bSVaishali Kulkarni ECORE_IS_BB(p_dev) ? 
NIG_REG_WAKE_PKT_LEN_BB : 2689*14b24e2bSVaishali Kulkarni WOL_REG_WAKE_PKT_LEN_K2_E5); 2690*14b24e2bSVaishali Kulkarni 2691*14b24e2bSVaishali Kulkarni DP_INFO(p_dev, 2692*14b24e2bSVaishali Kulkarni "ecore_get_wake_info: REG_WAKE_INFO=0x%08x " 2693*14b24e2bSVaishali Kulkarni "REG_WAKE_DETAILS=0x%08x " 2694*14b24e2bSVaishali Kulkarni "REG_WAKE_PKT_LEN=0x%08x\n", 2695*14b24e2bSVaishali Kulkarni wake_info->wk_info, 2696*14b24e2bSVaishali Kulkarni wake_info->wk_details, 2697*14b24e2bSVaishali Kulkarni wake_info->wk_pkt_len); 2698*14b24e2bSVaishali Kulkarni 2699*14b24e2bSVaishali Kulkarni buf = (u32 *)wake_info->wk_buffer; 2700*14b24e2bSVaishali Kulkarni 2701*14b24e2bSVaishali Kulkarni for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++) 2702*14b24e2bSVaishali Kulkarni { 2703*14b24e2bSVaishali Kulkarni if ((i*sizeof(u32)) >= sizeof(wake_info->wk_buffer)) 2704*14b24e2bSVaishali Kulkarni { 2705*14b24e2bSVaishali Kulkarni DP_INFO(p_dev, 2706*14b24e2bSVaishali Kulkarni "ecore_get_wake_info: i index to 0 high=%d\n", 2707*14b24e2bSVaishali Kulkarni i); 2708*14b24e2bSVaishali Kulkarni break; 2709*14b24e2bSVaishali Kulkarni } 2710*14b24e2bSVaishali Kulkarni buf[i] = ecore_rd(hwfn, hwfn->p_main_ptt, 2711*14b24e2bSVaishali Kulkarni reg_wake_buffer_offest + (i * sizeof(u32))); 2712*14b24e2bSVaishali Kulkarni DP_INFO(p_dev, "ecore_get_wake_info: wk_buffer[%u]: 0x%08x\n", 2713*14b24e2bSVaishali Kulkarni i, buf[i]); 2714*14b24e2bSVaishali Kulkarni } 2715*14b24e2bSVaishali Kulkarni 2716*14b24e2bSVaishali Kulkarni ecore_wol_buffer_clear(p_dev); 2717*14b24e2bSVaishali Kulkarni 2718*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 2719*14b24e2bSVaishali Kulkarni } 2720*14b24e2bSVaishali Kulkarni 2721*14b24e2bSVaishali Kulkarni /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 2722*14b24e2bSVaishali Kulkarni static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn) 2723*14b24e2bSVaishali Kulkarni { 2724*14b24e2bSVaishali Kulkarni ecore_ptt_pool_free(p_hwfn); 
/* Setup bar access */
static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
{
	/* clear indirect access - zero the PGLUE_B indirect address
	 * window registers; the window offsets differ between AH/E5 and
	 * BB device families.
	 */
	if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) {
		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
			 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
			 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
			 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
			 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
	} else {
		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
			 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
			 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
			 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
			 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
	}

	/* Clean Previous errors if such exist - clear only this PF's bit
	 * in the "was error" register.
	 */
	ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
		 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
		 1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
		 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}

/* Derive this hw-function's identity from the PXP "ME" registers:
 * opaque and concrete FIDs, absolute and relative PF ids, and port id.
 */
static void get_function_id(struct ecore_hwfn *p_hwfn)
{
	/* ME Register */
	p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
						  PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	/* Bits 16-19 from the ME registers are the pf_num */
	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	/* Relative PF id and port id are fields of the concrete FID */
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);

	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
		   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
}
Kulkarni 2789*14b24e2bSVaishali Kulkarni OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt)); 2790*14b24e2bSVaishali Kulkarni ecore_int_get_num_sbs(p_hwfn, &sb_cnt); 2791*14b24e2bSVaishali Kulkarni 2792*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_ROCE 2793*14b24e2bSVaishali Kulkarni /* Roce CNQ require each: 1 status block. 1 CNQ, we divide the 2794*14b24e2bSVaishali Kulkarni * status blocks equally between L2 / RoCE but with consideration as 2795*14b24e2bSVaishali Kulkarni * to how many l2 queues / cnqs we have 2796*14b24e2bSVaishali Kulkarni */ 2797*14b24e2bSVaishali Kulkarni if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 2798*14b24e2bSVaishali Kulkarni u32 max_cnqs; 2799*14b24e2bSVaishali Kulkarni 2800*14b24e2bSVaishali Kulkarni feat_num[ECORE_RDMA_CNQ] = 2801*14b24e2bSVaishali Kulkarni OSAL_MIN_T(u32, 2802*14b24e2bSVaishali Kulkarni sb_cnt.cnt / 2, 2803*14b24e2bSVaishali Kulkarni RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM)); 2804*14b24e2bSVaishali Kulkarni 2805*14b24e2bSVaishali Kulkarni /* Upper layer might require less */ 2806*14b24e2bSVaishali Kulkarni max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs; 2807*14b24e2bSVaishali Kulkarni if (max_cnqs) { 2808*14b24e2bSVaishali Kulkarni if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE) 2809*14b24e2bSVaishali Kulkarni max_cnqs = 0; 2810*14b24e2bSVaishali Kulkarni feat_num[ECORE_RDMA_CNQ] = 2811*14b24e2bSVaishali Kulkarni OSAL_MIN_T(u32, 2812*14b24e2bSVaishali Kulkarni feat_num[ECORE_RDMA_CNQ], 2813*14b24e2bSVaishali Kulkarni max_cnqs); 2814*14b24e2bSVaishali Kulkarni } 2815*14b24e2bSVaishali Kulkarni 2816*14b24e2bSVaishali Kulkarni non_l2_sbs = feat_num[ECORE_RDMA_CNQ]; 2817*14b24e2bSVaishali Kulkarni } 2818*14b24e2bSVaishali Kulkarni #endif 2819*14b24e2bSVaishali Kulkarni 2820*14b24e2bSVaishali Kulkarni /* L2 Queues require each: 1 status block. 
1 L2 queue */ 2821*14b24e2bSVaishali Kulkarni if (ECORE_IS_L2_PERSONALITY(p_hwfn)) { 2822*14b24e2bSVaishali Kulkarni /* Start by allocating VF queues, then PF's */ 2823*14b24e2bSVaishali Kulkarni feat_num[ECORE_VF_L2_QUE] = 2824*14b24e2bSVaishali Kulkarni OSAL_MIN_T(u32, 2825*14b24e2bSVaishali Kulkarni RESC_NUM(p_hwfn, ECORE_L2_QUEUE), 2826*14b24e2bSVaishali Kulkarni sb_cnt.iov_cnt); 2827*14b24e2bSVaishali Kulkarni feat_num[ECORE_PF_L2_QUE] = 2828*14b24e2bSVaishali Kulkarni OSAL_MIN_T(u32, 2829*14b24e2bSVaishali Kulkarni sb_cnt.cnt - non_l2_sbs, 2830*14b24e2bSVaishali Kulkarni RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - 2831*14b24e2bSVaishali Kulkarni FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE)); 2832*14b24e2bSVaishali Kulkarni } 2833*14b24e2bSVaishali Kulkarni 2834*14b24e2bSVaishali Kulkarni if (ECORE_IS_FCOE_PERSONALITY(p_hwfn)) 2835*14b24e2bSVaishali Kulkarni feat_num[ECORE_FCOE_CQ] = 2836*14b24e2bSVaishali Kulkarni OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 2837*14b24e2bSVaishali Kulkarni ECORE_CMDQS_CQS)); 2838*14b24e2bSVaishali Kulkarni 2839*14b24e2bSVaishali Kulkarni if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) 2840*14b24e2bSVaishali Kulkarni feat_num[ECORE_ISCSI_CQ] = 2841*14b24e2bSVaishali Kulkarni OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 2842*14b24e2bSVaishali Kulkarni ECORE_CMDQS_CQS)); 2843*14b24e2bSVaishali Kulkarni 2844*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 2845*14b24e2bSVaishali Kulkarni "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n", 2846*14b24e2bSVaishali Kulkarni (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), 2847*14b24e2bSVaishali Kulkarni (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE), 2848*14b24e2bSVaishali Kulkarni (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ), 2849*14b24e2bSVaishali Kulkarni (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ), 2850*14b24e2bSVaishali Kulkarni (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ), 2851*14b24e2bSVaishali Kulkarni (int)sb_cnt.cnt); 2852*14b24e2bSVaishali Kulkarni } 2853*14b24e2bSVaishali Kulkarni 
2854*14b24e2bSVaishali Kulkarni const char *ecore_hw_get_resc_name(enum ecore_resources res_id) 2855*14b24e2bSVaishali Kulkarni { 2856*14b24e2bSVaishali Kulkarni switch (res_id) { 2857*14b24e2bSVaishali Kulkarni case ECORE_L2_QUEUE: 2858*14b24e2bSVaishali Kulkarni return "L2_QUEUE"; 2859*14b24e2bSVaishali Kulkarni case ECORE_VPORT: 2860*14b24e2bSVaishali Kulkarni return "VPORT"; 2861*14b24e2bSVaishali Kulkarni case ECORE_RSS_ENG: 2862*14b24e2bSVaishali Kulkarni return "RSS_ENG"; 2863*14b24e2bSVaishali Kulkarni case ECORE_PQ: 2864*14b24e2bSVaishali Kulkarni return "PQ"; 2865*14b24e2bSVaishali Kulkarni case ECORE_RL: 2866*14b24e2bSVaishali Kulkarni return "RL"; 2867*14b24e2bSVaishali Kulkarni case ECORE_MAC: 2868*14b24e2bSVaishali Kulkarni return "MAC"; 2869*14b24e2bSVaishali Kulkarni case ECORE_VLAN: 2870*14b24e2bSVaishali Kulkarni return "VLAN"; 2871*14b24e2bSVaishali Kulkarni case ECORE_RDMA_CNQ_RAM: 2872*14b24e2bSVaishali Kulkarni return "RDMA_CNQ_RAM"; 2873*14b24e2bSVaishali Kulkarni case ECORE_ILT: 2874*14b24e2bSVaishali Kulkarni return "ILT"; 2875*14b24e2bSVaishali Kulkarni case ECORE_LL2_QUEUE: 2876*14b24e2bSVaishali Kulkarni return "LL2_QUEUE"; 2877*14b24e2bSVaishali Kulkarni case ECORE_CMDQS_CQS: 2878*14b24e2bSVaishali Kulkarni return "CMDQS_CQS"; 2879*14b24e2bSVaishali Kulkarni case ECORE_RDMA_STATS_QUEUE: 2880*14b24e2bSVaishali Kulkarni return "RDMA_STATS_QUEUE"; 2881*14b24e2bSVaishali Kulkarni case ECORE_BDQ: 2882*14b24e2bSVaishali Kulkarni return "BDQ"; 2883*14b24e2bSVaishali Kulkarni case ECORE_SB: 2884*14b24e2bSVaishali Kulkarni return "SB"; 2885*14b24e2bSVaishali Kulkarni default: 2886*14b24e2bSVaishali Kulkarni return "UNKNOWN_RESOURCE"; 2887*14b24e2bSVaishali Kulkarni } 2888*14b24e2bSVaishali Kulkarni } 2889*14b24e2bSVaishali Kulkarni 2890*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 2891*14b24e2bSVaishali Kulkarni __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 2892*14b24e2bSVaishali Kulkarni enum ecore_resources res_id, u32 
resc_max_val, 2893*14b24e2bSVaishali Kulkarni u32 *p_mcp_resp) 2894*14b24e2bSVaishali Kulkarni { 2895*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 2896*14b24e2bSVaishali Kulkarni 2897*14b24e2bSVaishali Kulkarni rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id, 2898*14b24e2bSVaishali Kulkarni resc_max_val, p_mcp_resp); 2899*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 2900*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 2901*14b24e2bSVaishali Kulkarni "MFW response failure for a max value setting of resource %d [%s]\n", 2902*14b24e2bSVaishali Kulkarni res_id, ecore_hw_get_resc_name(res_id)); 2903*14b24e2bSVaishali Kulkarni return rc; 2904*14b24e2bSVaishali Kulkarni } 2905*14b24e2bSVaishali Kulkarni 2906*14b24e2bSVaishali Kulkarni if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 2907*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, 2908*14b24e2bSVaishali Kulkarni "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n", 2909*14b24e2bSVaishali Kulkarni res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp); 2910*14b24e2bSVaishali Kulkarni 2911*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 2912*14b24e2bSVaishali Kulkarni } 2913*14b24e2bSVaishali Kulkarni 2914*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 2915*14b24e2bSVaishali Kulkarni ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn) 2916*14b24e2bSVaishali Kulkarni { 2917*14b24e2bSVaishali Kulkarni bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 2918*14b24e2bSVaishali Kulkarni u32 resc_max_val, mcp_resp; 2919*14b24e2bSVaishali Kulkarni u8 res_id; 2920*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 2921*14b24e2bSVaishali Kulkarni 2922*14b24e2bSVaishali Kulkarni for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 2923*14b24e2bSVaishali Kulkarni switch (res_id) { 2924*14b24e2bSVaishali Kulkarni case ECORE_LL2_QUEUE: 2925*14b24e2bSVaishali Kulkarni resc_max_val = MAX_NUM_LL2_RX_QUEUES; 2926*14b24e2bSVaishali Kulkarni break; 2927*14b24e2bSVaishali Kulkarni case 
ECORE_RDMA_CNQ_RAM: 2928*14b24e2bSVaishali Kulkarni /* No need for a case for ECORE_CMDQS_CQS since 2929*14b24e2bSVaishali Kulkarni * CNQ/CMDQS are the same resource. 2930*14b24e2bSVaishali Kulkarni */ 2931*14b24e2bSVaishali Kulkarni resc_max_val = NUM_OF_GLOBAL_QUEUES; 2932*14b24e2bSVaishali Kulkarni break; 2933*14b24e2bSVaishali Kulkarni case ECORE_RDMA_STATS_QUEUE: 2934*14b24e2bSVaishali Kulkarni resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 2935*14b24e2bSVaishali Kulkarni : RDMA_NUM_STATISTIC_COUNTERS_BB; 2936*14b24e2bSVaishali Kulkarni break; 2937*14b24e2bSVaishali Kulkarni case ECORE_BDQ: 2938*14b24e2bSVaishali Kulkarni resc_max_val = BDQ_NUM_RESOURCES; 2939*14b24e2bSVaishali Kulkarni break; 2940*14b24e2bSVaishali Kulkarni default: 2941*14b24e2bSVaishali Kulkarni continue; 2942*14b24e2bSVaishali Kulkarni } 2943*14b24e2bSVaishali Kulkarni 2944*14b24e2bSVaishali Kulkarni rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id, 2945*14b24e2bSVaishali Kulkarni resc_max_val, &mcp_resp); 2946*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2947*14b24e2bSVaishali Kulkarni return rc; 2948*14b24e2bSVaishali Kulkarni 2949*14b24e2bSVaishali Kulkarni /* There's no point to continue to the next resource if the 2950*14b24e2bSVaishali Kulkarni * command is not supported by the MFW. 2951*14b24e2bSVaishali Kulkarni * We do continue if the command is supported but the resource 2952*14b24e2bSVaishali Kulkarni * is unknown to the MFW. Such a resource will be later 2953*14b24e2bSVaishali Kulkarni * configured with the default allocation values. 
2954*14b24e2bSVaishali Kulkarni */ 2955*14b24e2bSVaishali Kulkarni if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 2956*14b24e2bSVaishali Kulkarni return ECORE_NOTIMPL; 2957*14b24e2bSVaishali Kulkarni } 2958*14b24e2bSVaishali Kulkarni 2959*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 2960*14b24e2bSVaishali Kulkarni } 2961*14b24e2bSVaishali Kulkarni 2962*14b24e2bSVaishali Kulkarni static 2963*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn, 2964*14b24e2bSVaishali Kulkarni enum ecore_resources res_id, 2965*14b24e2bSVaishali Kulkarni u32 *p_resc_num, u32 *p_resc_start) 2966*14b24e2bSVaishali Kulkarni { 2967*14b24e2bSVaishali Kulkarni u8 num_funcs = p_hwfn->num_funcs_on_engine; 2968*14b24e2bSVaishali Kulkarni bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 2969*14b24e2bSVaishali Kulkarni 2970*14b24e2bSVaishali Kulkarni switch (res_id) { 2971*14b24e2bSVaishali Kulkarni case ECORE_L2_QUEUE: 2972*14b24e2bSVaishali Kulkarni *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 : 2973*14b24e2bSVaishali Kulkarni MAX_NUM_L2_QUEUES_BB) / num_funcs; 2974*14b24e2bSVaishali Kulkarni break; 2975*14b24e2bSVaishali Kulkarni case ECORE_VPORT: 2976*14b24e2bSVaishali Kulkarni *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 2977*14b24e2bSVaishali Kulkarni MAX_NUM_VPORTS_BB) / num_funcs; 2978*14b24e2bSVaishali Kulkarni break; 2979*14b24e2bSVaishali Kulkarni case ECORE_RSS_ENG: 2980*14b24e2bSVaishali Kulkarni *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : 2981*14b24e2bSVaishali Kulkarni ETH_RSS_ENGINE_NUM_BB) / num_funcs; 2982*14b24e2bSVaishali Kulkarni break; 2983*14b24e2bSVaishali Kulkarni case ECORE_PQ: 2984*14b24e2bSVaishali Kulkarni *p_resc_num = (b_ah ? 
MAX_QM_TX_QUEUES_K2 : 2985*14b24e2bSVaishali Kulkarni MAX_QM_TX_QUEUES_BB) / num_funcs; 2986*14b24e2bSVaishali Kulkarni *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ 2987*14b24e2bSVaishali Kulkarni break; 2988*14b24e2bSVaishali Kulkarni case ECORE_RL: 2989*14b24e2bSVaishali Kulkarni *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; 2990*14b24e2bSVaishali Kulkarni break; 2991*14b24e2bSVaishali Kulkarni case ECORE_MAC: 2992*14b24e2bSVaishali Kulkarni case ECORE_VLAN: 2993*14b24e2bSVaishali Kulkarni /* Each VFC resource can accommodate both a MAC and a VLAN */ 2994*14b24e2bSVaishali Kulkarni *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 2995*14b24e2bSVaishali Kulkarni break; 2996*14b24e2bSVaishali Kulkarni case ECORE_ILT: 2997*14b24e2bSVaishali Kulkarni *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : 2998*14b24e2bSVaishali Kulkarni PXP_NUM_ILT_RECORDS_BB) / num_funcs; 2999*14b24e2bSVaishali Kulkarni break; 3000*14b24e2bSVaishali Kulkarni case ECORE_LL2_QUEUE: 3001*14b24e2bSVaishali Kulkarni *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 3002*14b24e2bSVaishali Kulkarni break; 3003*14b24e2bSVaishali Kulkarni case ECORE_RDMA_CNQ_RAM: 3004*14b24e2bSVaishali Kulkarni case ECORE_CMDQS_CQS: 3005*14b24e2bSVaishali Kulkarni /* CNQ/CMDQS are the same resource */ 3006*14b24e2bSVaishali Kulkarni *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; 3007*14b24e2bSVaishali Kulkarni break; 3008*14b24e2bSVaishali Kulkarni case ECORE_RDMA_STATS_QUEUE: 3009*14b24e2bSVaishali Kulkarni *p_resc_num = (b_ah ? 
RDMA_NUM_STATISTIC_COUNTERS_K2 : 3010*14b24e2bSVaishali Kulkarni RDMA_NUM_STATISTIC_COUNTERS_BB) / 3011*14b24e2bSVaishali Kulkarni num_funcs; 3012*14b24e2bSVaishali Kulkarni break; 3013*14b24e2bSVaishali Kulkarni case ECORE_BDQ: 3014*14b24e2bSVaishali Kulkarni if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI && 3015*14b24e2bSVaishali Kulkarni p_hwfn->hw_info.personality != ECORE_PCI_FCOE) 3016*14b24e2bSVaishali Kulkarni *p_resc_num = 0; 3017*14b24e2bSVaishali Kulkarni else 3018*14b24e2bSVaishali Kulkarni *p_resc_num = 1; 3019*14b24e2bSVaishali Kulkarni break; 3020*14b24e2bSVaishali Kulkarni case ECORE_SB: 3021*14b24e2bSVaishali Kulkarni /* Since we want its value to reflect whether MFW supports 3022*14b24e2bSVaishali Kulkarni * the new scheme, have a default of 0. 3023*14b24e2bSVaishali Kulkarni */ 3024*14b24e2bSVaishali Kulkarni *p_resc_num = 0; 3025*14b24e2bSVaishali Kulkarni break; 3026*14b24e2bSVaishali Kulkarni default: 3027*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 3028*14b24e2bSVaishali Kulkarni } 3029*14b24e2bSVaishali Kulkarni 3030*14b24e2bSVaishali Kulkarni switch (res_id) { 3031*14b24e2bSVaishali Kulkarni case ECORE_BDQ: 3032*14b24e2bSVaishali Kulkarni if (!*p_resc_num) 3033*14b24e2bSVaishali Kulkarni *p_resc_start = 0; 3034*14b24e2bSVaishali Kulkarni else if (p_hwfn->p_dev->num_ports_in_engine == 4) 3035*14b24e2bSVaishali Kulkarni *p_resc_start = p_hwfn->port_id; 3036*14b24e2bSVaishali Kulkarni else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) 3037*14b24e2bSVaishali Kulkarni *p_resc_start = p_hwfn->port_id; 3038*14b24e2bSVaishali Kulkarni else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 3039*14b24e2bSVaishali Kulkarni *p_resc_start = p_hwfn->port_id + 2; 3040*14b24e2bSVaishali Kulkarni break; 3041*14b24e2bSVaishali Kulkarni default: 3042*14b24e2bSVaishali Kulkarni *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 3043*14b24e2bSVaishali Kulkarni break; 3044*14b24e2bSVaishali Kulkarni } 3045*14b24e2bSVaishali Kulkarni 
3046*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 3047*14b24e2bSVaishali Kulkarni } 3048*14b24e2bSVaishali Kulkarni 3049*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 3050*14b24e2bSVaishali Kulkarni __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id, 3051*14b24e2bSVaishali Kulkarni bool drv_resc_alloc) 3052*14b24e2bSVaishali Kulkarni { 3053*14b24e2bSVaishali Kulkarni u32 dflt_resc_num = 0, dflt_resc_start = 0; 3054*14b24e2bSVaishali Kulkarni u32 mcp_resp, *p_resc_num, *p_resc_start; 3055*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 3056*14b24e2bSVaishali Kulkarni 3057*14b24e2bSVaishali Kulkarni p_resc_num = &RESC_NUM(p_hwfn, res_id); 3058*14b24e2bSVaishali Kulkarni p_resc_start = &RESC_START(p_hwfn, res_id); 3059*14b24e2bSVaishali Kulkarni 3060*14b24e2bSVaishali Kulkarni rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 3061*14b24e2bSVaishali Kulkarni &dflt_resc_start); 3062*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 3063*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, 3064*14b24e2bSVaishali Kulkarni "Failed to get default amount for resource %d [%s]\n", 3065*14b24e2bSVaishali Kulkarni res_id, ecore_hw_get_resc_name(res_id)); 3066*14b24e2bSVaishali Kulkarni return rc; 3067*14b24e2bSVaishali Kulkarni } 3068*14b24e2bSVaishali Kulkarni 3069*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 3070*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3071*14b24e2bSVaishali Kulkarni *p_resc_num = dflt_resc_num; 3072*14b24e2bSVaishali Kulkarni *p_resc_start = dflt_resc_start; 3073*14b24e2bSVaishali Kulkarni goto out; 3074*14b24e2bSVaishali Kulkarni } 3075*14b24e2bSVaishali Kulkarni #endif 3076*14b24e2bSVaishali Kulkarni 3077*14b24e2bSVaishali Kulkarni rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 3078*14b24e2bSVaishali Kulkarni &mcp_resp, p_resc_num, p_resc_start); 3079*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 3080*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 
3081*14b24e2bSVaishali Kulkarni "MFW response failure for an allocation request for resource %d [%s]\n", 3082*14b24e2bSVaishali Kulkarni res_id, ecore_hw_get_resc_name(res_id)); 3083*14b24e2bSVaishali Kulkarni return rc; 3084*14b24e2bSVaishali Kulkarni } 3085*14b24e2bSVaishali Kulkarni 3086*14b24e2bSVaishali Kulkarni /* Default driver values are applied in the following cases: 3087*14b24e2bSVaishali Kulkarni * - The resource allocation MB command is not supported by the MFW 3088*14b24e2bSVaishali Kulkarni * - There is an internal error in the MFW while processing the request 3089*14b24e2bSVaishali Kulkarni * - The resource ID is unknown to the MFW 3090*14b24e2bSVaishali Kulkarni */ 3091*14b24e2bSVaishali Kulkarni if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3092*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, 3093*14b24e2bSVaishali Kulkarni "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n", 3094*14b24e2bSVaishali Kulkarni res_id, ecore_hw_get_resc_name(res_id), mcp_resp, 3095*14b24e2bSVaishali Kulkarni dflt_resc_num, dflt_resc_start); 3096*14b24e2bSVaishali Kulkarni *p_resc_num = dflt_resc_num; 3097*14b24e2bSVaishali Kulkarni *p_resc_start = dflt_resc_start; 3098*14b24e2bSVaishali Kulkarni goto out; 3099*14b24e2bSVaishali Kulkarni } 3100*14b24e2bSVaishali Kulkarni 3101*14b24e2bSVaishali Kulkarni if ((*p_resc_num != dflt_resc_num || 3102*14b24e2bSVaishali Kulkarni *p_resc_start != dflt_resc_start) && 3103*14b24e2bSVaishali Kulkarni res_id != ECORE_SB) { 3104*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, 3105*14b24e2bSVaishali Kulkarni "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n", 3106*14b24e2bSVaishali Kulkarni res_id, ecore_hw_get_resc_name(res_id), *p_resc_num, 3107*14b24e2bSVaishali Kulkarni *p_resc_start, dflt_resc_num, dflt_resc_start, 3108*14b24e2bSVaishali Kulkarni drv_resc_alloc ? 
" - Applying default values" : ""); 3109*14b24e2bSVaishali Kulkarni if (drv_resc_alloc) { 3110*14b24e2bSVaishali Kulkarni *p_resc_num = dflt_resc_num; 3111*14b24e2bSVaishali Kulkarni *p_resc_start = dflt_resc_start; 3112*14b24e2bSVaishali Kulkarni } 3113*14b24e2bSVaishali Kulkarni } 3114*14b24e2bSVaishali Kulkarni out: 3115*14b24e2bSVaishali Kulkarni /* PQs have to divide by 8 [that's the HW granularity]. 3116*14b24e2bSVaishali Kulkarni * Reduce number so it would fit. 3117*14b24e2bSVaishali Kulkarni */ 3118*14b24e2bSVaishali Kulkarni if ((res_id == ECORE_PQ) && 3119*14b24e2bSVaishali Kulkarni ((*p_resc_num % 8) || (*p_resc_start % 8))) { 3120*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, 3121*14b24e2bSVaishali Kulkarni "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", 3122*14b24e2bSVaishali Kulkarni *p_resc_num, (*p_resc_num) & ~0x7, 3123*14b24e2bSVaishali Kulkarni *p_resc_start, (*p_resc_start) & ~0x7); 3124*14b24e2bSVaishali Kulkarni *p_resc_num &= ~0x7; 3125*14b24e2bSVaishali Kulkarni *p_resc_start &= ~0x7; 3126*14b24e2bSVaishali Kulkarni } 3127*14b24e2bSVaishali Kulkarni 3128*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 3129*14b24e2bSVaishali Kulkarni } 3130*14b24e2bSVaishali Kulkarni 3131*14b24e2bSVaishali Kulkarni static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, 3132*14b24e2bSVaishali Kulkarni bool drv_resc_alloc) 3133*14b24e2bSVaishali Kulkarni { 3134*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 3135*14b24e2bSVaishali Kulkarni u8 res_id; 3136*14b24e2bSVaishali Kulkarni 3137*14b24e2bSVaishali Kulkarni for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 3138*14b24e2bSVaishali Kulkarni rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc); 3139*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 3140*14b24e2bSVaishali Kulkarni return rc; 3141*14b24e2bSVaishali Kulkarni } 3142*14b24e2bSVaishali Kulkarni 3143*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 3144*14b24e2bSVaishali Kulkarni } 
3145*14b24e2bSVaishali Kulkarni 3146*14b24e2bSVaishali Kulkarni #define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10 3147*14b24e2bSVaishali Kulkarni #define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */ 3148*14b24e2bSVaishali Kulkarni 3149*14b24e2bSVaishali Kulkarni static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn, 3150*14b24e2bSVaishali Kulkarni bool drv_resc_alloc) 3151*14b24e2bSVaishali Kulkarni { 3152*14b24e2bSVaishali Kulkarni struct ecore_resc_unlock_params resc_unlock_params; 3153*14b24e2bSVaishali Kulkarni struct ecore_resc_lock_params resc_lock_params; 3154*14b24e2bSVaishali Kulkarni bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3155*14b24e2bSVaishali Kulkarni u8 res_id; 3156*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 3157*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 3158*14b24e2bSVaishali Kulkarni u32 *resc_start = p_hwfn->hw_info.resc_start; 3159*14b24e2bSVaishali Kulkarni u32 *resc_num = p_hwfn->hw_info.resc_num; 3160*14b24e2bSVaishali Kulkarni /* For AH, an equal share of the ILT lines between the maximal number of 3161*14b24e2bSVaishali Kulkarni * PFs is not enough for RoCE. This would be solved by the future 3162*14b24e2bSVaishali Kulkarni * resource allocation scheme, but isn't currently present for 3163*14b24e2bSVaishali Kulkarni * FPGA/emulation. For now we keep a number that is sufficient for RoCE 3164*14b24e2bSVaishali Kulkarni * to work - the BB number of ILT lines divided by its max PFs number. 3165*14b24e2bSVaishali Kulkarni */ 3166*14b24e2bSVaishali Kulkarni u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB; 3167*14b24e2bSVaishali Kulkarni #endif 3168*14b24e2bSVaishali Kulkarni 3169*14b24e2bSVaishali Kulkarni /* Setting the max values of the soft resources and the following 3170*14b24e2bSVaishali Kulkarni * resources allocation queries should be atomic. Since several PFs can 3171*14b24e2bSVaishali Kulkarni * run in parallel - a resource lock is needed. 
3172*14b24e2bSVaishali Kulkarni * If either the resource lock or resource set value commands are not 3173*14b24e2bSVaishali Kulkarni * supported - skip the the max values setting, release the lock if 3174*14b24e2bSVaishali Kulkarni * needed, and proceed to the queries. Other failures, including a 3175*14b24e2bSVaishali Kulkarni * failure to acquire the lock, will cause this function to fail. 3176*14b24e2bSVaishali Kulkarni * Old drivers that don't acquire the lock can run in parallel, and 3177*14b24e2bSVaishali Kulkarni * their allocation values won't be affected by the updated max values. 3178*14b24e2bSVaishali Kulkarni */ 3179*14b24e2bSVaishali Kulkarni OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params)); 3180*14b24e2bSVaishali Kulkarni resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC; 3181*14b24e2bSVaishali Kulkarni resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT; 3182*14b24e2bSVaishali Kulkarni resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US; 3183*14b24e2bSVaishali Kulkarni resc_lock_params.sleep_b4_retry = true; 3184*14b24e2bSVaishali Kulkarni OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params)); 3185*14b24e2bSVaishali Kulkarni resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC; 3186*14b24e2bSVaishali Kulkarni 3187*14b24e2bSVaishali Kulkarni rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params); 3188*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3189*14b24e2bSVaishali Kulkarni return rc; 3190*14b24e2bSVaishali Kulkarni } else if (rc == ECORE_NOTIMPL) { 3191*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, 3192*14b24e2bSVaishali Kulkarni "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 3193*14b24e2bSVaishali Kulkarni } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) { 3194*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 3195*14b24e2bSVaishali Kulkarni "Failed to acquire the 
resource lock for the resource allocation commands\n"); 3196*14b24e2bSVaishali Kulkarni return ECORE_BUSY; 3197*14b24e2bSVaishali Kulkarni } else { 3198*14b24e2bSVaishali Kulkarni rc = ecore_hw_set_soft_resc_size(p_hwfn); 3199*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3200*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 3201*14b24e2bSVaishali Kulkarni "Failed to set the max values of the soft resources\n"); 3202*14b24e2bSVaishali Kulkarni goto unlock_and_exit; 3203*14b24e2bSVaishali Kulkarni } else if (rc == ECORE_NOTIMPL) { 3204*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, 3205*14b24e2bSVaishali Kulkarni "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 3206*14b24e2bSVaishali Kulkarni rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3207*14b24e2bSVaishali Kulkarni &resc_unlock_params); 3208*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 3209*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, 3210*14b24e2bSVaishali Kulkarni "Failed to release the resource lock for the resource allocation commands\n"); 3211*14b24e2bSVaishali Kulkarni } 3212*14b24e2bSVaishali Kulkarni } 3213*14b24e2bSVaishali Kulkarni 3214*14b24e2bSVaishali Kulkarni rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc); 3215*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 3216*14b24e2bSVaishali Kulkarni goto unlock_and_exit; 3217*14b24e2bSVaishali Kulkarni 3218*14b24e2bSVaishali Kulkarni if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 3219*14b24e2bSVaishali Kulkarni rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3220*14b24e2bSVaishali Kulkarni &resc_unlock_params); 3221*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 3222*14b24e2bSVaishali Kulkarni DP_INFO(p_hwfn, 3223*14b24e2bSVaishali Kulkarni "Failed to release the resource lock for the resource allocation commands\n"); 3224*14b24e2bSVaishali Kulkarni } 3225*14b24e2bSVaishali Kulkarni 3226*14b24e2bSVaishali Kulkarni #ifndef 
ASIC_ONLY 3227*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3228*14b24e2bSVaishali Kulkarni /* Reduced build contains less PQs */ 3229*14b24e2bSVaishali Kulkarni if (!(p_hwfn->p_dev->b_is_emul_full)) { 3230*14b24e2bSVaishali Kulkarni resc_num[ECORE_PQ] = 32; 3231*14b24e2bSVaishali Kulkarni resc_start[ECORE_PQ] = resc_num[ECORE_PQ] * 3232*14b24e2bSVaishali Kulkarni p_hwfn->enabled_func_idx; 3233*14b24e2bSVaishali Kulkarni } 3234*14b24e2bSVaishali Kulkarni 3235*14b24e2bSVaishali Kulkarni /* For AH emulation, since we have a possible maximal number of 3236*14b24e2bSVaishali Kulkarni * 16 enabled PFs, in case there are not enough ILT lines - 3237*14b24e2bSVaishali Kulkarni * allocate only first PF as RoCE and have all the other ETH 3238*14b24e2bSVaishali Kulkarni * only with less ILT lines. 3239*14b24e2bSVaishali Kulkarni */ 3240*14b24e2bSVaishali Kulkarni if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full) 3241*14b24e2bSVaishali Kulkarni resc_num[ECORE_ILT] = OSAL_MAX_T(u32, 3242*14b24e2bSVaishali Kulkarni resc_num[ECORE_ILT], 3243*14b24e2bSVaishali Kulkarni roce_min_ilt_lines); 3244*14b24e2bSVaishali Kulkarni } 3245*14b24e2bSVaishali Kulkarni 3246*14b24e2bSVaishali Kulkarni /* Correct the common ILT calculation if PF0 has more */ 3247*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && 3248*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->b_is_emul_full && 3249*14b24e2bSVaishali Kulkarni p_hwfn->rel_pf_id && 3250*14b24e2bSVaishali Kulkarni resc_num[ECORE_ILT] < roce_min_ilt_lines) 3251*14b24e2bSVaishali Kulkarni resc_start[ECORE_ILT] += roce_min_ilt_lines - 3252*14b24e2bSVaishali Kulkarni resc_num[ECORE_ILT]; 3253*14b24e2bSVaishali Kulkarni #endif 3254*14b24e2bSVaishali Kulkarni 3255*14b24e2bSVaishali Kulkarni /* Sanity for ILT */ 3256*14b24e2bSVaishali Kulkarni if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 3257*14b24e2bSVaishali Kulkarni (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 
		/* Tail of ecore_hw_get_resc(): the computed ILT range exceeds
		 * what this chip variant provides - fail the resource probe.
		 */
		DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n",
			  RESC_START(p_hwfn, ECORE_ILT),
			  RESC_END(p_hwfn, ECORE_ILT) - 1);
		return ECORE_INVAL;
	}

	/* This will also learn the number of SBs from MFW */
	if (ecore_int_igu_reset_cam(p_hwfn, p_hwfn->p_main_ptt))
		return ECORE_INVAL;

	ecore_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
		   "The numbers for each resource are:\n");
	for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++)
		DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n",
			   ecore_hw_get_resc_name(res_id),
			   RESC_NUM(p_hwfn, res_id),
			   RESC_START(p_hwfn, res_id));

	return ECORE_SUCCESS;

unlock_and_exit:
	/* Release the MFW resource lock if it was granted and not yet freed */
	if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
		ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
				      &resc_unlock_params);
	return rc;
}

/*
 * Read the nvm_cfg image out of the MCP scratchpad shared memory and derive
 * the default HW configuration for this function: network port mode, DCBX
 * mode, default link settings (speed / flow control / EEE), multi-function
 * mode and the device capability bits. Finally fills the per-function
 * shmem info via ecore_mcp_fill_shmem_func_info().
 *
 * Returns ECORE_INVAL if the MCP never initialized shared memory. In
 * relaxed-probe mode, MCP-related failures are reported through
 * p_params->p_relaxed_res instead of failing the whole probe.
 */
static enum _ecore_status_t
ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      struct ecore_hw_prepare_params *p_params)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	struct ecore_mcp_link_capabilities *p_caps;
	struct ecore_mcp_link_params *link;
	enum _ecore_status_t rc;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
		if (p_params->b_relaxed_probe)
			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM;
		return ECORE_INVAL;
	}

	/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
	nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	/* Network port mode from the global section */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       OFFSETOF(struct nvm_cfg1, glob) +
	       OFFSETOF(struct nvm_cfg1_glob, core_cfg);

	core_cfg = ecore_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
			  core_cfg);
		break;
	}

	/* Read DCBX configuration */
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	dcbx_mode = ecore_rd(p_hwfn, p_ptt,
			     port_cfg_addr +
			     OFFSETOF(struct nvm_cfg1_port, generic_cont0));
	dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
		    >> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
	switch (dcbx_mode) {
	case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
		break;
	case NVM_CFG1_PORT_DCBX_MODE_CEE:
		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE;
		break;
	case NVM_CFG1_PORT_DCBX_MODE_IEEE:
		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE;
		break;
	default:
		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	p_caps = &p_hwfn->mcp_info->link_capabilities;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = ecore_rd(p_hwfn, p_ptt,
			     port_cfg_addr +
			     OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
	link->speed.advertised_speeds = link_temp;
	p_caps->speed_capabilities = link->speed.advertised_speeds;

	link_temp = ecore_rd(p_hwfn, p_ptt,
			     port_cfg_addr +
			     OFFSETOF(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n",
			  link_temp);
	}

	p_caps->default_speed = link->speed.forced_speed;
	p_caps->default_speed_autoneg = link->speed.autoneg;

	/* Flow-control defaults live in the same link_settings word */
	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	/* EEE defaults - only meaningful when the MFW advertises support */
	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
		link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
				     OFFSETOF(struct nvm_cfg1_port, ext_phy));
		link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
		link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
		p_caps->default_eee = ECORE_MCP_EEE_ENABLED;
		link->eee.enable = true;
		switch (link_temp) {
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
			p_caps->default_eee = ECORE_MCP_EEE_DISABLED;
			link->eee.enable = false;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
			p_caps->eee_lpi_timer =
				EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
			break;
		}
		link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
		link->eee.tx_lpi_enable = link->eee.enable;
		if (link->eee.enable)
			link->eee.adv_caps = ECORE_EEE_1G_ADV |
					     ECORE_EEE_10G_ADV;
	} else {
		p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg,
		   p_caps->default_eee, p_caps->eee_lpi_timer);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       OFFSETOF(struct nvm_cfg1, glob) +
	       OFFSETOF(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->p_dev->mf_mode);

	/* Read device capability bits from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       OFFSETOF(struct nvm_cfg1, glob) +
	       OFFSETOF(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
			     &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
		OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
			     &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
		OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
			     &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
		OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
			     &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
		OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
			     &p_hwfn->hw_info.device_capabilities);

	rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
		rc = ECORE_SUCCESS;
		p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
	}

	return rc;
}

/*
 * Learn how many PFs are enabled on this engine and this PF's index among
 * the enabled functions, based on MISCS_REG_FUNCTION_HIDE.
 */
static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt)
{
	/* Start from the worst case: the PF's relative id, refined below */
	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
	struct ecore_dev *p_dev = p_hwfn->p_dev;

	num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and
	 * enabled).
	 * In case of CMT in BB, only the "even" functions are enabled, and thus
	 * the number of functions for both hwfns is learnt from the same bits.
	 */
	if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) {
		reg_function_hide = ecore_rd(p_hwfn, p_ptt,
					     MISCS_REG_FUNCTION_HIDE_BB_K2);
	} else { /* E5 */
		reg_function_hide = 0;
		ECORE_E5_MISSING_CODE;
	}

	if (reg_function_hide & 0x1) {
		/* Select the engine mask of function-hide bits relevant to
		 * this hwfn; function 0 (and 1 for CMT) are counted up front.
		 */
		if (ECORE_IS_BB(p_dev)) {
			if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) {
				num_funcs = 0;
				eng_mask = 0xaaaa;
			} else {
				num_funcs = 1;
				eng_mask = 0x5554;
			}
		} else {
			num_funcs = 1;
			eng_mask = 0xfffe;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}

		/* Get the PF index within the enabled functions */
		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
		tmp = reg_function_hide & eng_mask & low_pfs_mask;
		while (tmp) {
			if (tmp & 0x1)
				enabled_func_idx--;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;
	p_hwfn->enabled_func_idx = enabled_func_idx;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n");
		p_hwfn->num_funcs_on_engine = 4;
	}
#endif

	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}

/*
 * Learn the number of ports on a BB engine from the chip's network port
 * mode register (or from fixed assumptions on FPGA/emulation platforms).
 */
static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 port_mode;

#ifndef ASIC_ONLY
	/* Read the port mode */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		port_mode = 4;
	else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
		 (p_hwfn->p_dev->num_hwfns > 1))
		/* In CMT on emulation, assume 1 port */
		port_mode = 1;
	else
3618*14b24e2bSVaishali Kulkarni #endif 3619*14b24e2bSVaishali Kulkarni port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); 3620*14b24e2bSVaishali Kulkarni 3621*14b24e2bSVaishali Kulkarni if (port_mode < 3) { 3622*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine = 1; 3623*14b24e2bSVaishali Kulkarni } else if (port_mode <= 5) { 3624*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine = 2; 3625*14b24e2bSVaishali Kulkarni } else { 3626*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n", 3627*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine); 3628*14b24e2bSVaishali Kulkarni 3629*14b24e2bSVaishali Kulkarni /* Default num_ports_in_engine to something */ 3630*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine = 1; 3631*14b24e2bSVaishali Kulkarni } 3632*14b24e2bSVaishali Kulkarni } 3633*14b24e2bSVaishali Kulkarni 3634*14b24e2bSVaishali Kulkarni static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn, 3635*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt) 3636*14b24e2bSVaishali Kulkarni { 3637*14b24e2bSVaishali Kulkarni u32 port; 3638*14b24e2bSVaishali Kulkarni int i; 3639*14b24e2bSVaishali Kulkarni 3640*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine = 0; 3641*14b24e2bSVaishali Kulkarni 3642*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 3643*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 3644*14b24e2bSVaishali Kulkarni port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); 3645*14b24e2bSVaishali Kulkarni switch ((port & 0xf000) >> 12) { 3646*14b24e2bSVaishali Kulkarni case 1: 3647*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine = 1; 3648*14b24e2bSVaishali Kulkarni break; 3649*14b24e2bSVaishali Kulkarni case 3: 3650*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine = 2; 3651*14b24e2bSVaishali Kulkarni break; 3652*14b24e2bSVaishali Kulkarni case 0xf: 3653*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine 
= 4; 3654*14b24e2bSVaishali Kulkarni break; 3655*14b24e2bSVaishali Kulkarni default: 3656*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 3657*14b24e2bSVaishali Kulkarni "Unknown port mode in ECO_RESERVED %08x\n", 3658*14b24e2bSVaishali Kulkarni port); 3659*14b24e2bSVaishali Kulkarni } 3660*14b24e2bSVaishali Kulkarni } else 3661*14b24e2bSVaishali Kulkarni #endif 3662*14b24e2bSVaishali Kulkarni for (i = 0; i < MAX_NUM_PORTS_K2; i++) { 3663*14b24e2bSVaishali Kulkarni port = ecore_rd(p_hwfn, p_ptt, 3664*14b24e2bSVaishali Kulkarni CNIG_REG_NIG_PORT0_CONF_K2_E5 + (i * 4)); 3665*14b24e2bSVaishali Kulkarni if (port & 1) 3666*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine++; 3667*14b24e2bSVaishali Kulkarni } 3668*14b24e2bSVaishali Kulkarni 3669*14b24e2bSVaishali Kulkarni if (!p_hwfn->p_dev->num_ports_in_engine) { 3670*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n"); 3671*14b24e2bSVaishali Kulkarni 3672*14b24e2bSVaishali Kulkarni /* Default num_ports_in_engine to something */ 3673*14b24e2bSVaishali Kulkarni p_hwfn->p_dev->num_ports_in_engine = 1; 3674*14b24e2bSVaishali Kulkarni } 3675*14b24e2bSVaishali Kulkarni } 3676*14b24e2bSVaishali Kulkarni 3677*14b24e2bSVaishali Kulkarni static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn, 3678*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt) 3679*14b24e2bSVaishali Kulkarni { 3680*14b24e2bSVaishali Kulkarni if (ECORE_IS_BB(p_hwfn->p_dev)) 3681*14b24e2bSVaishali Kulkarni ecore_hw_info_port_num_bb(p_hwfn, p_ptt); 3682*14b24e2bSVaishali Kulkarni else 3683*14b24e2bSVaishali Kulkarni ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt); 3684*14b24e2bSVaishali Kulkarni } 3685*14b24e2bSVaishali Kulkarni 3686*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 3687*14b24e2bSVaishali Kulkarni ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3688*14b24e2bSVaishali Kulkarni enum ecore_pci_personality personality, 3689*14b24e2bSVaishali Kulkarni struct 
		  ecore_hw_prepare_params *p_params)
{
	bool drv_resc_alloc = p_params->drv_resc_alloc;
	enum _ecore_status_t rc;

	/* Since all information is common, only first hwfns should do this */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = ecore_iov_hw_info(p_hwfn);
		if (rc != ECORE_SUCCESS) {
			if (p_params->b_relaxed_probe)
				p_params->p_relaxed_res =
					ECORE_HW_PREPARE_BAD_IOV;
			else
				return rc;
		}
	}

	/* TODO In get_hw_info, amongst others:
	 * Get MCP FW revision and determine according to it the supported
	 * featrues (e.g. DCB)
	 * Get boot mode
	 * ecore_get_pcie_width_speed, WOL capability.
	 * Number of global CQ-s (for storage
	 */
	ecore_hw_info_port_num(p_hwfn, p_ptt);

	ecore_mcp_get_capabilities(p_hwfn, p_ptt);

	/* nvm_cfg is only trustworthy on real silicon */
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
#endif
		rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
		if (rc != ECORE_SUCCESS)
			return rc;
#ifndef ASIC_ONLY
	}
#endif

	rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		if (p_params->b_relaxed_probe)
			p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU;
		else
			return rc;
	}

	/* MAC comes from the MFW on ASIC; emulation/FPGA get a synthetic one
	 * derived from the absolute PF id.
	 */
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
#endif
		OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
			    p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
#ifndef ASIC_ONLY
	} else {
		static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6};

		OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
		p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
	}
#endif

	if (ecore_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}

	/* Caller-supplied personality wins; otherwise take the MFW's */
	if (personality != ECORE_PCI_DEFAULT) {
		p_hwfn->hw_info.personality = personality;
	} else if (ecore_mcp_is_init(p_hwfn)) {
		enum ecore_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

#ifndef ASIC_ONLY
	/* To overcome ILT lack for emulation, until at least until we'll have
	 * a definite answer from system about it, allow only PF0 to be RoCE.
	 */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
		if (!p_hwfn->rel_pf_id)
			p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
		else
			p_hwfn->hw_info.personality = ECORE_PCI_ETH;
	}
#endif

	/* although in BB some constellations may support more than 4 tcs,
	 * that can result in performance penalty in some cases. 4
	 * represents a good tradeoff between performance and flexibility.
	 */
	p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;

	/* start out with a single active tc. This can be increased either
	 * by dcbx negotiation or by upper layer driver
	 */
	p_hwfn->hw_info.num_active_tc = 1;

	ecore_get_num_funcs(p_hwfn, p_ptt);

	if (ecore_mcp_is_init(p_hwfn))
		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;

	/* In case of forcing the driver's default resource allocation, calling
	 * ecore_hw_get_resc() should come after initializing the personality
	 * and after getting the number of functions, since the calculation of
	 * the resources/features depends on them.
	 * This order is not harmful if not forcing.
	 */
	rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
	if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
		rc = ECORE_SUCCESS;
		p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
	}

	return rc;
}

/*
 * Identify the device: vendor/device id, chip family (BB vs. AH), chip
 * num/rev/metal/bond-id, and whether the device is in CMT (two hwfns).
 * Rejects unknown device ids and unsupported BB A0 silicon.
 */
static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u16 device_id_mask;
	u32 tmp;

	/* Read Vendor Id / Device Id */
	OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
				  &p_dev->vendor_id);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
				  &p_dev->device_id);

	/* Determine type */
	device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK;
	switch (device_id_mask) {
	case ECORE_DEV_ID_MASK_BB:
		p_dev->type = ECORE_DEV_TYPE_BB;
		break;
	case ECORE_DEV_ID_MASK_AH:
		p_dev->type = ECORE_DEV_TYPE_AH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n",
			  p_dev->device_id);
		return ECORE_ABORTED;
3834*14b24e2bSVaishali Kulkarni } 3835*14b24e2bSVaishali Kulkarni 3836*14b24e2bSVaishali Kulkarni p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3837*14b24e2bSVaishali Kulkarni MISCS_REG_CHIP_NUM); 3838*14b24e2bSVaishali Kulkarni p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3839*14b24e2bSVaishali Kulkarni MISCS_REG_CHIP_REV); 3840*14b24e2bSVaishali Kulkarni 3841*14b24e2bSVaishali Kulkarni MASK_FIELD(CHIP_REV, p_dev->chip_rev); 3842*14b24e2bSVaishali Kulkarni 3843*14b24e2bSVaishali Kulkarni /* Learn number of HW-functions */ 3844*14b24e2bSVaishali Kulkarni tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3845*14b24e2bSVaishali Kulkarni MISCS_REG_CMT_ENABLED_FOR_PAIR); 3846*14b24e2bSVaishali Kulkarni 3847*14b24e2bSVaishali Kulkarni if (tmp & (1 << p_hwfn->rel_pf_id)) { 3848*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n"); 3849*14b24e2bSVaishali Kulkarni p_dev->num_hwfns = 2; 3850*14b24e2bSVaishali Kulkarni } else { 3851*14b24e2bSVaishali Kulkarni p_dev->num_hwfns = 1; 3852*14b24e2bSVaishali Kulkarni } 3853*14b24e2bSVaishali Kulkarni 3854*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 3855*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_EMUL(p_dev)) { 3856*14b24e2bSVaishali Kulkarni /* For some reason we have problems with this register 3857*14b24e2bSVaishali Kulkarni * in B0 emulation; Simply assume no CMT 3858*14b24e2bSVaishali Kulkarni */ 3859*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n"); 3860*14b24e2bSVaishali Kulkarni p_dev->num_hwfns = 1; 3861*14b24e2bSVaishali Kulkarni } 3862*14b24e2bSVaishali Kulkarni #endif 3863*14b24e2bSVaishali Kulkarni 3864*14b24e2bSVaishali Kulkarni p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3865*14b24e2bSVaishali Kulkarni MISCS_REG_CHIP_TEST_REG) >> 4; 3866*14b24e2bSVaishali Kulkarni MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id); 3867*14b24e2bSVaishali Kulkarni p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 
3868*14b24e2bSVaishali Kulkarni MISCS_REG_CHIP_METAL); 3869*14b24e2bSVaishali Kulkarni MASK_FIELD(CHIP_METAL, p_dev->chip_metal); 3870*14b24e2bSVaishali Kulkarni DP_INFO(p_dev->hwfns, 3871*14b24e2bSVaishali Kulkarni "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 3872*14b24e2bSVaishali Kulkarni ECORE_IS_BB(p_dev) ? "BB" : "AH", 3873*14b24e2bSVaishali Kulkarni 'A' + p_dev->chip_rev, (int)p_dev->chip_metal, 3874*14b24e2bSVaishali Kulkarni p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, 3875*14b24e2bSVaishali Kulkarni p_dev->chip_metal); 3876*14b24e2bSVaishali Kulkarni 3877*14b24e2bSVaishali Kulkarni if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) { 3878*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev->hwfns, false, 3879*14b24e2bSVaishali Kulkarni "The chip type/rev (BB A0) is not supported!\n"); 3880*14b24e2bSVaishali Kulkarni return ECORE_ABORTED; 3881*14b24e2bSVaishali Kulkarni } 3882*14b24e2bSVaishali Kulkarni 3883*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 3884*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev)) 3885*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3886*14b24e2bSVaishali Kulkarni MISCS_REG_PLL_MAIN_CTRL_4, 0x1); 3887*14b24e2bSVaishali Kulkarni 3888*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_EMUL(p_dev)) { 3889*14b24e2bSVaishali Kulkarni tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3890*14b24e2bSVaishali Kulkarni MISCS_REG_ECO_RESERVED); 3891*14b24e2bSVaishali Kulkarni if (tmp & (1 << 29)) { 3892*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, "Emulation: Running on a FULL build\n"); 3893*14b24e2bSVaishali Kulkarni p_dev->b_is_emul_full = true; 3894*14b24e2bSVaishali Kulkarni } else { 3895*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, "Emulation: Running on a REDUCED build\n"); 3896*14b24e2bSVaishali Kulkarni } 3897*14b24e2bSVaishali Kulkarni } 3898*14b24e2bSVaishali Kulkarni #endif 3899*14b24e2bSVaishali Kulkarni 3900*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 
3901*14b24e2bSVaishali Kulkarni } 3902*14b24e2bSVaishali Kulkarni 3903*14b24e2bSVaishali Kulkarni #ifndef LINUX_REMOVE 3904*14b24e2bSVaishali Kulkarni void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev) 3905*14b24e2bSVaishali Kulkarni { 3906*14b24e2bSVaishali Kulkarni int j; 3907*14b24e2bSVaishali Kulkarni 3908*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) 3909*14b24e2bSVaishali Kulkarni return; 3910*14b24e2bSVaishali Kulkarni 3911*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, j) { 3912*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 3913*14b24e2bSVaishali Kulkarni 3914*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n"); 3915*14b24e2bSVaishali Kulkarni 3916*14b24e2bSVaishali Kulkarni p_hwfn->hw_init_done = false; 3917*14b24e2bSVaishali Kulkarni p_hwfn->first_on_engine = false; 3918*14b24e2bSVaishali Kulkarni 3919*14b24e2bSVaishali Kulkarni ecore_ptt_invalidate(p_hwfn); 3920*14b24e2bSVaishali Kulkarni } 3921*14b24e2bSVaishali Kulkarni } 3922*14b24e2bSVaishali Kulkarni 3923*14b24e2bSVaishali Kulkarni void ecore_hw_hibernate_resume(struct ecore_dev *p_dev) 3924*14b24e2bSVaishali Kulkarni { 3925*14b24e2bSVaishali Kulkarni int j = 0; 3926*14b24e2bSVaishali Kulkarni 3927*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) 3928*14b24e2bSVaishali Kulkarni return; 3929*14b24e2bSVaishali Kulkarni 3930*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, j) { 3931*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 3932*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 3933*14b24e2bSVaishali Kulkarni 3934*14b24e2bSVaishali Kulkarni ecore_hw_hwfn_prepare(p_hwfn); 3935*14b24e2bSVaishali Kulkarni 3936*14b24e2bSVaishali Kulkarni if (!p_ptt) 3937*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "ptt acquire failed\n"); 3938*14b24e2bSVaishali Kulkarni else { 3939*14b24e2bSVaishali Kulkarni ecore_load_mcp_offsets(p_hwfn, p_ptt); 3940*14b24e2bSVaishali Kulkarni 
ecore_ptt_release(p_hwfn, p_ptt); 3941*14b24e2bSVaishali Kulkarni } 3942*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n"); 3943*14b24e2bSVaishali Kulkarni } 3944*14b24e2bSVaishali Kulkarni } 3945*14b24e2bSVaishali Kulkarni 3946*14b24e2bSVaishali Kulkarni #endif 3947*14b24e2bSVaishali Kulkarni 3948*14b24e2bSVaishali Kulkarni static enum _ecore_status_t ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, 3949*14b24e2bSVaishali Kulkarni void OSAL_IOMEM *p_regview, 3950*14b24e2bSVaishali Kulkarni void OSAL_IOMEM *p_doorbells, 3951*14b24e2bSVaishali Kulkarni struct ecore_hw_prepare_params *p_params) 3952*14b24e2bSVaishali Kulkarni { 3953*14b24e2bSVaishali Kulkarni struct ecore_mdump_retain_data mdump_retain; 3954*14b24e2bSVaishali Kulkarni struct ecore_dev *p_dev = p_hwfn->p_dev; 3955*14b24e2bSVaishali Kulkarni struct ecore_mdump_info mdump_info; 3956*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc = ECORE_SUCCESS; 3957*14b24e2bSVaishali Kulkarni 3958*14b24e2bSVaishali Kulkarni /* Split PCI bars evenly between hwfns */ 3959*14b24e2bSVaishali Kulkarni p_hwfn->regview = p_regview; 3960*14b24e2bSVaishali Kulkarni p_hwfn->doorbells = p_doorbells; 3961*14b24e2bSVaishali Kulkarni 3962*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) 3963*14b24e2bSVaishali Kulkarni return ecore_vf_hw_prepare(p_hwfn); 3964*14b24e2bSVaishali Kulkarni 3965*14b24e2bSVaishali Kulkarni /* Validate that chip access is feasible */ 3966*14b24e2bSVaishali Kulkarni if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 3967*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n"); 3968*14b24e2bSVaishali Kulkarni if (p_params->b_relaxed_probe) 3969*14b24e2bSVaishali Kulkarni p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME; 3970*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 3971*14b24e2bSVaishali Kulkarni } 3972*14b24e2bSVaishali Kulkarni 3973*14b24e2bSVaishali Kulkarni 
get_function_id(p_hwfn); 3974*14b24e2bSVaishali Kulkarni 3975*14b24e2bSVaishali Kulkarni /* Allocate PTT pool */ 3976*14b24e2bSVaishali Kulkarni rc = ecore_ptt_pool_alloc(p_hwfn); 3977*14b24e2bSVaishali Kulkarni if (rc) { 3978*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n"); 3979*14b24e2bSVaishali Kulkarni if (p_params->b_relaxed_probe) 3980*14b24e2bSVaishali Kulkarni p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 3981*14b24e2bSVaishali Kulkarni goto err0; 3982*14b24e2bSVaishali Kulkarni } 3983*14b24e2bSVaishali Kulkarni 3984*14b24e2bSVaishali Kulkarni /* Allocate the main PTT */ 3985*14b24e2bSVaishali Kulkarni p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 3986*14b24e2bSVaishali Kulkarni 3987*14b24e2bSVaishali Kulkarni /* First hwfn learns basic information, e.g., number of hwfns */ 3988*14b24e2bSVaishali Kulkarni if (!p_hwfn->my_id) { 3989*14b24e2bSVaishali Kulkarni rc = ecore_get_dev_info(p_dev); 3990*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 3991*14b24e2bSVaishali Kulkarni if (p_params->b_relaxed_probe) 3992*14b24e2bSVaishali Kulkarni p_params->p_relaxed_res = 3993*14b24e2bSVaishali Kulkarni ECORE_HW_PREPARE_FAILED_DEV; 3994*14b24e2bSVaishali Kulkarni goto err1; 3995*14b24e2bSVaishali Kulkarni } 3996*14b24e2bSVaishali Kulkarni } 3997*14b24e2bSVaishali Kulkarni 3998*14b24e2bSVaishali Kulkarni ecore_hw_hwfn_prepare(p_hwfn); 3999*14b24e2bSVaishali Kulkarni 4000*14b24e2bSVaishali Kulkarni /* Initialize MCP structure */ 4001*14b24e2bSVaishali Kulkarni rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 4002*14b24e2bSVaishali Kulkarni if (rc) { 4003*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n"); 4004*14b24e2bSVaishali Kulkarni if (p_params->b_relaxed_probe) 4005*14b24e2bSVaishali Kulkarni p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4006*14b24e2bSVaishali Kulkarni goto err1; 4007*14b24e2bSVaishali Kulkarni } 4008*14b24e2bSVaishali 
Kulkarni 4009*14b24e2bSVaishali Kulkarni /* Read the device configuration information from the HW and SHMEM */ 4010*14b24e2bSVaishali Kulkarni rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, 4011*14b24e2bSVaishali Kulkarni p_params->personality, p_params); 4012*14b24e2bSVaishali Kulkarni if (rc) { 4013*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "Failed to get HW information\n"); 4014*14b24e2bSVaishali Kulkarni goto err2; 4015*14b24e2bSVaishali Kulkarni } 4016*14b24e2bSVaishali Kulkarni 4017*14b24e2bSVaishali Kulkarni /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is 4018*14b24e2bSVaishali Kulkarni * called, since among others it sets the ports number in an engine. 4019*14b24e2bSVaishali Kulkarni */ 4020*14b24e2bSVaishali Kulkarni if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) && 4021*14b24e2bSVaishali Kulkarni !p_dev->recov_in_prog) { 4022*14b24e2bSVaishali Kulkarni rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 4023*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 4024*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n"); 4025*14b24e2bSVaishali Kulkarni } 4026*14b24e2bSVaishali Kulkarni 4027*14b24e2bSVaishali Kulkarni /* Check if mdump logs/data are present and update the epoch value */ 4028*14b24e2bSVaishali Kulkarni if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) { 4029*14b24e2bSVaishali Kulkarni rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt, 4030*14b24e2bSVaishali Kulkarni &mdump_info); 4031*14b24e2bSVaishali Kulkarni if (rc == ECORE_SUCCESS && mdump_info.num_of_logs) 4032*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 4033*14b24e2bSVaishali Kulkarni "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n"); 4034*14b24e2bSVaishali Kulkarni 4035*14b24e2bSVaishali Kulkarni rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt, 4036*14b24e2bSVaishali Kulkarni &mdump_retain); 4037*14b24e2bSVaishali Kulkarni if (rc == ECORE_SUCCESS 
&& mdump_retain.valid) 4038*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 4039*14b24e2bSVaishali Kulkarni "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n", 4040*14b24e2bSVaishali Kulkarni mdump_retain.epoch, mdump_retain.pf, 4041*14b24e2bSVaishali Kulkarni mdump_retain.status); 4042*14b24e2bSVaishali Kulkarni 4043*14b24e2bSVaishali Kulkarni ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, 4044*14b24e2bSVaishali Kulkarni p_params->epoch); 4045*14b24e2bSVaishali Kulkarni } 4046*14b24e2bSVaishali Kulkarni 4047*14b24e2bSVaishali Kulkarni /* Allocate the init RT array and initialize the init-ops engine */ 4048*14b24e2bSVaishali Kulkarni rc = ecore_init_alloc(p_hwfn); 4049*14b24e2bSVaishali Kulkarni if (rc) { 4050*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n"); 4051*14b24e2bSVaishali Kulkarni if (p_params->b_relaxed_probe) 4052*14b24e2bSVaishali Kulkarni p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4053*14b24e2bSVaishali Kulkarni goto err2; 4054*14b24e2bSVaishali Kulkarni } 4055*14b24e2bSVaishali Kulkarni 4056*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 4057*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_FPGA(p_dev)) { 4058*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 4059*14b24e2bSVaishali Kulkarni "FPGA: workaround; Prevent DMAE parities\n"); 4060*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5, 4061*14b24e2bSVaishali Kulkarni 7); 4062*14b24e2bSVaishali Kulkarni 4063*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 4064*14b24e2bSVaishali Kulkarni "FPGA: workaround: Set VF bar0 size\n"); 4065*14b24e2bSVaishali Kulkarni ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4066*14b24e2bSVaishali Kulkarni PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4); 4067*14b24e2bSVaishali Kulkarni } 4068*14b24e2bSVaishali Kulkarni #endif 4069*14b24e2bSVaishali Kulkarni 4070*14b24e2bSVaishali Kulkarni return rc; 4071*14b24e2bSVaishali Kulkarni err2: 4072*14b24e2bSVaishali Kulkarni if 
(IS_LEAD_HWFN(p_hwfn)) 4073*14b24e2bSVaishali Kulkarni ecore_iov_free_hw_info(p_dev); 4074*14b24e2bSVaishali Kulkarni ecore_mcp_free(p_hwfn); 4075*14b24e2bSVaishali Kulkarni err1: 4076*14b24e2bSVaishali Kulkarni ecore_hw_hwfn_free(p_hwfn); 4077*14b24e2bSVaishali Kulkarni err0: 4078*14b24e2bSVaishali Kulkarni return rc; 4079*14b24e2bSVaishali Kulkarni } 4080*14b24e2bSVaishali Kulkarni 4081*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, 4082*14b24e2bSVaishali Kulkarni struct ecore_hw_prepare_params *p_params) 4083*14b24e2bSVaishali Kulkarni { 4084*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4085*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 4086*14b24e2bSVaishali Kulkarni 4087*14b24e2bSVaishali Kulkarni p_dev->chk_reg_fifo = p_params->chk_reg_fifo; 4088*14b24e2bSVaishali Kulkarni p_dev->allow_mdump = p_params->allow_mdump; 4089*14b24e2bSVaishali Kulkarni 4090*14b24e2bSVaishali Kulkarni if (p_params->b_relaxed_probe) 4091*14b24e2bSVaishali Kulkarni p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; 4092*14b24e2bSVaishali Kulkarni 4093*14b24e2bSVaishali Kulkarni /* Store the precompiled init data ptrs */ 4094*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev)) 4095*14b24e2bSVaishali Kulkarni ecore_init_iro_array(p_dev); 4096*14b24e2bSVaishali Kulkarni 4097*14b24e2bSVaishali Kulkarni /* Initialize the first hwfn - will learn number of hwfns */ 4098*14b24e2bSVaishali Kulkarni rc = ecore_hw_prepare_single(p_hwfn, 4099*14b24e2bSVaishali Kulkarni p_dev->regview, 4100*14b24e2bSVaishali Kulkarni p_dev->doorbells, p_params); 4101*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 4102*14b24e2bSVaishali Kulkarni return rc; 4103*14b24e2bSVaishali Kulkarni 4104*14b24e2bSVaishali Kulkarni p_params->personality = p_hwfn->hw_info.personality; 4105*14b24e2bSVaishali Kulkarni 4106*14b24e2bSVaishali Kulkarni /* initilalize 2nd hwfn if necessary */ 4107*14b24e2bSVaishali Kulkarni if (p_dev->num_hwfns > 1) 
{ 4108*14b24e2bSVaishali Kulkarni void OSAL_IOMEM *p_regview, *p_doorbell; 4109*14b24e2bSVaishali Kulkarni u8 OSAL_IOMEM *addr; 4110*14b24e2bSVaishali Kulkarni 4111*14b24e2bSVaishali Kulkarni /* adjust bar offset for second engine */ 4112*14b24e2bSVaishali Kulkarni addr = (u8 OSAL_IOMEM *)p_dev->regview + 4113*14b24e2bSVaishali Kulkarni ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2; 4114*14b24e2bSVaishali Kulkarni p_regview = (void OSAL_IOMEM *)addr; 4115*14b24e2bSVaishali Kulkarni 4116*14b24e2bSVaishali Kulkarni addr = (u8 OSAL_IOMEM *)p_dev->doorbells + 4117*14b24e2bSVaishali Kulkarni ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2; 4118*14b24e2bSVaishali Kulkarni p_doorbell = (void OSAL_IOMEM *)addr; 4119*14b24e2bSVaishali Kulkarni 4120*14b24e2bSVaishali Kulkarni /* prepare second hw function */ 4121*14b24e2bSVaishali Kulkarni rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, 4122*14b24e2bSVaishali Kulkarni p_doorbell, p_params); 4123*14b24e2bSVaishali Kulkarni 4124*14b24e2bSVaishali Kulkarni /* in case of error, need to free the previously 4125*14b24e2bSVaishali Kulkarni * initiliazed hwfn 0. 
4126*14b24e2bSVaishali Kulkarni */ 4127*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 4128*14b24e2bSVaishali Kulkarni if (p_params->b_relaxed_probe) 4129*14b24e2bSVaishali Kulkarni p_params->p_relaxed_res = 4130*14b24e2bSVaishali Kulkarni ECORE_HW_PREPARE_FAILED_ENG2; 4131*14b24e2bSVaishali Kulkarni 4132*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev)) { 4133*14b24e2bSVaishali Kulkarni ecore_init_free(p_hwfn); 4134*14b24e2bSVaishali Kulkarni ecore_mcp_free(p_hwfn); 4135*14b24e2bSVaishali Kulkarni ecore_hw_hwfn_free(p_hwfn); 4136*14b24e2bSVaishali Kulkarni } else { 4137*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev, true, "What do we need to free when VF hwfn1 init fails\n"); 4138*14b24e2bSVaishali Kulkarni } 4139*14b24e2bSVaishali Kulkarni return rc; 4140*14b24e2bSVaishali Kulkarni } 4141*14b24e2bSVaishali Kulkarni } 4142*14b24e2bSVaishali Kulkarni 4143*14b24e2bSVaishali Kulkarni return rc; 4144*14b24e2bSVaishali Kulkarni } 4145*14b24e2bSVaishali Kulkarni 4146*14b24e2bSVaishali Kulkarni void ecore_hw_remove(struct ecore_dev *p_dev) 4147*14b24e2bSVaishali Kulkarni { 4148*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4149*14b24e2bSVaishali Kulkarni int i; 4150*14b24e2bSVaishali Kulkarni 4151*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev)) 4152*14b24e2bSVaishali Kulkarni ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 4153*14b24e2bSVaishali Kulkarni ECORE_OV_DRIVER_STATE_NOT_LOADED); 4154*14b24e2bSVaishali Kulkarni 4155*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 4156*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 4157*14b24e2bSVaishali Kulkarni 4158*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) { 4159*14b24e2bSVaishali Kulkarni ecore_vf_pf_release(p_hwfn); 4160*14b24e2bSVaishali Kulkarni continue; 4161*14b24e2bSVaishali Kulkarni } 4162*14b24e2bSVaishali Kulkarni 4163*14b24e2bSVaishali Kulkarni ecore_init_free(p_hwfn); 4164*14b24e2bSVaishali Kulkarni ecore_hw_hwfn_free(p_hwfn); 
4165*14b24e2bSVaishali Kulkarni ecore_mcp_free(p_hwfn); 4166*14b24e2bSVaishali Kulkarni 4167*14b24e2bSVaishali Kulkarni OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex); 4168*14b24e2bSVaishali Kulkarni } 4169*14b24e2bSVaishali Kulkarni 4170*14b24e2bSVaishali Kulkarni ecore_iov_free_hw_info(p_dev); 4171*14b24e2bSVaishali Kulkarni } 4172*14b24e2bSVaishali Kulkarni 4173*14b24e2bSVaishali Kulkarni static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev, 4174*14b24e2bSVaishali Kulkarni struct ecore_chain *p_chain) 4175*14b24e2bSVaishali Kulkarni { 4176*14b24e2bSVaishali Kulkarni void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL; 4177*14b24e2bSVaishali Kulkarni dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 4178*14b24e2bSVaishali Kulkarni struct ecore_chain_next *p_next; 4179*14b24e2bSVaishali Kulkarni u32 size, i; 4180*14b24e2bSVaishali Kulkarni 4181*14b24e2bSVaishali Kulkarni if (!p_virt) 4182*14b24e2bSVaishali Kulkarni return; 4183*14b24e2bSVaishali Kulkarni 4184*14b24e2bSVaishali Kulkarni size = p_chain->elem_size * p_chain->usable_per_page; 4185*14b24e2bSVaishali Kulkarni 4186*14b24e2bSVaishali Kulkarni for (i = 0; i < p_chain->page_cnt; i++) { 4187*14b24e2bSVaishali Kulkarni if (!p_virt) 4188*14b24e2bSVaishali Kulkarni break; 4189*14b24e2bSVaishali Kulkarni 4190*14b24e2bSVaishali Kulkarni p_next = (struct ecore_chain_next *)((u8 *)p_virt + size); 4191*14b24e2bSVaishali Kulkarni p_virt_next = p_next->next_virt; 4192*14b24e2bSVaishali Kulkarni p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 4193*14b24e2bSVaishali Kulkarni 4194*14b24e2bSVaishali Kulkarni OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys, 4195*14b24e2bSVaishali Kulkarni ECORE_CHAIN_PAGE_SIZE); 4196*14b24e2bSVaishali Kulkarni 4197*14b24e2bSVaishali Kulkarni p_virt = p_virt_next; 4198*14b24e2bSVaishali Kulkarni p_phys = p_phys_next; 4199*14b24e2bSVaishali Kulkarni } 4200*14b24e2bSVaishali Kulkarni } 4201*14b24e2bSVaishali Kulkarni 4202*14b24e2bSVaishali Kulkarni static void 
ecore_chain_free_single(struct ecore_dev *p_dev, 4203*14b24e2bSVaishali Kulkarni struct ecore_chain *p_chain) 4204*14b24e2bSVaishali Kulkarni { 4205*14b24e2bSVaishali Kulkarni if (!p_chain->p_virt_addr) 4206*14b24e2bSVaishali Kulkarni return; 4207*14b24e2bSVaishali Kulkarni 4208*14b24e2bSVaishali Kulkarni OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr, 4209*14b24e2bSVaishali Kulkarni p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE); 4210*14b24e2bSVaishali Kulkarni } 4211*14b24e2bSVaishali Kulkarni 4212*14b24e2bSVaishali Kulkarni static void ecore_chain_free_pbl(struct ecore_dev *p_dev, 4213*14b24e2bSVaishali Kulkarni struct ecore_chain *p_chain) 4214*14b24e2bSVaishali Kulkarni { 4215*14b24e2bSVaishali Kulkarni void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; 4216*14b24e2bSVaishali Kulkarni u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table; 4217*14b24e2bSVaishali Kulkarni u32 page_cnt = p_chain->page_cnt, i, pbl_size; 4218*14b24e2bSVaishali Kulkarni 4219*14b24e2bSVaishali Kulkarni if (!pp_virt_addr_tbl) 4220*14b24e2bSVaishali Kulkarni return; 4221*14b24e2bSVaishali Kulkarni 4222*14b24e2bSVaishali Kulkarni if (!p_pbl_virt) 4223*14b24e2bSVaishali Kulkarni goto out; 4224*14b24e2bSVaishali Kulkarni 4225*14b24e2bSVaishali Kulkarni for (i = 0; i < page_cnt; i++) { 4226*14b24e2bSVaishali Kulkarni if (!pp_virt_addr_tbl[i]) 4227*14b24e2bSVaishali Kulkarni break; 4228*14b24e2bSVaishali Kulkarni 4229*14b24e2bSVaishali Kulkarni OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i], 4230*14b24e2bSVaishali Kulkarni *(dma_addr_t *)p_pbl_virt, 4231*14b24e2bSVaishali Kulkarni ECORE_CHAIN_PAGE_SIZE); 4232*14b24e2bSVaishali Kulkarni 4233*14b24e2bSVaishali Kulkarni p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4234*14b24e2bSVaishali Kulkarni } 4235*14b24e2bSVaishali Kulkarni 4236*14b24e2bSVaishali Kulkarni pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4237*14b24e2bSVaishali Kulkarni 4238*14b24e2bSVaishali Kulkarni if (!p_chain->b_external_pbl) { 4239*14b24e2bSVaishali Kulkarni 
OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table, 4240*14b24e2bSVaishali Kulkarni p_chain->pbl_sp.p_phys_table, pbl_size); 4241*14b24e2bSVaishali Kulkarni } 4242*14b24e2bSVaishali Kulkarni out: 4243*14b24e2bSVaishali Kulkarni OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl); 4244*14b24e2bSVaishali Kulkarni p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL; 4245*14b24e2bSVaishali Kulkarni } 4246*14b24e2bSVaishali Kulkarni 4247*14b24e2bSVaishali Kulkarni void ecore_chain_free(struct ecore_dev *p_dev, 4248*14b24e2bSVaishali Kulkarni struct ecore_chain *p_chain) 4249*14b24e2bSVaishali Kulkarni { 4250*14b24e2bSVaishali Kulkarni switch (p_chain->mode) { 4251*14b24e2bSVaishali Kulkarni case ECORE_CHAIN_MODE_NEXT_PTR: 4252*14b24e2bSVaishali Kulkarni ecore_chain_free_next_ptr(p_dev, p_chain); 4253*14b24e2bSVaishali Kulkarni break; 4254*14b24e2bSVaishali Kulkarni case ECORE_CHAIN_MODE_SINGLE: 4255*14b24e2bSVaishali Kulkarni ecore_chain_free_single(p_dev, p_chain); 4256*14b24e2bSVaishali Kulkarni break; 4257*14b24e2bSVaishali Kulkarni case ECORE_CHAIN_MODE_PBL: 4258*14b24e2bSVaishali Kulkarni ecore_chain_free_pbl(p_dev, p_chain); 4259*14b24e2bSVaishali Kulkarni break; 4260*14b24e2bSVaishali Kulkarni } 4261*14b24e2bSVaishali Kulkarni } 4262*14b24e2bSVaishali Kulkarni 4263*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 4264*14b24e2bSVaishali Kulkarni ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev, 4265*14b24e2bSVaishali Kulkarni enum ecore_chain_cnt_type cnt_type, 4266*14b24e2bSVaishali Kulkarni osal_size_t elem_size, u32 page_cnt) 4267*14b24e2bSVaishali Kulkarni { 4268*14b24e2bSVaishali Kulkarni u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 4269*14b24e2bSVaishali Kulkarni 4270*14b24e2bSVaishali Kulkarni /* The actual chain size can be larger than the maximal possible value 4271*14b24e2bSVaishali Kulkarni * after rounding up the requested elements number to pages, and after 4272*14b24e2bSVaishali Kulkarni * taking into acount the unusuable elements 
(next-ptr elements). 4273*14b24e2bSVaishali Kulkarni * The size of a "u16" chain can be (U16_MAX + 1) since the chain 4274*14b24e2bSVaishali Kulkarni * size/capacity fields are of a u32 type. 4275*14b24e2bSVaishali Kulkarni */ 4276*14b24e2bSVaishali Kulkarni if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 && 4277*14b24e2bSVaishali Kulkarni chain_size > ((u32)ECORE_U16_MAX + 1)) || 4278*14b24e2bSVaishali Kulkarni (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 && 4279*14b24e2bSVaishali Kulkarni chain_size > ECORE_U32_MAX)) { 4280*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev, true, 4281*14b24e2bSVaishali Kulkarni "The actual chain size (0x%llx) is larger than the maximal possible value\n", 4282*14b24e2bSVaishali Kulkarni chain_size); 4283*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 4284*14b24e2bSVaishali Kulkarni } 4285*14b24e2bSVaishali Kulkarni 4286*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 4287*14b24e2bSVaishali Kulkarni } 4288*14b24e2bSVaishali Kulkarni 4289*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 4290*14b24e2bSVaishali Kulkarni ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4291*14b24e2bSVaishali Kulkarni { 4292*14b24e2bSVaishali Kulkarni void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL; 4293*14b24e2bSVaishali Kulkarni dma_addr_t p_phys = 0; 4294*14b24e2bSVaishali Kulkarni u32 i; 4295*14b24e2bSVaishali Kulkarni 4296*14b24e2bSVaishali Kulkarni for (i = 0; i < p_chain->page_cnt; i++) { 4297*14b24e2bSVaishali Kulkarni p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4298*14b24e2bSVaishali Kulkarni ECORE_CHAIN_PAGE_SIZE); 4299*14b24e2bSVaishali Kulkarni if (!p_virt) { 4300*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev, true, 4301*14b24e2bSVaishali Kulkarni "Failed to allocate chain memory\n"); 4302*14b24e2bSVaishali Kulkarni return ECORE_NOMEM; 4303*14b24e2bSVaishali Kulkarni } 4304*14b24e2bSVaishali Kulkarni 4305*14b24e2bSVaishali Kulkarni if (i == 0) { 4306*14b24e2bSVaishali Kulkarni ecore_chain_init_mem(p_chain, p_virt, 
p_phys); 4307*14b24e2bSVaishali Kulkarni ecore_chain_reset(p_chain); 4308*14b24e2bSVaishali Kulkarni } else { 4309*14b24e2bSVaishali Kulkarni ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4310*14b24e2bSVaishali Kulkarni p_virt, p_phys); 4311*14b24e2bSVaishali Kulkarni } 4312*14b24e2bSVaishali Kulkarni 4313*14b24e2bSVaishali Kulkarni p_virt_prev = p_virt; 4314*14b24e2bSVaishali Kulkarni } 4315*14b24e2bSVaishali Kulkarni /* Last page's next element should point to the beginning of the 4316*14b24e2bSVaishali Kulkarni * chain. 4317*14b24e2bSVaishali Kulkarni */ 4318*14b24e2bSVaishali Kulkarni ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4319*14b24e2bSVaishali Kulkarni p_chain->p_virt_addr, 4320*14b24e2bSVaishali Kulkarni p_chain->p_phys_addr); 4321*14b24e2bSVaishali Kulkarni 4322*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 4323*14b24e2bSVaishali Kulkarni } 4324*14b24e2bSVaishali Kulkarni 4325*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 4326*14b24e2bSVaishali Kulkarni ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4327*14b24e2bSVaishali Kulkarni { 4328*14b24e2bSVaishali Kulkarni dma_addr_t p_phys = 0; 4329*14b24e2bSVaishali Kulkarni void *p_virt = OSAL_NULL; 4330*14b24e2bSVaishali Kulkarni 4331*14b24e2bSVaishali Kulkarni p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); 4332*14b24e2bSVaishali Kulkarni if (!p_virt) { 4333*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n"); 4334*14b24e2bSVaishali Kulkarni return ECORE_NOMEM; 4335*14b24e2bSVaishali Kulkarni } 4336*14b24e2bSVaishali Kulkarni 4337*14b24e2bSVaishali Kulkarni ecore_chain_init_mem(p_chain, p_virt, p_phys); 4338*14b24e2bSVaishali Kulkarni ecore_chain_reset(p_chain); 4339*14b24e2bSVaishali Kulkarni 4340*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 4341*14b24e2bSVaishali Kulkarni } 4342*14b24e2bSVaishali Kulkarni 4343*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 
ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain,
		      struct ecore_chain_ext_pbl *ext_pbl)
{
	void *p_virt = OSAL_NULL;
	u8 *p_pbl_virt = OSAL_NULL;
	void **pp_virt_addr_tbl = OSAL_NULL;
	dma_addr_t p_phys = 0, p_pbl_phys = 0;
	u32 page_cnt = p_chain->page_cnt, size, i;

	/* Table of per-page virtual addresses; kept alongside the PBL so the
	 * driver can walk pages by CPU pointer, not only by DMA address.
	 */
	size = page_cnt * sizeof(*pp_virt_addr_tbl);
	pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
	if (!pp_virt_addr_tbl) {
		DP_NOTICE(p_dev, true,
			  "Failed to allocate memory for the chain virtual addresses table\n");
		return ECORE_NOMEM;
	}

	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be successive.
	 * ecore_chain_init_pbl_mem() is called even in a case of an allocation
	 * failure, since pp_virt_addr_tbl was previously allocated, and it
	 * should be saved to allow its freeing during the error flow.
	 */
	size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;

	if (ext_pbl == OSAL_NULL) {
		p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
	} else {
		/* Caller supplied an externally managed PBL; record that so
		 * the free path does not release memory it does not own.
		 */
		p_pbl_virt = ext_pbl->p_pbl_virt;
		p_pbl_phys = ext_pbl->p_pbl_phys;
		p_chain->b_external_pbl = true;
	}

	/* Saved unconditionally (see comment above) so the error flow in the
	 * caller can free pp_virt_addr_tbl even when p_pbl_virt is NULL.
	 */
	ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
				 pp_virt_addr_tbl);
	if (!p_pbl_virt) {
		DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
		return ECORE_NOMEM;
	}

	/* Allocate the chain pages themselves and publish each page's DMA
	 * address into the PBL. On mid-loop failure we return without
	 * unwinding here; pages already recorded in pp_virt_addr_tbl are
	 * reclaimed by ecore_chain_free() in the caller's error flow.
	 */
	for (i = 0; i < page_cnt; i++) {
		p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
						 ECORE_CHAIN_PAGE_SIZE);
		if (!p_virt) {
			DP_NOTICE(p_dev, true,
				  "Failed to allocate chain memory\n");
			return ECORE_NOMEM;
		}

		if (i == 0) {
			ecore_chain_init_mem(p_chain, p_virt, p_phys);
			ecore_chain_reset(p_chain);
		}

		/* Fill the PBL table with the physical address of the page */
		*(dma_addr_t *)p_pbl_virt = p_phys;
		/* Keep the virtual address of the page */
		p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;

		p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
	}

	return ECORE_SUCCESS;
}

/*
 * Allocate a chain of elements for the given use mode.
 * Computes the page count from num_elems/elem_size (a single page for
 * SINGLE mode), validates the parameters, then dispatches to the
 * mode-specific allocator. On any allocation failure the partially built
 * chain is released via ecore_chain_free() before returning the error.
 */
enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
				       enum ecore_chain_use_mode intended_use,
				       enum ecore_chain_mode mode,
				       enum ecore_chain_cnt_type cnt_type,
				       u32 num_elems, osal_size_t elem_size,
				       struct ecore_chain *p_chain,
				       struct ecore_chain_ext_pbl *ext_pbl)
{
	u32 page_cnt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (mode == ECORE_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
					    page_cnt);
	if (rc) {
		DP_NOTICE(p_dev, true,
			  "Cannot allocate a chain with the given arguments:\n"
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  intended_use, mode, cnt_type, num_elems, elem_size);
		return rc;
	}

	ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
				mode, cnt_type, p_dev->dp_ctx);

	switch (mode) {
	case ECORE_CHAIN_MODE_NEXT_PTR:
		rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
		break;
	case ECORE_CHAIN_MODE_SINGLE:
		rc = ecore_chain_alloc_single(p_dev, p_chain);
		break;
	case ECORE_CHAIN_MODE_PBL:
		rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
		break;
	}
	if (rc)
		goto nomem;

	return ECORE_SUCCESS;

nomem:
	ecore_chain_free(p_dev, p_chain);
	return rc;
}

/*
 * Translate a relative L2 queue index into the absolute queue id for this
 * hw-function, using the resource window [RESC_START, RESC_START+RESC_NUM).
 * Returns ECORE_INVAL when src_id is outside the function's allocation.
 */
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
		u16 min, max;

		min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
		/* NOTE(review): max here is min + count, i.e. one past the
		 * last valid index, yet the message prints "[min - max]" as
		 * if inclusive — looks like an off-by-one in the diagnostic;
		 * confirm intended range before changing.
		 */
		max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
		DP_NOTICE(p_hwfn, true, "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return ECORE_INVAL;
	}

	*dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;

	return ECORE_SUCCESS;
}

/*
 * Translate a relative vport index into the absolute vport id for this
 * hw-function (same resource-window scheme as ecore_fw_l2_queue()).
 */
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
		/* NOTE(review): same suspected off-by-one in the reported
		 * upper bound as in ecore_fw_l2_queue().
		 */
		max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
		DP_NOTICE(p_hwfn, true, "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return ECORE_INVAL;
	}

	*dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;

	return ECORE_SUCCESS;
}

/*
 * Translate a relative RSS engine index into the absolute engine id for
 * this hw-function (same resource-window scheme as above).
 */
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
		/* NOTE(review): same suspected off-by-one in the reported
		 * upper bound as in ecore_fw_l2_queue().
		 */
		max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
		DP_NOTICE(p_hwfn, true, "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return ECORE_INVAL;
	}

	*dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;

	return ECORE_SUCCESS;
}

/*
 * BB/AH implementation: program a MAC filter into the first free NIG LLH
 * function-filter slot. Each slot i has a pair of VALUE registers (low at
 * 2*i, high at 2*i+1), a MODE register (0 = MAC filter), a PROTOCOL_TYPE
 * register, and an EN register. EN is written last so the filter only
 * becomes visible to hardware once fully programmed.
 * On success *p_entry_num holds the slot used; returns ECORE_NORESOURCES
 * when all slots are occupied.
 */
static enum _ecore_status_t
ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u32 high, u32 low,
			       u32 *p_entry_num)
{
	u32 en;
	int i;

	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = ecore_rd(p_hwfn, p_ptt,
			      NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
			      i * sizeof(u32));
		if (en)
			continue;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 2 * i * sizeof(u32), low);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 (2 * i + 1) * sizeof(u32), high);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
			 i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
			 i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
			 i * sizeof(u32), 1);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		return ECORE_NORESOURCES;

	*p_entry_num = i;

	return ECORE_SUCCESS;
}

/* E5 variant not yet implemented (placeholder). */
static enum _ecore_status_t
ecore_llh_add_mac_filter_e5(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			    u32 high, u32 low, u32 *p_entry_num)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}

/*
 * Add a MAC filter to the NIG LLH for this function.
 * p_filter points at a 6-byte MAC address; bytes 0-1 are packed into the
 * "high" word and bytes 2-5 into the "low" word before dispatching to the
 * chip-specific helper. A no-op (success) unless the device is in an MF
 * mode that uses LLH filtering (SI or DEFAULT).
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt, u8 *p_filter)
{
	u32 high, low, entry_num;
	enum _ecore_status_t rc;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return ECORE_SUCCESS;

	high = p_filter[1] | (p_filter[0] << 8);
	low = p_filter[5] | (p_filter[4] << 8) |
	      (p_filter[3] << 16) | (p_filter[2] << 24);

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
		rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low,
						    &entry_num);
	else /* E5 */
		rc = ecore_llh_add_mac_filter_e5(p_hwfn, p_ptt, high, low,
						 &entry_num);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to find an empty LLH filter to utilize\n");
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n",
		   p_filter[0], p_filter[1], p_filter[2], p_filter[3],
		   p_filter[4], p_filter[5], entry_num);

	return ECORE_SUCCESS;
}

/*
 * BB/AH implementation: locate the LLH slot whose VALUE pair matches
 * high/low and clear it (EN first, so hardware stops matching before the
 * value registers are zeroed). Returns ECORE_INVAL if no slot matches;
 * otherwise *p_entry_num is the slot that was cleared.
 */
static enum _ecore_status_t
ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt, u32 high, u32 low,
				  u32 *p_entry_num)
{
	int i;

	/* Find the entry and clean it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (ecore_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			     2 * i * sizeof(u32)) != low)
			continue;
		if (ecore_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			     (2 * i + 1) * sizeof(u32)) != high)
			continue;

		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 2 * i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 (2 * i + 1) * sizeof(u32), 0);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		return ECORE_INVAL;

	*p_entry_num = i;

	return ECORE_SUCCESS;
}

/* E5 variant not yet implemented (placeholder). */
static enum _ecore_status_t
ecore_llh_remove_mac_filter_e5(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u32 high, u32 low,
			       u32 *p_entry_num)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}

/*
 * Remove a previously added MAC filter from the NIG LLH.
 * Packs the 6-byte MAC exactly as ecore_llh_add_mac_filter() does, then
 * asks the chip-specific helper to find and clear the matching slot.
 * Removal of a filter that was never configured only logs a notice.
 */
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt, u8 *p_filter)
{
	u32 high, low, entry_num;
	enum _ecore_status_t rc;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	high = p_filter[1] | (p_filter[0] << 8);
	low = p_filter[5] | (p_filter[4] << 8) |
	      (p_filter[3] << 16) | (p_filter[2] << 24);

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
		rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high,
						       low, &entry_num);
	else /* E5 */
		rc = ecore_llh_remove_mac_filter_e5(p_hwfn, p_ptt, high, low,
						    &entry_num);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Tried to remove a non-configured filter [MAC %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx]\n",
			  p_filter[0], p_filter[1], p_filter[2], p_filter[3],
			  p_filter[4], p_filter[5]);
		return;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n",
		   p_filter[0], p_filter[1], p_filter[2], p_filter[3],
		   p_filter[4], p_filter[5], entry_num);
}

/*
 * BB/AH implementation: program a protocol (ethertype/port) filter into
 * the first free NIG LLH slot. Differs from the MAC variant in that MODE
 * is set to 1 (protocol filtering) and PROTOCOL_TYPE carries a one-hot
 * bit for the requested filter type. EN is written last, as above.
 */
static enum _ecore_status_t
ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    enum ecore_llh_port_filter_type_t type,
				    u32 high, u32 low, u32 *p_entry_num)
{
	u32 en;
	int i;

	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = ecore_rd(p_hwfn, p_ptt,
			      NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
			      i * sizeof(u32));
		if (en)
			continue;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 2 * i * sizeof(u32), low);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 (2 * i + 1) * sizeof(u32), high);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
			 i * sizeof(u32), 1);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
			 i * sizeof(u32), 1 << type);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		return ECORE_NORESOURCES;

	*p_entry_num = i;

	return ECORE_SUCCESS;
}

/* E5 variant not yet implemented (placeholder). */
static enum _ecore_status_t
ecore_llh_add_protocol_filter_e5(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 enum ecore_llh_port_filter_type_t type,
				 u32 high, u32 low, u32 *p_entry_num)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}

/*
 * Add a protocol filter (ethertype, TCP/UDP src/dst port, or src+dst
 * port pair) to the NIG LLH for this function.
 * The filter value is packed into high/low per type: ethertype goes in
 * "high"; a source port occupies low[31:16]; a destination port occupies
 * low[15:0]. A no-op (success) unless the device is in an MF mode that
 * uses LLH filtering (SI or DEFAULT). Returns ECORE_INVAL for an unknown
 * type, or the helper's error when no free slot exists.
 */
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u16 source_port_or_eth_type,
			      u16 dest_port,
			      enum ecore_llh_port_filter_type_t type)
{
	u32 high, low, entry_num;
	enum _ecore_status_t rc;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return ECORE_SUCCESS;

	high = 0;
	low = 0;
	switch (type) {
	case ECORE_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case ECORE_LLH_FILTER_TCP_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Non valid LLH protocol filter type %d\n", type);
		return ECORE_INVAL;
	}

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
		rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
							 high, low, &entry_num);
	else /* E5 */
		rc = ecore_llh_add_protocol_filter_e5(p_hwfn, p_ptt, type, high,
						      low, &entry_num);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to find an empty LLH filter to utilize\n");
		return rc;
	}

	/* Per-type verbose logging only; no functional difference below. */
	switch (type) {
	case ECORE_LLH_FILTER_ETHERTYPE:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "ETH type %x is added at %d\n",
			   source_port_or_eth_type, entry_num);
		break;
	case ECORE_LLH_FILTER_TCP_SRC_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "TCP src port %x is added at %d\n",
			   source_port_or_eth_type, entry_num);
		break;
	case ECORE_LLH_FILTER_UDP_SRC_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "UDP src port %x is added at %d\n",
			   source_port_or_eth_type, entry_num);
		break;
	case ECORE_LLH_FILTER_TCP_DEST_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "TCP dst port %x is added at %d\n",
			   dest_port, entry_num);
		break;
	case ECORE_LLH_FILTER_UDP_DEST_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "UDP dst port %x is added at %d\n",
			   dest_port, entry_num);
		break;
	case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "TCP src/dst ports %x/%x are added at %d\n",
			   source_port_or_eth_type, dest_port, entry_num);
		break;
	case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "UDP src/dst ports %x/%x are added at %d\n",
			   source_port_or_eth_type, dest_port, entry_num);
		break;
	}

	return ECORE_SUCCESS;
}

/*
 * BB/AH implementation: locate the enabled protocol-filter slot whose
 * MODE, PROTOCOL_TYPE bit and VALUE pair all match, and clear it.
 * EN is cleared first so hardware stops matching before the remaining
 * registers are zeroed. Returns ECORE_INVAL when no slot matches;
 * otherwise *p_entry_num is the slot that was cleared.
 */
static enum _ecore_status_t
ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       enum ecore_llh_port_filter_type_t type,
				       u32 high, u32 low, u32 *p_entry_num)
{
	int i;

	/* Find the entry and clean it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!ecore_rd(p_hwfn, p_ptt,
			      NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
			      i * sizeof(u32)))
			continue;
		if (!ecore_rd(p_hwfn, p_ptt,
			      NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
			      i * sizeof(u32)))
			continue;
		if (!(ecore_rd(p_hwfn, p_ptt,
			       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
			       i * sizeof(u32)) & (1 << type)))
			continue;
		if (ecore_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			     2 * i * sizeof(u32)) != low)
			continue;
		if (ecore_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			     (2 * i + 1) * sizeof(u32)) != high)
			continue;

		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
			 i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
			 i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 2 * i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 (2 * i + 1) * sizeof(u32), 0);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		return ECORE_INVAL;

	*p_entry_num = i;

	return ECORE_SUCCESS;
}

/* E5 variant not yet implemented (placeholder). */
static enum _ecore_status_t
ecore_llh_remove_protocol_filter_e5(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    enum ecore_llh_port_filter_type_t type,
				    u32 high, u32 low, u32 *p_entry_num)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}

/*
 * Remove a previously added protocol filter from the NIG LLH.
 * Re-packs high/low exactly as ecore_llh_add_protocol_filter() does, then
 * asks the chip-specific helper to find and clear the matching slot.
 * Removal of a filter that was never configured only logs a notice.
 */
void
ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 source_port_or_eth_type,
				 u16 dest_port,
				 enum ecore_llh_port_filter_type_t type)
{
	u32 high, low, entry_num;
	enum _ecore_status_t rc;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	high = 0;
	low = 0;
	switch (type) {
	case ECORE_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case ECORE_LLH_FILTER_TCP_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Non valid LLH protocol filter type %d\n", type);
		return;
	}

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
		rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
							    high, low,
							    &entry_num);
	else /* E5 */
		rc = ecore_llh_remove_protocol_filter_e5(p_hwfn, p_ptt, type,
							 high, low, &entry_num);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n",
			  type, source_port_or_eth_type, dest_port);
		return;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n",
		   type, source_port_or_eth_type, dest_port, entry_num);
}

/*
 * BB/AH implementation: disable every LLH function-filter slot and zero
 * its VALUE register pair.
 */
static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	int i;

	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
			 i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 2 * i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 (2 * i + 1) * sizeof(u32), 0);
	}
}

/* E5 variant not yet implemented (placeholder). */
static void ecore_llh_clear_all_filters_e5(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	ECORE_E5_MISSING_CODE;
}

/*
 * Clear all LLH filters for this function. No-op unless the device is in
 * an MF mode that uses LLH filtering (SI or DEFAULT).
 */
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt)
{
	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
		ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt);
	else /* E5 */
		ecore_llh_clear_all_filters_e5(p_hwfn, p_ptt);
}

/*
 * Mark this PF as the default function in the NIG LLH (BB in MF-DEFAULT
 * mode only). The written bit is 1 << (abs_pf_id / 2) — presumably the
 * per-port position on BB, where two PFs share a port; confirm against
 * the NIG register spec. Returns ECORE_INVAL for any other chip/mode.
 */
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) {
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
			 1 << p_hwfn->abs_pf_id / 2);
		ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0);
		return ECORE_SUCCESS;
	} else {
		DP_NOTICE(p_hwfn, false,
			  "This function can't be set as default\n");
		return ECORE_INVAL;
	}
}

/*
 * Write a coalescing timeset into an ETH queue-zone in storm RAM.
 * Builds a coalescing_timeset with TIMESET=timeset and VALID=1 inside the
 * caller-supplied (zeroed) queue-zone buffer, then copies the whole
 * eth_qzone_size bytes to hw_addr. Fails with ECORE_INVAL when interrupt
 * coalescing is not enabled on the device.
 */
static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt,
					       u32 hw_addr, void *p_eth_qzone,
					       osal_size_t eth_qzone_size,
					       u8 timeset)
{
	struct coalescing_timeset *p_coal_timeset;

	if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
		DP_NOTICE(p_hwfn, true,
			  "Coalescing configuration not enabled\n");
		return ECORE_INVAL;
	}

	p_coal_timeset = p_eth_qzone;
	OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
	ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);

	return ECORE_SUCCESS;
}

/*
 * Set RX/TX interrupt coalescing (in usecs) for the queue identified by
 * p_handle (an ecore_queue_cid). A zero value leaves that direction
 * untouched. VFs delegate to the PF over the VF-PF channel; PFs program
 * the queue zones directly and cache the values device-wide.
 */
enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
					      u16 rx_coal, u16 tx_coal,
					      void *p_handle)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_ptt *p_ptt;

	/* TODO - Configuring a single queue's coalescing but
	 * claiming all queues are abiding same configuration
	 * for PF and VF both.
	 */

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
						tx_coal, p_cid);

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	if (rx_coal) {
		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
		if (rc)
			goto out;
		p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
	}

	if (tx_coal) {
		rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
		if (rc)
			goto out;
		p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
	}
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

/* Program the RX coalescing timeset for a single queue. (Definition
 * continues beyond this chunk of the file.)
 */
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u16 coalesce,
					    struct ecore_queue_cid *p_cid)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u32 address;
	enum
_ecore_status_t rc; 5070*14b24e2bSVaishali Kulkarni 5071*14b24e2bSVaishali Kulkarni /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5072*14b24e2bSVaishali Kulkarni if (coalesce <= 0x7F) 5073*14b24e2bSVaishali Kulkarni timer_res = 0; 5074*14b24e2bSVaishali Kulkarni else if (coalesce <= 0xFF) 5075*14b24e2bSVaishali Kulkarni timer_res = 1; 5076*14b24e2bSVaishali Kulkarni else if (coalesce <= 0x1FF) 5077*14b24e2bSVaishali Kulkarni timer_res = 2; 5078*14b24e2bSVaishali Kulkarni else { 5079*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5080*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 5081*14b24e2bSVaishali Kulkarni } 5082*14b24e2bSVaishali Kulkarni timeset = (u8)(coalesce >> timer_res); 5083*14b24e2bSVaishali Kulkarni 5084*14b24e2bSVaishali Kulkarni rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5085*14b24e2bSVaishali Kulkarni p_cid->sb_igu_id, false); 5086*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 5087*14b24e2bSVaishali Kulkarni goto out; 5088*14b24e2bSVaishali Kulkarni 5089*14b24e2bSVaishali Kulkarni address = BAR0_MAP_REG_USDM_RAM + 5090*14b24e2bSVaishali Kulkarni USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5091*14b24e2bSVaishali Kulkarni 5092*14b24e2bSVaishali Kulkarni rc = ecore_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 5093*14b24e2bSVaishali Kulkarni sizeof(struct ustorm_eth_queue_zone), timeset); 5094*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 5095*14b24e2bSVaishali Kulkarni goto out; 5096*14b24e2bSVaishali Kulkarni 5097*14b24e2bSVaishali Kulkarni out: 5098*14b24e2bSVaishali Kulkarni return rc; 5099*14b24e2bSVaishali Kulkarni } 5100*14b24e2bSVaishali Kulkarni 5101*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, 5102*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 5103*14b24e2bSVaishali Kulkarni u16 coalesce, 5104*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid) 5105*14b24e2bSVaishali Kulkarni { 
5106*14b24e2bSVaishali Kulkarni struct xstorm_eth_queue_zone eth_qzone; 5107*14b24e2bSVaishali Kulkarni u8 timeset, timer_res; 5108*14b24e2bSVaishali Kulkarni u32 address; 5109*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 5110*14b24e2bSVaishali Kulkarni 5111*14b24e2bSVaishali Kulkarni /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5112*14b24e2bSVaishali Kulkarni if (coalesce <= 0x7F) 5113*14b24e2bSVaishali Kulkarni timer_res = 0; 5114*14b24e2bSVaishali Kulkarni else if (coalesce <= 0xFF) 5115*14b24e2bSVaishali Kulkarni timer_res = 1; 5116*14b24e2bSVaishali Kulkarni else if (coalesce <= 0x1FF) 5117*14b24e2bSVaishali Kulkarni timer_res = 2; 5118*14b24e2bSVaishali Kulkarni else { 5119*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5120*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 5121*14b24e2bSVaishali Kulkarni } 5122*14b24e2bSVaishali Kulkarni timeset = (u8)(coalesce >> timer_res); 5123*14b24e2bSVaishali Kulkarni 5124*14b24e2bSVaishali Kulkarni rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5125*14b24e2bSVaishali Kulkarni p_cid->sb_igu_id, true); 5126*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 5127*14b24e2bSVaishali Kulkarni goto out; 5128*14b24e2bSVaishali Kulkarni 5129*14b24e2bSVaishali Kulkarni address = BAR0_MAP_REG_XSDM_RAM + 5130*14b24e2bSVaishali Kulkarni XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5131*14b24e2bSVaishali Kulkarni 5132*14b24e2bSVaishali Kulkarni rc = ecore_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 5133*14b24e2bSVaishali Kulkarni sizeof(struct xstorm_eth_queue_zone), timeset); 5134*14b24e2bSVaishali Kulkarni out: 5135*14b24e2bSVaishali Kulkarni return rc; 5136*14b24e2bSVaishali Kulkarni } 5137*14b24e2bSVaishali Kulkarni 5138*14b24e2bSVaishali Kulkarni /* Calculate final WFQ values for all vports and configure it. 
5139*14b24e2bSVaishali Kulkarni * After this configuration each vport must have 5140*14b24e2bSVaishali Kulkarni * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT 5141*14b24e2bSVaishali Kulkarni */ 5142*14b24e2bSVaishali Kulkarni static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5143*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 5144*14b24e2bSVaishali Kulkarni u32 min_pf_rate) 5145*14b24e2bSVaishali Kulkarni { 5146*14b24e2bSVaishali Kulkarni struct init_qm_vport_params *vport_params; 5147*14b24e2bSVaishali Kulkarni int i; 5148*14b24e2bSVaishali Kulkarni 5149*14b24e2bSVaishali Kulkarni vport_params = p_hwfn->qm_info.qm_vport_params; 5150*14b24e2bSVaishali Kulkarni 5151*14b24e2bSVaishali Kulkarni for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5152*14b24e2bSVaishali Kulkarni u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5153*14b24e2bSVaishali Kulkarni 5154*14b24e2bSVaishali Kulkarni vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) / 5155*14b24e2bSVaishali Kulkarni min_pf_rate; 5156*14b24e2bSVaishali Kulkarni ecore_init_vport_wfq(p_hwfn, p_ptt, 5157*14b24e2bSVaishali Kulkarni vport_params[i].first_tx_pq_id, 5158*14b24e2bSVaishali Kulkarni vport_params[i].vport_wfq); 5159*14b24e2bSVaishali Kulkarni } 5160*14b24e2bSVaishali Kulkarni } 5161*14b24e2bSVaishali Kulkarni 5162*14b24e2bSVaishali Kulkarni static void 5163*14b24e2bSVaishali Kulkarni ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate) 5164*14b24e2bSVaishali Kulkarni 5165*14b24e2bSVaishali Kulkarni { 5166*14b24e2bSVaishali Kulkarni int i; 5167*14b24e2bSVaishali Kulkarni 5168*14b24e2bSVaishali Kulkarni for (i = 0; i < p_hwfn->qm_info.num_vports; i++) 5169*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; 5170*14b24e2bSVaishali Kulkarni } 5171*14b24e2bSVaishali Kulkarni 5172*14b24e2bSVaishali Kulkarni static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5173*14b24e2bSVaishali Kulkarni struct 
ecore_ptt *p_ptt, 5174*14b24e2bSVaishali Kulkarni u32 min_pf_rate) 5175*14b24e2bSVaishali Kulkarni { 5176*14b24e2bSVaishali Kulkarni struct init_qm_vport_params *vport_params; 5177*14b24e2bSVaishali Kulkarni int i; 5178*14b24e2bSVaishali Kulkarni 5179*14b24e2bSVaishali Kulkarni vport_params = p_hwfn->qm_info.qm_vport_params; 5180*14b24e2bSVaishali Kulkarni 5181*14b24e2bSVaishali Kulkarni for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5182*14b24e2bSVaishali Kulkarni ecore_init_wfq_default_param(p_hwfn, min_pf_rate); 5183*14b24e2bSVaishali Kulkarni ecore_init_vport_wfq(p_hwfn, p_ptt, 5184*14b24e2bSVaishali Kulkarni vport_params[i].first_tx_pq_id, 5185*14b24e2bSVaishali Kulkarni vport_params[i].vport_wfq); 5186*14b24e2bSVaishali Kulkarni } 5187*14b24e2bSVaishali Kulkarni } 5188*14b24e2bSVaishali Kulkarni 5189*14b24e2bSVaishali Kulkarni /* This function performs several validations for WFQ 5190*14b24e2bSVaishali Kulkarni * configuration and required min rate for a given vport 5191*14b24e2bSVaishali Kulkarni * 1. req_rate must be greater than one percent of min_pf_rate. 5192*14b24e2bSVaishali Kulkarni * 2. req_rate should not cause other vports [not configured for WFQ explicitly] 5193*14b24e2bSVaishali Kulkarni * rates to get less than one percent of min_pf_rate. 5194*14b24e2bSVaishali Kulkarni * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 
5195*14b24e2bSVaishali Kulkarni */ 5196*14b24e2bSVaishali Kulkarni static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn, 5197*14b24e2bSVaishali Kulkarni u16 vport_id, u32 req_rate, 5198*14b24e2bSVaishali Kulkarni u32 min_pf_rate) 5199*14b24e2bSVaishali Kulkarni { 5200*14b24e2bSVaishali Kulkarni u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 5201*14b24e2bSVaishali Kulkarni int non_requested_count = 0, req_count = 0, i, num_vports; 5202*14b24e2bSVaishali Kulkarni 5203*14b24e2bSVaishali Kulkarni num_vports = p_hwfn->qm_info.num_vports; 5204*14b24e2bSVaishali Kulkarni 5205*14b24e2bSVaishali Kulkarni /* Accounting for the vports which are configured for WFQ explicitly */ 5206*14b24e2bSVaishali Kulkarni for (i = 0; i < num_vports; i++) { 5207*14b24e2bSVaishali Kulkarni u32 tmp_speed; 5208*14b24e2bSVaishali Kulkarni 5209*14b24e2bSVaishali Kulkarni if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) { 5210*14b24e2bSVaishali Kulkarni req_count++; 5211*14b24e2bSVaishali Kulkarni tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5212*14b24e2bSVaishali Kulkarni total_req_min_rate += tmp_speed; 5213*14b24e2bSVaishali Kulkarni } 5214*14b24e2bSVaishali Kulkarni } 5215*14b24e2bSVaishali Kulkarni 5216*14b24e2bSVaishali Kulkarni /* Include current vport data as well */ 5217*14b24e2bSVaishali Kulkarni req_count++; 5218*14b24e2bSVaishali Kulkarni total_req_min_rate += req_rate; 5219*14b24e2bSVaishali Kulkarni non_requested_count = num_vports - req_count; 5220*14b24e2bSVaishali Kulkarni 5221*14b24e2bSVaishali Kulkarni /* validate possible error cases */ 5222*14b24e2bSVaishali Kulkarni if (req_rate > min_pf_rate) { 5223*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5224*14b24e2bSVaishali Kulkarni "Vport [%d] - Requested rate[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5225*14b24e2bSVaishali Kulkarni vport_id, req_rate, min_pf_rate); 5226*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 
5227*14b24e2bSVaishali Kulkarni } 5228*14b24e2bSVaishali Kulkarni 5229*14b24e2bSVaishali Kulkarni if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) { 5230*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5231*14b24e2bSVaishali Kulkarni "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5232*14b24e2bSVaishali Kulkarni vport_id, req_rate, min_pf_rate); 5233*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 5234*14b24e2bSVaishali Kulkarni } 5235*14b24e2bSVaishali Kulkarni 5236*14b24e2bSVaishali Kulkarni /* TBD - for number of vports greater than 100 */ 5237*14b24e2bSVaishali Kulkarni if (num_vports > ECORE_WFQ_UNIT) { 5238*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5239*14b24e2bSVaishali Kulkarni "Number of vports is greater than %d\n", 5240*14b24e2bSVaishali Kulkarni ECORE_WFQ_UNIT); 5241*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 5242*14b24e2bSVaishali Kulkarni } 5243*14b24e2bSVaishali Kulkarni 5244*14b24e2bSVaishali Kulkarni if (total_req_min_rate > min_pf_rate) { 5245*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5246*14b24e2bSVaishali Kulkarni "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5247*14b24e2bSVaishali Kulkarni total_req_min_rate, min_pf_rate); 5248*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 5249*14b24e2bSVaishali Kulkarni } 5250*14b24e2bSVaishali Kulkarni 5251*14b24e2bSVaishali Kulkarni /* Data left for non requested vports */ 5252*14b24e2bSVaishali Kulkarni total_left_rate = min_pf_rate - total_req_min_rate; 5253*14b24e2bSVaishali Kulkarni left_rate_per_vp = total_left_rate / non_requested_count; 5254*14b24e2bSVaishali Kulkarni 5255*14b24e2bSVaishali Kulkarni /* validate if non requested get < 1% of min bw */ 5256*14b24e2bSVaishali Kulkarni if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) { 5257*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5258*14b24e2bSVaishali Kulkarni "Non WFQ 
configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5259*14b24e2bSVaishali Kulkarni left_rate_per_vp, min_pf_rate); 5260*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 5261*14b24e2bSVaishali Kulkarni } 5262*14b24e2bSVaishali Kulkarni 5263*14b24e2bSVaishali Kulkarni /* now req_rate for given vport passes all scenarios. 5264*14b24e2bSVaishali Kulkarni * assign final wfq rates to all vports. 5265*14b24e2bSVaishali Kulkarni */ 5266*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; 5267*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.wfq_data[vport_id].configured = true; 5268*14b24e2bSVaishali Kulkarni 5269*14b24e2bSVaishali Kulkarni for (i = 0; i < num_vports; i++) { 5270*14b24e2bSVaishali Kulkarni if (p_hwfn->qm_info.wfq_data[i].configured) 5271*14b24e2bSVaishali Kulkarni continue; 5272*14b24e2bSVaishali Kulkarni 5273*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; 5274*14b24e2bSVaishali Kulkarni } 5275*14b24e2bSVaishali Kulkarni 5276*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 5277*14b24e2bSVaishali Kulkarni } 5278*14b24e2bSVaishali Kulkarni 5279*14b24e2bSVaishali Kulkarni static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn, 5280*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 5281*14b24e2bSVaishali Kulkarni u16 vp_id, u32 rate) 5282*14b24e2bSVaishali Kulkarni { 5283*14b24e2bSVaishali Kulkarni struct ecore_mcp_link_state *p_link; 5284*14b24e2bSVaishali Kulkarni int rc = ECORE_SUCCESS; 5285*14b24e2bSVaishali Kulkarni 5286*14b24e2bSVaishali Kulkarni p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output; 5287*14b24e2bSVaishali Kulkarni 5288*14b24e2bSVaishali Kulkarni if (!p_link->min_pf_rate) { 5289*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; 5290*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.wfq_data[vp_id].configured = true; 5291*14b24e2bSVaishali Kulkarni return rc; 5292*14b24e2bSVaishali Kulkarni } 
5293*14b24e2bSVaishali Kulkarni 5294*14b24e2bSVaishali Kulkarni rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); 5295*14b24e2bSVaishali Kulkarni 5296*14b24e2bSVaishali Kulkarni if (rc == ECORE_SUCCESS) 5297*14b24e2bSVaishali Kulkarni ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, 5298*14b24e2bSVaishali Kulkarni p_link->min_pf_rate); 5299*14b24e2bSVaishali Kulkarni else 5300*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 5301*14b24e2bSVaishali Kulkarni "Validation failed while configuring min rate\n"); 5302*14b24e2bSVaishali Kulkarni 5303*14b24e2bSVaishali Kulkarni return rc; 5304*14b24e2bSVaishali Kulkarni } 5305*14b24e2bSVaishali Kulkarni 5306*14b24e2bSVaishali Kulkarni static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn, 5307*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 5308*14b24e2bSVaishali Kulkarni u32 min_pf_rate) 5309*14b24e2bSVaishali Kulkarni { 5310*14b24e2bSVaishali Kulkarni bool use_wfq = false; 5311*14b24e2bSVaishali Kulkarni int rc = ECORE_SUCCESS; 5312*14b24e2bSVaishali Kulkarni u16 i; 5313*14b24e2bSVaishali Kulkarni 5314*14b24e2bSVaishali Kulkarni /* Validate all pre configured vports for wfq */ 5315*14b24e2bSVaishali Kulkarni for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5316*14b24e2bSVaishali Kulkarni u32 rate; 5317*14b24e2bSVaishali Kulkarni 5318*14b24e2bSVaishali Kulkarni if (!p_hwfn->qm_info.wfq_data[i].configured) 5319*14b24e2bSVaishali Kulkarni continue; 5320*14b24e2bSVaishali Kulkarni 5321*14b24e2bSVaishali Kulkarni rate = p_hwfn->qm_info.wfq_data[i].min_speed; 5322*14b24e2bSVaishali Kulkarni use_wfq = true; 5323*14b24e2bSVaishali Kulkarni 5324*14b24e2bSVaishali Kulkarni rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate); 5325*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 5326*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, false, 5327*14b24e2bSVaishali Kulkarni "WFQ validation failed while configuring min rate\n"); 5328*14b24e2bSVaishali Kulkarni break; 
5329*14b24e2bSVaishali Kulkarni } 5330*14b24e2bSVaishali Kulkarni } 5331*14b24e2bSVaishali Kulkarni 5332*14b24e2bSVaishali Kulkarni if (rc == ECORE_SUCCESS && use_wfq) 5333*14b24e2bSVaishali Kulkarni ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5334*14b24e2bSVaishali Kulkarni else 5335*14b24e2bSVaishali Kulkarni ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5336*14b24e2bSVaishali Kulkarni 5337*14b24e2bSVaishali Kulkarni return rc; 5338*14b24e2bSVaishali Kulkarni } 5339*14b24e2bSVaishali Kulkarni 5340*14b24e2bSVaishali Kulkarni /* Main API for ecore clients to configure vport min rate. 5341*14b24e2bSVaishali Kulkarni * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] 5342*14b24e2bSVaishali Kulkarni * rate - Speed in Mbps needs to be assigned to a given vport. 5343*14b24e2bSVaishali Kulkarni */ 5344*14b24e2bSVaishali Kulkarni int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate) 5345*14b24e2bSVaishali Kulkarni { 5346*14b24e2bSVaishali Kulkarni int i, rc = ECORE_INVAL; 5347*14b24e2bSVaishali Kulkarni 5348*14b24e2bSVaishali Kulkarni /* TBD - for multiple hardware functions - that is 100 gig */ 5349*14b24e2bSVaishali Kulkarni if (p_dev->num_hwfns > 1) { 5350*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev, false, 5351*14b24e2bSVaishali Kulkarni "WFQ configuration is not supported for this device\n"); 5352*14b24e2bSVaishali Kulkarni return rc; 5353*14b24e2bSVaishali Kulkarni } 5354*14b24e2bSVaishali Kulkarni 5355*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 5356*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5357*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt; 5358*14b24e2bSVaishali Kulkarni 5359*14b24e2bSVaishali Kulkarni p_ptt = ecore_ptt_acquire(p_hwfn); 5360*14b24e2bSVaishali Kulkarni if (!p_ptt) 5361*14b24e2bSVaishali Kulkarni return ECORE_TIMEOUT; 5362*14b24e2bSVaishali Kulkarni 5363*14b24e2bSVaishali Kulkarni rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, 
vp_id, rate); 5364*14b24e2bSVaishali Kulkarni 5365*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 5366*14b24e2bSVaishali Kulkarni ecore_ptt_release(p_hwfn, p_ptt); 5367*14b24e2bSVaishali Kulkarni return rc; 5368*14b24e2bSVaishali Kulkarni } 5369*14b24e2bSVaishali Kulkarni 5370*14b24e2bSVaishali Kulkarni ecore_ptt_release(p_hwfn, p_ptt); 5371*14b24e2bSVaishali Kulkarni } 5372*14b24e2bSVaishali Kulkarni 5373*14b24e2bSVaishali Kulkarni return rc; 5374*14b24e2bSVaishali Kulkarni } 5375*14b24e2bSVaishali Kulkarni 5376*14b24e2bSVaishali Kulkarni /* API to configure WFQ from mcp link change */ 5377*14b24e2bSVaishali Kulkarni void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, 5378*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 5379*14b24e2bSVaishali Kulkarni u32 min_pf_rate) 5380*14b24e2bSVaishali Kulkarni { 5381*14b24e2bSVaishali Kulkarni int i; 5382*14b24e2bSVaishali Kulkarni 5383*14b24e2bSVaishali Kulkarni /* TBD - for multiple hardware functions - that is 100 gig */ 5384*14b24e2bSVaishali Kulkarni if (p_dev->num_hwfns > 1) { 5385*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_dev, ECORE_MSG_LINK, 5386*14b24e2bSVaishali Kulkarni "WFQ configuration is not supported for this device\n"); 5387*14b24e2bSVaishali Kulkarni return; 5388*14b24e2bSVaishali Kulkarni } 5389*14b24e2bSVaishali Kulkarni 5390*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 5391*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5392*14b24e2bSVaishali Kulkarni 5393*14b24e2bSVaishali Kulkarni __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, 5394*14b24e2bSVaishali Kulkarni min_pf_rate); 5395*14b24e2bSVaishali Kulkarni } 5396*14b24e2bSVaishali Kulkarni } 5397*14b24e2bSVaishali Kulkarni 5398*14b24e2bSVaishali Kulkarni int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, 5399*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 5400*14b24e2bSVaishali Kulkarni struct ecore_mcp_link_state *p_link, 5401*14b24e2bSVaishali Kulkarni u8 max_bw) 
5402*14b24e2bSVaishali Kulkarni { 5403*14b24e2bSVaishali Kulkarni int rc = ECORE_SUCCESS; 5404*14b24e2bSVaishali Kulkarni 5405*14b24e2bSVaishali Kulkarni p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; 5406*14b24e2bSVaishali Kulkarni 5407*14b24e2bSVaishali Kulkarni if (!p_link->line_speed && (max_bw != 100)) 5408*14b24e2bSVaishali Kulkarni return rc; 5409*14b24e2bSVaishali Kulkarni 5410*14b24e2bSVaishali Kulkarni p_link->speed = (p_link->line_speed * max_bw) / 100; 5411*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.pf_rl = p_link->speed; 5412*14b24e2bSVaishali Kulkarni 5413*14b24e2bSVaishali Kulkarni /* Since the limiter also affects Tx-switched traffic, we don't want it 5414*14b24e2bSVaishali Kulkarni * to limit such traffic in case there's no actual limit. 5415*14b24e2bSVaishali Kulkarni * In that case, set limit to imaginary high boundary. 5416*14b24e2bSVaishali Kulkarni */ 5417*14b24e2bSVaishali Kulkarni if (max_bw == 100) 5418*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.pf_rl = 100000; 5419*14b24e2bSVaishali Kulkarni 5420*14b24e2bSVaishali Kulkarni rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, 5421*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.pf_rl); 5422*14b24e2bSVaishali Kulkarni 5423*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5424*14b24e2bSVaishali Kulkarni "Configured MAX bandwidth to be %08x Mb/sec\n", 5425*14b24e2bSVaishali Kulkarni p_link->speed); 5426*14b24e2bSVaishali Kulkarni 5427*14b24e2bSVaishali Kulkarni return rc; 5428*14b24e2bSVaishali Kulkarni } 5429*14b24e2bSVaishali Kulkarni 5430*14b24e2bSVaishali Kulkarni /* Main API to configure PF max bandwidth where bw range is [1 - 100] */ 5431*14b24e2bSVaishali Kulkarni int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw) 5432*14b24e2bSVaishali Kulkarni { 5433*14b24e2bSVaishali Kulkarni int i, rc = ECORE_INVAL; 5434*14b24e2bSVaishali Kulkarni 5435*14b24e2bSVaishali Kulkarni if (max_bw < 1 || max_bw > 100) { 5436*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev, 
false, "PF max bw valid range is [1-100]\n"); 5437*14b24e2bSVaishali Kulkarni return rc; 5438*14b24e2bSVaishali Kulkarni } 5439*14b24e2bSVaishali Kulkarni 5440*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 5441*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5442*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev); 5443*14b24e2bSVaishali Kulkarni struct ecore_mcp_link_state *p_link; 5444*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt; 5445*14b24e2bSVaishali Kulkarni 5446*14b24e2bSVaishali Kulkarni p_link = &p_lead->mcp_info->link_output; 5447*14b24e2bSVaishali Kulkarni 5448*14b24e2bSVaishali Kulkarni p_ptt = ecore_ptt_acquire(p_hwfn); 5449*14b24e2bSVaishali Kulkarni if (!p_ptt) 5450*14b24e2bSVaishali Kulkarni return ECORE_TIMEOUT; 5451*14b24e2bSVaishali Kulkarni 5452*14b24e2bSVaishali Kulkarni rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, 5453*14b24e2bSVaishali Kulkarni p_link, max_bw); 5454*14b24e2bSVaishali Kulkarni 5455*14b24e2bSVaishali Kulkarni ecore_ptt_release(p_hwfn, p_ptt); 5456*14b24e2bSVaishali Kulkarni 5457*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 5458*14b24e2bSVaishali Kulkarni break; 5459*14b24e2bSVaishali Kulkarni } 5460*14b24e2bSVaishali Kulkarni 5461*14b24e2bSVaishali Kulkarni return rc; 5462*14b24e2bSVaishali Kulkarni } 5463*14b24e2bSVaishali Kulkarni 5464*14b24e2bSVaishali Kulkarni int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn, 5465*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 5466*14b24e2bSVaishali Kulkarni struct ecore_mcp_link_state *p_link, 5467*14b24e2bSVaishali Kulkarni u8 min_bw) 5468*14b24e2bSVaishali Kulkarni { 5469*14b24e2bSVaishali Kulkarni int rc = ECORE_SUCCESS; 5470*14b24e2bSVaishali Kulkarni 5471*14b24e2bSVaishali Kulkarni p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; 5472*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.pf_wfq = min_bw; 5473*14b24e2bSVaishali Kulkarni 5474*14b24e2bSVaishali Kulkarni if 
(!p_link->line_speed) 5475*14b24e2bSVaishali Kulkarni return rc; 5476*14b24e2bSVaishali Kulkarni 5477*14b24e2bSVaishali Kulkarni p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; 5478*14b24e2bSVaishali Kulkarni 5479*14b24e2bSVaishali Kulkarni rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); 5480*14b24e2bSVaishali Kulkarni 5481*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5482*14b24e2bSVaishali Kulkarni "Configured MIN bandwidth to be %d Mb/sec\n", 5483*14b24e2bSVaishali Kulkarni p_link->min_pf_rate); 5484*14b24e2bSVaishali Kulkarni 5485*14b24e2bSVaishali Kulkarni return rc; 5486*14b24e2bSVaishali Kulkarni } 5487*14b24e2bSVaishali Kulkarni 5488*14b24e2bSVaishali Kulkarni /* Main API to configure PF min bandwidth where bw range is [1-100] */ 5489*14b24e2bSVaishali Kulkarni int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw) 5490*14b24e2bSVaishali Kulkarni { 5491*14b24e2bSVaishali Kulkarni int i, rc = ECORE_INVAL; 5492*14b24e2bSVaishali Kulkarni 5493*14b24e2bSVaishali Kulkarni if (min_bw < 1 || min_bw > 100) { 5494*14b24e2bSVaishali Kulkarni DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n"); 5495*14b24e2bSVaishali Kulkarni return rc; 5496*14b24e2bSVaishali Kulkarni } 5497*14b24e2bSVaishali Kulkarni 5498*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 5499*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5500*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev); 5501*14b24e2bSVaishali Kulkarni struct ecore_mcp_link_state *p_link; 5502*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt; 5503*14b24e2bSVaishali Kulkarni 5504*14b24e2bSVaishali Kulkarni p_link = &p_lead->mcp_info->link_output; 5505*14b24e2bSVaishali Kulkarni 5506*14b24e2bSVaishali Kulkarni p_ptt = ecore_ptt_acquire(p_hwfn); 5507*14b24e2bSVaishali Kulkarni if (!p_ptt) 5508*14b24e2bSVaishali Kulkarni return ECORE_TIMEOUT; 5509*14b24e2bSVaishali Kulkarni 5510*14b24e2bSVaishali 
Kulkarni rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, 5511*14b24e2bSVaishali Kulkarni p_link, min_bw); 5512*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 5513*14b24e2bSVaishali Kulkarni ecore_ptt_release(p_hwfn, p_ptt); 5514*14b24e2bSVaishali Kulkarni return rc; 5515*14b24e2bSVaishali Kulkarni } 5516*14b24e2bSVaishali Kulkarni 5517*14b24e2bSVaishali Kulkarni if (p_link->min_pf_rate) { 5518*14b24e2bSVaishali Kulkarni u32 min_rate = p_link->min_pf_rate; 5519*14b24e2bSVaishali Kulkarni 5520*14b24e2bSVaishali Kulkarni rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn, 5521*14b24e2bSVaishali Kulkarni p_ptt, 5522*14b24e2bSVaishali Kulkarni min_rate); 5523*14b24e2bSVaishali Kulkarni } 5524*14b24e2bSVaishali Kulkarni 5525*14b24e2bSVaishali Kulkarni ecore_ptt_release(p_hwfn, p_ptt); 5526*14b24e2bSVaishali Kulkarni } 5527*14b24e2bSVaishali Kulkarni 5528*14b24e2bSVaishali Kulkarni return rc; 5529*14b24e2bSVaishali Kulkarni } 5530*14b24e2bSVaishali Kulkarni 5531*14b24e2bSVaishali Kulkarni void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 5532*14b24e2bSVaishali Kulkarni { 5533*14b24e2bSVaishali Kulkarni struct ecore_mcp_link_state *p_link; 5534*14b24e2bSVaishali Kulkarni 5535*14b24e2bSVaishali Kulkarni p_link = &p_hwfn->mcp_info->link_output; 5536*14b24e2bSVaishali Kulkarni 5537*14b24e2bSVaishali Kulkarni if (p_link->min_pf_rate) 5538*14b24e2bSVaishali Kulkarni ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, 5539*14b24e2bSVaishali Kulkarni p_link->min_pf_rate); 5540*14b24e2bSVaishali Kulkarni 5541*14b24e2bSVaishali Kulkarni OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0, 5542*14b24e2bSVaishali Kulkarni sizeof(*p_hwfn->qm_info.wfq_data) * 5543*14b24e2bSVaishali Kulkarni p_hwfn->qm_info.num_vports); 5544*14b24e2bSVaishali Kulkarni } 5545*14b24e2bSVaishali Kulkarni 5546*14b24e2bSVaishali Kulkarni int ecore_device_num_engines(struct ecore_dev *p_dev) 5547*14b24e2bSVaishali Kulkarni { 5548*14b24e2bSVaishali Kulkarni return ECORE_IS_BB(p_dev) ? 
2 : 1; 5549*14b24e2bSVaishali Kulkarni } 5550*14b24e2bSVaishali Kulkarni 5551*14b24e2bSVaishali Kulkarni int ecore_device_num_ports(struct ecore_dev *p_dev) 5552*14b24e2bSVaishali Kulkarni { 5553*14b24e2bSVaishali Kulkarni /* in CMT always only one port */ 5554*14b24e2bSVaishali Kulkarni if (p_dev->num_hwfns > 1) 5555*14b24e2bSVaishali Kulkarni return 1; 5556*14b24e2bSVaishali Kulkarni 5557*14b24e2bSVaishali Kulkarni return p_dev->num_ports_in_engine * ecore_device_num_engines(p_dev); 5558*14b24e2bSVaishali Kulkarni } 5559*14b24e2bSVaishali Kulkarni 5560*14b24e2bSVaishali Kulkarni int ecore_device_get_port_id(struct ecore_dev *p_dev) 5561*14b24e2bSVaishali Kulkarni { 5562*14b24e2bSVaishali Kulkarni return (ECORE_LEADING_HWFN(p_dev)->abs_pf_id) % 5563*14b24e2bSVaishali Kulkarni ecore_device_num_ports(p_dev); 5564*14b24e2bSVaishali Kulkarni } 5565*14b24e2bSVaishali Kulkarni 5566*14b24e2bSVaishali Kulkarni void ecore_set_fw_mac_addr(__le16 *fw_msb, 5567*14b24e2bSVaishali Kulkarni __le16 *fw_mid, 5568*14b24e2bSVaishali Kulkarni __le16 *fw_lsb, 5569*14b24e2bSVaishali Kulkarni u8 *mac) 5570*14b24e2bSVaishali Kulkarni { 5571*14b24e2bSVaishali Kulkarni ((u8 *)fw_msb)[0] = mac[1]; 5572*14b24e2bSVaishali Kulkarni ((u8 *)fw_msb)[1] = mac[0]; 5573*14b24e2bSVaishali Kulkarni ((u8 *)fw_mid)[0] = mac[3]; 5574*14b24e2bSVaishali Kulkarni ((u8 *)fw_mid)[1] = mac[2]; 5575*14b24e2bSVaishali Kulkarni ((u8 *)fw_lsb)[0] = mac[5]; 5576*14b24e2bSVaishali Kulkarni ((u8 *)fw_lsb)[1] = mac[4]; 5577*14b24e2bSVaishali Kulkarni } 5578