1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, v.1, (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2014-2017 Cavium, Inc. 24 * The contents of this file are subject to the terms of the Common Development 25 * and Distribution License, v.1, (the "License"). 26 27 * You may not use this file except in compliance with the License. 28 29 * You can obtain a copy of the License at available 30 * at http://opensource.org/licenses/CDDL-1.0 31 32 * See the License for the specific language governing permissions and 33 * limitations under the License. 34 */ 35 36 #include "bcm_osal.h" 37 #include "reg_addr.h" 38 #include "ecore_gtt_reg_addr.h" 39 #include "ecore.h" 40 #include "ecore_chain.h" 41 #include "ecore_status.h" 42 #include "ecore_hw.h" 43 #include "ecore_rt_defs.h" 44 #include "ecore_init_ops.h" 45 #include "ecore_int.h" 46 #include "ecore_cxt.h" 47 #include "ecore_spq.h" 48 #include "ecore_init_fw_funcs.h" 49 #include "ecore_sp_commands.h" 50 #include "ecore_dev_api.h" 51 #include "ecore_sriov.h" 52 #include "ecore_vf.h" 53 #include "ecore_ll2.h" 54 #include "ecore_fcoe.h" 55 #include "ecore_iscsi.h" 56 #include "ecore_ooo.h" 57 #include "ecore_mcp.h" 58 #include "ecore_hw_defs.h" 59 #include "mcp_public.h" 60 #include "ecore_roce.h" 61 #include "ecore_iro.h" 62 #include "nvm_cfg.h" 63 #include "ecore_dev_api.h" 64 #include "ecore_dcbx.h" 65 #include "pcics_reg_driver.h" 66 #include "ecore_l2.h" 67 68 /* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM 69 * registers involved are not split and thus configuration is a race where 70 * some of the PFs configuration might be lost. 71 * Eventually, this needs to move into a MFW-covered HW-lock as arbitration 72 * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where 73 * there's more than a single compiled ecore component in system]. 74 */ 75 static osal_spinlock_t qm_lock; 76 static bool qm_lock_init = false; 77 78 /* Configurable */ 79 #define ECORE_MIN_DPIS (4) /* The minimal number of DPIs required to 80 * load the driver. The number was 81 * arbitrarily set. 82 */ 83 84 /* Derived */ 85 #define ECORE_MIN_PWM_REGION ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS)) 86 87 enum BAR_ID { 88 BAR_ID_0, /* used for GRC */ 89 BAR_ID_1 /* Used for doorbells */ 90 }; 91 92 static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id) 93 { 94 u32 bar_reg = (bar_id == BAR_ID_0 ? 95 PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); 96 u32 val; 97 98 if (IS_VF(p_hwfn->p_dev)) { 99 /* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be 100 * read from actual register, but we're currently not using 101 * it for actual doorbelling. 102 */ 103 return 1 << 17; 104 } 105 106 val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); 107 if (val) 108 return 1 << (val + 15); 109 110 /* The above registers were updated in the past only in CMT mode. Since 111 * they were found to be useful MFW started updating them from 8.7.7.0. 112 * In older MFW versions they are set to 0 which means disabled. 113 */ 114 if (p_hwfn->p_dev->num_hwfns > 1) { 115 DP_NOTICE(p_hwfn, false, 116 "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); 117 return BAR_ID_0 ? 256 * 1024 : 512 * 1024; 118 } else { 119 DP_NOTICE(p_hwfn, false, 120 "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n"); 121 return 512 * 1024; 122 } 123 } 124 125 void ecore_init_dp(struct ecore_dev *p_dev, 126 u32 dp_module, 127 u8 dp_level, 128 void *dp_ctx) 129 { 130 u32 i; 131 132 p_dev->dp_level = dp_level; 133 p_dev->dp_module = dp_module; 134 p_dev->dp_ctx = dp_ctx; 135 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 136 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 137 138 p_hwfn->dp_level = dp_level; 139 p_hwfn->dp_module = dp_module; 140 p_hwfn->dp_ctx = dp_ctx; 141 } 142 } 143 144 void ecore_init_struct(struct ecore_dev *p_dev) 145 { 146 u8 i; 147 148 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 149 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 150 151 p_hwfn->p_dev = p_dev; 152 p_hwfn->my_id = i; 153 p_hwfn->b_active = false; 154 155 OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex); 156 OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex); 157 } 158 159 /* hwfn 0 is always active */ 160 p_dev->hwfns[0].b_active = true; 161 162 /* set the default cache alignment to 128 (may be overridden later) */ 163 p_dev->cache_shift = 7; 164 } 165 166 static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) 167 { 168 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 169 170 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params); 171 qm_info->qm_pq_params = OSAL_NULL; 172 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params); 173 qm_info->qm_vport_params = OSAL_NULL; 174 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params); 175 qm_info->qm_port_params = OSAL_NULL; 176 OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data); 177 qm_info->wfq_data = OSAL_NULL; 178 } 179 180 void ecore_resc_free(struct ecore_dev *p_dev) 181 { 182 int i; 183 184 if (IS_VF(p_dev)) { 185 for_each_hwfn(p_dev, i) 186 ecore_l2_free(&p_dev->hwfns[i]); 187 return; 188 } 189 190 OSAL_FREE(p_dev, p_dev->fw_data); 191 p_dev->fw_data = OSAL_NULL; 192 193 OSAL_FREE(p_dev, p_dev->reset_stats); 194 p_dev->reset_stats = OSAL_NULL; 195 196 for_each_hwfn(p_dev, i) { 197 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 198 199 ecore_cxt_mngr_free(p_hwfn); 200 ecore_qm_info_free(p_hwfn); 201 ecore_spq_free(p_hwfn); 202 ecore_eq_free(p_hwfn); 203 ecore_consq_free(p_hwfn); 204 ecore_int_free(p_hwfn); 205 #ifdef CONFIG_ECORE_LL2 206 ecore_ll2_free(p_hwfn); 207 #endif 208 #ifdef CONFIG_ECORE_FCOE 209 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 210 ecore_fcoe_free(p_hwfn); 211 #endif 212 #ifdef CONFIG_ECORE_ISCSI 213 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 214 ecore_iscsi_free(p_hwfn); 215 ecore_ooo_free(p_hwfn); 216 } 217 #endif 218 #ifdef CONFIG_ECORE_ROCE 219 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) 220 ecore_rdma_info_free(p_hwfn); 221 #endif 222 ecore_iov_free(p_hwfn); 223 ecore_l2_free(p_hwfn); 224 ecore_dmae_info_free(p_hwfn); 225 ecore_dcbx_info_free(p_hwfn); 226 /* @@@TBD Flush work-queue ?*/ 227 } 228 } 229 230 /******************** QM initialization *******************/ 231 232 /* bitmaps for indicating active traffic classes. Special case for Arrowhead 4 port */ 233 #define ACTIVE_TCS_BMAP 0x9f /* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */ 234 #define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */ 235 236 /* determines the physical queue flags for a given PF. */ 237 static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) 238 { 239 u32 flags; 240 241 /* common flags */ 242 flags = PQ_FLAGS_LB; 243 244 /* feature flags */ 245 if (IS_ECORE_SRIOV(p_hwfn->p_dev)) 246 flags |= PQ_FLAGS_VFS; 247 if (IS_ECORE_DCQCN(p_hwfn)) 248 flags |= PQ_FLAGS_RLS; 249 250 /* protocol flags */ 251 switch (p_hwfn->hw_info.personality) { 252 case ECORE_PCI_ETH: 253 flags |= PQ_FLAGS_MCOS; 254 break; 255 case ECORE_PCI_FCOE: 256 flags |= PQ_FLAGS_OFLD; 257 break; 258 case ECORE_PCI_ISCSI: 259 flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 260 break; 261 case ECORE_PCI_ETH_ROCE: 262 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; 263 break; 264 case ECORE_PCI_ETH_IWARP: 265 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 266 break; 267 default: 268 DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality); 269 return 0; 270 } 271 272 return flags; 273 } 274 275 276 /* Getters for resource amounts necessary for qm initialization */ 277 u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn) 278 { 279 return p_hwfn->hw_info.num_hw_tc; 280 } 281 282 u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn) 283 { 284 return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0; 285 } 286 287 #define NUM_DEFAULT_RLS 1 288 289 u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn) 290 { 291 u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 292 293 /* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */ 294 num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), 295 (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_VPORT), 296 ROCE_DCQCN_RP_MAX_QPS)); 297 298 /* make sure after we reserve the default and VF rls we'll have something left */ 299 if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) { 300 if (IS_ECORE_DCQCN(p_hwfn)) 301 DP_NOTICE(p_hwfn, false, "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs); 302 return 0; 303 } 304 305 /* subtract rls necessary for VFs and one default one for the PF */ 306 num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 307 308 return num_pf_rls; 309 } 310 311 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn) 312 { 313 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 314 315 /* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */ 316 return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 317 (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1; 318 } 319 320 /* calc amount of PQs according to the requested flags */ 321 u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn) 322 { 323 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 324 325 return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 326 (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) + 327 (!!(PQ_FLAGS_LB & pq_flags)) + 328 (!!(PQ_FLAGS_OOO & pq_flags)) + 329 (!!(PQ_FLAGS_ACK & pq_flags)) + 330 (!!(PQ_FLAGS_OFLD & pq_flags)) + 331 (!!(PQ_FLAGS_LLT & pq_flags)) + 332 (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn); 333 } 334 335 /* initialize the top level QM params */ 336 static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn) 337 { 338 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 339 bool four_port; 340 341 /* pq and vport bases for this PF */ 342 qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ); 343 qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT); 344 345 /* rate limiting and weighted fair queueing are always enabled */ 346 qm_info->vport_rl_en = 1; 347 qm_info->vport_wfq_en = 1; 348 349 /* TC config is different for AH 4 port */ 350 four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2; 351 352 /* in AH 4 port we have fewer TCs per port */ 353 qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS; 354 355 /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */ 356 if (!qm_info->ooo_tc) 357 qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC; 358 } 359 360 /* initialize qm vport params */ 361 static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn) 362 { 363 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 364 u8 i; 365 366 /* all vports participate in weighted fair queueing */ 367 for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++) 368 qm_info->qm_vport_params[i].vport_wfq = 1; 369 } 370 371 /* initialize qm port params */ 372 static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn) 373 { 374 /* Initialize qm port parameters */ 375 u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine; 376 377 /* indicate how ooo and high pri traffic is dealt with */ 378 active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 379 ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP; 380 381 for (i = 0; i < num_ports; i++) { 382 struct init_qm_port_params *p_qm_port = 383 &p_hwfn->qm_info.qm_port_params[i]; 384 385 p_qm_port->active = 1; 386 p_qm_port->active_phys_tcs = active_phys_tcs; 387 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; 388 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 389 } 390 } 391 392 /* Reset the params which must be reset for qm init. QM init may be called as 393 * a result of flows other than driver load (e.g. dcbx renegotiation). Other 394 * params may be affected by the init but would simply recalculate to the same 395 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 396 * affected as these amounts stay the same. 397 */ 398 static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn) 399 { 400 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 401 402 qm_info->num_pqs = 0; 403 qm_info->num_vports = 0; 404 qm_info->num_pf_rls = 0; 405 qm_info->num_vf_pqs = 0; 406 qm_info->first_vf_pq = 0; 407 qm_info->first_mcos_pq = 0; 408 qm_info->first_rl_pq = 0; 409 } 410 411 static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn) 412 { 413 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 414 415 qm_info->num_vports++; 416 417 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 418 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 419 } 420 421 /* initialize a single pq and manage qm_info resources accounting. 422 * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF) 423 * and whether a new vport is allocated to the pq or not (i.e. vport will be shared) 424 */ 425 426 /* flags for pq init */ 427 #define PQ_INIT_SHARE_VPORT (1 << 0) 428 #define PQ_INIT_PF_RL (1 << 1) 429 #define PQ_INIT_VF_RL (1 << 2) 430 431 /* defines for pq init */ 432 #define PQ_INIT_DEFAULT_WRR_GROUP 1 433 #define PQ_INIT_DEFAULT_TC 0 434 #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 435 436 static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, 437 struct ecore_qm_info *qm_info, 438 u8 tc, u32 pq_init_flags) 439 { 440 u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn); 441 442 if (pq_idx > max_pq) 443 DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); 444 445 /* init pq params */ 446 qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; 447 qm_info->qm_pq_params[pq_idx].tc_id = tc; 448 qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 449 qm_info->qm_pq_params[pq_idx].rl_valid = 450 (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); 451 452 /* qm params accounting */ 453 qm_info->num_pqs++; 454 if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 455 qm_info->num_vports++; 456 457 if (pq_init_flags & PQ_INIT_PF_RL) 458 qm_info->num_pf_rls++; 459 460 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 461 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 462 463 if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) 464 DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn)); 465 } 466 467 /* get pq index according to PQ_FLAGS */ 468 static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn, 469 u32 pq_flags) 470 { 471 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 472 473 /* Can't have multiple flags set here */ 474 if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 475 goto err; 476 477 switch (pq_flags) { 478 case PQ_FLAGS_RLS: 479 return &qm_info->first_rl_pq; 480 case PQ_FLAGS_MCOS: 481 return &qm_info->first_mcos_pq; 482 case PQ_FLAGS_LB: 483 return &qm_info->pure_lb_pq; 484 case PQ_FLAGS_OOO: 485 return &qm_info->ooo_pq; 486 case PQ_FLAGS_ACK: 487 return &qm_info->pure_ack_pq; 488 case PQ_FLAGS_OFLD: 489 return &qm_info->offload_pq; 490 case PQ_FLAGS_LLT: 491 return &qm_info->low_latency_pq; 492 case PQ_FLAGS_VFS: 493 return &qm_info->first_vf_pq; 494 default: 495 goto err; 496 } 497 498 err: 499 DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 500 return OSAL_NULL; 501 } 502 503 /* save pq index in qm info */ 504 static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn, 505 u32 pq_flags, u16 pq_val) 506 { 507 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 508 509 *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 510 } 511 512 /* get tx pq index, with the PQ TX base already set (ready for context init) */ 513 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags) 514 { 515 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 516 517 return *base_pq_idx + CM_TX_PQ_BASE; 518 } 519 520 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc) 521 { 522 u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn); 523 524 if (tc > max_tc) 525 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 526 527 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 528 } 529 530 u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) 531 { 532 u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn); 533 534 if (vf > max_vf) 535 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 536 537 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 538 } 539 540 u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) 541 { 542 u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); 543 544 if (rl > max_rl) 545 DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); 546 547 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 548 } 549 550 /* Functions for creating specific types of pqs */ 551 static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) 552 { 553 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 554 555 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 556 return; 557 558 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 559 ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 560 } 561 562 static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn) 563 { 564 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 565 566 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 567 return; 568 569 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 570 ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 571 } 572 573 static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn) 574 { 575 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 576 577 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 578 return; 579 580 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 581 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 582 } 583 584 static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn) 585 { 586 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 587 588 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 589 return; 590 591 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 592 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 593 } 594 595 static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn) 596 { 597 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 598 599 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) 600 return; 601 602 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); 603 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 604 } 605 606 static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn) 607 { 608 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 609 u8 tc_idx; 610 611 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 612 return; 613 614 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 615 for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++) 616 ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 617 } 618 619 static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn) 620 { 621 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 622 u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 623 624 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 625 return; 626 627 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 628 qm_info->num_vf_pqs = num_vfs; 629 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 630 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); 631 } 632 633 static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn) 634 { 635 u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn); 636 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 637 638 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 639 return; 640 641 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 642 for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 643 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); 644 } 645 646 static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn) 647 { 648 /* rate limited pqs, must come first (FW assumption) */ 649 ecore_init_qm_rl_pqs(p_hwfn); 650 651 /* pqs for multi cos */ 652 ecore_init_qm_mcos_pqs(p_hwfn); 653 654 /* pure loopback pq */ 655 ecore_init_qm_lb_pq(p_hwfn); 656 657 /* out of order pq */ 658 ecore_init_qm_ooo_pq(p_hwfn); 659 660 /* pure ack pq */ 661 ecore_init_qm_pure_ack_pq(p_hwfn); 662 663 /* pq for offloaded protocol */ 664 ecore_init_qm_offload_pq(p_hwfn); 665 666 /* low latency pq */ 667 ecore_init_qm_low_latency_pq(p_hwfn); 668 669 /* done sharing vports */ 670 ecore_init_qm_advance_vport(p_hwfn); 671 672 /* pqs for vfs */ 673 ecore_init_qm_vf_pqs(p_hwfn); 674 } 675 676 /* compare values of getters against resources amounts */ 677 static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn) 678 { 679 if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) { 680 DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 681 return ECORE_INVAL; 682 } 683 684 if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) { 685 DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 686 return ECORE_INVAL; 687 } 688 689 return ECORE_SUCCESS; 690 } 691 692 /* 693 * Function for verbose printing of the qm initialization results 694 */ 695 static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) 696 { 697 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 698 struct init_qm_vport_params *vport; 699 struct init_qm_port_params *port; 700 struct init_qm_pq_params *pq; 701 int i, tc; 702 703 /* top level params */ 704 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 705 qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->offload_pq, qm_info->pure_ack_pq); 706 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", 707 qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port); 708 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 709 qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn)); 710 711 /* port table */ 712 for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) { 713 port = &(qm_info->qm_port_params[i]); 714 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", 715 i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved); 716 } 717 718 /* vport table */ 719 for (i = 0; i < qm_info->num_vports; i++) { 720 vport = &(qm_info->qm_vport_params[i]); 721 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", 722 qm_info->start_vport + i, vport->vport_rl, vport->vport_wfq); 723 for (tc = 0; tc < NUM_OF_TCS; tc++) 724 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]); 725 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n"); 726 } 727 728 /* pq table */ 729 for (i = 0; i < qm_info->num_pqs; i++) { 730 pq = &(qm_info->qm_pq_params[i]); 731 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", 732 qm_info->start_pq + i, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid); 733 } 734 } 735 736 static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn) 737 { 738 /* reset params required for init run */ 739 ecore_init_qm_reset_params(p_hwfn); 740 741 /* init QM top level params */ 742 ecore_init_qm_params(p_hwfn); 743 744 /* init QM port params */ 745 ecore_init_qm_port_params(p_hwfn); 746 747 /* init QM vport params */ 748 ecore_init_qm_vport_params(p_hwfn); 749 750 /* init QM physical queue params */ 751 ecore_init_qm_pq_params(p_hwfn); 752 753 /* display all that init */ 754 ecore_dp_init_qm_params(p_hwfn); 755 } 756 757 /* This function reconfigures the QM pf on the fly. 758 * For this purpose we: 759 * 1. reconfigure the QM database 760 * 2. set new values to runtime array 761 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 762 * 4. activate init tool in QM_PF stage 763 * 5. send an sdm_qm_cmd through rbc interface to release the QM 764 */ 765 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, 766 struct ecore_ptt *p_ptt) 767 { 768 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 769 bool b_rc; 770 enum _ecore_status_t rc; 771 772 /* initialize ecore's qm data structure */ 773 ecore_init_qm_info(p_hwfn); 774 775 /* stop PF's qm queues */ 776 OSAL_SPIN_LOCK(&qm_lock); 777 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 778 qm_info->start_pq, qm_info->num_pqs); 779 OSAL_SPIN_UNLOCK(&qm_lock); 780 if (!b_rc) 781 return ECORE_INVAL; 782 783 /* clear the QM_PF runtime phase leftovers from previous init */ 784 ecore_init_clear_rt_data(p_hwfn); 785 786 /* prepare QM portion of runtime array */ 787 ecore_qm_init_pf(p_hwfn); 788 789 /* activate init tool on runtime array */ 790 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 791 p_hwfn->hw_info.hw_mode); 792 if (rc != ECORE_SUCCESS) 793 return rc; 794 795 /* start PF's qm queues */ 796 OSAL_SPIN_LOCK(&qm_lock); 797 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 798 qm_info->start_pq, qm_info->num_pqs); 799 OSAL_SPIN_UNLOCK(&qm_lock); 800 if (!b_rc) 801 return ECORE_INVAL; 802 803 return ECORE_SUCCESS; 804 } 805 806 static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn) 807 { 808 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 809 enum _ecore_status_t rc; 810 811 rc = ecore_init_qm_sanity(p_hwfn); 812 if (rc != ECORE_SUCCESS) 813 goto alloc_err; 814 815 qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 816 sizeof(struct init_qm_pq_params) * 817 ecore_init_qm_get_num_pqs(p_hwfn)); 818 if (!qm_info->qm_pq_params) 819 goto alloc_err; 820 821 qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 822 sizeof(struct init_qm_vport_params) * 823 ecore_init_qm_get_num_vports(p_hwfn)); 824 if (!qm_info->qm_vport_params) 825 goto alloc_err; 826 827 qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 828 sizeof(struct init_qm_port_params) * 829 p_hwfn->p_dev->num_ports_in_engine); 830 if (!qm_info->qm_port_params) 831 goto alloc_err; 832 833 qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 834 sizeof(struct ecore_wfq_data) * 835 ecore_init_qm_get_num_vports(p_hwfn)); 836 if (!qm_info->wfq_data) 837 goto alloc_err; 838 839 return ECORE_SUCCESS; 840 841 alloc_err: 842 DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n"); 843 ecore_qm_info_free(p_hwfn); 844 return ECORE_NOMEM; 845 } 846 /******************** End QM initialization ***************/ 847 848 enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) 849 { 850 enum _ecore_status_t rc = ECORE_SUCCESS; 851 u32 rdma_tasks, excess_tasks; 852 u32 line_count; 853 int i; 854 855 if (IS_VF(p_dev)) { 856 for_each_hwfn(p_dev, i) { 857 rc = ecore_l2_alloc(&p_dev->hwfns[i]); 858 if (rc != ECORE_SUCCESS) 859 return rc; 860 } 861 return rc; 862 } 863 864 p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL, 865 sizeof(*p_dev->fw_data)); 866 if (!p_dev->fw_data) 867 return ECORE_NOMEM; 868 869 for_each_hwfn(p_dev, i) { 870 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 871 u32 n_eqes, num_cons; 872 873 /* First allocate the context manager structure */ 874 rc = ecore_cxt_mngr_alloc(p_hwfn); 875 if (rc) 876 goto alloc_err; 877 878 /* Set the HW cid/tid numbers (in the contest manager) 879 * Must be done prior to any further computations. 880 */ 881 rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); 882 if (rc) 883 goto alloc_err; 884 885 rc = ecore_alloc_qm_data(p_hwfn); 886 if (rc) 887 goto alloc_err; 888 889 /* init qm info */ 890 ecore_init_qm_info(p_hwfn); 891 892 /* Compute the ILT client partition */ 893 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 894 if (rc) { 895 DP_NOTICE(p_hwfn, false, "too many ILT lines; re-computing with less lines\n"); 896 /* In case there are not enough ILT lines we reduce the 897 * number of RDMA tasks and re-compute. 898 */ 899 excess_tasks = ecore_cxt_cfg_ilt_compute_excess( 900 p_hwfn, line_count); 901 if (!excess_tasks) 902 goto alloc_err; 903 904 rdma_tasks = RDMA_MAX_TIDS - excess_tasks; 905 rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks); 906 if (rc) 907 goto alloc_err; 908 909 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 910 if (rc) { 911 DP_ERR(p_hwfn, "failed ILT compute. Requested too many lines: %u\n", 912 line_count); 913 914 goto alloc_err; 915 } 916 } 917 918 /* CID map / ILT shadow table / T2 919 * The talbes sizes are determined by the computations above 920 */ 921 rc = ecore_cxt_tables_alloc(p_hwfn); 922 if (rc) 923 goto alloc_err; 924 925 /* SPQ, must follow ILT because initializes SPQ context */ 926 rc = ecore_spq_alloc(p_hwfn); 927 if (rc) 928 goto alloc_err; 929 930 /* SP status block allocation */ 931 p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, 932 RESERVED_PTT_DPC); 933 934 rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 935 if (rc) 936 goto alloc_err; 937 938 rc = ecore_iov_alloc(p_hwfn); 939 if (rc) 940 goto alloc_err; 941 942 /* EQ */ 943 n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain); 944 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 945 /* Calculate the EQ size 946 * --------------------- 947 * Each ICID may generate up to one event at a time i.e. 948 * the event must be handled/cleared before a new one 949 * can be generated. We calculate the sum of events per 950 * protocol and create an EQ deep enough to handle the 951 * worst case: 952 * - Core - according to SPQ. 953 * - RoCE - per QP there are a couple of ICIDs, one 954 * responder and one requester, each can 955 * generate an EQE => n_eqes_qp = 2 * n_qp. 956 * Each CQ can generate an EQE. There are 2 CQs 957 * per QP => n_eqes_cq = 2 * n_qp. 958 * Hence the RoCE total is 4 * n_qp or 959 * 2 * num_cons. 960 * - ENet - There can be up to two events per VF. One 961 * for VF-PF channel and another for VF FLR 962 * initial cleanup. The number of VFs is 963 * bounded by MAX_NUM_VFS_BB, and is much 964 * smaller than RoCE's so we avoid exact 965 * calculation. 966 */ 967 if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) { 968 num_cons = ecore_cxt_get_proto_cid_count( 969 p_hwfn, PROTOCOLID_ROCE, OSAL_NULL); 970 num_cons *= 2; 971 } else { 972 num_cons = ecore_cxt_get_proto_cid_count( 973 p_hwfn, PROTOCOLID_IWARP, 974 OSAL_NULL); 975 } 976 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 977 } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 978 num_cons = ecore_cxt_get_proto_cid_count( 979 p_hwfn, PROTOCOLID_ISCSI, OSAL_NULL); 980 n_eqes += 2 * num_cons; 981 } 982 983 if (n_eqes > 0xFFFF) { 984 DP_ERR(p_hwfn, 985 "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n", 986 n_eqes, 0xFFFF); 987 goto alloc_no_mem; 988 } 989 990 rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes); 991 if (rc) 992 goto alloc_err; 993 994 rc = ecore_consq_alloc(p_hwfn); 995 if (rc) 996 goto alloc_err; 997 998 rc = ecore_l2_alloc(p_hwfn); 999 if (rc != ECORE_SUCCESS) 1000 goto alloc_err; 1001 1002 #ifdef CONFIG_ECORE_LL2 1003 if (p_hwfn->using_ll2) { 1004 rc = ecore_ll2_alloc(p_hwfn); 1005 if (rc) 1006 goto alloc_err; 1007 } 1008 #endif 1009 #ifdef CONFIG_ECORE_FCOE 1010 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { 1011 rc = ecore_fcoe_alloc(p_hwfn); 1012 if (rc) 1013 goto alloc_err; 1014 } 1015 #endif 1016 #ifdef CONFIG_ECORE_ISCSI 1017 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1018 rc = ecore_iscsi_alloc(p_hwfn); 1019 if (rc) 1020 goto alloc_err; 1021 rc = ecore_ooo_alloc(p_hwfn); 1022 if (rc) 1023 goto alloc_err; 1024 } 1025 #endif 1026 #ifdef CONFIG_ECORE_ROCE 1027 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 1028 rc = ecore_rdma_info_alloc(p_hwfn); 1029 if (rc) 1030 goto alloc_err; 1031 } 1032 #endif 1033 1034 /* DMA info initialization */ 1035 rc = ecore_dmae_info_alloc(p_hwfn); 1036 if (rc) { 1037 DP_NOTICE(p_hwfn, true, 1038 "Failed to allocate memory for dmae_info structure\n"); 1039 goto alloc_err; 1040 } 1041 1042 /* DCBX initialization */ 1043 rc = ecore_dcbx_info_alloc(p_hwfn); 1044 if (rc) { 1045 DP_NOTICE(p_hwfn, true, 1046 "Failed to allocate memory for dcbx structure\n"); 1047 goto alloc_err; 1048 } 1049 } 1050 1051 p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, 1052 sizeof(*p_dev->reset_stats)); 1053 if (!p_dev->reset_stats) { 1054 DP_NOTICE(p_dev, true, 1055 "Failed to allocate reset statistics\n"); 1056 goto alloc_no_mem; 1057 } 1058 1059 return ECORE_SUCCESS; 1060 1061 alloc_no_mem: 1062 rc = ECORE_NOMEM; 1063 alloc_err: 1064 ecore_resc_free(p_dev); 1065 return rc; 1066 } 1067 1068 void ecore_resc_setup(struct ecore_dev *p_dev) 1069 { 1070 int i; 1071 1072 if (IS_VF(p_dev)) { 1073 for_each_hwfn(p_dev, i) 1074 ecore_l2_setup(&p_dev->hwfns[i]); 1075 return; 1076 } 1077 1078 for_each_hwfn(p_dev, i) { 1079 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1080 1081 ecore_cxt_mngr_setup(p_hwfn); 1082 ecore_spq_setup(p_hwfn); 1083 ecore_eq_setup(p_hwfn); 1084 ecore_consq_setup(p_hwfn); 1085 1086 /* Read shadow of current MFW mailbox */ 1087 ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 1088 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 1089 p_hwfn->mcp_info->mfw_mb_cur, 1090 p_hwfn->mcp_info->mfw_mb_length); 1091 1092 ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt); 1093 1094 ecore_l2_setup(p_hwfn); 1095 ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt); 1096 #ifdef CONFIG_ECORE_LL2 1097 if (p_hwfn->using_ll2) 1098 ecore_ll2_setup(p_hwfn); 1099 #endif 1100 #ifdef CONFIG_ECORE_FCOE 1101 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 1102 ecore_fcoe_setup(p_hwfn); 1103 #endif 1104 #ifdef CONFIG_ECORE_ISCSI 1105 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1106 ecore_iscsi_setup(p_hwfn); 1107 ecore_ooo_setup(p_hwfn); 1108 } 1109 #endif 1110 } 1111 } 1112 1113 #define FINAL_CLEANUP_POLL_CNT (100) 1114 #define FINAL_CLEANUP_POLL_TIME (10) 1115 enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, 1116 struct ecore_ptt *p_ptt, 1117 u16 id, bool is_vf) 1118 { 1119 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 1120 enum _ecore_status_t rc = ECORE_TIMEOUT; 1121 1122 #ifndef ASIC_ONLY 1123 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) || 1124 CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1125 DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n"); 1126 return ECORE_SUCCESS; 1127 } 1128 #endif 1129 1130 addr = GTT_BAR0_MAP_REG_USDM_RAM + 1131 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 1132 1133 if (is_vf) 1134 id += 0x10; 1135 1136 command |= X_FINAL_CLEANUP_AGG_INT << 1137 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 1138 command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 1139 command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 1140 command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; 1141 1142 /* Make sure notification is not set before initiating final cleanup */ 1143 if (REG_RD(p_hwfn, addr)) { 1144 DP_NOTICE(p_hwfn, false, 1145 "Unexpected; Found final cleanup notification before initiating final cleanup\n"); 1146 REG_WR(p_hwfn, addr, 0); 1147 } 1148 1149 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1150 "Sending final cleanup for PFVF[%d] [Command %08x\n]", 1151 id, command); 1152 1153 ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 1154 1155 /* Poll until completion */ 1156 while (!REG_RD(p_hwfn, addr) && count--) 1157 OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME); 1158 1159 if (REG_RD(p_hwfn, addr)) 1160 rc = ECORE_SUCCESS; 1161 else 1162 DP_NOTICE(p_hwfn, true, "Failed to receive FW final cleanup notification\n"); 1163 1164 /* Cleanup afterwards */ 1165 REG_WR(p_hwfn, addr, 0); 1166 1167 return rc; 1168 } 1169 1170 static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) 1171 { 1172 int hw_mode = 0; 1173 1174 if (ECORE_IS_BB_B0(p_hwfn->p_dev)) { 1175 hw_mode |= 1 << MODE_BB; 1176 } else if (ECORE_IS_AH(p_hwfn->p_dev)) { 1177 hw_mode |= 1 << MODE_K2; 1178 } else { 1179 DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n", 1180 p_hwfn->p_dev->type); 1181 return ECORE_INVAL; 1182 } 1183 1184 /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE*/ 1185 switch (p_hwfn->p_dev->num_ports_in_engine) { 1186 case 1: 1187 hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 1188 break; 1189 case 2: 1190 hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 1191 break; 1192 case 4: 1193 hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 1194 break; 1195 default: 1196 DP_NOTICE(p_hwfn, true, "num_ports_in_engine = %d not supported\n", 1197 p_hwfn->p_dev->num_ports_in_engine); 1198 return ECORE_INVAL; 1199 } 1200 1201 switch (p_hwfn->p_dev->mf_mode) { 1202 case ECORE_MF_DEFAULT: 1203 case ECORE_MF_NPAR: 1204 hw_mode |= 1 << MODE_MF_SI; 1205 break; 1206 case ECORE_MF_OVLAN: 1207 hw_mode |= 1 << MODE_MF_SD; 1208 break; 1209 default: 1210 DP_NOTICE(p_hwfn, true, "Unsupported MF mode, init as DEFAULT\n"); 1211 hw_mode |= 1 << MODE_MF_SI; 1212 } 1213 1214 #ifndef ASIC_ONLY 1215 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1216 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1217 hw_mode |= 1 << MODE_FPGA; 1218 } else { 1219 if (p_hwfn->p_dev->b_is_emul_full) 1220 hw_mode |= 1 << MODE_EMUL_FULL; 1221 else 1222 hw_mode |= 1 << MODE_EMUL_REDUCED; 1223 } 1224 } else 1225 #endif 1226 hw_mode |= 1 << MODE_ASIC; 1227 1228 if (p_hwfn->p_dev->num_hwfns > 1) 1229 hw_mode |= 1 << MODE_100G; 1230 1231 p_hwfn->hw_info.hw_mode = hw_mode; 1232 1233 DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP), 1234 "Configuring function for hw_mode: 0x%08x\n", 1235 p_hwfn->hw_info.hw_mode); 1236 1237 return ECORE_SUCCESS; 1238 } 1239 1240 #ifndef ASIC_ONLY 1241 /* MFW-replacement initializations for non-ASIC */ 1242 static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn, 1243 struct ecore_ptt *p_ptt) 1244 { 1245 struct ecore_dev *p_dev = p_hwfn->p_dev; 1246 u32 pl_hv = 1; 1247 int i; 1248 1249 if (CHIP_REV_IS_EMUL(p_dev)) { 1250 if (ECORE_IS_AH(p_dev)) 1251 pl_hv |= 0x600; 1252 else if (ECORE_IS_E5(p_dev)) 1253 ECORE_E5_MISSING_CODE; 1254 } 1255 1256 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv); 1257 1258 if (CHIP_REV_IS_EMUL(p_dev) && 1259 (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev))) 1260 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5, 1261 0x3ffffff); 1262 1263 /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */ 1264 /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */ 1265 if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev)) 1266 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4); 1267 1268 if (CHIP_REV_IS_EMUL(p_dev)) { 1269 if (ECORE_IS_AH(p_dev)) { 1270 /* 2 for 4-port, 1 for 2-port, 0 for 1-port */ 1271 ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE, 1272 (p_dev->num_ports_in_engine >> 1)); 1273 1274 ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN, 1275 p_dev->num_ports_in_engine == 4 ? 0 : 3); 1276 } else if (ECORE_IS_E5(p_dev)) { 1277 ECORE_E5_MISSING_CODE; 1278 } 1279 } 1280 1281 /* Poll on RBC */ 1282 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1); 1283 for (i = 0; i < 100; i++) { 1284 OSAL_UDELAY(50); 1285 if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1) 1286 break; 1287 } 1288 if (i == 100) 1289 DP_NOTICE(p_hwfn, true, "RBC done failed to complete in PSWRQ2\n"); 1290 1291 return ECORE_SUCCESS; 1292 } 1293 #endif 1294 1295 /* Init run time data for all PFs and their VFs on an engine. 1296 * TBD - for VFs - Once we have parent PF info for each VF in 1297 * shmem available as CAU requires knowledge of parent PF for each VF. 1298 */ 1299 static void ecore_init_cau_rt_data(struct ecore_dev *p_dev) 1300 { 1301 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 1302 int i, igu_sb_id; 1303 1304 for_each_hwfn(p_dev, i) { 1305 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1306 struct ecore_igu_info *p_igu_info; 1307 struct ecore_igu_block *p_block; 1308 struct cau_sb_entry sb_entry; 1309 1310 p_igu_info = p_hwfn->hw_info.p_igu_info; 1311 1312 for (igu_sb_id = 0; 1313 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev); 1314 igu_sb_id++) { 1315 p_block = &p_igu_info->entry[igu_sb_id]; 1316 1317 if (!p_block->is_pf) 1318 continue; 1319 1320 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, 1321 p_block->function_id, 1322 0, 0); 1323 STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, 1324 sb_entry); 1325 } 1326 } 1327 } 1328 1329 static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn, 1330 struct ecore_ptt *p_ptt) 1331 { 1332 u32 val, wr_mbs, cache_line_size; 1333 1334 val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 1335 switch (val) { 1336 case 0: 1337 wr_mbs = 128; 1338 break; 1339 case 1: 1340 wr_mbs = 256; 1341 break; 1342 case 2: 1343 wr_mbs = 512; 1344 break; 1345 default: 1346 DP_INFO(p_hwfn, 1347 "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1348 val); 1349 return; 1350 } 1351 1352 cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs); 1353 switch (cache_line_size) { 1354 case 32: 1355 val = 0; 1356 break; 1357 case 64: 1358 val = 1; 1359 break; 1360 case 128: 1361 val = 2; 1362 break; 1363 case 256: 1364 val = 3; 1365 break; 1366 default: 1367 DP_INFO(p_hwfn, 1368 "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1369 cache_line_size); 1370 } 1371 1372 if (OSAL_CACHE_LINE_SIZE > wr_mbs) 1373 DP_INFO(p_hwfn, 1374 "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 1375 OSAL_CACHE_LINE_SIZE, wr_mbs); 1376 1377 STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 1378 } 1379 1380 static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, 1381 struct ecore_ptt *p_ptt, 1382 int hw_mode) 1383 { 1384 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1385 struct ecore_dev *p_dev = p_hwfn->p_dev; 1386 u8 vf_id, max_num_vfs; 1387 u16 num_pfs, pf_id; 1388 u32 concrete_fid; 1389 enum _ecore_status_t rc = ECORE_SUCCESS; 1390 1391 ecore_init_cau_rt_data(p_dev); 1392 1393 /* Program GTT windows */ 1394 ecore_gtt_init(p_hwfn); 1395 1396 #ifndef ASIC_ONLY 1397 if (CHIP_REV_IS_EMUL(p_dev)) { 1398 rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt); 1399 if (rc != ECORE_SUCCESS) 1400 return rc; 1401 } 1402 #endif 1403 1404 if (p_hwfn->mcp_info) { 1405 if (p_hwfn->mcp_info->func_info.bandwidth_max) 1406 qm_info->pf_rl_en = 1; 1407 if (p_hwfn->mcp_info->func_info.bandwidth_min) 1408 qm_info->pf_wfq_en = 1; 1409 } 1410 1411 ecore_qm_common_rt_init(p_hwfn, 1412 p_dev->num_ports_in_engine, 1413 qm_info->max_phys_tcs_per_port, 1414 qm_info->pf_rl_en, qm_info->pf_wfq_en, 1415 qm_info->vport_rl_en, qm_info->vport_wfq_en, 1416 qm_info->qm_port_params); 1417 1418 ecore_cxt_hw_init_common(p_hwfn); 1419 1420 ecore_init_cache_line_size(p_hwfn, p_ptt); 1421 1422 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); 1423 if (rc != ECORE_SUCCESS) 1424 return rc; 1425 1426 /* @@TBD MichalK - should add VALIDATE_VFID to init tool... 1427 * need to decide with which value, maybe runtime 1428 */ 1429 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 1430 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 1431 1432 if (ECORE_IS_BB(p_dev)) { 1433 /* Workaround clears ROCE search for all functions to prevent 1434 * involving non intialized function in processing ROCE packet. 1435 */ 1436 num_pfs = NUM_OF_ENG_PFS(p_dev); 1437 for (pf_id = 0; pf_id < num_pfs; pf_id++) { 1438 ecore_fid_pretend(p_hwfn, p_ptt, pf_id); 1439 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1440 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1441 } 1442 /* pretend to original PF */ 1443 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1444 } 1445 1446 /* Workaround for avoiding CCFC execution error when getting packets 1447 * with CRC errors, and allowing instead the invoking of the FW error 1448 * handler. 1449 * This is not done inside the init tool since it currently can't 1450 * perform a pretending to VFs. 1451 */ 1452 max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; 1453 for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { 1454 concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id); 1455 ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); 1456 ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 1457 ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); 1458 ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); 1459 ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); 1460 } 1461 /* pretend to original PF */ 1462 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1463 1464 return rc; 1465 } 1466 1467 #ifndef ASIC_ONLY 1468 #define MISC_REG_RESET_REG_2_XMAC_BIT (1<<4) 1469 #define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1<<5) 1470 1471 #define PMEG_IF_BYTE_COUNT 8 1472 1473 static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn, 1474 struct ecore_ptt *p_ptt, 1475 u32 addr, 1476 u64 data, 1477 u8 reg_type, 1478 u8 port) 1479 { 1480 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 1481 "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n", 1482 ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) | 1483 (8 << PMEG_IF_BYTE_COUNT), 1484 (reg_type << 25) | (addr << 8) | port, 1485 (u32)((data >> 32) & 0xffffffff), 1486 (u32)(data & 0xffffffff)); 1487 1488 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB, 1489 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) & 1490 0xffff00fe) | 1491 (8 << PMEG_IF_BYTE_COUNT)); 1492 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB, 1493 (reg_type << 25) | (addr << 8) | port); 1494 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff); 1495 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, 1496 (data >> 32) & 0xffffffff); 1497 } 1498 1499 #define XLPORT_MODE_REG (0x20a) 1500 #define XLPORT_MAC_CONTROL (0x210) 1501 #define XLPORT_FLOW_CONTROL_CONFIG (0x207) 1502 #define XLPORT_ENABLE_REG (0x20b) 1503 1504 #define XLMAC_CTRL (0x600) 1505 #define XLMAC_MODE (0x601) 1506 #define XLMAC_RX_MAX_SIZE (0x608) 1507 #define XLMAC_TX_CTRL (0x604) 1508 #define XLMAC_PAUSE_CTRL (0x60d) 1509 #define XLMAC_PFC_CTRL (0x60e) 1510 1511 static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn, 1512 struct ecore_ptt *p_ptt) 1513 { 1514 u8 loopback = 0, port = p_hwfn->port_id * 2; 1515 1516 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 1517 1518 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, 1519 (0x4 << 4) | 0x4, 1, port); /* XLPORT MAC MODE */ /* 0 Quad, 4 Single... */ 1520 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port); 1521 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 1522 0x40, 0, port); /*XLMAC: SOFT RESET */ 1523 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 1524 0x40, 0, port); /*XLMAC: Port Speed >= 10Gbps */ 1525 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 1526 0x3fff, 0, port); /* XLMAC: Max Size */ 1527 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL, 1528 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38), 1529 0, port); 1530 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 1531 0x7c000, 0, port); 1532 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL, 1533 0x30ffffc000ULL, 0, port); 1534 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 1535 0, port); /* XLMAC: TX_EN, RX_EN */ 1536 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2), 1537 0, port); /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */ 1538 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1539 1, 0, port); /* Enabled Parallel PFC interface */ 1540 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 1541 0xf, 1, port); /* XLPORT port enable */ 1542 } 1543 1544 static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn, 1545 struct ecore_ptt *p_ptt) 1546 { 1547 u8 port = p_hwfn->port_id; 1548 u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE; 1549 1550 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 1551 1552 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2), 1553 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) | 1554 (port << 1555 CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) | 1556 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT)); 1557 1558 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5, 1559 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT); 1560 1561 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5, 1562 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT); 1563 1564 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5, 1565 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT); 1566 1567 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5, 1568 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT); 1569 1570 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5, 1571 (0xA << 1572 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) | 1573 (8 << 1574 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT)); 1575 1576 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5, 1577 0xa853); 1578 } 1579 1580 static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn, 1581 struct ecore_ptt *p_ptt) 1582 { 1583 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) 1584 ecore_emul_link_init_ah_e5(p_hwfn, p_ptt); 1585 else /* BB */ 1586 ecore_emul_link_init_bb(p_hwfn, p_ptt); 1587 1588 return; 1589 } 1590 1591 static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn, 1592 struct ecore_ptt *p_ptt, u8 port) 1593 { 1594 int port_offset = port ? 0x800 : 0; 1595 u32 xmac_rxctrl = 0; 1596 1597 /* Reset of XMAC */ 1598 /* FIXME: move to common start */ 1599 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2*sizeof(u32), 1600 MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */ 1601 OSAL_MSLEEP(1); 1602 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 1603 MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */ 1604 1605 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1); 1606 1607 /* Set the number of ports on the Warp Core to 10G */ 1608 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3); 1609 1610 /* Soft reset of XMAC */ 1611 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), 1612 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 1613 OSAL_MSLEEP(1); 1614 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 1615 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 1616 1617 /* FIXME: move to common end */ 1618 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 1619 ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20); 1620 1621 /* Set Max packet size: initialize XMAC block register for port 0 */ 1622 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710); 1623 1624 /* CRC append for Tx packets: init XMAC block register for port 1 */ 1625 ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800); 1626 1627 /* Enable TX and RX: initialize XMAC block register for port 1 */ 1628 ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset, 1629 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB); 1630 xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, 1631 XMAC_REG_RX_CTRL_BB + port_offset); 1632 xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB; 1633 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl); 1634 } 1635 #endif 1636 1637 static enum _ecore_status_t 1638 ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn, 1639 struct ecore_ptt *p_ptt, 1640 u32 pwm_region_size, 1641 u32 n_cpus) 1642 { 1643 u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size; 1644 u32 dpi_bit_shift, dpi_count; 1645 u32 min_dpis; 1646 1647 /* Calculate DPI size 1648 * ------------------ 1649 * The PWM region contains Doorbell Pages. The first is reserverd for 1650 * the kernel for, e.g, L2. The others are free to be used by non- 1651 * trusted applications, typically from user space. Each page, called a 1652 * doorbell page is sectioned into windows that allow doorbells to be 1653 * issued in parallel by the kernel/application. The size of such a 1654 * window (a.k.a. WID) is 1kB. 1655 * Summary: 1656 * 1kB WID x N WIDS = DPI page size 1657 * DPI page size x N DPIs = PWM region size 1658 * Notes: 1659 * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE 1660 * in order to ensure that two applications won't share the same page. 1661 * It also must contain at least one WID per CPU to allow parallelism. 1662 * It also must be a power of 2, since it is stored as a bit shift. 1663 * 1664 * The DPI page size is stored in a register as 'dpi_bit_shift' so that 1665 * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096 1666 * containing 4 WIDs. 1667 */ 1668 dpi_page_size_1 = ECORE_WID_SIZE * n_cpus; 1669 dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE); 1670 dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2); 1671 dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size); 1672 dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096); 1673 1674 dpi_count = pwm_region_size / dpi_page_size; 1675 1676 min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; 1677 min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis); 1678 1679 /* Update hwfn */ 1680 p_hwfn->dpi_size = dpi_page_size; 1681 p_hwfn->dpi_count = dpi_count; 1682 1683 /* Update registers */ 1684 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); 1685 1686 if (dpi_count < min_dpis) 1687 return ECORE_NORESOURCES; 1688 1689 return ECORE_SUCCESS; 1690 } 1691 1692 enum ECORE_ROCE_EDPM_MODE { 1693 ECORE_ROCE_EDPM_MODE_ENABLE = 0, 1694 ECORE_ROCE_EDPM_MODE_FORCE_ON = 1, 1695 ECORE_ROCE_EDPM_MODE_DISABLE = 2, 1696 }; 1697 1698 static enum _ecore_status_t 1699 ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn, 1700 struct ecore_ptt *p_ptt) 1701 { 1702 u32 pwm_regsize, norm_regsize; 1703 u32 non_pwm_conn, min_addr_reg1; 1704 u32 db_bar_size, n_cpus = 1; 1705 u32 roce_edpm_mode; 1706 u32 pf_dems_shift; 1707 enum _ecore_status_t rc = ECORE_SUCCESS; 1708 u8 cond; 1709 1710 db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1); 1711 if (p_hwfn->p_dev->num_hwfns > 1) 1712 db_bar_size /= 2; 1713 1714 /* Calculate doorbell regions 1715 * ----------------------------------- 1716 * The doorbell BAR is made of two regions. The first is called normal 1717 * region and the second is called PWM region. In the normal region 1718 * each ICID has its own set of addresses so that writing to that 1719 * specific address identifies the ICID. In the Process Window Mode 1720 * region the ICID is given in the data written to the doorbell. The 1721 * above per PF register denotes the offset in the doorbell BAR in which 1722 * the PWM region begins. 1723 * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per 1724 * non-PWM connection. The calculation below computes the total non-PWM 1725 * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is 1726 * in units of 4,096 bytes. 1727 */ 1728 non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 1729 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 1730 OSAL_NULL) + 1731 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 1732 OSAL_NULL); 1733 norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096); 1734 min_addr_reg1 = norm_regsize / 4096; 1735 pwm_regsize = db_bar_size - norm_regsize; 1736 1737 /* Check that the normal and PWM sizes are valid */ 1738 if (db_bar_size < norm_regsize) { 1739 DP_ERR(p_hwfn->p_dev, "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", db_bar_size, norm_regsize); 1740 return ECORE_NORESOURCES; 1741 } 1742 if (pwm_regsize < ECORE_MIN_PWM_REGION) { 1743 DP_ERR(p_hwfn->p_dev, "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size, norm_regsize); 1744 return ECORE_NORESOURCES; 1745 } 1746 1747 /* Calculate number of DPIs */ 1748 roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; 1749 if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) || 1750 ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) { 1751 /* Either EDPM is mandatory, or we are attempting to allocate a 1752 * WID per CPU. 1753 */ 1754 n_cpus = OSAL_NUM_ACTIVE_CPU(); 1755 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1756 } 1757 1758 cond = ((rc != ECORE_SUCCESS) && 1759 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) || 1760 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE); 1761 if (cond || p_hwfn->dcbx_no_edpm) { 1762 /* Either EDPM is disabled from user configuration, or it is 1763 * disabled via DCBx, or it is not mandatory and we failed to 1764 * allocated a WID per CPU. 1765 */ 1766 n_cpus = 1; 1767 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1768 1769 #ifdef CONFIG_ECORE_ROCE 1770 /* If we entered this flow due to DCBX then the DPM register is 1771 * already configured. 1772 */ 1773 if (cond) 1774 ecore_rdma_dpm_bar(p_hwfn, p_ptt); 1775 #endif 1776 } 1777 1778 p_hwfn->wid_count = (u16)n_cpus; 1779 1780 DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", 1781 norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count, 1782 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 1783 "disabled" : "enabled"); 1784 1785 /* Check return codes from above calls */ 1786 if (rc != ECORE_SUCCESS) { 1787 DP_ERR(p_hwfn, 1788 "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d. You can try reducing this down to %d via user configuration n_dpi or by disabling EDPM via user configuration roce_edpm\n", 1789 p_hwfn->dpi_count, 1790 p_hwfn->pf_params.rdma_pf_params.min_dpis, 1791 ECORE_MIN_DPIS); 1792 return ECORE_NORESOURCES; 1793 } 1794 1795 /* Update hwfn */ 1796 p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to 1797 * calculate the doorbell 1798 * address 1799 */ 1800 1801 /* Update registers */ 1802 /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ 1803 pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4); 1804 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 1805 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 1806 1807 return ECORE_SUCCESS; 1808 } 1809 1810 static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn, 1811 struct ecore_ptt *p_ptt, 1812 int hw_mode) 1813 { 1814 enum _ecore_status_t rc = ECORE_SUCCESS; 1815 1816 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, 1817 hw_mode); 1818 if (rc != ECORE_SUCCESS) 1819 return rc; 1820 #if 0 1821 /* FW 8.10.5.0 requires us to configure PF_VECTOR and DUALMODE in LLH. 1822 * This would hopefully be moved to MFW. 1823 */ 1824 if (IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) { 1825 u8 pf_id = 0; 1826 1827 if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) == 1828 ECORE_SUCCESS) { 1829 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 1830 "PF[%08x] is first eth on engine\n", 1831 pf_id); 1832 1833 /* We should have configured BIT for ppfid, i.e., the 1834 * relative function number in the port. But there's a 1835 * bug in LLH in BB where the ppfid is actually engine 1836 * based, so we need to take this into account. 1837 */ 1838 if (!ECORE_IS_BB(p_hwfn->p_dev)) 1839 pf_id /= p_hwfn->p_dev->num_ports_in_engine; 1840 1841 ecore_wr(p_hwfn, p_ptt, 1842 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id); 1843 } 1844 1845 /* Take the protocol-based hit vector if there is a hit, 1846 * otherwise take the other vector. 1847 */ 1848 ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2); 1849 } 1850 #endif 1851 #ifndef ASIC_ONLY 1852 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) 1853 return ECORE_SUCCESS; 1854 1855 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1856 if (ECORE_IS_AH(p_hwfn->p_dev)) 1857 return ECORE_SUCCESS; 1858 else if (ECORE_IS_BB(p_hwfn->p_dev)) 1859 ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id); 1860 else /* E5 */ 1861 ECORE_E5_MISSING_CODE; 1862 } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 1863 if (p_hwfn->p_dev->num_hwfns > 1) { 1864 /* Activate OPTE in CMT */ 1865 u32 val; 1866 1867 val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV); 1868 val |= 0x10; 1869 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val); 1870 ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1); 1871 ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1); 1872 ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1); 1873 ecore_wr(p_hwfn, p_ptt, 1874 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1); 1875 ecore_wr(p_hwfn, p_ptt, 1876 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555); 1877 ecore_wr(p_hwfn, p_ptt, 1878 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4, 1879 0x55555555); 1880 } 1881 1882 ecore_emul_link_init(p_hwfn, p_ptt); 1883 } else { 1884 DP_INFO(p_hwfn->p_dev, "link is not being configured\n"); 1885 } 1886 #endif 1887 1888 return rc; 1889 } 1890 1891 static enum _ecore_status_t ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, 1892 struct ecore_ptt *p_ptt, 1893 struct ecore_tunnel_info *p_tunn, 1894 int hw_mode, 1895 bool b_hw_start, 1896 enum ecore_int_mode int_mode, 1897 bool allow_npar_tx_switch) 1898 { 1899 u8 rel_pf_id = p_hwfn->rel_pf_id; 1900 u32 prs_reg; 1901 enum _ecore_status_t rc = ECORE_SUCCESS; 1902 u16 ctrl; 1903 int pos; 1904 1905 if (p_hwfn->mcp_info) { 1906 struct ecore_mcp_function_info *p_info; 1907 1908 p_info = &p_hwfn->mcp_info->func_info; 1909 if (p_info->bandwidth_min) 1910 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 1911 1912 /* Update rate limit once we'll actually have a link */ 1913 p_hwfn->qm_info.pf_rl = 100000; 1914 } 1915 ecore_cxt_hw_init_pf(p_hwfn); 1916 1917 ecore_int_igu_init_rt(p_hwfn); 1918 1919 /* Set VLAN in NIG if needed */ 1920 if (hw_mode & (1 << MODE_MF_SD)) { 1921 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n"); 1922 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 1923 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 1924 p_hwfn->hw_info.ovlan); 1925 } 1926 1927 /* Enable classification by MAC if needed */ 1928 if (hw_mode & (1 << MODE_MF_SI)) { 1929 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n"); 1930 STORE_RT_REG(p_hwfn, 1931 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); 1932 } 1933 1934 /* Protocl Configuration - @@@TBD - should we set 0 otherwise?*/ 1935 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 1936 (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0); 1937 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 1938 (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0); 1939 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 1940 1941 /* perform debug configuration when chip is out of reset */ 1942 OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); 1943 1944 /* Cleanup chip from previous driver if such remains exist */ 1945 rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); 1946 if (rc != ECORE_SUCCESS) { 1947 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL); 1948 return rc; 1949 } 1950 1951 /* PF Init sequence */ 1952 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); 1953 if (rc) 1954 return rc; 1955 1956 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ 1957 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); 1958 if (rc) 1959 return rc; 1960 1961 /* Pure runtime initializations - directly to the HW */ 1962 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 1963 1964 /* PCI relaxed ordering causes a decrease in the performance on some 1965 * systems. Till a root cause is found, disable this attribute in the 1966 * PCI config space. 1967 */ 1968 pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP); 1969 if (!pos) { 1970 DP_NOTICE(p_hwfn, true, 1971 "Failed to find the PCI Express Capability structure in the PCI config space\n"); 1972 return ECORE_IO; 1973 } 1974 OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl); 1975 ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN; 1976 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl); 1977 1978 rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); 1979 if (rc) 1980 return rc; 1981 #if 0 1982 /* FW 8.10.5.0 requires us to configure MSG_INFO in PRS. 1983 * This would hopefully be moved to MFW. 1984 */ 1985 if (IS_MF_SI(p_hwfn)) { 1986 u8 pf_id = 0; 1987 u32 val; 1988 1989 if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) == 1990 ECORE_SUCCESS) { 1991 if (p_hwfn->rel_pf_id == pf_id) { 1992 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 1993 "PF[%d] is first ETH on engine\n", 1994 pf_id); 1995 val = 1; 1996 } 1997 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val); 1998 } 1999 } 2000 #endif 2001 if (b_hw_start) { 2002 /* enable interrupts */ 2003 rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode); 2004 if (rc != ECORE_SUCCESS) 2005 return rc; 2006 2007 /* send function start command */ 2008 rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode, 2009 allow_npar_tx_switch); 2010 if (rc) { 2011 DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n"); 2012 } else { 2013 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2014 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2015 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2016 2017 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 2018 { 2019 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, 2020 (1 << 2)); 2021 ecore_wr(p_hwfn, p_ptt, 2022 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 2023 0x100); 2024 } 2025 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2026 "PRS_REG_SEARCH registers after start PFn\n"); 2027 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); 2028 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2029 "PRS_REG_SEARCH_TCP: %x\n", prs_reg); 2030 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); 2031 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2032 "PRS_REG_SEARCH_UDP: %x\n", prs_reg); 2033 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); 2034 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2035 "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); 2036 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); 2037 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2038 "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); 2039 prs_reg = ecore_rd(p_hwfn, p_ptt, 2040 PRS_REG_SEARCH_TCP_FIRST_FRAG); 2041 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2042 "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", 2043 prs_reg); 2044 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2045 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2046 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2047 } 2048 } 2049 return rc; 2050 } 2051 2052 enum _ecore_status_t ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn, 2053 struct ecore_ptt *p_ptt, 2054 u8 enable) 2055 { 2056 u32 delay_idx = 0, val, set_val = enable ? 1 : 0; 2057 2058 /* Change PF in PXP */ 2059 ecore_wr(p_hwfn, p_ptt, 2060 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 2061 2062 /* wait until value is set - try for 1 second every 50us */ 2063 for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 2064 val = ecore_rd(p_hwfn, p_ptt, 2065 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 2066 if (val == set_val) 2067 break; 2068 2069 OSAL_UDELAY(50); 2070 } 2071 2072 if (val != set_val) { 2073 DP_NOTICE(p_hwfn, true, 2074 "PFID_ENABLE_MASTER wasn't changed after a second\n"); 2075 return ECORE_UNKNOWN_ERROR; 2076 } 2077 2078 return ECORE_SUCCESS; 2079 } 2080 2081 static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, 2082 struct ecore_ptt *p_main_ptt) 2083 { 2084 /* Read shadow of current MFW mailbox */ 2085 ecore_mcp_read_mb(p_hwfn, p_main_ptt); 2086 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 2087 p_hwfn->mcp_info->mfw_mb_cur, 2088 p_hwfn->mcp_info->mfw_mb_length); 2089 } 2090 2091 enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, 2092 struct ecore_hw_init_params *p_params) 2093 { 2094 if (p_params->p_tunn) { 2095 ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 2096 ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 2097 } 2098 2099 p_hwfn->b_int_enabled = 1; 2100 2101 return ECORE_SUCCESS; 2102 } 2103 2104 static void 2105 ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, 2106 struct ecore_drv_load_params *p_drv_load) 2107 { 2108 OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); 2109 2110 if (p_drv_load != OSAL_NULL) { 2111 p_load_req->drv_role = p_drv_load->is_crash_kernel ? 2112 ECORE_DRV_ROLE_KDUMP : 2113 ECORE_DRV_ROLE_OS; 2114 p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 2115 p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 2116 p_load_req->override_force_load = 2117 p_drv_load->override_force_load; 2118 } else { 2119 p_load_req->drv_role = ECORE_DRV_ROLE_OS; 2120 p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; 2121 p_load_req->avoid_eng_reset = false; 2122 p_load_req->override_force_load = 2123 ECORE_OVERRIDE_FORCE_LOAD_NONE; 2124 } 2125 } 2126 2127 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 2128 struct ecore_hw_init_params *p_params) 2129 { 2130 struct ecore_load_req_params load_req_params; 2131 u32 load_code, param, drv_mb_param; 2132 bool b_default_mtu = true; 2133 struct ecore_hwfn *p_hwfn; 2134 enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc; 2135 int i; 2136 2137 if ((p_params->int_mode == ECORE_INT_MODE_MSI) && (p_dev->num_hwfns > 1)) { 2138 DP_NOTICE(p_dev, false, 2139 "MSI mode is not supported for CMT devices\n"); 2140 return ECORE_INVAL; 2141 } 2142 2143 if (IS_PF(p_dev)) { 2144 rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data); 2145 if (rc != ECORE_SUCCESS) 2146 return rc; 2147 } 2148 2149 for_each_hwfn(p_dev, i) { 2150 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2151 2152 /* If management didn't provide a default, set one of our own */ 2153 if (!p_hwfn->hw_info.mtu) { 2154 p_hwfn->hw_info.mtu = 1500; 2155 b_default_mtu = false; 2156 } 2157 2158 if (IS_VF(p_dev)) { 2159 ecore_vf_start(p_hwfn, p_params); 2160 continue; 2161 } 2162 2163 /* Enable DMAE in PXP */ 2164 rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); 2165 if (rc != ECORE_SUCCESS) 2166 return rc; 2167 2168 rc = ecore_calc_hw_mode(p_hwfn); 2169 if (rc != ECORE_SUCCESS) 2170 return rc; 2171 2172 ecore_fill_load_req_params(&load_req_params, 2173 p_params->p_drv_load_params); 2174 rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 2175 &load_req_params); 2176 if (rc != ECORE_SUCCESS) { 2177 DP_NOTICE(p_hwfn, true, 2178 "Failed sending a LOAD_REQ command\n"); 2179 return rc; 2180 } 2181 2182 load_code = load_req_params.load_code; 2183 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2184 "Load request was sent. Load code: 0x%x\n", 2185 load_code); 2186 2187 ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); 2188 2189 /* CQ75580: 2190 * When comming back from hiberbate state, the registers from 2191 * which shadow is read initially are not initialized. It turns 2192 * out that these registers get initialized during the call to 2193 * ecore_mcp_load_req request. So we need to reread them here 2194 * to get the proper shadow register value. 2195 * Note: This is a workaround for the missing MFW 2196 * initialization. It may be removed once the implementation 2197 * is done. 2198 */ 2199 ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 2200 2201 /* Only relevant for recovery: 2202 * Clear the indication after the LOAD_REQ command is responded 2203 * by the MFW. 2204 */ 2205 p_dev->recov_in_prog = false; 2206 2207 p_hwfn->first_on_engine = (load_code == 2208 FW_MSG_CODE_DRV_LOAD_ENGINE); 2209 2210 if (!qm_lock_init) { 2211 OSAL_SPIN_LOCK_INIT(&qm_lock); 2212 qm_lock_init = true; 2213 } 2214 2215 switch (load_code) { 2216 case FW_MSG_CODE_DRV_LOAD_ENGINE: 2217 rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, 2218 p_hwfn->hw_info.hw_mode); 2219 if (rc != ECORE_SUCCESS) 2220 break; 2221 /* FALLTHROUGH */ 2222 case FW_MSG_CODE_DRV_LOAD_PORT: 2223 rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, 2224 p_hwfn->hw_info.hw_mode); 2225 if (rc != ECORE_SUCCESS) 2226 break; 2227 /* FALLTHROUGH */ 2228 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 2229 rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, 2230 p_params->p_tunn, 2231 p_hwfn->hw_info.hw_mode, 2232 p_params->b_hw_start, 2233 p_params->int_mode, 2234 p_params->allow_npar_tx_switch); 2235 break; 2236 default: 2237 DP_NOTICE(p_hwfn, false, 2238 "Unexpected load code [0x%08x]", load_code); 2239 rc = ECORE_NOTIMPL; 2240 break; 2241 } 2242 2243 if (rc != ECORE_SUCCESS) 2244 DP_NOTICE(p_hwfn, true, 2245 "init phase failed for loadcode 0x%x (rc %d)\n", 2246 load_code, rc); 2247 2248 /* ACK mfw regardless of success or failure of initialization */ 2249 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2250 DRV_MSG_CODE_LOAD_DONE, 2251 0, &load_code, ¶m); 2252 2253 /* Check the return value of the ecore_hw_init_*() function */ 2254 if (rc != ECORE_SUCCESS) 2255 return rc; 2256 2257 /* Check the return value of the LOAD_DONE command */ 2258 if (mfw_rc != ECORE_SUCCESS) { 2259 DP_NOTICE(p_hwfn, true, 2260 "Failed sending a LOAD_DONE command\n"); 2261 return mfw_rc; 2262 } 2263 2264 /* Check if there is a DID mismatch between nvm-cfg/efuse */ 2265 if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) 2266 DP_NOTICE(p_hwfn, false, 2267 "warning: device configuration is not supported on this board type. The device may not function as expected.\n"); 2268 2269 /* send DCBX attention request command */ 2270 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 2271 "sending phony dcbx set command to trigger DCBx attention handling\n"); 2272 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2273 DRV_MSG_CODE_SET_DCBX, 2274 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, 2275 &load_code, ¶m); 2276 if (mfw_rc != ECORE_SUCCESS) { 2277 DP_NOTICE(p_hwfn, true, 2278 "Failed to send DCBX attention request\n"); 2279 return mfw_rc; 2280 } 2281 2282 p_hwfn->hw_init_done = true; 2283 } 2284 2285 if (IS_PF(p_dev)) { 2286 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2287 drv_mb_param = STORM_FW_VERSION; 2288 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2289 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 2290 drv_mb_param, &load_code, ¶m); 2291 if (rc != ECORE_SUCCESS) 2292 DP_INFO(p_hwfn, "Failed to update firmware version\n"); 2293 2294 if (!b_default_mtu) { 2295 rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, 2296 p_hwfn->hw_info.mtu); 2297 if (rc != ECORE_SUCCESS) 2298 DP_INFO(p_hwfn, "Failed to update default mtu\n"); 2299 } 2300 2301 rc = ecore_mcp_ov_update_driver_state(p_hwfn, 2302 p_hwfn->p_main_ptt, 2303 ECORE_OV_DRIVER_STATE_DISABLED); 2304 if (rc != ECORE_SUCCESS) 2305 DP_INFO(p_hwfn, "Failed to update driver state\n"); 2306 2307 rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, 2308 ECORE_OV_ESWITCH_VEB); 2309 if (rc != ECORE_SUCCESS) 2310 DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); 2311 } 2312 2313 return rc; 2314 } 2315 2316 #define ECORE_HW_STOP_RETRY_LIMIT (10) 2317 static void ecore_hw_timers_stop(struct ecore_dev *p_dev, 2318 struct ecore_hwfn *p_hwfn, 2319 struct ecore_ptt *p_ptt) 2320 { 2321 int i; 2322 2323 /* close timers */ 2324 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 2325 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 2326 for (i = 0; 2327 i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog; 2328 i++) { 2329 if ((!ecore_rd(p_hwfn, p_ptt, 2330 TM_REG_PF_SCAN_ACTIVE_CONN)) && 2331 (!ecore_rd(p_hwfn, p_ptt, 2332 TM_REG_PF_SCAN_ACTIVE_TASK))) 2333 break; 2334 2335 /* Dependent on number of connection/tasks, possibly 2336 * 1ms sleep is required between polls 2337 */ 2338 OSAL_MSLEEP(1); 2339 } 2340 2341 if (i < ECORE_HW_STOP_RETRY_LIMIT) 2342 return; 2343 2344 DP_NOTICE(p_hwfn, true, 2345 "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 2346 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 2347 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 2348 } 2349 2350 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev) 2351 { 2352 int j; 2353 2354 for_each_hwfn(p_dev, j) { 2355 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2356 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2357 2358 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2359 } 2360 } 2361 2362 static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn, 2363 struct ecore_ptt *p_ptt, 2364 u32 addr, u32 expected_val) 2365 { 2366 u32 val = ecore_rd(p_hwfn, p_ptt, addr); 2367 2368 if (val != expected_val) { 2369 DP_NOTICE(p_hwfn, true, 2370 "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n", 2371 addr, val, expected_val); 2372 return ECORE_UNKNOWN_ERROR; 2373 } 2374 2375 return ECORE_SUCCESS; 2376 } 2377 2378 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) 2379 { 2380 struct ecore_hwfn *p_hwfn; 2381 struct ecore_ptt *p_ptt; 2382 enum _ecore_status_t rc, rc2 = ECORE_SUCCESS; 2383 int j; 2384 2385 for_each_hwfn(p_dev, j) { 2386 p_hwfn = &p_dev->hwfns[j]; 2387 p_ptt = p_hwfn->p_main_ptt; 2388 2389 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n"); 2390 2391 if (IS_VF(p_dev)) { 2392 ecore_vf_pf_int_cleanup(p_hwfn); 2393 rc = ecore_vf_pf_reset(p_hwfn); 2394 if (rc != ECORE_SUCCESS) { 2395 DP_NOTICE(p_hwfn, true, 2396 "ecore_vf_pf_reset failed. rc = %d.\n", 2397 rc); 2398 rc2 = ECORE_UNKNOWN_ERROR; 2399 } 2400 continue; 2401 } 2402 2403 /* mark the hw as uninitialized... */ 2404 p_hwfn->hw_init_done = false; 2405 2406 /* Send unload command to MCP */ 2407 if (!p_dev->recov_in_prog) { 2408 rc = ecore_mcp_unload_req(p_hwfn, p_ptt); 2409 if (rc != ECORE_SUCCESS) { 2410 DP_NOTICE(p_hwfn, true, 2411 "Failed sending a UNLOAD_REQ command. rc = %d.\n", 2412 rc); 2413 rc2 = ECORE_UNKNOWN_ERROR; 2414 } 2415 } 2416 2417 OSAL_DPC_SYNC(p_hwfn); 2418 2419 /* After this point no MFW attentions are expected, e.g. prevent 2420 * race between pf stop and dcbx pf update. 2421 */ 2422 2423 rc = ecore_sp_pf_stop(p_hwfn); 2424 if (rc != ECORE_SUCCESS) { 2425 DP_NOTICE(p_hwfn, true, 2426 "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n", 2427 rc); 2428 rc2 = ECORE_UNKNOWN_ERROR; 2429 } 2430 2431 /* perform debug action after PF stop was sent */ 2432 OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id); 2433 2434 /* close NIG to BRB gate */ 2435 ecore_wr(p_hwfn, p_ptt, 2436 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2437 2438 /* close parser */ 2439 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2440 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2441 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2442 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2443 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2444 2445 /* @@@TBD - clean transmission queues (5.b) */ 2446 /* @@@TBD - clean BTB (5.c) */ 2447 2448 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2449 2450 /* @@@TBD - verify DMAE requests are done (8) */ 2451 2452 /* Disable Attention Generation */ 2453 ecore_int_igu_disable_int(p_hwfn, p_ptt); 2454 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 2455 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 2456 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); 2457 rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt); 2458 if (rc != ECORE_SUCCESS) { 2459 DP_NOTICE(p_hwfn, true, 2460 "Failed to return IGU CAM to default\n"); 2461 rc2 = ECORE_UNKNOWN_ERROR; 2462 } 2463 2464 /* Need to wait 1ms to guarantee SBs are cleared */ 2465 OSAL_MSLEEP(1); 2466 2467 if (!p_dev->recov_in_prog) { 2468 ecore_verify_reg_val(p_hwfn, p_ptt, 2469 QM_REG_USG_CNT_PF_TX, 0); 2470 ecore_verify_reg_val(p_hwfn, p_ptt, 2471 QM_REG_USG_CNT_PF_OTHER, 0); 2472 /* @@@TBD - assert on incorrect xCFC values (10.b) */ 2473 } 2474 2475 /* Disable PF in HW blocks */ 2476 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); 2477 ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); 2478 2479 if (!p_dev->recov_in_prog) { 2480 ecore_mcp_unload_done(p_hwfn, p_ptt); 2481 if (rc != ECORE_SUCCESS) { 2482 DP_NOTICE(p_hwfn, true, 2483 "Failed sending a UNLOAD_DONE command. rc = %d.\n", 2484 rc); 2485 rc2 = ECORE_UNKNOWN_ERROR; 2486 } 2487 } 2488 } /* hwfn loop */ 2489 2490 if (IS_PF(p_dev)) { 2491 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2492 p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt; 2493 2494 /* Disable DMAE in PXP - in CMT, this should only be done for 2495 * first hw-function, and only after all transactions have 2496 * stopped for all active hw-functions. 2497 */ 2498 rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false); 2499 if (rc != ECORE_SUCCESS) { 2500 DP_NOTICE(p_hwfn, true, 2501 "ecore_change_pci_hwfn failed. rc = %d.\n", 2502 rc); 2503 rc2 = ECORE_UNKNOWN_ERROR; 2504 } 2505 } 2506 2507 return rc2; 2508 } 2509 2510 void ecore_hw_stop_fastpath(struct ecore_dev *p_dev) 2511 { 2512 int j; 2513 2514 for_each_hwfn(p_dev, j) { 2515 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2516 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2517 2518 if (IS_VF(p_dev)) { 2519 ecore_vf_pf_int_cleanup(p_hwfn); 2520 continue; 2521 } 2522 2523 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n"); 2524 2525 ecore_wr(p_hwfn, p_ptt, 2526 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2527 2528 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2529 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2530 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2531 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2532 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2533 2534 /* @@@TBD - clean transmission queues (5.b) */ 2535 /* @@@TBD - clean BTB (5.c) */ 2536 2537 /* @@@TBD - verify DMAE requests are done (8) */ 2538 2539 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 2540 /* Need to wait 1ms to guarantee SBs are cleared */ 2541 OSAL_MSLEEP(1); 2542 } 2543 } 2544 2545 void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn) 2546 { 2547 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2548 2549 if (IS_VF(p_hwfn->p_dev)) 2550 return; 2551 2552 /* If roce info is allocated it means roce is initialized and should 2553 * be enabled in searcher. 2554 */ 2555 if (p_hwfn->p_rdma_info) { 2556 if (p_hwfn->b_rdma_enabled_in_prs) 2557 ecore_wr(p_hwfn, p_ptt, 2558 p_hwfn->rdma_prs_search_reg, 0x1); 2559 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1); 2560 } 2561 2562 /* Re-open incoming traffic */ 2563 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2564 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 2565 } 2566 /* TEMP macro to be removed when wol code revisted */ 2567 #define ECORE_WOL_WR(_p_hwfn, _p_ptt, _offset, _val) ECORE_IS_BB(_p_hwfn->p_dev) ? \ 2568 ecore_wr(_p_hwfn, _p_ptt, _offset, _val) : \ 2569 ecore_mcp_wol_wr(_p_hwfn, _p_ptt, _offset, _val); 2570 2571 enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev, 2572 const bool b_enable, 2573 u32 reg_idx, 2574 u32 pattern_size, 2575 u32 crc) 2576 { 2577 struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2578 u32 reg_len = 0; 2579 u32 reg_crc = 0; 2580 2581 /* Get length and CRC register offsets */ 2582 switch (reg_idx) 2583 { 2584 case 0: 2585 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_LEN_BB : 2586 WOL_REG_ACPI_PAT_0_LEN_K2_E5; 2587 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB : 2588 WOL_REG_ACPI_PAT_0_CRC_K2_E5; 2589 break; 2590 case 1: 2591 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB : 2592 WOL_REG_ACPI_PAT_1_LEN_K2_E5; 2593 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_CRC_BB : 2594 WOL_REG_ACPI_PAT_1_CRC_K2_E5; 2595 break; 2596 case 2: 2597 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB : 2598 WOL_REG_ACPI_PAT_2_LEN_K2_E5; 2599 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_CRC_BB : 2600 WOL_REG_ACPI_PAT_2_CRC_K2_E5; 2601 break; 2602 case 3: 2603 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB : 2604 WOL_REG_ACPI_PAT_3_LEN_K2_E5; 2605 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB : 2606 WOL_REG_ACPI_PAT_3_CRC_K2_E5; 2607 break; 2608 case 4: 2609 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_LEN_BB : 2610 WOL_REG_ACPI_PAT_4_LEN_K2_E5; 2611 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB : 2612 WOL_REG_ACPI_PAT_4_CRC_K2_E5; 2613 break; 2614 case 5: 2615 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB : 2616 WOL_REG_ACPI_PAT_5_LEN_K2_E5; 2617 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB : 2618 WOL_REG_ACPI_PAT_5_CRC_K2_E5; 2619 break; 2620 case 6: 2621 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB : 2622 WOL_REG_ACPI_PAT_6_LEN_K2_E5; 2623 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB : 2624 WOL_REG_ACPI_PAT_6_CRC_K2_E5; 2625 break; 2626 case 7: 2627 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB : 2628 WOL_REG_ACPI_PAT_7_LEN_K2_E5; 2629 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_CRC_BB : 2630 WOL_REG_ACPI_PAT_7_CRC_K2_E5; 2631 break; 2632 default: 2633 return ECORE_UNKNOWN_ERROR; 2634 } 2635 2636 /* Allign pattern size to 4 */ 2637 while (pattern_size % 4) 2638 { 2639 pattern_size++; 2640 } 2641 /* write pattern length */ 2642 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, reg_len, pattern_size); 2643 2644 /* write crc value*/ 2645 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, reg_crc, crc); 2646 2647 DP_INFO(p_dev, 2648 "ecore_set_nwuf_reg: idx[%d] reg_crc[0x%x=0x%08x] " 2649 "reg_len[0x%x=0x%x]\n", 2650 reg_idx, reg_crc, crc, reg_len, pattern_size); 2651 2652 return ECORE_SUCCESS; 2653 } 2654 2655 void ecore_wol_buffer_clear(struct ecore_dev *p_dev) 2656 { 2657 struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2658 const u32 wake_buffer_clear_offset = 2659 ECORE_IS_BB(p_dev) ? 2660 NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5; 2661 2662 DP_INFO(p_dev, 2663 "ecore_wol_buffer_clear: reset " 2664 "REG_WAKE_BUFFER_CLEAR offset=0x%08x\n", 2665 wake_buffer_clear_offset); 2666 2667 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, wake_buffer_clear_offset, 1); 2668 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, wake_buffer_clear_offset, 0); 2669 } 2670 2671 enum _ecore_status_t ecore_get_wake_info(struct ecore_dev *p_dev, 2672 struct ecore_wake_info *wake_info) 2673 { 2674 struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2675 u32 *buf = OSAL_NULL; 2676 u32 i = 0; 2677 const u32 reg_wake_buffer_offest = 2678 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB : 2679 WOL_REG_WAKE_BUFFER_K2_E5; 2680 2681 wake_info->wk_info = ecore_rd(hwfn, hwfn->p_main_ptt, 2682 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB : 2683 WOL_REG_WAKE_INFO_K2_E5); 2684 wake_info->wk_details = ecore_rd(hwfn, hwfn->p_main_ptt, 2685 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_DETAILS_BB : 2686 WOL_REG_WAKE_DETAILS_K2_E5); 2687 wake_info->wk_pkt_len = ecore_rd(hwfn, hwfn->p_main_ptt, 2688 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_PKT_LEN_BB : 2689 WOL_REG_WAKE_PKT_LEN_K2_E5); 2690 2691 DP_INFO(p_dev, 2692 "ecore_get_wake_info: REG_WAKE_INFO=0x%08x " 2693 "REG_WAKE_DETAILS=0x%08x " 2694 "REG_WAKE_PKT_LEN=0x%08x\n", 2695 wake_info->wk_info, 2696 wake_info->wk_details, 2697 wake_info->wk_pkt_len); 2698 2699 buf = (u32 *)wake_info->wk_buffer; 2700 2701 for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++) 2702 { 2703 if ((i*sizeof(u32)) >= sizeof(wake_info->wk_buffer)) 2704 { 2705 DP_INFO(p_dev, 2706 "ecore_get_wake_info: i index to 0 high=%d\n", 2707 i); 2708 break; 2709 } 2710 buf[i] = ecore_rd(hwfn, hwfn->p_main_ptt, 2711 reg_wake_buffer_offest + (i * sizeof(u32))); 2712 DP_INFO(p_dev, "ecore_get_wake_info: wk_buffer[%u]: 0x%08x\n", 2713 i, buf[i]); 2714 } 2715 2716 ecore_wol_buffer_clear(p_dev); 2717 2718 return ECORE_SUCCESS; 2719 } 2720 2721 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 2722 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn) 2723 { 2724 ecore_ptt_pool_free(p_hwfn); 2725 OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info); 2726 p_hwfn->hw_info.p_igu_info = OSAL_NULL; 2727 } 2728 2729 /* Setup bar access */ 2730 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn) 2731 { 2732 /* clear indirect access */ 2733 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) { 2734 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2735 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0); 2736 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2737 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0); 2738 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2739 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0); 2740 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2741 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0); 2742 } else { 2743 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2744 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); 2745 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2746 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); 2747 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2748 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); 2749 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2750 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); 2751 } 2752 2753 /* Clean Previous errors if such exist */ 2754 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2755 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 2756 1 << p_hwfn->abs_pf_id); 2757 2758 /* enable internal target-read */ 2759 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2760 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 2761 } 2762 2763 static void get_function_id(struct ecore_hwfn *p_hwfn) 2764 { 2765 /* ME Register */ 2766 p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, 2767 PXP_PF_ME_OPAQUE_ADDR); 2768 2769 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 2770 2771 /* Bits 16-19 from the ME registers are the pf_num */ 2772 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; 2773 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2774 PXP_CONCRETE_FID_PFID); 2775 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2776 PXP_CONCRETE_FID_PORT); 2777 2778 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 2779 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", 2780 p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); 2781 } 2782 2783 void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) 2784 { 2785 u32 *feat_num = p_hwfn->hw_info.feat_num; 2786 struct ecore_sb_cnt_info sb_cnt; 2787 u32 non_l2_sbs = 0; 2788 2789 OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt)); 2790 ecore_int_get_num_sbs(p_hwfn, &sb_cnt); 2791 2792 #ifdef CONFIG_ECORE_ROCE 2793 /* Roce CNQ require each: 1 status block. 1 CNQ, we divide the 2794 * status blocks equally between L2 / RoCE but with consideration as 2795 * to how many l2 queues / cnqs we have 2796 */ 2797 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 2798 u32 max_cnqs; 2799 2800 feat_num[ECORE_RDMA_CNQ] = 2801 OSAL_MIN_T(u32, 2802 sb_cnt.cnt / 2, 2803 RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM)); 2804 2805 /* Upper layer might require less */ 2806 max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs; 2807 if (max_cnqs) { 2808 if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE) 2809 max_cnqs = 0; 2810 feat_num[ECORE_RDMA_CNQ] = 2811 OSAL_MIN_T(u32, 2812 feat_num[ECORE_RDMA_CNQ], 2813 max_cnqs); 2814 } 2815 2816 non_l2_sbs = feat_num[ECORE_RDMA_CNQ]; 2817 } 2818 #endif 2819 2820 /* L2 Queues require each: 1 status block. 1 L2 queue */ 2821 if (ECORE_IS_L2_PERSONALITY(p_hwfn)) { 2822 /* Start by allocating VF queues, then PF's */ 2823 feat_num[ECORE_VF_L2_QUE] = 2824 OSAL_MIN_T(u32, 2825 RESC_NUM(p_hwfn, ECORE_L2_QUEUE), 2826 sb_cnt.iov_cnt); 2827 feat_num[ECORE_PF_L2_QUE] = 2828 OSAL_MIN_T(u32, 2829 sb_cnt.cnt - non_l2_sbs, 2830 RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - 2831 FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE)); 2832 } 2833 2834 if (ECORE_IS_FCOE_PERSONALITY(p_hwfn)) 2835 feat_num[ECORE_FCOE_CQ] = 2836 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 2837 ECORE_CMDQS_CQS)); 2838 2839 if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) 2840 feat_num[ECORE_ISCSI_CQ] = 2841 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 2842 ECORE_CMDQS_CQS)); 2843 2844 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 2845 "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n", 2846 (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), 2847 (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE), 2848 (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ), 2849 (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ), 2850 (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ), 2851 (int)sb_cnt.cnt); 2852 } 2853 2854 const char *ecore_hw_get_resc_name(enum ecore_resources res_id) 2855 { 2856 switch (res_id) { 2857 case ECORE_L2_QUEUE: 2858 return "L2_QUEUE"; 2859 case ECORE_VPORT: 2860 return "VPORT"; 2861 case ECORE_RSS_ENG: 2862 return "RSS_ENG"; 2863 case ECORE_PQ: 2864 return "PQ"; 2865 case ECORE_RL: 2866 return "RL"; 2867 case ECORE_MAC: 2868 return "MAC"; 2869 case ECORE_VLAN: 2870 return "VLAN"; 2871 case ECORE_RDMA_CNQ_RAM: 2872 return "RDMA_CNQ_RAM"; 2873 case ECORE_ILT: 2874 return "ILT"; 2875 case ECORE_LL2_QUEUE: 2876 return "LL2_QUEUE"; 2877 case ECORE_CMDQS_CQS: 2878 return "CMDQS_CQS"; 2879 case ECORE_RDMA_STATS_QUEUE: 2880 return "RDMA_STATS_QUEUE"; 2881 case ECORE_BDQ: 2882 return "BDQ"; 2883 case ECORE_SB: 2884 return "SB"; 2885 default: 2886 return "UNKNOWN_RESOURCE"; 2887 } 2888 } 2889 2890 static enum _ecore_status_t 2891 __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 2892 enum ecore_resources res_id, u32 resc_max_val, 2893 u32 *p_mcp_resp) 2894 { 2895 enum _ecore_status_t rc; 2896 2897 rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id, 2898 resc_max_val, p_mcp_resp); 2899 if (rc != ECORE_SUCCESS) { 2900 DP_NOTICE(p_hwfn, true, 2901 "MFW response failure for a max value setting of resource %d [%s]\n", 2902 res_id, ecore_hw_get_resc_name(res_id)); 2903 return rc; 2904 } 2905 2906 if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 2907 DP_INFO(p_hwfn, 2908 "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n", 2909 res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp); 2910 2911 return ECORE_SUCCESS; 2912 } 2913 2914 static enum _ecore_status_t 2915 ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn) 2916 { 2917 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 2918 u32 resc_max_val, mcp_resp; 2919 u8 res_id; 2920 enum _ecore_status_t rc; 2921 2922 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 2923 switch (res_id) { 2924 case ECORE_LL2_QUEUE: 2925 resc_max_val = MAX_NUM_LL2_RX_QUEUES; 2926 break; 2927 case ECORE_RDMA_CNQ_RAM: 2928 /* No need for a case for ECORE_CMDQS_CQS since 2929 * CNQ/CMDQS are the same resource. 2930 */ 2931 resc_max_val = NUM_OF_GLOBAL_QUEUES; 2932 break; 2933 case ECORE_RDMA_STATS_QUEUE: 2934 resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 2935 : RDMA_NUM_STATISTIC_COUNTERS_BB; 2936 break; 2937 case ECORE_BDQ: 2938 resc_max_val = BDQ_NUM_RESOURCES; 2939 break; 2940 default: 2941 continue; 2942 } 2943 2944 rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id, 2945 resc_max_val, &mcp_resp); 2946 if (rc != ECORE_SUCCESS) 2947 return rc; 2948 2949 /* There's no point to continue to the next resource if the 2950 * command is not supported by the MFW. 2951 * We do continue if the command is supported but the resource 2952 * is unknown to the MFW. Such a resource will be later 2953 * configured with the default allocation values. 2954 */ 2955 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 2956 return ECORE_NOTIMPL; 2957 } 2958 2959 return ECORE_SUCCESS; 2960 } 2961 2962 static 2963 enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn, 2964 enum ecore_resources res_id, 2965 u32 *p_resc_num, u32 *p_resc_start) 2966 { 2967 u8 num_funcs = p_hwfn->num_funcs_on_engine; 2968 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 2969 2970 switch (res_id) { 2971 case ECORE_L2_QUEUE: 2972 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 : 2973 MAX_NUM_L2_QUEUES_BB) / num_funcs; 2974 break; 2975 case ECORE_VPORT: 2976 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 2977 MAX_NUM_VPORTS_BB) / num_funcs; 2978 break; 2979 case ECORE_RSS_ENG: 2980 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : 2981 ETH_RSS_ENGINE_NUM_BB) / num_funcs; 2982 break; 2983 case ECORE_PQ: 2984 *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 : 2985 MAX_QM_TX_QUEUES_BB) / num_funcs; 2986 *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ 2987 break; 2988 case ECORE_RL: 2989 *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; 2990 break; 2991 case ECORE_MAC: 2992 case ECORE_VLAN: 2993 /* Each VFC resource can accommodate both a MAC and a VLAN */ 2994 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 2995 break; 2996 case ECORE_ILT: 2997 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : 2998 PXP_NUM_ILT_RECORDS_BB) / num_funcs; 2999 break; 3000 case ECORE_LL2_QUEUE: 3001 *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 3002 break; 3003 case ECORE_RDMA_CNQ_RAM: 3004 case ECORE_CMDQS_CQS: 3005 /* CNQ/CMDQS are the same resource */ 3006 *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; 3007 break; 3008 case ECORE_RDMA_STATS_QUEUE: 3009 *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 : 3010 RDMA_NUM_STATISTIC_COUNTERS_BB) / 3011 num_funcs; 3012 break; 3013 case ECORE_BDQ: 3014 if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI && 3015 p_hwfn->hw_info.personality != ECORE_PCI_FCOE) 3016 *p_resc_num = 0; 3017 else 3018 *p_resc_num = 1; 3019 break; 3020 case ECORE_SB: 3021 /* Since we want its value to reflect whether MFW supports 3022 * the new scheme, have a default of 0. 3023 */ 3024 *p_resc_num = 0; 3025 break; 3026 default: 3027 return ECORE_INVAL; 3028 } 3029 3030 switch (res_id) { 3031 case ECORE_BDQ: 3032 if (!*p_resc_num) 3033 *p_resc_start = 0; 3034 else if (p_hwfn->p_dev->num_ports_in_engine == 4) 3035 *p_resc_start = p_hwfn->port_id; 3036 else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) 3037 *p_resc_start = p_hwfn->port_id; 3038 else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 3039 *p_resc_start = p_hwfn->port_id + 2; 3040 break; 3041 default: 3042 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 3043 break; 3044 } 3045 3046 return ECORE_SUCCESS; 3047 } 3048 3049 static enum _ecore_status_t 3050 __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id, 3051 bool drv_resc_alloc) 3052 { 3053 u32 dflt_resc_num = 0, dflt_resc_start = 0; 3054 u32 mcp_resp, *p_resc_num, *p_resc_start; 3055 enum _ecore_status_t rc; 3056 3057 p_resc_num = &RESC_NUM(p_hwfn, res_id); 3058 p_resc_start = &RESC_START(p_hwfn, res_id); 3059 3060 rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 3061 &dflt_resc_start); 3062 if (rc != ECORE_SUCCESS) { 3063 DP_ERR(p_hwfn, 3064 "Failed to get default amount for resource %d [%s]\n", 3065 res_id, ecore_hw_get_resc_name(res_id)); 3066 return rc; 3067 } 3068 3069 #ifndef ASIC_ONLY 3070 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3071 *p_resc_num = dflt_resc_num; 3072 *p_resc_start = dflt_resc_start; 3073 goto out; 3074 } 3075 #endif 3076 3077 rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 3078 &mcp_resp, p_resc_num, p_resc_start); 3079 if (rc != ECORE_SUCCESS) { 3080 DP_NOTICE(p_hwfn, true, 3081 "MFW response failure for an allocation request for resource %d [%s]\n", 3082 res_id, ecore_hw_get_resc_name(res_id)); 3083 return rc; 3084 } 3085 3086 /* Default driver values are applied in the following cases: 3087 * - The resource allocation MB command is not supported by the MFW 3088 * - There is an internal error in the MFW while processing the request 3089 * - The resource ID is unknown to the MFW 3090 */ 3091 if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3092 DP_INFO(p_hwfn, 3093 "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n", 3094 res_id, ecore_hw_get_resc_name(res_id), mcp_resp, 3095 dflt_resc_num, dflt_resc_start); 3096 *p_resc_num = dflt_resc_num; 3097 *p_resc_start = dflt_resc_start; 3098 goto out; 3099 } 3100 3101 if ((*p_resc_num != dflt_resc_num || 3102 *p_resc_start != dflt_resc_start) && 3103 res_id != ECORE_SB) { 3104 DP_INFO(p_hwfn, 3105 "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n", 3106 res_id, ecore_hw_get_resc_name(res_id), *p_resc_num, 3107 *p_resc_start, dflt_resc_num, dflt_resc_start, 3108 drv_resc_alloc ? " - Applying default values" : ""); 3109 if (drv_resc_alloc) { 3110 *p_resc_num = dflt_resc_num; 3111 *p_resc_start = dflt_resc_start; 3112 } 3113 } 3114 out: 3115 /* PQs have to divide by 8 [that's the HW granularity]. 3116 * Reduce number so it would fit. 3117 */ 3118 if ((res_id == ECORE_PQ) && 3119 ((*p_resc_num % 8) || (*p_resc_start % 8))) { 3120 DP_INFO(p_hwfn, 3121 "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", 3122 *p_resc_num, (*p_resc_num) & ~0x7, 3123 *p_resc_start, (*p_resc_start) & ~0x7); 3124 *p_resc_num &= ~0x7; 3125 *p_resc_start &= ~0x7; 3126 } 3127 3128 return ECORE_SUCCESS; 3129 } 3130 3131 static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, 3132 bool drv_resc_alloc) 3133 { 3134 enum _ecore_status_t rc; 3135 u8 res_id; 3136 3137 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 3138 rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc); 3139 if (rc != ECORE_SUCCESS) 3140 return rc; 3141 } 3142 3143 return ECORE_SUCCESS; 3144 } 3145 3146 #define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10 3147 #define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */ 3148 3149 static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn, 3150 bool drv_resc_alloc) 3151 { 3152 struct ecore_resc_unlock_params resc_unlock_params; 3153 struct ecore_resc_lock_params resc_lock_params; 3154 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3155 u8 res_id; 3156 enum _ecore_status_t rc; 3157 #ifndef ASIC_ONLY 3158 u32 *resc_start = p_hwfn->hw_info.resc_start; 3159 u32 *resc_num = p_hwfn->hw_info.resc_num; 3160 /* For AH, an equal share of the ILT lines between the maximal number of 3161 * PFs is not enough for RoCE. This would be solved by the future 3162 * resource allocation scheme, but isn't currently present for 3163 * FPGA/emulation. For now we keep a number that is sufficient for RoCE 3164 * to work - the BB number of ILT lines divided by its max PFs number. 3165 */ 3166 u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB; 3167 #endif 3168 3169 /* Setting the max values of the soft resources and the following 3170 * resources allocation queries should be atomic. Since several PFs can 3171 * run in parallel - a resource lock is needed. 3172 * If either the resource lock or resource set value commands are not 3173 * supported - skip the the max values setting, release the lock if 3174 * needed, and proceed to the queries. Other failures, including a 3175 * failure to acquire the lock, will cause this function to fail. 3176 * Old drivers that don't acquire the lock can run in parallel, and 3177 * their allocation values won't be affected by the updated max values. 3178 */ 3179 OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params)); 3180 resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC; 3181 resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT; 3182 resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US; 3183 resc_lock_params.sleep_b4_retry = true; 3184 OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params)); 3185 resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC; 3186 3187 rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params); 3188 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3189 return rc; 3190 } else if (rc == ECORE_NOTIMPL) { 3191 DP_INFO(p_hwfn, 3192 "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 3193 } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) { 3194 DP_NOTICE(p_hwfn, false, 3195 "Failed to acquire the resource lock for the resource allocation commands\n"); 3196 return ECORE_BUSY; 3197 } else { 3198 rc = ecore_hw_set_soft_resc_size(p_hwfn); 3199 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3200 DP_NOTICE(p_hwfn, false, 3201 "Failed to set the max values of the soft resources\n"); 3202 goto unlock_and_exit; 3203 } else if (rc == ECORE_NOTIMPL) { 3204 DP_INFO(p_hwfn, 3205 "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 3206 rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3207 &resc_unlock_params); 3208 if (rc != ECORE_SUCCESS) 3209 DP_INFO(p_hwfn, 3210 "Failed to release the resource lock for the resource allocation commands\n"); 3211 } 3212 } 3213 3214 rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc); 3215 if (rc != ECORE_SUCCESS) 3216 goto unlock_and_exit; 3217 3218 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 3219 rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3220 &resc_unlock_params); 3221 if (rc != ECORE_SUCCESS) 3222 DP_INFO(p_hwfn, 3223 "Failed to release the resource lock for the resource allocation commands\n"); 3224 } 3225 3226 #ifndef ASIC_ONLY 3227 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3228 /* Reduced build contains less PQs */ 3229 if (!(p_hwfn->p_dev->b_is_emul_full)) { 3230 resc_num[ECORE_PQ] = 32; 3231 resc_start[ECORE_PQ] = resc_num[ECORE_PQ] * 3232 p_hwfn->enabled_func_idx; 3233 } 3234 3235 /* For AH emulation, since we have a possible maximal number of 3236 * 16 enabled PFs, in case there are not enough ILT lines - 3237 * allocate only first PF as RoCE and have all the other ETH 3238 * only with less ILT lines. 3239 */ 3240 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full) 3241 resc_num[ECORE_ILT] = OSAL_MAX_T(u32, 3242 resc_num[ECORE_ILT], 3243 roce_min_ilt_lines); 3244 } 3245 3246 /* Correct the common ILT calculation if PF0 has more */ 3247 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && 3248 p_hwfn->p_dev->b_is_emul_full && 3249 p_hwfn->rel_pf_id && 3250 resc_num[ECORE_ILT] < roce_min_ilt_lines) 3251 resc_start[ECORE_ILT] += roce_min_ilt_lines - 3252 resc_num[ECORE_ILT]; 3253 #endif 3254 3255 /* Sanity for ILT */ 3256 if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 3257 (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 3258 DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n", 3259 RESC_START(p_hwfn, ECORE_ILT), 3260 RESC_END(p_hwfn, ECORE_ILT) - 1); 3261 return ECORE_INVAL; 3262 } 3263 3264 /* This will also learn the number of SBs from MFW */ 3265 if (ecore_int_igu_reset_cam(p_hwfn, p_hwfn->p_main_ptt)) 3266 return ECORE_INVAL; 3267 3268 ecore_hw_set_feat(p_hwfn); 3269 3270 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3271 "The numbers for each resource are:\n"); 3272 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) 3273 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n", 3274 ecore_hw_get_resc_name(res_id), 3275 RESC_NUM(p_hwfn, res_id), 3276 RESC_START(p_hwfn, res_id)); 3277 3278 return ECORE_SUCCESS; 3279 3280 unlock_and_exit: 3281 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) 3282 ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3283 &resc_unlock_params); 3284 return rc; 3285 } 3286 3287 static enum _ecore_status_t 3288 ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, 3289 struct ecore_ptt *p_ptt, 3290 struct ecore_hw_prepare_params *p_params) 3291 { 3292 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode; 3293 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; 3294 struct ecore_mcp_link_capabilities *p_caps; 3295 struct ecore_mcp_link_params *link; 3296 enum _ecore_status_t rc; 3297 3298 /* Read global nvm_cfg address */ 3299 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 3300 3301 /* Verify MCP has initialized it */ 3302 if (!nvm_cfg_addr) { 3303 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); 3304 if (p_params->b_relaxed_probe) 3305 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM; 3306 return ECORE_INVAL; 3307 } 3308 3309 /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ 3310 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 3311 3312 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3313 offsetof(struct nvm_cfg1, glob) + 3314 offsetof(struct nvm_cfg1_glob, core_cfg); 3315 3316 core_cfg = ecore_rd(p_hwfn, p_ptt, addr); 3317 3318 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 3319 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 3320 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: 3321 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G; 3322 break; 3323 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 3324 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G; 3325 break; 3326 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: 3327 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G; 3328 break; 3329 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: 3330 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F; 3331 break; 3332 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: 3333 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E; 3334 break; 3335 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: 3336 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G; 3337 break; 3338 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: 3339 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G; 3340 break; 3341 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 3342 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G; 3343 break; 3344 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: 3345 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G; 3346 break; 3347 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 3348 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G; 3349 break; 3350 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: 3351 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G; 3352 break; 3353 default: 3354 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n", 3355 core_cfg); 3356 break; 3357 } 3358 3359 /* Read DCBX configuration */ 3360 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3361 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3362 dcbx_mode = ecore_rd(p_hwfn, p_ptt, 3363 port_cfg_addr + 3364 offsetof(struct nvm_cfg1_port, generic_cont0)); 3365 dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK) 3366 >> NVM_CFG1_PORT_DCBX_MODE_OFFSET; 3367 switch (dcbx_mode) { 3368 case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC: 3369 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC; 3370 break; 3371 case NVM_CFG1_PORT_DCBX_MODE_CEE: 3372 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE; 3373 break; 3374 case NVM_CFG1_PORT_DCBX_MODE_IEEE: 3375 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE; 3376 break; 3377 default: 3378 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED; 3379 } 3380 3381 /* Read default link configuration */ 3382 link = &p_hwfn->mcp_info->link_input; 3383 p_caps = &p_hwfn->mcp_info->link_capabilities; 3384 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3385 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3386 link_temp = ecore_rd(p_hwfn, p_ptt, 3387 port_cfg_addr + 3388 offsetof(struct nvm_cfg1_port, speed_cap_mask)); 3389 link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; 3390 link->speed.advertised_speeds = link_temp; 3391 p_caps->speed_capabilities = link->speed.advertised_speeds; 3392 3393 link_temp = ecore_rd(p_hwfn, p_ptt, 3394 port_cfg_addr + 3395 offsetof(struct nvm_cfg1_port, link_settings)); 3396 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> 3397 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { 3398 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: 3399 link->speed.autoneg = true; 3400 break; 3401 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: 3402 link->speed.forced_speed = 1000; 3403 break; 3404 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: 3405 link->speed.forced_speed = 10000; 3406 break; 3407 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: 3408 link->speed.forced_speed = 25000; 3409 break; 3410 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: 3411 link->speed.forced_speed = 40000; 3412 break; 3413 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 3414 link->speed.forced_speed = 50000; 3415 break; 3416 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: 3417 link->speed.forced_speed = 100000; 3418 break; 3419 default: 3420 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", 3421 link_temp); 3422 } 3423 3424 p_caps->default_speed = link->speed.forced_speed; 3425 p_caps->default_speed_autoneg = link->speed.autoneg; 3426 3427 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; 3428 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; 3429 link->pause.autoneg = !!(link_temp & 3430 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); 3431 link->pause.forced_rx = !!(link_temp & 3432 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); 3433 link->pause.forced_tx = !!(link_temp & 3434 NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); 3435 link->loopback_mode = 0; 3436 3437 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { 3438 link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr + 3439 offsetof(struct nvm_cfg1_port, ext_phy)); 3440 link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; 3441 link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; 3442 p_caps->default_eee = ECORE_MCP_EEE_ENABLED; 3443 link->eee.enable = true; 3444 switch (link_temp) { 3445 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: 3446 p_caps->default_eee = ECORE_MCP_EEE_DISABLED; 3447 link->eee.enable = false; 3448 break; 3449 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: 3450 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; 3451 break; 3452 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: 3453 p_caps->eee_lpi_timer = 3454 EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; 3455 break; 3456 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: 3457 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; 3458 break; 3459 } 3460 link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; 3461 link->eee.tx_lpi_enable = link->eee.enable; 3462 if (link->eee.enable) 3463 link->eee.adv_caps = ECORE_EEE_1G_ADV | 3464 ECORE_EEE_10G_ADV; 3465 } else { 3466 p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED; 3467 } 3468 3469 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 3470 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", 3471 link->speed.forced_speed, link->speed.advertised_speeds, 3472 link->speed.autoneg, link->pause.autoneg, 3473 p_caps->default_eee, p_caps->eee_lpi_timer); 3474 3475 /* Read Multi-function information from shmem */ 3476 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3477 offsetof(struct nvm_cfg1, glob) + 3478 offsetof(struct nvm_cfg1_glob, generic_cont0); 3479 3480 generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr); 3481 3482 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 3483 NVM_CFG1_GLOB_MF_MODE_OFFSET; 3484 3485 switch (mf_mode) { 3486 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 3487 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; 3488 break; 3489 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 3490 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR; 3491 break; 3492 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 3493 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT; 3494 break; 3495 } 3496 DP_INFO(p_hwfn, "Multi function mode is %08x\n", 3497 p_hwfn->p_dev->mf_mode); 3498 3499 /* Read Multi-function information from shmem */ 3500 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3501 offsetof(struct nvm_cfg1, glob) + 3502 offsetof(struct nvm_cfg1_glob, device_capabilities); 3503 3504 device_capabilities = ecore_rd(p_hwfn, p_ptt, addr); 3505 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 3506 OSAL_SET_BIT(ECORE_DEV_CAP_ETH, 3507 &p_hwfn->hw_info.device_capabilities); 3508 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) 3509 OSAL_SET_BIT(ECORE_DEV_CAP_FCOE, 3510 &p_hwfn->hw_info.device_capabilities); 3511 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) 3512 OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI, 3513 &p_hwfn->hw_info.device_capabilities); 3514 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) 3515 OSAL_SET_BIT(ECORE_DEV_CAP_ROCE, 3516 &p_hwfn->hw_info.device_capabilities); 3517 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP) 3518 OSAL_SET_BIT(ECORE_DEV_CAP_IWARP, 3519 &p_hwfn->hw_info.device_capabilities); 3520 3521 rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 3522 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 3523 rc = ECORE_SUCCESS; 3524 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 3525 } 3526 3527 return rc; 3528 } 3529 3530 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn, 3531 struct ecore_ptt *p_ptt) 3532 { 3533 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 3534 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 3535 struct ecore_dev *p_dev = p_hwfn->p_dev; 3536 3537 num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; 3538 3539 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 3540 * in the other bits are selected. 3541 * Bits 1-15 are for functions 1-15, respectively, and their value is 3542 * '0' only for enabled functions (function 0 always exists and 3543 * enabled). 3544 * In case of CMT in BB, only the "even" functions are enabled, and thus 3545 * the number of functions for both hwfns is learnt from the same bits. 3546 */ 3547 if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) { 3548 reg_function_hide = ecore_rd(p_hwfn, p_ptt, 3549 MISCS_REG_FUNCTION_HIDE_BB_K2); 3550 } else { /* E5 */ 3551 reg_function_hide = 0; 3552 ECORE_E5_MISSING_CODE; 3553 } 3554 3555 if (reg_function_hide & 0x1) { 3556 if (ECORE_IS_BB(p_dev)) { 3557 if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) { 3558 num_funcs = 0; 3559 eng_mask = 0xaaaa; 3560 } else { 3561 num_funcs = 1; 3562 eng_mask = 0x5554; 3563 } 3564 } else { 3565 num_funcs = 1; 3566 eng_mask = 0xfffe; 3567 } 3568 3569 /* Get the number of the enabled functions on the engine */ 3570 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; 3571 while (tmp) { 3572 if (tmp & 0x1) 3573 num_funcs++; 3574 tmp >>= 0x1; 3575 } 3576 3577 /* Get the PF index within the enabled functions */ 3578 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; 3579 tmp = reg_function_hide & eng_mask & low_pfs_mask; 3580 while (tmp) { 3581 if (tmp & 0x1) 3582 enabled_func_idx--; 3583 tmp >>= 0x1; 3584 } 3585 } 3586 3587 p_hwfn->num_funcs_on_engine = num_funcs; 3588 p_hwfn->enabled_func_idx = enabled_func_idx; 3589 3590 #ifndef ASIC_ONLY 3591 if (CHIP_REV_IS_FPGA(p_dev)) { 3592 DP_NOTICE(p_hwfn, false, 3593 "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n"); 3594 p_hwfn->num_funcs_on_engine = 4; 3595 } 3596 #endif 3597 3598 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3599 "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", 3600 p_hwfn->rel_pf_id, p_hwfn->abs_pf_id, 3601 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 3602 } 3603 3604 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn, 3605 struct ecore_ptt *p_ptt) 3606 { 3607 u32 port_mode; 3608 3609 #ifndef ASIC_ONLY 3610 /* Read the port mode */ 3611 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 3612 port_mode = 4; 3613 else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && 3614 (p_hwfn->p_dev->num_hwfns > 1)) 3615 /* In CMT on emulation, assume 1 port */ 3616 port_mode = 1; 3617 else 3618 #endif 3619 port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); 3620 3621 if (port_mode < 3) { 3622 p_hwfn->p_dev->num_ports_in_engine = 1; 3623 } else if (port_mode <= 5) { 3624 p_hwfn->p_dev->num_ports_in_engine = 2; 3625 } else { 3626 DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n", 3627 p_hwfn->p_dev->num_ports_in_engine); 3628 3629 /* Default num_ports_in_engine to something */ 3630 p_hwfn->p_dev->num_ports_in_engine = 1; 3631 } 3632 } 3633 3634 static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn, 3635 struct ecore_ptt *p_ptt) 3636 { 3637 u32 port; 3638 int i; 3639 3640 p_hwfn->p_dev->num_ports_in_engine = 0; 3641 3642 #ifndef ASIC_ONLY 3643 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 3644 port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); 3645 switch ((port & 0xf000) >> 12) { 3646 case 1: 3647 p_hwfn->p_dev->num_ports_in_engine = 1; 3648 break; 3649 case 3: 3650 p_hwfn->p_dev->num_ports_in_engine = 2; 3651 break; 3652 case 0xf: 3653 p_hwfn->p_dev->num_ports_in_engine = 4; 3654 break; 3655 default: 3656 DP_NOTICE(p_hwfn, false, 3657 "Unknown port mode in ECO_RESERVED %08x\n", 3658 port); 3659 } 3660 } else 3661 #endif 3662 for (i = 0; i < MAX_NUM_PORTS_K2; i++) { 3663 port = ecore_rd(p_hwfn, p_ptt, 3664 CNIG_REG_NIG_PORT0_CONF_K2_E5 + (i * 4)); 3665 if (port & 1) 3666 p_hwfn->p_dev->num_ports_in_engine++; 3667 } 3668 3669 if (!p_hwfn->p_dev->num_ports_in_engine) { 3670 DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n"); 3671 3672 /* Default num_ports_in_engine to something */ 3673 p_hwfn->p_dev->num_ports_in_engine = 1; 3674 } 3675 } 3676 3677 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn, 3678 struct ecore_ptt *p_ptt) 3679 { 3680 if (ECORE_IS_BB(p_hwfn->p_dev)) 3681 ecore_hw_info_port_num_bb(p_hwfn, p_ptt); 3682 else 3683 ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt); 3684 } 3685 3686 static enum _ecore_status_t 3687 ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3688 enum ecore_pci_personality personality, 3689 struct ecore_hw_prepare_params *p_params) 3690 { 3691 bool drv_resc_alloc = p_params->drv_resc_alloc; 3692 enum _ecore_status_t rc; 3693 3694 /* Since all information is common, only first hwfns should do this */ 3695 if (IS_LEAD_HWFN(p_hwfn)) { 3696 rc = ecore_iov_hw_info(p_hwfn); 3697 if (rc != ECORE_SUCCESS) { 3698 if (p_params->b_relaxed_probe) 3699 p_params->p_relaxed_res = 3700 ECORE_HW_PREPARE_BAD_IOV; 3701 else 3702 return rc; 3703 } 3704 } 3705 3706 /* TODO In get_hw_info, amoungst others: 3707 * Get MCP FW revision and determine according to it the supported 3708 * featrues (e.g. DCB) 3709 * Get boot mode 3710 * ecore_get_pcie_width_speed, WOL capability. 3711 * Number of global CQ-s (for storage 3712 */ 3713 ecore_hw_info_port_num(p_hwfn, p_ptt); 3714 3715 ecore_mcp_get_capabilities(p_hwfn, p_ptt); 3716 3717 #ifndef ASIC_ONLY 3718 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) { 3719 #endif 3720 rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params); 3721 if (rc != ECORE_SUCCESS) 3722 return rc; 3723 #ifndef ASIC_ONLY 3724 } 3725 #endif 3726 3727 rc = ecore_int_igu_read_cam(p_hwfn, p_ptt); 3728 if (rc != ECORE_SUCCESS) { 3729 if (p_params->b_relaxed_probe) 3730 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU; 3731 else 3732 return rc; 3733 } 3734 3735 #ifndef ASIC_ONLY 3736 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) { 3737 #endif 3738 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, 3739 p_hwfn->mcp_info->func_info.mac, ETH_ALEN); 3740 #ifndef ASIC_ONLY 3741 } else { 3742 static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6}; 3743 3744 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN); 3745 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id; 3746 } 3747 #endif 3748 3749 if (ecore_mcp_is_init(p_hwfn)) { 3750 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET) 3751 p_hwfn->hw_info.ovlan = 3752 p_hwfn->mcp_info->func_info.ovlan; 3753 3754 ecore_mcp_cmd_port_init(p_hwfn, p_ptt); 3755 } 3756 3757 if (personality != ECORE_PCI_DEFAULT) { 3758 p_hwfn->hw_info.personality = personality; 3759 } else if (ecore_mcp_is_init(p_hwfn)) { 3760 enum ecore_pci_personality protocol; 3761 3762 protocol = p_hwfn->mcp_info->func_info.protocol; 3763 p_hwfn->hw_info.personality = protocol; 3764 } 3765 3766 #ifndef ASIC_ONLY 3767 /* To overcome ILT lack for emulation, until at least until we'll have 3768 * a definite answer from system about it, allow only PF0 to be RoCE. 3769 */ 3770 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) { 3771 if (!p_hwfn->rel_pf_id) 3772 p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE; 3773 else 3774 p_hwfn->hw_info.personality = ECORE_PCI_ETH; 3775 } 3776 #endif 3777 3778 /* although in BB some constellations may support more than 4 tcs, 3779 * that can result in performance penalty in some cases. 4 3780 * represents a good tradeoff between performance and flexibility. 3781 */ 3782 p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; 3783 3784 /* start out with a single active tc. This can be increased either 3785 * by dcbx negotiation or by upper layer driver 3786 */ 3787 p_hwfn->hw_info.num_active_tc = 1; 3788 3789 ecore_get_num_funcs(p_hwfn, p_ptt); 3790 3791 if (ecore_mcp_is_init(p_hwfn)) 3792 p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; 3793 3794 /* In case of forcing the driver's default resource allocation, calling 3795 * ecore_hw_get_resc() should come after initializing the personality 3796 * and after getting the number of functions, since the calculation of 3797 * the resources/features depends on them. 3798 * This order is not harmful if not forcing. 3799 */ 3800 rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc); 3801 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 3802 rc = ECORE_SUCCESS; 3803 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 3804 } 3805 3806 return rc; 3807 } 3808 3809 static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev) 3810 { 3811 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3812 u16 device_id_mask; 3813 u32 tmp; 3814 3815 /* Read Vendor Id / Device Id */ 3816 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET, 3817 &p_dev->vendor_id); 3818 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET, 3819 &p_dev->device_id); 3820 3821 /* Determine type */ 3822 device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK; 3823 switch (device_id_mask) { 3824 case ECORE_DEV_ID_MASK_BB: 3825 p_dev->type = ECORE_DEV_TYPE_BB; 3826 break; 3827 case ECORE_DEV_ID_MASK_AH: 3828 p_dev->type = ECORE_DEV_TYPE_AH; 3829 break; 3830 default: 3831 DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n", 3832 p_dev->device_id); 3833 return ECORE_ABORTED; 3834 } 3835 3836 p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3837 MISCS_REG_CHIP_NUM); 3838 p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3839 MISCS_REG_CHIP_REV); 3840 3841 MASK_FIELD(CHIP_REV, p_dev->chip_rev); 3842 3843 /* Learn number of HW-functions */ 3844 tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3845 MISCS_REG_CMT_ENABLED_FOR_PAIR); 3846 3847 if (tmp & (1 << p_hwfn->rel_pf_id)) { 3848 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n"); 3849 p_dev->num_hwfns = 2; 3850 } else { 3851 p_dev->num_hwfns = 1; 3852 } 3853 3854 #ifndef ASIC_ONLY 3855 if (CHIP_REV_IS_EMUL(p_dev)) { 3856 /* For some reason we have problems with this register 3857 * in B0 emulation; Simply assume no CMT 3858 */ 3859 DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n"); 3860 p_dev->num_hwfns = 1; 3861 } 3862 #endif 3863 3864 p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3865 MISCS_REG_CHIP_TEST_REG) >> 4; 3866 MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id); 3867 p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3868 MISCS_REG_CHIP_METAL); 3869 MASK_FIELD(CHIP_METAL, p_dev->chip_metal); 3870 DP_INFO(p_dev->hwfns, 3871 "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 3872 ECORE_IS_BB(p_dev) ? "BB" : "AH", 3873 'A' + p_dev->chip_rev, (int)p_dev->chip_metal, 3874 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, 3875 p_dev->chip_metal); 3876 3877 if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) { 3878 DP_NOTICE(p_dev->hwfns, false, 3879 "The chip type/rev (BB A0) is not supported!\n"); 3880 return ECORE_ABORTED; 3881 } 3882 3883 #ifndef ASIC_ONLY 3884 if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev)) 3885 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3886 MISCS_REG_PLL_MAIN_CTRL_4, 0x1); 3887 3888 if (CHIP_REV_IS_EMUL(p_dev)) { 3889 tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3890 MISCS_REG_ECO_RESERVED); 3891 if (tmp & (1 << 29)) { 3892 DP_NOTICE(p_hwfn, false, "Emulation: Running on a FULL build\n"); 3893 p_dev->b_is_emul_full = true; 3894 } else { 3895 DP_NOTICE(p_hwfn, false, "Emulation: Running on a REDUCED build\n"); 3896 } 3897 } 3898 #endif 3899 3900 return ECORE_SUCCESS; 3901 } 3902 3903 #ifndef LINUX_REMOVE 3904 void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev) 3905 { 3906 int j; 3907 3908 if (IS_VF(p_dev)) 3909 return; 3910 3911 for_each_hwfn(p_dev, j) { 3912 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 3913 3914 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n"); 3915 3916 p_hwfn->hw_init_done = false; 3917 p_hwfn->first_on_engine = false; 3918 3919 ecore_ptt_invalidate(p_hwfn); 3920 } 3921 } 3922 3923 void ecore_hw_hibernate_resume(struct ecore_dev *p_dev) 3924 { 3925 int j = 0; 3926 3927 if (IS_VF(p_dev)) 3928 return; 3929 3930 for_each_hwfn(p_dev, j) { 3931 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 3932 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 3933 3934 ecore_hw_hwfn_prepare(p_hwfn); 3935 3936 if (!p_ptt) 3937 DP_NOTICE(p_hwfn, true, "ptt acquire failed\n"); 3938 else { 3939 ecore_load_mcp_offsets(p_hwfn, p_ptt); 3940 ecore_ptt_release(p_hwfn, p_ptt); 3941 } 3942 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n"); 3943 } 3944 } 3945 3946 #endif 3947 3948 static enum _ecore_status_t ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, 3949 void OSAL_IOMEM *p_regview, 3950 void OSAL_IOMEM *p_doorbells, 3951 struct ecore_hw_prepare_params *p_params) 3952 { 3953 struct ecore_mdump_retain_data mdump_retain; 3954 struct ecore_dev *p_dev = p_hwfn->p_dev; 3955 struct ecore_mdump_info mdump_info; 3956 enum _ecore_status_t rc = ECORE_SUCCESS; 3957 3958 /* Split PCI bars evenly between hwfns */ 3959 p_hwfn->regview = p_regview; 3960 p_hwfn->doorbells = p_doorbells; 3961 3962 if (IS_VF(p_dev)) 3963 return ecore_vf_hw_prepare(p_hwfn); 3964 3965 /* Validate that chip access is feasible */ 3966 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 3967 DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n"); 3968 if (p_params->b_relaxed_probe) 3969 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME; 3970 return ECORE_INVAL; 3971 } 3972 3973 get_function_id(p_hwfn); 3974 3975 /* Allocate PTT pool */ 3976 rc = ecore_ptt_pool_alloc(p_hwfn); 3977 if (rc) { 3978 DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n"); 3979 if (p_params->b_relaxed_probe) 3980 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 3981 goto err0; 3982 } 3983 3984 /* Allocate the main PTT */ 3985 p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 3986 3987 /* First hwfn learns basic information, e.g., number of hwfns */ 3988 if (!p_hwfn->my_id) { 3989 rc = ecore_get_dev_info(p_dev); 3990 if (rc != ECORE_SUCCESS) { 3991 if (p_params->b_relaxed_probe) 3992 p_params->p_relaxed_res = 3993 ECORE_HW_PREPARE_FAILED_DEV; 3994 goto err1; 3995 } 3996 } 3997 3998 ecore_hw_hwfn_prepare(p_hwfn); 3999 4000 /* Initialize MCP structure */ 4001 rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 4002 if (rc) { 4003 DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n"); 4004 if (p_params->b_relaxed_probe) 4005 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4006 goto err1; 4007 } 4008 4009 /* Read the device configuration information from the HW and SHMEM */ 4010 rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, 4011 p_params->personality, p_params); 4012 if (rc) { 4013 DP_NOTICE(p_hwfn, true, "Failed to get HW information\n"); 4014 goto err2; 4015 } 4016 4017 /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is 4018 * called, since among others it sets the ports number in an engine. 4019 */ 4020 if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) && 4021 !p_dev->recov_in_prog) { 4022 rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 4023 if (rc != ECORE_SUCCESS) 4024 DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n"); 4025 } 4026 4027 /* Check if mdump logs/data are present and update the epoch value */ 4028 if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) { 4029 rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt, 4030 &mdump_info); 4031 if (rc == ECORE_SUCCESS && mdump_info.num_of_logs) 4032 DP_NOTICE(p_hwfn, false, 4033 "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n"); 4034 4035 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt, 4036 &mdump_retain); 4037 if (rc == ECORE_SUCCESS && mdump_retain.valid) 4038 DP_NOTICE(p_hwfn, false, 4039 "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n", 4040 mdump_retain.epoch, mdump_retain.pf, 4041 mdump_retain.status); 4042 4043 ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, 4044 p_params->epoch); 4045 } 4046 4047 /* Allocate the init RT array and initialize the init-ops engine */ 4048 rc = ecore_init_alloc(p_hwfn); 4049 if (rc) { 4050 DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n"); 4051 if (p_params->b_relaxed_probe) 4052 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4053 goto err2; 4054 } 4055 4056 #ifndef ASIC_ONLY 4057 if (CHIP_REV_IS_FPGA(p_dev)) { 4058 DP_NOTICE(p_hwfn, false, 4059 "FPGA: workaround; Prevent DMAE parities\n"); 4060 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5, 4061 7); 4062 4063 DP_NOTICE(p_hwfn, false, 4064 "FPGA: workaround: Set VF bar0 size\n"); 4065 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4066 PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4); 4067 } 4068 #endif 4069 4070 return rc; 4071 err2: 4072 if (IS_LEAD_HWFN(p_hwfn)) 4073 ecore_iov_free_hw_info(p_dev); 4074 ecore_mcp_free(p_hwfn); 4075 err1: 4076 ecore_hw_hwfn_free(p_hwfn); 4077 err0: 4078 return rc; 4079 } 4080 4081 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, 4082 struct ecore_hw_prepare_params *p_params) 4083 { 4084 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4085 enum _ecore_status_t rc; 4086 4087 p_dev->chk_reg_fifo = p_params->chk_reg_fifo; 4088 p_dev->allow_mdump = p_params->allow_mdump; 4089 4090 if (p_params->b_relaxed_probe) 4091 p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; 4092 4093 /* Store the precompiled init data ptrs */ 4094 if (IS_PF(p_dev)) 4095 ecore_init_iro_array(p_dev); 4096 4097 /* Initialize the first hwfn - will learn number of hwfns */ 4098 rc = ecore_hw_prepare_single(p_hwfn, 4099 p_dev->regview, 4100 p_dev->doorbells, p_params); 4101 if (rc != ECORE_SUCCESS) 4102 return rc; 4103 4104 p_params->personality = p_hwfn->hw_info.personality; 4105 4106 /* initilalize 2nd hwfn if necessary */ 4107 if (p_dev->num_hwfns > 1) { 4108 void OSAL_IOMEM *p_regview, *p_doorbell; 4109 u8 OSAL_IOMEM *addr; 4110 4111 /* adjust bar offset for second engine */ 4112 addr = (u8 OSAL_IOMEM *)p_dev->regview + 4113 ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2; 4114 p_regview = (void OSAL_IOMEM *)addr; 4115 4116 addr = (u8 OSAL_IOMEM *)p_dev->doorbells + 4117 ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2; 4118 p_doorbell = (void OSAL_IOMEM *)addr; 4119 4120 /* prepare second hw function */ 4121 rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, 4122 p_doorbell, p_params); 4123 4124 /* in case of error, need to free the previously 4125 * initiliazed hwfn 0. 4126 */ 4127 if (rc != ECORE_SUCCESS) { 4128 if (p_params->b_relaxed_probe) 4129 p_params->p_relaxed_res = 4130 ECORE_HW_PREPARE_FAILED_ENG2; 4131 4132 if (IS_PF(p_dev)) { 4133 ecore_init_free(p_hwfn); 4134 ecore_mcp_free(p_hwfn); 4135 ecore_hw_hwfn_free(p_hwfn); 4136 } else { 4137 DP_NOTICE(p_dev, true, "What do we need to free when VF hwfn1 init fails\n"); 4138 } 4139 return rc; 4140 } 4141 } 4142 4143 return rc; 4144 } 4145 4146 void ecore_hw_remove(struct ecore_dev *p_dev) 4147 { 4148 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4149 int i; 4150 4151 if (IS_PF(p_dev)) 4152 ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 4153 ECORE_OV_DRIVER_STATE_NOT_LOADED); 4154 4155 for_each_hwfn(p_dev, i) { 4156 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 4157 4158 if (IS_VF(p_dev)) { 4159 ecore_vf_pf_release(p_hwfn); 4160 continue; 4161 } 4162 4163 ecore_init_free(p_hwfn); 4164 ecore_hw_hwfn_free(p_hwfn); 4165 ecore_mcp_free(p_hwfn); 4166 4167 OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex); 4168 } 4169 4170 ecore_iov_free_hw_info(p_dev); 4171 } 4172 4173 static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev, 4174 struct ecore_chain *p_chain) 4175 { 4176 void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL; 4177 dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 4178 struct ecore_chain_next *p_next; 4179 u32 size, i; 4180 4181 if (!p_virt) 4182 return; 4183 4184 size = p_chain->elem_size * p_chain->usable_per_page; 4185 4186 for (i = 0; i < p_chain->page_cnt; i++) { 4187 if (!p_virt) 4188 break; 4189 4190 p_next = (struct ecore_chain_next *)((u8 *)p_virt + size); 4191 p_virt_next = p_next->next_virt; 4192 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 4193 4194 OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys, 4195 ECORE_CHAIN_PAGE_SIZE); 4196 4197 p_virt = p_virt_next; 4198 p_phys = p_phys_next; 4199 } 4200 } 4201 4202 static void ecore_chain_free_single(struct ecore_dev *p_dev, 4203 struct ecore_chain *p_chain) 4204 { 4205 if (!p_chain->p_virt_addr) 4206 return; 4207 4208 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr, 4209 p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE); 4210 } 4211 4212 static void ecore_chain_free_pbl(struct ecore_dev *p_dev, 4213 struct ecore_chain *p_chain) 4214 { 4215 void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; 4216 u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table; 4217 u32 page_cnt = p_chain->page_cnt, i, pbl_size; 4218 4219 if (!pp_virt_addr_tbl) 4220 return; 4221 4222 if (!p_pbl_virt) 4223 goto out; 4224 4225 for (i = 0; i < page_cnt; i++) { 4226 if (!pp_virt_addr_tbl[i]) 4227 break; 4228 4229 OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i], 4230 *(dma_addr_t *)p_pbl_virt, 4231 ECORE_CHAIN_PAGE_SIZE); 4232 4233 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4234 } 4235 4236 pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4237 4238 if (!p_chain->b_external_pbl) { 4239 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table, 4240 p_chain->pbl_sp.p_phys_table, pbl_size); 4241 } 4242 out: 4243 OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl); 4244 p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL; 4245 } 4246 4247 void ecore_chain_free(struct ecore_dev *p_dev, 4248 struct ecore_chain *p_chain) 4249 { 4250 switch (p_chain->mode) { 4251 case ECORE_CHAIN_MODE_NEXT_PTR: 4252 ecore_chain_free_next_ptr(p_dev, p_chain); 4253 break; 4254 case ECORE_CHAIN_MODE_SINGLE: 4255 ecore_chain_free_single(p_dev, p_chain); 4256 break; 4257 case ECORE_CHAIN_MODE_PBL: 4258 ecore_chain_free_pbl(p_dev, p_chain); 4259 break; 4260 } 4261 } 4262 4263 static enum _ecore_status_t 4264 ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev, 4265 enum ecore_chain_cnt_type cnt_type, 4266 osal_size_t elem_size, u32 page_cnt) 4267 { 4268 u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 4269 4270 /* The actual chain size can be larger than the maximal possible value 4271 * after rounding up the requested elements number to pages, and after 4272 * taking into acount the unusuable elements (next-ptr elements). 4273 * The size of a "u16" chain can be (U16_MAX + 1) since the chain 4274 * size/capacity fields are of a u32 type. 4275 */ 4276 if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 && 4277 chain_size > ((u32)ECORE_U16_MAX + 1)) || 4278 (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 && 4279 chain_size > ECORE_U32_MAX)) { 4280 DP_NOTICE(p_dev, true, 4281 "The actual chain size (0x%llx) is larger than the maximal possible value\n", 4282 chain_size); 4283 return ECORE_INVAL; 4284 } 4285 4286 return ECORE_SUCCESS; 4287 } 4288 4289 static enum _ecore_status_t 4290 ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4291 { 4292 void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL; 4293 dma_addr_t p_phys = 0; 4294 u32 i; 4295 4296 for (i = 0; i < p_chain->page_cnt; i++) { 4297 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4298 ECORE_CHAIN_PAGE_SIZE); 4299 if (!p_virt) { 4300 DP_NOTICE(p_dev, true, 4301 "Failed to allocate chain memory\n"); 4302 return ECORE_NOMEM; 4303 } 4304 4305 if (i == 0) { 4306 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4307 ecore_chain_reset(p_chain); 4308 } else { 4309 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4310 p_virt, p_phys); 4311 } 4312 4313 p_virt_prev = p_virt; 4314 } 4315 /* Last page's next element should point to the beginning of the 4316 * chain. 4317 */ 4318 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4319 p_chain->p_virt_addr, 4320 p_chain->p_phys_addr); 4321 4322 return ECORE_SUCCESS; 4323 } 4324 4325 static enum _ecore_status_t 4326 ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4327 { 4328 dma_addr_t p_phys = 0; 4329 void *p_virt = OSAL_NULL; 4330 4331 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); 4332 if (!p_virt) { 4333 DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n"); 4334 return ECORE_NOMEM; 4335 } 4336 4337 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4338 ecore_chain_reset(p_chain); 4339 4340 return ECORE_SUCCESS; 4341 } 4342 4343 static enum _ecore_status_t 4344 ecore_chain_alloc_pbl(struct ecore_dev *p_dev, 4345 struct ecore_chain *p_chain, 4346 struct ecore_chain_ext_pbl *ext_pbl) 4347 { 4348 void *p_virt = OSAL_NULL; 4349 u8 *p_pbl_virt = OSAL_NULL; 4350 void **pp_virt_addr_tbl = OSAL_NULL; 4351 dma_addr_t p_phys = 0, p_pbl_phys = 0; 4352 u32 page_cnt = p_chain->page_cnt, size, i; 4353 4354 size = page_cnt * sizeof(*pp_virt_addr_tbl); 4355 pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); 4356 if (!pp_virt_addr_tbl) { 4357 DP_NOTICE(p_dev, true, 4358 "Failed to allocate memory for the chain virtual addresses table\n"); 4359 return ECORE_NOMEM; 4360 } 4361 4362 /* The allocation of the PBL table is done with its full size, since it 4363 * is expected to be successive. 4364 * ecore_chain_init_pbl_mem() is called even in a case of an allocation 4365 * failure, since pp_virt_addr_tbl was previously allocated, and it 4366 * should be saved to allow its freeing during the error flow. 4367 */ 4368 size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4369 4370 if (ext_pbl == OSAL_NULL) { 4371 p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); 4372 } else { 4373 p_pbl_virt = ext_pbl->p_pbl_virt; 4374 p_pbl_phys = ext_pbl->p_pbl_phys; 4375 p_chain->b_external_pbl = true; 4376 } 4377 4378 ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, 4379 pp_virt_addr_tbl); 4380 if (!p_pbl_virt) { 4381 DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n"); 4382 return ECORE_NOMEM; 4383 } 4384 4385 for (i = 0; i < page_cnt; i++) { 4386 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4387 ECORE_CHAIN_PAGE_SIZE); 4388 if (!p_virt) { 4389 DP_NOTICE(p_dev, true, 4390 "Failed to allocate chain memory\n"); 4391 return ECORE_NOMEM; 4392 } 4393 4394 if (i == 0) { 4395 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4396 ecore_chain_reset(p_chain); 4397 } 4398 4399 /* Fill the PBL table with the physical address of the page */ 4400 *(dma_addr_t *)p_pbl_virt = p_phys; 4401 /* Keep the virtual address of the page */ 4402 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; 4403 4404 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4405 } 4406 4407 return ECORE_SUCCESS; 4408 } 4409 4410 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, 4411 enum ecore_chain_use_mode intended_use, 4412 enum ecore_chain_mode mode, 4413 enum ecore_chain_cnt_type cnt_type, 4414 u32 num_elems, osal_size_t elem_size, 4415 struct ecore_chain *p_chain, 4416 struct ecore_chain_ext_pbl *ext_pbl) 4417 { 4418 u32 page_cnt; 4419 enum _ecore_status_t rc = ECORE_SUCCESS; 4420 4421 if (mode == ECORE_CHAIN_MODE_SINGLE) 4422 page_cnt = 1; 4423 else 4424 page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 4425 4426 rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, 4427 page_cnt); 4428 if (rc) { 4429 DP_NOTICE(p_dev, true, 4430 "Cannot allocate a chain with the given arguments:\n" 4431 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 4432 intended_use, mode, cnt_type, num_elems, elem_size); 4433 return rc; 4434 } 4435 4436 ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use, 4437 mode, cnt_type, p_dev->dp_ctx); 4438 4439 switch (mode) { 4440 case ECORE_CHAIN_MODE_NEXT_PTR: 4441 rc = ecore_chain_alloc_next_ptr(p_dev, p_chain); 4442 break; 4443 case ECORE_CHAIN_MODE_SINGLE: 4444 rc = ecore_chain_alloc_single(p_dev, p_chain); 4445 break; 4446 case ECORE_CHAIN_MODE_PBL: 4447 rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl); 4448 break; 4449 } 4450 if (rc) 4451 goto nomem; 4452 4453 return ECORE_SUCCESS; 4454 4455 nomem: 4456 ecore_chain_free(p_dev, p_chain); 4457 return rc; 4458 } 4459 4460 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, 4461 u16 src_id, u16 *dst_id) 4462 { 4463 if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { 4464 u16 min, max; 4465 4466 min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE); 4467 max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 4468 DP_NOTICE(p_hwfn, true, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 4469 src_id, min, max); 4470 4471 return ECORE_INVAL; 4472 } 4473 4474 *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id; 4475 4476 return ECORE_SUCCESS; 4477 } 4478 4479 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, 4480 u8 src_id, u8 *dst_id) 4481 { 4482 if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { 4483 u8 min, max; 4484 4485 min = (u8)RESC_START(p_hwfn, ECORE_VPORT); 4486 max = min + RESC_NUM(p_hwfn, ECORE_VPORT); 4487 DP_NOTICE(p_hwfn, true, "vport id [%d] is not valid, available indices [%d - %d]\n", 4488 src_id, min, max); 4489 4490 return ECORE_INVAL; 4491 } 4492 4493 *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id; 4494 4495 return ECORE_SUCCESS; 4496 } 4497 4498 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, 4499 u8 src_id, u8 *dst_id) 4500 { 4501 if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) { 4502 u8 min, max; 4503 4504 min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG); 4505 max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG); 4506 DP_NOTICE(p_hwfn, true, "rss_eng id [%d] is not valid, available indices [%d - %d]\n", 4507 src_id, min, max); 4508 4509 return ECORE_INVAL; 4510 } 4511 4512 *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id; 4513 4514 return ECORE_SUCCESS; 4515 } 4516 4517 static enum _ecore_status_t 4518 ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4519 struct ecore_ptt *p_ptt, u32 high, u32 low, 4520 u32 *p_entry_num) 4521 { 4522 u32 en; 4523 int i; 4524 4525 /* Find a free entry and utilize it */ 4526 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4527 en = ecore_rd(p_hwfn, p_ptt, 4528 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4529 i * sizeof(u32)); 4530 if (en) 4531 continue; 4532 ecore_wr(p_hwfn, p_ptt, 4533 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4534 2 * i * sizeof(u32), low); 4535 ecore_wr(p_hwfn, p_ptt, 4536 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4537 (2 * i + 1) * sizeof(u32), high); 4538 ecore_wr(p_hwfn, p_ptt, 4539 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4540 i * sizeof(u32), 0); 4541 ecore_wr(p_hwfn, p_ptt, 4542 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4543 i * sizeof(u32), 0); 4544 ecore_wr(p_hwfn, p_ptt, 4545 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4546 i * sizeof(u32), 1); 4547 break; 4548 } 4549 4550 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4551 return ECORE_NORESOURCES; 4552 4553 *p_entry_num = i; 4554 4555 return ECORE_SUCCESS; 4556 } 4557 4558 static enum _ecore_status_t 4559 ecore_llh_add_mac_filter_e5(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 4560 u32 high, u32 low, u32 *p_entry_num) 4561 { 4562 ECORE_E5_MISSING_CODE; 4563 4564 return ECORE_NOTIMPL; 4565 } 4566 4567 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn, 4568 struct ecore_ptt *p_ptt, u8 *p_filter) 4569 { 4570 u32 high, low, entry_num; 4571 enum _ecore_status_t rc; 4572 4573 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4574 return ECORE_SUCCESS; 4575 4576 high = p_filter[1] | (p_filter[0] << 8); 4577 low = p_filter[5] | (p_filter[4] << 8) | 4578 (p_filter[3] << 16) | (p_filter[2] << 24); 4579 4580 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4581 rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low, 4582 &entry_num); 4583 else /* E5 */ 4584 rc = ecore_llh_add_mac_filter_e5(p_hwfn, p_ptt, high, low, 4585 &entry_num); 4586 if (rc != ECORE_SUCCESS) { 4587 DP_NOTICE(p_hwfn, false, 4588 "Failed to find an empty LLH filter to utilize\n"); 4589 return rc; 4590 } 4591 4592 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4593 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n", 4594 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4595 p_filter[4], p_filter[5], entry_num); 4596 4597 return ECORE_SUCCESS; 4598 } 4599 4600 static enum _ecore_status_t 4601 ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4602 struct ecore_ptt *p_ptt, u32 high, u32 low, 4603 u32 *p_entry_num) 4604 { 4605 int i; 4606 4607 /* Find the entry and clean it */ 4608 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4609 if (ecore_rd(p_hwfn, p_ptt, 4610 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4611 2 * i * sizeof(u32)) != low) 4612 continue; 4613 if (ecore_rd(p_hwfn, p_ptt, 4614 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4615 (2 * i + 1) * sizeof(u32)) != high) 4616 continue; 4617 4618 ecore_wr(p_hwfn, p_ptt, 4619 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 4620 ecore_wr(p_hwfn, p_ptt, 4621 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4622 2 * i * sizeof(u32), 0); 4623 ecore_wr(p_hwfn, p_ptt, 4624 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4625 (2 * i + 1) * sizeof(u32), 0); 4626 break; 4627 } 4628 4629 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4630 return ECORE_INVAL; 4631 4632 *p_entry_num = i; 4633 4634 return ECORE_SUCCESS; 4635 } 4636 4637 static enum _ecore_status_t 4638 ecore_llh_remove_mac_filter_e5(struct ecore_hwfn *p_hwfn, 4639 struct ecore_ptt *p_ptt, u32 high, u32 low, 4640 u32 *p_entry_num) 4641 { 4642 ECORE_E5_MISSING_CODE; 4643 4644 return ECORE_NOTIMPL; 4645 } 4646 4647 void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn, 4648 struct ecore_ptt *p_ptt, u8 *p_filter) 4649 { 4650 u32 high, low, entry_num; 4651 enum _ecore_status_t rc; 4652 4653 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4654 return; 4655 4656 high = p_filter[1] | (p_filter[0] << 8); 4657 low = p_filter[5] | (p_filter[4] << 8) | 4658 (p_filter[3] << 16) | (p_filter[2] << 24); 4659 4660 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4661 rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high, 4662 low, &entry_num); 4663 else /* E5 */ 4664 rc = ecore_llh_remove_mac_filter_e5(p_hwfn, p_ptt, high, low, 4665 &entry_num); 4666 if (rc != ECORE_SUCCESS) { 4667 DP_NOTICE(p_hwfn, false, 4668 "Tried to remove a non-configured filter [MAC %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx]\n", 4669 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4670 p_filter[4], p_filter[5]); 4671 return; 4672 } 4673 4674 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4675 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n", 4676 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4677 p_filter[4], p_filter[5], entry_num); 4678 } 4679 4680 static enum _ecore_status_t 4681 ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4682 struct ecore_ptt *p_ptt, 4683 enum ecore_llh_port_filter_type_t type, 4684 u32 high, u32 low, u32 *p_entry_num) 4685 { 4686 u32 en; 4687 int i; 4688 4689 /* Find a free entry and utilize it */ 4690 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4691 en = ecore_rd(p_hwfn, p_ptt, 4692 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4693 i * sizeof(u32)); 4694 if (en) 4695 continue; 4696 ecore_wr(p_hwfn, p_ptt, 4697 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4698 2 * i * sizeof(u32), low); 4699 ecore_wr(p_hwfn, p_ptt, 4700 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4701 (2 * i + 1) * sizeof(u32), high); 4702 ecore_wr(p_hwfn, p_ptt, 4703 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4704 i * sizeof(u32), 1); 4705 ecore_wr(p_hwfn, p_ptt, 4706 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4707 i * sizeof(u32), 1 << type); 4708 ecore_wr(p_hwfn, p_ptt, 4709 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1); 4710 break; 4711 } 4712 4713 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4714 return ECORE_NORESOURCES; 4715 4716 *p_entry_num = i; 4717 4718 return ECORE_SUCCESS; 4719 } 4720 4721 static enum _ecore_status_t 4722 ecore_llh_add_protocol_filter_e5(struct ecore_hwfn *p_hwfn, 4723 struct ecore_ptt *p_ptt, 4724 enum ecore_llh_port_filter_type_t type, 4725 u32 high, u32 low, u32 *p_entry_num) 4726 { 4727 ECORE_E5_MISSING_CODE; 4728 4729 return ECORE_NOTIMPL; 4730 } 4731 4732 enum _ecore_status_t 4733 ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn, 4734 struct ecore_ptt *p_ptt, 4735 u16 source_port_or_eth_type, 4736 u16 dest_port, 4737 enum ecore_llh_port_filter_type_t type) 4738 { 4739 u32 high, low, entry_num; 4740 enum _ecore_status_t rc; 4741 4742 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4743 return ECORE_SUCCESS; 4744 4745 high = 0; 4746 low = 0; 4747 switch (type) { 4748 case ECORE_LLH_FILTER_ETHERTYPE: 4749 high = source_port_or_eth_type; 4750 break; 4751 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4752 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4753 low = source_port_or_eth_type << 16; 4754 break; 4755 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4756 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4757 low = dest_port; 4758 break; 4759 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4760 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4761 low = (source_port_or_eth_type << 16) | dest_port; 4762 break; 4763 default: 4764 DP_NOTICE(p_hwfn, true, 4765 "Non valid LLH protocol filter type %d\n", type); 4766 return ECORE_INVAL; 4767 } 4768 4769 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4770 rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 4771 high, low, &entry_num); 4772 else /* E5 */ 4773 rc = ecore_llh_add_protocol_filter_e5(p_hwfn, p_ptt, type, high, 4774 low, &entry_num); 4775 if (rc != ECORE_SUCCESS) { 4776 DP_NOTICE(p_hwfn, false, 4777 "Failed to find an empty LLH filter to utilize\n"); 4778 return rc; 4779 } 4780 4781 switch (type) { 4782 case ECORE_LLH_FILTER_ETHERTYPE: 4783 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4784 "ETH type %x is added at %d\n", 4785 source_port_or_eth_type, entry_num); 4786 break; 4787 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4788 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4789 "TCP src port %x is added at %d\n", 4790 source_port_or_eth_type, entry_num); 4791 break; 4792 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4793 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4794 "UDP src port %x is added at %d\n", 4795 source_port_or_eth_type, entry_num); 4796 break; 4797 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4798 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4799 "TCP dst port %x is added at %d\n", 4800 dest_port, entry_num); 4801 break; 4802 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4803 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4804 "UDP dst port %x is added at %d\n", 4805 dest_port, entry_num); 4806 break; 4807 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4808 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4809 "TCP src/dst ports %x/%x are added at %d\n", 4810 source_port_or_eth_type, dest_port, entry_num); 4811 break; 4812 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4813 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4814 "UDP src/dst ports %x/%x are added at %d\n", 4815 source_port_or_eth_type, dest_port, entry_num); 4816 break; 4817 } 4818 4819 return ECORE_SUCCESS; 4820 } 4821 4822 static enum _ecore_status_t 4823 ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4824 struct ecore_ptt *p_ptt, 4825 enum ecore_llh_port_filter_type_t type, 4826 u32 high, u32 low, u32 *p_entry_num) 4827 { 4828 int i; 4829 4830 /* Find the entry and clean it */ 4831 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4832 if (!ecore_rd(p_hwfn, p_ptt, 4833 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4834 i * sizeof(u32))) 4835 continue; 4836 if (!ecore_rd(p_hwfn, p_ptt, 4837 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4838 i * sizeof(u32))) 4839 continue; 4840 if (!(ecore_rd(p_hwfn, p_ptt, 4841 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4842 i * sizeof(u32)) & (1 << type))) 4843 continue; 4844 if (ecore_rd(p_hwfn, p_ptt, 4845 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4846 2 * i * sizeof(u32)) != low) 4847 continue; 4848 if (ecore_rd(p_hwfn, p_ptt, 4849 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4850 (2 * i + 1) * sizeof(u32)) != high) 4851 continue; 4852 4853 ecore_wr(p_hwfn, p_ptt, 4854 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 4855 ecore_wr(p_hwfn, p_ptt, 4856 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4857 i * sizeof(u32), 0); 4858 ecore_wr(p_hwfn, p_ptt, 4859 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4860 i * sizeof(u32), 0); 4861 ecore_wr(p_hwfn, p_ptt, 4862 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4863 2 * i * sizeof(u32), 0); 4864 ecore_wr(p_hwfn, p_ptt, 4865 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4866 (2 * i + 1) * sizeof(u32), 0); 4867 break; 4868 } 4869 4870 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4871 return ECORE_INVAL; 4872 4873 *p_entry_num = i; 4874 4875 return ECORE_SUCCESS; 4876 } 4877 4878 static enum _ecore_status_t 4879 ecore_llh_remove_protocol_filter_e5(struct ecore_hwfn *p_hwfn, 4880 struct ecore_ptt *p_ptt, 4881 enum ecore_llh_port_filter_type_t type, 4882 u32 high, u32 low, u32 *p_entry_num) 4883 { 4884 ECORE_E5_MISSING_CODE; 4885 4886 return ECORE_NOTIMPL; 4887 } 4888 4889 void 4890 ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn, 4891 struct ecore_ptt *p_ptt, 4892 u16 source_port_or_eth_type, 4893 u16 dest_port, 4894 enum ecore_llh_port_filter_type_t type) 4895 { 4896 u32 high, low, entry_num; 4897 enum _ecore_status_t rc; 4898 4899 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4900 return; 4901 4902 high = 0; 4903 low = 0; 4904 switch (type) { 4905 case ECORE_LLH_FILTER_ETHERTYPE: 4906 high = source_port_or_eth_type; 4907 break; 4908 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4909 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4910 low = source_port_or_eth_type << 16; 4911 break; 4912 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4913 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4914 low = dest_port; 4915 break; 4916 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4917 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4918 low = (source_port_or_eth_type << 16) | dest_port; 4919 break; 4920 default: 4921 DP_NOTICE(p_hwfn, true, 4922 "Non valid LLH protocol filter type %d\n", type); 4923 return; 4924 } 4925 4926 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4927 rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 4928 high, low, 4929 &entry_num); 4930 else /* E5 */ 4931 rc = ecore_llh_remove_protocol_filter_e5(p_hwfn, p_ptt, type, 4932 high, low, &entry_num); 4933 if (rc != ECORE_SUCCESS) { 4934 DP_NOTICE(p_hwfn, false, 4935 "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n", 4936 type, source_port_or_eth_type, dest_port); 4937 return; 4938 } 4939 4940 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4941 "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n", 4942 type, source_port_or_eth_type, dest_port, entry_num); 4943 } 4944 4945 static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn, 4946 struct ecore_ptt *p_ptt) 4947 { 4948 int i; 4949 4950 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4951 ecore_wr(p_hwfn, p_ptt, 4952 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4953 i * sizeof(u32), 0); 4954 ecore_wr(p_hwfn, p_ptt, 4955 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4956 2 * i * sizeof(u32), 0); 4957 ecore_wr(p_hwfn, p_ptt, 4958 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4959 (2 * i + 1) * sizeof(u32), 0); 4960 } 4961 } 4962 4963 static void ecore_llh_clear_all_filters_e5(struct ecore_hwfn *p_hwfn, 4964 struct ecore_ptt *p_ptt) 4965 { 4966 ECORE_E5_MISSING_CODE; 4967 } 4968 4969 void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn, 4970 struct ecore_ptt *p_ptt) 4971 { 4972 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4973 return; 4974 4975 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4976 ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt); 4977 else /* E5 */ 4978 ecore_llh_clear_all_filters_e5(p_hwfn, p_ptt); 4979 } 4980 4981 enum _ecore_status_t 4982 ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn, 4983 struct ecore_ptt *p_ptt) 4984 { 4985 if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) { 4986 ecore_wr(p_hwfn, p_ptt, 4987 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 4988 1 << p_hwfn->abs_pf_id / 2); 4989 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0); 4990 return ECORE_SUCCESS; 4991 } else { 4992 DP_NOTICE(p_hwfn, false, 4993 "This function can't be set as default\n"); 4994 return ECORE_INVAL; 4995 } 4996 } 4997 4998 static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn, 4999 struct ecore_ptt *p_ptt, 5000 u32 hw_addr, void *p_eth_qzone, 5001 osal_size_t eth_qzone_size, 5002 u8 timeset) 5003 { 5004 struct coalescing_timeset *p_coal_timeset; 5005 5006 if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) { 5007 DP_NOTICE(p_hwfn, true, 5008 "Coalescing configuration not enabled\n"); 5009 return ECORE_INVAL; 5010 } 5011 5012 p_coal_timeset = p_eth_qzone; 5013 OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size); 5014 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); 5015 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); 5016 ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); 5017 5018 return ECORE_SUCCESS; 5019 } 5020 5021 enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, 5022 u16 rx_coal, u16 tx_coal, 5023 void *p_handle) 5024 { 5025 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 5026 enum _ecore_status_t rc = ECORE_SUCCESS; 5027 struct ecore_ptt *p_ptt; 5028 5029 /* TODO - Configuring a single queue's coalescing but 5030 * claiming all queues are abiding same configuration 5031 * for PF and VF both. 5032 */ 5033 5034 if (IS_VF(p_hwfn->p_dev)) 5035 return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal, 5036 tx_coal, p_cid); 5037 5038 p_ptt = ecore_ptt_acquire(p_hwfn); 5039 if (!p_ptt) 5040 return ECORE_AGAIN; 5041 5042 if (rx_coal) { 5043 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 5044 if (rc) 5045 goto out; 5046 p_hwfn->p_dev->rx_coalesce_usecs = rx_coal; 5047 } 5048 5049 if (tx_coal) { 5050 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); 5051 if (rc) 5052 goto out; 5053 p_hwfn->p_dev->tx_coalesce_usecs = tx_coal; 5054 } 5055 out: 5056 ecore_ptt_release(p_hwfn, p_ptt); 5057 5058 return rc; 5059 } 5060 5061 enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, 5062 struct ecore_ptt *p_ptt, 5063 u16 coalesce, 5064 struct ecore_queue_cid *p_cid) 5065 { 5066 struct ustorm_eth_queue_zone eth_qzone; 5067 u8 timeset, timer_res; 5068 u32 address; 5069 enum _ecore_status_t rc; 5070 5071 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5072 if (coalesce <= 0x7F) 5073 timer_res = 0; 5074 else if (coalesce <= 0xFF) 5075 timer_res = 1; 5076 else if (coalesce <= 0x1FF) 5077 timer_res = 2; 5078 else { 5079 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5080 return ECORE_INVAL; 5081 } 5082 timeset = (u8)(coalesce >> timer_res); 5083 5084 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5085 p_cid->sb_igu_id, false); 5086 if (rc != ECORE_SUCCESS) 5087 goto out; 5088 5089 address = BAR0_MAP_REG_USDM_RAM + 5090 USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5091 5092 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 5093 sizeof(struct ustorm_eth_queue_zone), timeset); 5094 if (rc != ECORE_SUCCESS) 5095 goto out; 5096 5097 out: 5098 return rc; 5099 } 5100 5101 enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, 5102 struct ecore_ptt *p_ptt, 5103 u16 coalesce, 5104 struct ecore_queue_cid *p_cid) 5105 { 5106 struct xstorm_eth_queue_zone eth_qzone; 5107 u8 timeset, timer_res; 5108 u32 address; 5109 enum _ecore_status_t rc; 5110 5111 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5112 if (coalesce <= 0x7F) 5113 timer_res = 0; 5114 else if (coalesce <= 0xFF) 5115 timer_res = 1; 5116 else if (coalesce <= 0x1FF) 5117 timer_res = 2; 5118 else { 5119 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5120 return ECORE_INVAL; 5121 } 5122 timeset = (u8)(coalesce >> timer_res); 5123 5124 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5125 p_cid->sb_igu_id, true); 5126 if (rc != ECORE_SUCCESS) 5127 goto out; 5128 5129 address = BAR0_MAP_REG_XSDM_RAM + 5130 XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5131 5132 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 5133 sizeof(struct xstorm_eth_queue_zone), timeset); 5134 out: 5135 return rc; 5136 } 5137 5138 /* Calculate final WFQ values for all vports and configure it. 5139 * After this configuration each vport must have 5140 * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT 5141 */ 5142 static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5143 struct ecore_ptt *p_ptt, 5144 u32 min_pf_rate) 5145 { 5146 struct init_qm_vport_params *vport_params; 5147 int i; 5148 5149 vport_params = p_hwfn->qm_info.qm_vport_params; 5150 5151 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5152 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5153 5154 vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) / 5155 min_pf_rate; 5156 ecore_init_vport_wfq(p_hwfn, p_ptt, 5157 vport_params[i].first_tx_pq_id, 5158 vport_params[i].vport_wfq); 5159 } 5160 } 5161 5162 static void 5163 ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate) 5164 5165 { 5166 int i; 5167 5168 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) 5169 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; 5170 } 5171 5172 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5173 struct ecore_ptt *p_ptt, 5174 u32 min_pf_rate) 5175 { 5176 struct init_qm_vport_params *vport_params; 5177 int i; 5178 5179 vport_params = p_hwfn->qm_info.qm_vport_params; 5180 5181 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5182 ecore_init_wfq_default_param(p_hwfn, min_pf_rate); 5183 ecore_init_vport_wfq(p_hwfn, p_ptt, 5184 vport_params[i].first_tx_pq_id, 5185 vport_params[i].vport_wfq); 5186 } 5187 } 5188 5189 /* This function performs several validations for WFQ 5190 * configuration and required min rate for a given vport 5191 * 1. req_rate must be greater than one percent of min_pf_rate. 5192 * 2. req_rate should not cause other vports [not configured for WFQ explicitly] 5193 * rates to get less than one percent of min_pf_rate. 5194 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 5195 */ 5196 static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn, 5197 u16 vport_id, u32 req_rate, 5198 u32 min_pf_rate) 5199 { 5200 u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 5201 int non_requested_count = 0, req_count = 0, i, num_vports; 5202 5203 num_vports = p_hwfn->qm_info.num_vports; 5204 5205 /* Accounting for the vports which are configured for WFQ explicitly */ 5206 for (i = 0; i < num_vports; i++) { 5207 u32 tmp_speed; 5208 5209 if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) { 5210 req_count++; 5211 tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5212 total_req_min_rate += tmp_speed; 5213 } 5214 } 5215 5216 /* Include current vport data as well */ 5217 req_count++; 5218 total_req_min_rate += req_rate; 5219 non_requested_count = num_vports - req_count; 5220 5221 /* validate possible error cases */ 5222 if (req_rate > min_pf_rate) { 5223 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5224 "Vport [%d] - Requested rate[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5225 vport_id, req_rate, min_pf_rate); 5226 return ECORE_INVAL; 5227 } 5228 5229 if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) { 5230 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5231 "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5232 vport_id, req_rate, min_pf_rate); 5233 return ECORE_INVAL; 5234 } 5235 5236 /* TBD - for number of vports greater than 100 */ 5237 if (num_vports > ECORE_WFQ_UNIT) { 5238 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5239 "Number of vports is greater than %d\n", 5240 ECORE_WFQ_UNIT); 5241 return ECORE_INVAL; 5242 } 5243 5244 if (total_req_min_rate > min_pf_rate) { 5245 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5246 "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5247 total_req_min_rate, min_pf_rate); 5248 return ECORE_INVAL; 5249 } 5250 5251 /* Data left for non requested vports */ 5252 total_left_rate = min_pf_rate - total_req_min_rate; 5253 left_rate_per_vp = total_left_rate / non_requested_count; 5254 5255 /* validate if non requested get < 1% of min bw */ 5256 if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) { 5257 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5258 "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5259 left_rate_per_vp, min_pf_rate); 5260 return ECORE_INVAL; 5261 } 5262 5263 /* now req_rate for given vport passes all scenarios. 5264 * assign final wfq rates to all vports. 5265 */ 5266 p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; 5267 p_hwfn->qm_info.wfq_data[vport_id].configured = true; 5268 5269 for (i = 0; i < num_vports; i++) { 5270 if (p_hwfn->qm_info.wfq_data[i].configured) 5271 continue; 5272 5273 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; 5274 } 5275 5276 return ECORE_SUCCESS; 5277 } 5278 5279 static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn, 5280 struct ecore_ptt *p_ptt, 5281 u16 vp_id, u32 rate) 5282 { 5283 struct ecore_mcp_link_state *p_link; 5284 int rc = ECORE_SUCCESS; 5285 5286 p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output; 5287 5288 if (!p_link->min_pf_rate) { 5289 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; 5290 p_hwfn->qm_info.wfq_data[vp_id].configured = true; 5291 return rc; 5292 } 5293 5294 rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); 5295 5296 if (rc == ECORE_SUCCESS) 5297 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, 5298 p_link->min_pf_rate); 5299 else 5300 DP_NOTICE(p_hwfn, false, 5301 "Validation failed while configuring min rate\n"); 5302 5303 return rc; 5304 } 5305 5306 static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn, 5307 struct ecore_ptt *p_ptt, 5308 u32 min_pf_rate) 5309 { 5310 bool use_wfq = false; 5311 int rc = ECORE_SUCCESS; 5312 u16 i; 5313 5314 /* Validate all pre configured vports for wfq */ 5315 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5316 u32 rate; 5317 5318 if (!p_hwfn->qm_info.wfq_data[i].configured) 5319 continue; 5320 5321 rate = p_hwfn->qm_info.wfq_data[i].min_speed; 5322 use_wfq = true; 5323 5324 rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate); 5325 if (rc != ECORE_SUCCESS) { 5326 DP_NOTICE(p_hwfn, false, 5327 "WFQ validation failed while configuring min rate\n"); 5328 break; 5329 } 5330 } 5331 5332 if (rc == ECORE_SUCCESS && use_wfq) 5333 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5334 else 5335 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5336 5337 return rc; 5338 } 5339 5340 /* Main API for ecore clients to configure vport min rate. 5341 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] 5342 * rate - Speed in Mbps needs to be assigned to a given vport. 5343 */ 5344 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate) 5345 { 5346 int i, rc = ECORE_INVAL; 5347 5348 /* TBD - for multiple hardware functions - that is 100 gig */ 5349 if (p_dev->num_hwfns > 1) { 5350 DP_NOTICE(p_dev, false, 5351 "WFQ configuration is not supported for this device\n"); 5352 return rc; 5353 } 5354 5355 for_each_hwfn(p_dev, i) { 5356 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5357 struct ecore_ptt *p_ptt; 5358 5359 p_ptt = ecore_ptt_acquire(p_hwfn); 5360 if (!p_ptt) 5361 return ECORE_TIMEOUT; 5362 5363 rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); 5364 5365 if (rc != ECORE_SUCCESS) { 5366 ecore_ptt_release(p_hwfn, p_ptt); 5367 return rc; 5368 } 5369 5370 ecore_ptt_release(p_hwfn, p_ptt); 5371 } 5372 5373 return rc; 5374 } 5375 5376 /* API to configure WFQ from mcp link change */ 5377 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, 5378 struct ecore_ptt *p_ptt, 5379 u32 min_pf_rate) 5380 { 5381 int i; 5382 5383 /* TBD - for multiple hardware functions - that is 100 gig */ 5384 if (p_dev->num_hwfns > 1) { 5385 DP_VERBOSE(p_dev, ECORE_MSG_LINK, 5386 "WFQ configuration is not supported for this device\n"); 5387 return; 5388 } 5389 5390 for_each_hwfn(p_dev, i) { 5391 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5392 5393 __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, 5394 min_pf_rate); 5395 } 5396 } 5397 5398 int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, 5399 struct ecore_ptt *p_ptt, 5400 struct ecore_mcp_link_state *p_link, 5401 u8 max_bw) 5402 { 5403 int rc = ECORE_SUCCESS; 5404 5405 p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; 5406 5407 if (!p_link->line_speed && (max_bw != 100)) 5408 return rc; 5409 5410 p_link->speed = (p_link->line_speed * max_bw) / 100; 5411 p_hwfn->qm_info.pf_rl = p_link->speed; 5412 5413 /* Since the limiter also affects Tx-switched traffic, we don't want it 5414 * to limit such traffic in case there's no actual limit. 5415 * In that case, set limit to imaginary high boundary. 5416 */ 5417 if (max_bw == 100) 5418 p_hwfn->qm_info.pf_rl = 100000; 5419 5420 rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, 5421 p_hwfn->qm_info.pf_rl); 5422 5423 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5424 "Configured MAX bandwidth to be %08x Mb/sec\n", 5425 p_link->speed); 5426 5427 return rc; 5428 } 5429 5430 /* Main API to configure PF max bandwidth where bw range is [1 - 100] */ 5431 int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw) 5432 { 5433 int i, rc = ECORE_INVAL; 5434 5435 if (max_bw < 1 || max_bw > 100) { 5436 DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n"); 5437 return rc; 5438 } 5439 5440 for_each_hwfn(p_dev, i) { 5441 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5442 struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev); 5443 struct ecore_mcp_link_state *p_link; 5444 struct ecore_ptt *p_ptt; 5445 5446 p_link = &p_lead->mcp_info->link_output; 5447 5448 p_ptt = ecore_ptt_acquire(p_hwfn); 5449 if (!p_ptt) 5450 return ECORE_TIMEOUT; 5451 5452 rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, 5453 p_link, max_bw); 5454 5455 ecore_ptt_release(p_hwfn, p_ptt); 5456 5457 if (rc != ECORE_SUCCESS) 5458 break; 5459 } 5460 5461 return rc; 5462 } 5463 5464 int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn, 5465 struct ecore_ptt *p_ptt, 5466 struct ecore_mcp_link_state *p_link, 5467 u8 min_bw) 5468 { 5469 int rc = ECORE_SUCCESS; 5470 5471 p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; 5472 p_hwfn->qm_info.pf_wfq = min_bw; 5473 5474 if (!p_link->line_speed) 5475 return rc; 5476 5477 p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; 5478 5479 rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); 5480 5481 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5482 "Configured MIN bandwidth to be %d Mb/sec\n", 5483 p_link->min_pf_rate); 5484 5485 return rc; 5486 } 5487 5488 /* Main API to configure PF min bandwidth where bw range is [1-100] */ 5489 int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw) 5490 { 5491 int i, rc = ECORE_INVAL; 5492 5493 if (min_bw < 1 || min_bw > 100) { 5494 DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n"); 5495 return rc; 5496 } 5497 5498 for_each_hwfn(p_dev, i) { 5499 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5500 struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev); 5501 struct ecore_mcp_link_state *p_link; 5502 struct ecore_ptt *p_ptt; 5503 5504 p_link = &p_lead->mcp_info->link_output; 5505 5506 p_ptt = ecore_ptt_acquire(p_hwfn); 5507 if (!p_ptt) 5508 return ECORE_TIMEOUT; 5509 5510 rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, 5511 p_link, min_bw); 5512 if (rc != ECORE_SUCCESS) { 5513 ecore_ptt_release(p_hwfn, p_ptt); 5514 return rc; 5515 } 5516 5517 if (p_link->min_pf_rate) { 5518 u32 min_rate = p_link->min_pf_rate; 5519 5520 rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn, 5521 p_ptt, 5522 min_rate); 5523 } 5524 5525 ecore_ptt_release(p_hwfn, p_ptt); 5526 } 5527 5528 return rc; 5529 } 5530 5531 void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 5532 { 5533 struct ecore_mcp_link_state *p_link; 5534 5535 p_link = &p_hwfn->mcp_info->link_output; 5536 5537 if (p_link->min_pf_rate) 5538 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, 5539 p_link->min_pf_rate); 5540 5541 OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0, 5542 sizeof(*p_hwfn->qm_info.wfq_data) * 5543 p_hwfn->qm_info.num_vports); 5544 } 5545 5546 int ecore_device_num_engines(struct ecore_dev *p_dev) 5547 { 5548 return ECORE_IS_BB(p_dev) ? 2 : 1; 5549 } 5550 5551 int ecore_device_num_ports(struct ecore_dev *p_dev) 5552 { 5553 /* in CMT always only one port */ 5554 if (p_dev->num_hwfns > 1) 5555 return 1; 5556 5557 return p_dev->num_ports_in_engine * ecore_device_num_engines(p_dev); 5558 } 5559 5560 int ecore_device_get_port_id(struct ecore_dev *p_dev) 5561 { 5562 return (ECORE_LEADING_HWFN(p_dev)->abs_pf_id) % 5563 ecore_device_num_ports(p_dev); 5564 } 5565 5566 void ecore_set_fw_mac_addr(__le16 *fw_msb, 5567 __le16 *fw_mid, 5568 __le16 *fw_lsb, 5569 u8 *mac) 5570 { 5571 ((u8 *)fw_msb)[0] = mac[1]; 5572 ((u8 *)fw_msb)[1] = mac[0]; 5573 ((u8 *)fw_mid)[0] = mac[3]; 5574 ((u8 *)fw_mid)[1] = mac[2]; 5575 ((u8 *)fw_lsb)[0] = mac[5]; 5576 ((u8 *)fw_lsb)[1] = mac[4]; 5577 } 5578