/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_dev.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");


#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_ll2.h"
#include "ecore_fcoe.h"
#include "ecore_iscsi.h"
#include "ecore_ooo.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_roce.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
#include "ecore_dcbx.h"
#include "pcics_reg_driver.h"
#include "ecore_l2.h"

/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
 * registers involved are not split and thus configuration is a race where
 * some of the PFs configuration might be lost.
 * Eventually, this needs to move into a MFW-covered HW-lock as arbitration
 * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where
 * there's more than a single compiled ecore component in system].
 */
static osal_spinlock_t qm_lock;
static bool qm_lock_init = false;

/* Configurable */
#define ECORE_MIN_DPIS		(4)	/* The minimal number of DPIs required
					 * to load the driver. The number was
					 * arbitrarily set.
					 */

/* Derived */
#define ECORE_MIN_PWM_REGION	((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))

enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};

static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->p_dev)) {
		/* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be
		 * read from actual register, but we're currently not using
		 * it for actual doorbelling.
		 */
		return 1 << 17;
	}

	val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* The above registers were updated in the past only in CMT mode. Since
	 * they were found to be useful MFW started updating them from 8.7.7.0.
	 * In older MFW versions they are set to 0 which means disabled.
	 */
	if (p_hwfn->p_dev->num_hwfns > 1) {
		DP_NOTICE(p_hwfn, false,
			  "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
	} else {
		DP_NOTICE(p_hwfn, false,
			  "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
}

void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module,
		   u8 dp_level,
		   void *dp_ctx)
{
	u32 i;

	p_dev->dp_level = dp_level;
	p_dev->dp_module = dp_module;
	p_dev->dp_ctx = dp_ctx;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
		p_hwfn->dp_ctx = dp_ctx;
	}
}

void ecore_init_struct(struct ecore_dev *p_dev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		p_hwfn->p_dev = p_dev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
		OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	p_dev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 (may be overridden later) */
	p_dev->cache_shift = 7;
}

static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
	qm_info->qm_pq_params = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
	qm_info->qm_vport_params = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
	qm_info->qm_port_params = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
	qm_info->wfq_data = OSAL_NULL;
}

void ecore_resc_free(struct ecore_dev *p_dev)
{
	int i;

	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i)
			ecore_l2_free(&p_dev->hwfns[i]);
		return;
	}

	OSAL_FREE(p_dev, p_dev->fw_data);
	p_dev->fw_data = OSAL_NULL;

	OSAL_FREE(p_dev, p_dev->reset_stats);
	p_dev->reset_stats = OSAL_NULL;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		ecore_cxt_mngr_free(p_hwfn);
		ecore_qm_info_free(p_hwfn);
		ecore_spq_free(p_hwfn);
		ecore_eq_free(p_hwfn);
		ecore_consq_free(p_hwfn);
		ecore_int_free(p_hwfn);
#ifdef CONFIG_ECORE_LL2
		ecore_ll2_free(p_hwfn);
#endif
#ifdef CONFIG_ECORE_FCOE
		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
			ecore_fcoe_free(p_hwfn);
#endif
#ifdef CONFIG_ECORE_ISCSI
		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			ecore_iscsi_free(p_hwfn);
			ecore_ooo_free(p_hwfn);
		}
#endif
		ecore_iov_free(p_hwfn);
		ecore_l2_free(p_hwfn);
		ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn); 221 /* @@@TBD Flush work-queue ?*/ 222 } 223 } 224 225 /******************** QM initialization *******************/ 226 227 /* bitmaps for indicating active traffic classes. Special case for Arrowhead 4 port */ 228 #define ACTIVE_TCS_BMAP 0x9f /* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */ 229 #define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */ 230 231 /* determines the physical queue flags for a given PF. */ 232 static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) 233 { 234 u32 flags; 235 236 /* common flags */ 237 flags = PQ_FLAGS_LB; 238 239 /* feature flags */ 240 if (IS_ECORE_SRIOV(p_hwfn->p_dev)) 241 flags |= PQ_FLAGS_VFS; 242 if (IS_ECORE_DCQCN(p_hwfn)) 243 flags |= PQ_FLAGS_RLS; 244 245 /* protocol flags */ 246 switch (p_hwfn->hw_info.personality) { 247 case ECORE_PCI_ETH: 248 flags |= PQ_FLAGS_MCOS; 249 break; 250 case ECORE_PCI_FCOE: 251 flags |= PQ_FLAGS_OFLD; 252 break; 253 case ECORE_PCI_ISCSI: 254 flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 255 break; 256 case ECORE_PCI_ETH_ROCE: 257 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; 258 break; 259 case ECORE_PCI_ETH_IWARP: 260 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 261 break; 262 default: 263 DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality); 264 return 0; 265 } 266 267 return flags; 268 } 269 270 271 /* Getters for resource amounts necessary for qm initialization */ 272 u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn) 273 { 274 return p_hwfn->hw_info.num_hw_tc; 275 } 276 277 u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn) 278 { 279 return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0; 280 } 281 282 #define NUM_DEFAULT_RLS 1 283 284 u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn) 285 { 286 u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 287 288 /* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */ 289 num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), 290 (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_VPORT), 291 ROCE_DCQCN_RP_MAX_QPS)); 292 293 /* make sure after we reserve the default and VF rls we'll have something left */ 294 if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) { 295 if (IS_ECORE_DCQCN(p_hwfn)) 296 DP_NOTICE(p_hwfn, false, "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs); 297 return 0; 298 } 299 300 /* subtract rls necessary for VFs and one default one for the PF */ 301 num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 302 303 return num_pf_rls; 304 } 305 306 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn) 307 { 308 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 309 310 /* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */ 311 return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 312 (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1; 313 } 314 315 /* calc amount of PQs according to the requested flags */ 316 u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn) 317 { 318 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 319 320 return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 321 (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) + 322 (!!(PQ_FLAGS_LB & pq_flags)) + 323 (!!(PQ_FLAGS_OOO & pq_flags)) + 324 (!!(PQ_FLAGS_ACK & pq_flags)) + 325 (!!(PQ_FLAGS_OFLD & pq_flags)) + 326 
(!!(PQ_FLAGS_LLT & pq_flags)) + 327 (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn); 328 } 329 330 /* initialize the top level QM params */ 331 static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn) 332 { 333 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 334 bool four_port; 335 336 /* pq and vport bases for this PF */ 337 qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ); 338 qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT); 339 340 /* rate limiting and weighted fair queueing are always enabled */ 341 qm_info->vport_rl_en = 1; 342 qm_info->vport_wfq_en = 1; 343 344 /* TC config is different for AH 4 port */ 345 four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2; 346 347 /* in AH 4 port we have fewer TCs per port */ 348 qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS; 349 350 /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */ 351 if (!qm_info->ooo_tc) 352 qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC; 353 } 354 355 /* initialize qm vport params */ 356 static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn) 357 { 358 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 359 u8 i; 360 361 /* all vports participate in weighted fair queueing */ 362 for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++) 363 qm_info->qm_vport_params[i].vport_wfq = 1; 364 } 365 366 /* initialize qm port params */ 367 static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn) 368 { 369 /* Initialize qm port parameters */ 370 u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines; 371 372 /* indicate how ooo and high pri traffic is dealt with */ 373 active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 374 ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP; 375 376 for (i = 0; i < num_ports; i++) { 377 struct init_qm_port_params *p_qm_port = 378 &p_hwfn->qm_info.qm_port_params[i]; 379 380 p_qm_port->active = 1; 381 p_qm_port->active_phys_tcs = active_phys_tcs; 382 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; 383 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 384 } 385 } 386 387 /* Reset the params which must be reset for qm init. QM init may be called as 388 * a result of flows other than driver load (e.g. dcbx renegotiation). Other 389 * params may be affected by the init but would simply recalculate to the same 390 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 391 * affected as these amounts stay the same. 392 */ 393 static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn) 394 { 395 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 396 397 qm_info->num_pqs = 0; 398 qm_info->num_vports = 0; 399 qm_info->num_pf_rls = 0; 400 qm_info->num_vf_pqs = 0; 401 qm_info->first_vf_pq = 0; 402 qm_info->first_mcos_pq = 0; 403 qm_info->first_rl_pq = 0; 404 } 405 406 static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn) 407 { 408 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 409 410 qm_info->num_vports++; 411 412 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 413 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 414 } 415 416 /* initialize a single pq and manage qm_info resources accounting. 417 * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF) 418 * and whether a new vport is allocated to the pq or not (i.e. 
vport will be shared) 419 */ 420 421 /* flags for pq init */ 422 #define PQ_INIT_SHARE_VPORT (1 << 0) 423 #define PQ_INIT_PF_RL (1 << 1) 424 #define PQ_INIT_VF_RL (1 << 2) 425 426 /* defines for pq init */ 427 #define PQ_INIT_DEFAULT_WRR_GROUP 1 428 #define PQ_INIT_DEFAULT_TC 0 429 #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 430 431 static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, 432 struct ecore_qm_info *qm_info, 433 u8 tc, u32 pq_init_flags) 434 { 435 u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn); 436 437 if (pq_idx > max_pq) 438 DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); 439 440 /* init pq params */ 441 qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; 442 qm_info->qm_pq_params[pq_idx].tc_id = tc; 443 qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 444 qm_info->qm_pq_params[pq_idx].rl_valid = 445 (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); 446 447 /* qm params accounting */ 448 qm_info->num_pqs++; 449 if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 450 qm_info->num_vports++; 451 452 if (pq_init_flags & PQ_INIT_PF_RL) 453 qm_info->num_pf_rls++; 454 455 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 456 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 457 458 if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) 459 DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn)); 460 } 461 462 /* get pq index according to PQ_FLAGS */ 463 static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn, 464 u32 pq_flags) 465 { 466 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 467 468 /* Can't have multiple flags set here */ 469 if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 470 goto err; 471 472 switch (pq_flags) { 473 case PQ_FLAGS_RLS: 474 return &qm_info->first_rl_pq; 475 case PQ_FLAGS_MCOS: 476 return &qm_info->first_mcos_pq; 477 case PQ_FLAGS_LB: 478 return &qm_info->pure_lb_pq; 479 case PQ_FLAGS_OOO: 480 return &qm_info->ooo_pq; 481 case PQ_FLAGS_ACK: 482 return &qm_info->pure_ack_pq; 483 case PQ_FLAGS_OFLD: 484 return &qm_info->offload_pq; 485 case PQ_FLAGS_LLT: 486 return &qm_info->low_latency_pq; 487 case PQ_FLAGS_VFS: 488 return &qm_info->first_vf_pq; 489 default: 490 goto err; 491 } 492 493 err: 494 DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 495 return OSAL_NULL; 496 } 497 498 /* save pq index in qm info */ 499 static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn, 500 u32 pq_flags, u16 pq_val) 501 { 502 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 503 504 *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 505 } 506 507 /* get tx pq index, with the PQ TX base already set (ready for context init) */ 508 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags) 509 { 510 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 511 512 return *base_pq_idx + CM_TX_PQ_BASE; 513 } 514 515 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc) 516 { 517 u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn); 518 519 if (tc > max_tc) 520 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 521 522 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 523 } 524 525 u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) 526 { 527 u16 
max_vf = ecore_init_qm_get_num_vfs(p_hwfn); 528 529 if (vf > max_vf) 530 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 531 532 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 533 } 534 535 u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) 536 { 537 u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); 538 539 if (rl > max_rl) 540 DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); 541 542 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 543 } 544 545 /* Functions for creating specific types of pqs */ 546 static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) 547 { 548 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 549 550 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 551 return; 552 553 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 554 ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 555 } 556 557 static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn) 558 { 559 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 560 561 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 562 return; 563 564 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 565 ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 566 } 567 568 static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn) 569 { 570 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 571 572 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 573 return; 574 575 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 576 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 577 } 578 579 static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn) 580 { 581 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 582 583 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 584 return; 585 586 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 587 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 588 } 589 590 static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn) 591 { 592 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 593 594 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) 595 return; 596 597 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); 598 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 599 } 600 601 static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn) 602 { 603 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 604 u8 tc_idx; 605 606 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 607 return; 608 609 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 610 for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++) 611 ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 612 } 613 614 static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn) 615 { 616 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 617 u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 618 619 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 620 return; 621 622 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 623 qm_info->num_vf_pqs = num_vfs; 624 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 625 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); 626 } 627 628 static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn) 629 { 630 u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn); 631 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 632 633 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 634 return; 635 636 
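	/* Record the base PQ index for the PF rate-limited PQs before creating
	 * them; each PQ created in the loop below gets its own vport and
	 * consumes one of the limiters counted by ecore_init_qm_get_num_pf_rls().
	 */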
ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 637 for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 638 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); 639 } 640 641 static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn) 642 { 643 /* rate limited pqs, must come first (FW assumption) */ 644 ecore_init_qm_rl_pqs(p_hwfn); 645 646 /* pqs for multi cos */ 647 ecore_init_qm_mcos_pqs(p_hwfn); 648 649 /* pure loopback pq */ 650 ecore_init_qm_lb_pq(p_hwfn); 651 652 /* out of order pq */ 653 ecore_init_qm_ooo_pq(p_hwfn); 654 655 /* pure ack pq */ 656 ecore_init_qm_pure_ack_pq(p_hwfn); 657 658 /* pq for offloaded protocol */ 659 ecore_init_qm_offload_pq(p_hwfn); 660 661 /* low latency pq */ 662 ecore_init_qm_low_latency_pq(p_hwfn); 663 664 /* done sharing vports */ 665 ecore_init_qm_advance_vport(p_hwfn); 666 667 /* pqs for vfs */ 668 ecore_init_qm_vf_pqs(p_hwfn); 669 } 670 671 /* compare values of getters against resources amounts */ 672 static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn) 673 { 674 if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) { 675 DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 676 return ECORE_INVAL; 677 } 678 679 if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) { 680 DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 681 return ECORE_INVAL; 682 } 683 684 return ECORE_SUCCESS; 685 } 686 687 /* 688 * Function for verbose printing of the qm initialization results 689 */ 690 static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) 691 { 692 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 693 struct init_qm_vport_params *vport; 694 struct init_qm_port_params *port; 695 struct init_qm_pq_params *pq; 696 int i, tc; 697 698 /* top level params */ 699 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 700 qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->offload_pq, qm_info->pure_ack_pq); 701 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", 702 qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port); 703 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 704 qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn)); 705 706 /* port table */ 707 for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) { 708 port = &(qm_info->qm_port_params[i]); 709 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", 710 i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved); 711 } 712 713 /* vport table */ 714 for (i = 0; i < qm_info->num_vports; i++) { 715 vport = &(qm_info->qm_vport_params[i]); 716 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", 717 qm_info->start_vport + i, vport->vport_rl, vport->vport_wfq); 718 for (tc = 0; tc < NUM_OF_TCS; tc++) 719 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]); 720 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n"); 721 } 722 723 /* pq table */ 724 for (i = 0; i < 
qm_info->num_pqs; i++) { 725 pq = &(qm_info->qm_pq_params[i]); 726 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", 727 qm_info->start_pq + i, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid); 728 } 729 } 730 731 static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn) 732 { 733 /* reset params required for init run */ 734 ecore_init_qm_reset_params(p_hwfn); 735 736 /* init QM top level params */ 737 ecore_init_qm_params(p_hwfn); 738 739 /* init QM port params */ 740 ecore_init_qm_port_params(p_hwfn); 741 742 /* init QM vport params */ 743 ecore_init_qm_vport_params(p_hwfn); 744 745 /* init QM physical queue params */ 746 ecore_init_qm_pq_params(p_hwfn); 747 748 /* display all that init */ 749 ecore_dp_init_qm_params(p_hwfn); 750 } 751 752 /* This function reconfigures the QM pf on the fly. 753 * For this purpose we: 754 * 1. reconfigure the QM database 755 * 2. set new values to runtime array 756 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 757 * 4. activate init tool in QM_PF stage 758 * 5. send an sdm_qm_cmd through rbc interface to release the QM 759 */ 760 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, 761 struct ecore_ptt *p_ptt) 762 { 763 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 764 bool b_rc; 765 enum _ecore_status_t rc; 766 767 /* initialize ecore's qm data structure */ 768 ecore_init_qm_info(p_hwfn); 769 770 /* stop PF's qm queues */ 771 OSAL_SPIN_LOCK(&qm_lock); 772 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 773 qm_info->start_pq, qm_info->num_pqs); 774 OSAL_SPIN_UNLOCK(&qm_lock); 775 if (!b_rc) 776 return ECORE_INVAL; 777 778 /* clear the QM_PF runtime phase leftovers from previous init */ 779 ecore_init_clear_rt_data(p_hwfn); 780 781 /* prepare QM portion of runtime array */ 782 ecore_qm_init_pf(p_hwfn); 783 784 /* activate init tool on runtime array */ 785 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 786 p_hwfn->hw_info.hw_mode); 787 if (rc != ECORE_SUCCESS) 788 return rc; 789 790 /* start PF's qm queues */ 791 OSAL_SPIN_LOCK(&qm_lock); 792 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 793 qm_info->start_pq, qm_info->num_pqs); 794 OSAL_SPIN_UNLOCK(&qm_lock); 795 if (!b_rc) 796 return ECORE_INVAL; 797 798 return ECORE_SUCCESS; 799 } 800 801 static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn) 802 { 803 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 804 enum _ecore_status_t rc; 805 806 rc = ecore_init_qm_sanity(p_hwfn); 807 if (rc != ECORE_SUCCESS) 808 goto alloc_err; 809 810 qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 811 sizeof(struct init_qm_pq_params) * 812 ecore_init_qm_get_num_pqs(p_hwfn)); 813 if (!qm_info->qm_pq_params) 814 goto alloc_err; 815 816 qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 817 sizeof(struct init_qm_vport_params) * 818 ecore_init_qm_get_num_vports(p_hwfn)); 819 if (!qm_info->qm_vport_params) 820 goto alloc_err; 821 822 qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 823 sizeof(struct init_qm_port_params) * 824 p_hwfn->p_dev->num_ports_in_engines); 825 if (!qm_info->qm_port_params) 826 goto alloc_err; 827 828 qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 829 sizeof(struct ecore_wfq_data) * 830 ecore_init_qm_get_num_vports(p_hwfn)); 831 if (!qm_info->wfq_data) 832 goto alloc_err; 833 834 return ECORE_SUCCESS; 835 836 alloc_err: 837 DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n"); 838 
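	/* release whichever of the four QM arrays were allocated before the failure */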
ecore_qm_info_free(p_hwfn); 839 return ECORE_NOMEM; 840 } 841 /******************** End QM initialization ***************/ 842 843 enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) 844 { 845 enum _ecore_status_t rc = ECORE_SUCCESS; 846 u32 rdma_tasks, excess_tasks; 847 u32 line_count; 848 int i; 849 850 if (IS_VF(p_dev)) { 851 for_each_hwfn(p_dev, i) { 852 rc = ecore_l2_alloc(&p_dev->hwfns[i]); 853 if (rc != ECORE_SUCCESS) 854 return rc; 855 } 856 return rc; 857 } 858 859 p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL, 860 sizeof(*p_dev->fw_data)); 861 if (!p_dev->fw_data) 862 return ECORE_NOMEM; 863 864 for_each_hwfn(p_dev, i) { 865 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 866 u32 n_eqes, num_cons; 867 868 /* First allocate the context manager structure */ 869 rc = ecore_cxt_mngr_alloc(p_hwfn); 870 if (rc) 871 goto alloc_err; 872 873 /* Set the HW cid/tid numbers (in the contest manager) 874 * Must be done prior to any further computations. 875 */ 876 rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); 877 if (rc) 878 goto alloc_err; 879 880 rc = ecore_alloc_qm_data(p_hwfn); 881 if (rc) 882 goto alloc_err; 883 884 /* init qm info */ 885 ecore_init_qm_info(p_hwfn); 886 887 /* Compute the ILT client partition */ 888 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 889 if (rc) { 890 DP_NOTICE(p_hwfn, false, "too many ILT lines; re-computing with less lines\n"); 891 /* In case there are not enough ILT lines we reduce the 892 * number of RDMA tasks and re-compute. 893 */ 894 excess_tasks = ecore_cxt_cfg_ilt_compute_excess( 895 p_hwfn, line_count); 896 if (!excess_tasks) 897 goto alloc_err; 898 899 rdma_tasks = RDMA_MAX_TIDS - excess_tasks; 900 rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks); 901 if (rc) 902 goto alloc_err; 903 904 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 905 if (rc) { 906 DP_ERR(p_hwfn, "failed ILT compute. Requested too many lines: %u\n", 907 line_count); 908 909 goto alloc_err; 910 } 911 } 912 913 /* CID map / ILT shadow table / T2 914 * The talbes sizes are determined by the computations above 915 */ 916 rc = ecore_cxt_tables_alloc(p_hwfn); 917 if (rc) 918 goto alloc_err; 919 920 /* SPQ, must follow ILT because initializes SPQ context */ 921 rc = ecore_spq_alloc(p_hwfn); 922 if (rc) 923 goto alloc_err; 924 925 /* SP status block allocation */ 926 p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, 927 RESERVED_PTT_DPC); 928 929 rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 930 if (rc) 931 goto alloc_err; 932 933 rc = ecore_iov_alloc(p_hwfn); 934 if (rc) 935 goto alloc_err; 936 937 /* EQ */ 938 n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain); 939 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 940 /* Calculate the EQ size 941 * --------------------- 942 * Each ICID may generate up to one event at a time i.e. 943 * the event must be handled/cleared before a new one 944 * can be generated. We calculate the sum of events per 945 * protocol and create an EQ deep enough to handle the 946 * worst case: 947 * - Core - according to SPQ. 948 * - RoCE - per QP there are a couple of ICIDs, one 949 * responder and one requester, each can 950 * generate an EQE => n_eqes_qp = 2 * n_qp. 951 * Each CQ can generate an EQE. There are 2 CQs 952 * per QP => n_eqes_cq = 2 * n_qp. 953 * Hence the RoCE total is 4 * n_qp or 954 * 2 * num_cons. 955 * - ENet - There can be up to two events per VF. One 956 * for VF-PF channel and another for VF FLR 957 * initial cleanup. 
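			 *          For example (illustrative numbers only), a
			 *          PF serving two VFs could have at most four
			 *          such ENet EQEs outstanding at once.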
The number of VFs is 958 * bounded by MAX_NUM_VFS_BB, and is much 959 * smaller than RoCE's so we avoid exact 960 * calculation. 961 */ 962 if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) { 963 num_cons = ecore_cxt_get_proto_cid_count( 964 p_hwfn, PROTOCOLID_ROCE, OSAL_NULL); 965 num_cons *= 2; 966 } else { 967 num_cons = ecore_cxt_get_proto_cid_count( 968 p_hwfn, PROTOCOLID_IWARP, 969 OSAL_NULL); 970 } 971 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 972 } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 973 num_cons = ecore_cxt_get_proto_cid_count( 974 p_hwfn, PROTOCOLID_ISCSI, OSAL_NULL); 975 n_eqes += 2 * num_cons; 976 } 977 978 if (n_eqes > 0xFFFF) { 979 DP_ERR(p_hwfn, 980 "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n", 981 n_eqes, 0xFFFF); 982 goto alloc_no_mem; 983 } 984 985 rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes); 986 if (rc) 987 goto alloc_err; 988 989 rc = ecore_consq_alloc(p_hwfn); 990 if (rc) 991 goto alloc_err; 992 993 rc = ecore_l2_alloc(p_hwfn); 994 if (rc != ECORE_SUCCESS) 995 goto alloc_err; 996 997 #ifdef CONFIG_ECORE_LL2 998 if (p_hwfn->using_ll2) { 999 rc = ecore_ll2_alloc(p_hwfn); 1000 if (rc) 1001 goto alloc_err; 1002 } 1003 #endif 1004 #ifdef CONFIG_ECORE_FCOE 1005 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { 1006 rc = ecore_fcoe_alloc(p_hwfn); 1007 if (rc) 1008 goto alloc_err; 1009 } 1010 #endif 1011 #ifdef CONFIG_ECORE_ISCSI 1012 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1013 rc = ecore_iscsi_alloc(p_hwfn); 1014 if (rc) 1015 goto alloc_err; 1016 rc = ecore_ooo_alloc(p_hwfn); 1017 if (rc) 1018 goto alloc_err; 1019 } 1020 #endif 1021 1022 /* DMA info initialization */ 1023 rc = ecore_dmae_info_alloc(p_hwfn); 1024 if (rc) { 1025 DP_NOTICE(p_hwfn, true, 1026 "Failed to allocate memory for dmae_info structure\n"); 1027 goto alloc_err; 1028 } 1029 1030 /* DCBX initialization */ 1031 rc = ecore_dcbx_info_alloc(p_hwfn); 1032 if (rc) { 1033 DP_NOTICE(p_hwfn, true, 1034 "Failed to allocate memory for dcbx structure\n"); 1035 goto alloc_err; 1036 } 1037 } 1038 1039 p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, 1040 sizeof(*p_dev->reset_stats)); 1041 if (!p_dev->reset_stats) { 1042 DP_NOTICE(p_dev, true, 1043 "Failed to allocate reset statistics\n"); 1044 goto alloc_no_mem; 1045 } 1046 1047 return ECORE_SUCCESS; 1048 1049 alloc_no_mem: 1050 rc = ECORE_NOMEM; 1051 alloc_err: 1052 ecore_resc_free(p_dev); 1053 return rc; 1054 } 1055 1056 void ecore_resc_setup(struct ecore_dev *p_dev) 1057 { 1058 int i; 1059 1060 if (IS_VF(p_dev)) { 1061 for_each_hwfn(p_dev, i) 1062 ecore_l2_setup(&p_dev->hwfns[i]); 1063 return; 1064 } 1065 1066 for_each_hwfn(p_dev, i) { 1067 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1068 1069 ecore_cxt_mngr_setup(p_hwfn); 1070 ecore_spq_setup(p_hwfn); 1071 ecore_eq_setup(p_hwfn); 1072 ecore_consq_setup(p_hwfn); 1073 1074 /* Read shadow of current MFW mailbox */ 1075 ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 1076 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 1077 p_hwfn->mcp_info->mfw_mb_cur, 1078 p_hwfn->mcp_info->mfw_mb_length); 1079 1080 ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt); 1081 1082 ecore_l2_setup(p_hwfn); 1083 ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt); 1084 #ifdef CONFIG_ECORE_LL2 1085 if (p_hwfn->using_ll2) 1086 ecore_ll2_setup(p_hwfn); 1087 #endif 1088 #ifdef CONFIG_ECORE_FCOE 1089 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 1090 ecore_fcoe_setup(p_hwfn); 1091 #endif 1092 #ifdef CONFIG_ECORE_ISCSI 1093 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1094 
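			/* The iSCSI personality also uses the OOO
			 * (out-of-order) buffers, so both are set up here.
			 */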
ecore_iscsi_setup(p_hwfn); 1095 ecore_ooo_setup(p_hwfn); 1096 } 1097 #endif 1098 } 1099 } 1100 1101 #define FINAL_CLEANUP_POLL_CNT (100) 1102 #define FINAL_CLEANUP_POLL_TIME (10) 1103 enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, 1104 struct ecore_ptt *p_ptt, 1105 u16 id, bool is_vf) 1106 { 1107 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 1108 enum _ecore_status_t rc = ECORE_TIMEOUT; 1109 1110 #ifndef ASIC_ONLY 1111 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) || 1112 CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1113 DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n"); 1114 return ECORE_SUCCESS; 1115 } 1116 #endif 1117 1118 addr = GTT_BAR0_MAP_REG_USDM_RAM + 1119 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 1120 1121 if (is_vf) 1122 id += 0x10; 1123 1124 command |= X_FINAL_CLEANUP_AGG_INT << 1125 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 1126 command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 1127 command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 1128 command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; 1129 1130 /* Make sure notification is not set before initiating final cleanup */ 1131 if (REG_RD(p_hwfn, addr)) { 1132 DP_NOTICE(p_hwfn, false, 1133 "Unexpected; Found final cleanup notification before initiating final cleanup\n"); 1134 REG_WR(p_hwfn, addr, 0); 1135 } 1136 1137 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1138 "Sending final cleanup for PFVF[%d] [Command %08x\n]", 1139 id, command); 1140 1141 ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 1142 1143 /* Poll until completion */ 1144 while (!REG_RD(p_hwfn, addr) && count--) 1145 OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME); 1146 1147 if (REG_RD(p_hwfn, addr)) 1148 rc = ECORE_SUCCESS; 1149 else 1150 DP_NOTICE(p_hwfn, true, "Failed to receive FW final cleanup notification\n"); 1151 1152 /* Cleanup afterwards */ 1153 REG_WR(p_hwfn, addr, 0); 1154 1155 return rc; 1156 } 1157 1158 static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) 1159 { 1160 int hw_mode = 0; 1161 1162 if (ECORE_IS_BB_B0(p_hwfn->p_dev)) { 1163 hw_mode |= 1 << MODE_BB; 1164 } else if (ECORE_IS_AH(p_hwfn->p_dev)) { 1165 hw_mode |= 1 << MODE_K2; 1166 } else { 1167 DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n", 1168 p_hwfn->p_dev->type); 1169 return ECORE_INVAL; 1170 } 1171 1172 /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE*/ 1173 switch (p_hwfn->p_dev->num_ports_in_engines) { 1174 case 1: 1175 hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 1176 break; 1177 case 2: 1178 hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 1179 break; 1180 case 4: 1181 hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 1182 break; 1183 default: 1184 DP_NOTICE(p_hwfn, true, "num_ports_in_engine = %d not supported\n", 1185 p_hwfn->p_dev->num_ports_in_engines); 1186 return ECORE_INVAL; 1187 } 1188 1189 switch (p_hwfn->p_dev->mf_mode) { 1190 case ECORE_MF_DEFAULT: 1191 case ECORE_MF_NPAR: 1192 hw_mode |= 1 << MODE_MF_SI; 1193 break; 1194 case ECORE_MF_OVLAN: 1195 hw_mode |= 1 << MODE_MF_SD; 1196 break; 1197 default: 1198 DP_NOTICE(p_hwfn, true, "Unsupported MF mode, init as DEFAULT\n"); 1199 hw_mode |= 1 << MODE_MF_SI; 1200 } 1201 1202 #ifndef ASIC_ONLY 1203 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1204 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1205 hw_mode |= 1 << MODE_FPGA; 1206 } else { 1207 if (p_hwfn->p_dev->b_is_emul_full) 1208 hw_mode |= 1 << MODE_EMUL_FULL; 1209 else 1210 hw_mode |= 1 << MODE_EMUL_REDUCED; 1211 } 1212 } else 1213 #endif 1214 hw_mode |= 1 << MODE_ASIC; 1215 1216 if (p_hwfn->p_dev->num_hwfns > 1) 1217 
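		/* more than one HW function on the device implies CMT (100G) mode */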
hw_mode |= 1 << MODE_100G; 1218 1219 p_hwfn->hw_info.hw_mode = hw_mode; 1220 1221 DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP), 1222 "Configuring function for hw_mode: 0x%08x\n", 1223 p_hwfn->hw_info.hw_mode); 1224 1225 return ECORE_SUCCESS; 1226 } 1227 1228 #ifndef ASIC_ONLY 1229 /* MFW-replacement initializations for non-ASIC */ 1230 static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn, 1231 struct ecore_ptt *p_ptt) 1232 { 1233 struct ecore_dev *p_dev = p_hwfn->p_dev; 1234 u32 pl_hv = 1; 1235 int i; 1236 1237 if (CHIP_REV_IS_EMUL(p_dev)) { 1238 if (ECORE_IS_AH(p_dev)) 1239 pl_hv |= 0x600; 1240 else if (ECORE_IS_E5(p_dev)) 1241 ECORE_E5_MISSING_CODE; 1242 } 1243 1244 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv); 1245 1246 if (CHIP_REV_IS_EMUL(p_dev) && 1247 (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev))) 1248 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5, 1249 0x3ffffff); 1250 1251 /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */ 1252 /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */ 1253 if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev)) 1254 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4); 1255 1256 if (CHIP_REV_IS_EMUL(p_dev)) { 1257 if (ECORE_IS_AH(p_dev)) { 1258 /* 2 for 4-port, 1 for 2-port, 0 for 1-port */ 1259 ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE, 1260 (p_dev->num_ports_in_engines >> 1)); 1261 1262 ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN, 1263 p_dev->num_ports_in_engines == 4 ? 0 : 3); 1264 } else if (ECORE_IS_E5(p_dev)) { 1265 ECORE_E5_MISSING_CODE; 1266 } 1267 } 1268 1269 /* Poll on RBC */ 1270 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1); 1271 for (i = 0; i < 100; i++) { 1272 OSAL_UDELAY(50); 1273 if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1) 1274 break; 1275 } 1276 if (i == 100) 1277 DP_NOTICE(p_hwfn, true, "RBC done failed to complete in PSWRQ2\n"); 1278 1279 return ECORE_SUCCESS; 1280 } 1281 #endif 1282 1283 /* Init run time data for all PFs and their VFs on an engine. 1284 * TBD - for VFs - Once we have parent PF info for each VF in 1285 * shmem available as CAU requires knowledge of parent PF for each VF. 1286 */ 1287 static void ecore_init_cau_rt_data(struct ecore_dev *p_dev) 1288 { 1289 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 1290 int i, igu_sb_id; 1291 1292 for_each_hwfn(p_dev, i) { 1293 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1294 struct ecore_igu_info *p_igu_info; 1295 struct ecore_igu_block *p_block; 1296 struct cau_sb_entry sb_entry; 1297 1298 p_igu_info = p_hwfn->hw_info.p_igu_info; 1299 1300 for (igu_sb_id = 0; 1301 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev); 1302 igu_sb_id++) { 1303 p_block = &p_igu_info->entry[igu_sb_id]; 1304 1305 if (!p_block->is_pf) 1306 continue; 1307 1308 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, 1309 p_block->function_id, 1310 0, 0); 1311 STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, 1312 sb_entry); 1313 } 1314 } 1315 } 1316 1317 static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn, 1318 struct ecore_ptt *p_ptt) 1319 { 1320 u32 val, wr_mbs, cache_line_size; 1321 1322 val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 1323 switch (val) { 1324 case 0: 1325 wr_mbs = 128; 1326 break; 1327 case 1: 1328 wr_mbs = 256; 1329 break; 1330 case 2: 1331 wr_mbs = 512; 1332 break; 1333 default: 1334 DP_INFO(p_hwfn, 1335 "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. 
Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1336 val); 1337 return; 1338 } 1339 1340 cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs); 1341 switch (cache_line_size) { 1342 case 32: 1343 val = 0; 1344 break; 1345 case 64: 1346 val = 1; 1347 break; 1348 case 128: 1349 val = 2; 1350 break; 1351 case 256: 1352 val = 3; 1353 break; 1354 default: 1355 DP_INFO(p_hwfn, 1356 "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1357 cache_line_size); 1358 } 1359 1360 if (OSAL_CACHE_LINE_SIZE > wr_mbs) 1361 DP_INFO(p_hwfn, 1362 "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 1363 OSAL_CACHE_LINE_SIZE, wr_mbs); 1364 1365 STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 1366 } 1367 1368 static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, 1369 struct ecore_ptt *p_ptt, 1370 int hw_mode) 1371 { 1372 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1373 struct ecore_dev *p_dev = p_hwfn->p_dev; 1374 u8 vf_id, max_num_vfs; 1375 u16 num_pfs, pf_id; 1376 u32 concrete_fid; 1377 enum _ecore_status_t rc = ECORE_SUCCESS; 1378 1379 ecore_init_cau_rt_data(p_dev); 1380 1381 /* Program GTT windows */ 1382 ecore_gtt_init(p_hwfn); 1383 1384 #ifndef ASIC_ONLY 1385 if (CHIP_REV_IS_EMUL(p_dev)) { 1386 rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt); 1387 if (rc != ECORE_SUCCESS) 1388 return rc; 1389 } 1390 #endif 1391 1392 if (p_hwfn->mcp_info) { 1393 if (p_hwfn->mcp_info->func_info.bandwidth_max) 1394 qm_info->pf_rl_en = 1; 1395 if (p_hwfn->mcp_info->func_info.bandwidth_min) 1396 qm_info->pf_wfq_en = 1; 1397 } 1398 1399 ecore_qm_common_rt_init(p_hwfn, 1400 p_dev->num_ports_in_engines, 1401 qm_info->max_phys_tcs_per_port, 1402 qm_info->pf_rl_en, qm_info->pf_wfq_en, 1403 qm_info->vport_rl_en, qm_info->vport_wfq_en, 1404 qm_info->qm_port_params); 1405 1406 ecore_cxt_hw_init_common(p_hwfn); 1407 1408 ecore_init_cache_line_size(p_hwfn, p_ptt); 1409 1410 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); 1411 if (rc != ECORE_SUCCESS) 1412 return rc; 1413 1414 /* @@TBD MichalK - should add VALIDATE_VFID to init tool... 1415 * need to decide with which value, maybe runtime 1416 */ 1417 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 1418 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 1419 1420 if (ECORE_IS_BB(p_dev)) { 1421 /* Workaround clears ROCE search for all functions to prevent 1422 * involving non intialized function in processing ROCE packet. 1423 */ 1424 num_pfs = NUM_OF_ENG_PFS(p_dev); 1425 for (pf_id = 0; pf_id < num_pfs; pf_id++) { 1426 ecore_fid_pretend(p_hwfn, p_ptt, pf_id); 1427 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1428 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1429 } 1430 /* pretend to original PF */ 1431 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1432 } 1433 1434 /* Workaround for avoiding CCFC execution error when getting packets 1435 * with CRC errors, and allowing instead the invoking of the FW error 1436 * handler. 1437 * This is not done inside the init tool since it currently can't 1438 * perform a pretending to VFs. 1439 */ 1440 max_num_vfs = ECORE_IS_AH(p_dev) ? 
MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; 1441 for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { 1442 concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id); 1443 ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); 1444 ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 1445 ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); 1446 ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); 1447 ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); 1448 } 1449 /* pretend to original PF */ 1450 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1451 1452 return rc; 1453 } 1454 1455 #ifndef ASIC_ONLY 1456 #define MISC_REG_RESET_REG_2_XMAC_BIT (1<<4) 1457 #define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1<<5) 1458 1459 #define PMEG_IF_BYTE_COUNT 8 1460 1461 static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn, 1462 struct ecore_ptt *p_ptt, 1463 u32 addr, 1464 u64 data, 1465 u8 reg_type, 1466 u8 port) 1467 { 1468 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 1469 "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n", 1470 ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) | 1471 (8 << PMEG_IF_BYTE_COUNT), 1472 (reg_type << 25) | (addr << 8) | port, 1473 (u32)((data >> 32) & 0xffffffff), 1474 (u32)(data & 0xffffffff)); 1475 1476 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB, 1477 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) & 1478 0xffff00fe) | 1479 (8 << PMEG_IF_BYTE_COUNT)); 1480 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB, 1481 (reg_type << 25) | (addr << 8) | port); 1482 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff); 1483 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, 1484 (data >> 32) & 0xffffffff); 1485 } 1486 1487 #define XLPORT_MODE_REG (0x20a) 1488 #define XLPORT_MAC_CONTROL (0x210) 1489 #define XLPORT_FLOW_CONTROL_CONFIG (0x207) 1490 #define XLPORT_ENABLE_REG (0x20b) 1491 1492 #define XLMAC_CTRL (0x600) 1493 #define XLMAC_MODE (0x601) 1494 #define XLMAC_RX_MAX_SIZE (0x608) 1495 #define XLMAC_TX_CTRL (0x604) 1496 #define XLMAC_PAUSE_CTRL (0x60d) 1497 #define XLMAC_PFC_CTRL (0x60e) 1498 1499 static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn, 1500 struct ecore_ptt *p_ptt) 1501 { 1502 u8 loopback = 0, port = p_hwfn->port_id * 2; 1503 1504 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 1505 1506 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, 1507 (0x4 << 4) | 0x4, 1, port); /* XLPORT MAC MODE */ /* 0 Quad, 4 Single... 
*/ 1508 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port); 1509 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 1510 0x40, 0, port); /*XLMAC: SOFT RESET */ 1511 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 1512 0x40, 0, port); /*XLMAC: Port Speed >= 10Gbps */ 1513 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 1514 0x3fff, 0, port); /* XLMAC: Max Size */ 1515 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL, 1516 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38), 1517 0, port); 1518 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 1519 0x7c000, 0, port); 1520 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL, 1521 0x30ffffc000ULL, 0, port); 1522 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 1523 0, port); /* XLMAC: TX_EN, RX_EN */ 1524 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2), 1525 0, port); /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */ 1526 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1527 1, 0, port); /* Enabled Parallel PFC interface */ 1528 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 1529 0xf, 1, port); /* XLPORT port enable */ 1530 } 1531 1532 static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn, 1533 struct ecore_ptt *p_ptt) 1534 { 1535 u8 port = p_hwfn->port_id; 1536 u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE; 1537 1538 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 1539 1540 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2), 1541 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) | 1542 (port << 1543 CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) | 1544 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT)); 1545 1546 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5, 1547 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT); 1548 1549 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5, 1550 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT); 1551 1552 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5, 1553 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT); 1554 1555 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5, 1556 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT); 1557 1558 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5, 1559 (0xA << 1560 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) | 1561 (8 << 1562 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT)); 1563 1564 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5, 1565 0xa853); 1566 } 1567 1568 static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn, 1569 struct ecore_ptt *p_ptt) 1570 { 1571 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) 1572 ecore_emul_link_init_ah_e5(p_hwfn, p_ptt); 1573 else /* BB */ 1574 ecore_emul_link_init_bb(p_hwfn, p_ptt); 1575 1576 return; 1577 } 1578 1579 static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn, 1580 struct ecore_ptt *p_ptt, u8 port) 1581 { 1582 int port_offset = port ? 
0x800 : 0; 1583 u32 xmac_rxctrl = 0; 1584 1585 /* Reset of XMAC */ 1586 /* FIXME: move to common start */ 1587 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2*sizeof(u32), 1588 MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */ 1589 OSAL_MSLEEP(1); 1590 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 1591 MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */ 1592 1593 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1); 1594 1595 /* Set the number of ports on the Warp Core to 10G */ 1596 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3); 1597 1598 /* Soft reset of XMAC */ 1599 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), 1600 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 1601 OSAL_MSLEEP(1); 1602 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 1603 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 1604 1605 /* FIXME: move to common end */ 1606 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 1607 ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20); 1608 1609 /* Set Max packet size: initialize XMAC block register for port 0 */ 1610 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710); 1611 1612 /* CRC append for Tx packets: init XMAC block register for port 1 */ 1613 ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800); 1614 1615 /* Enable TX and RX: initialize XMAC block register for port 1 */ 1616 ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset, 1617 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB); 1618 xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, 1619 XMAC_REG_RX_CTRL_BB + port_offset); 1620 xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB; 1621 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl); 1622 } 1623 #endif 1624 1625 static enum _ecore_status_t 1626 ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn, 1627 struct ecore_ptt *p_ptt, 1628 u32 pwm_region_size, 1629 u32 n_cpus) 1630 { 1631 u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size; 1632 u32 dpi_bit_shift, dpi_count; 1633 u32 min_dpis; 1634 1635 /* Calculate DPI size 1636 * ------------------ 1637 * The PWM region contains Doorbell Pages. The first is reserverd for 1638 * the kernel for, e.g, L2. The others are free to be used by non- 1639 * trusted applications, typically from user space. Each page, called a 1640 * doorbell page is sectioned into windows that allow doorbells to be 1641 * issued in parallel by the kernel/application. The size of such a 1642 * window (a.k.a. WID) is 1kB. 1643 * Summary: 1644 * 1kB WID x N WIDS = DPI page size 1645 * DPI page size x N DPIs = PWM region size 1646 * Notes: 1647 * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE 1648 * in order to ensure that two applications won't share the same page. 1649 * It also must contain at least one WID per CPU to allow parallelism. 1650 * It also must be a power of 2, since it is stored as a bit shift. 1651 * 1652 * The DPI page size is stored in a register as 'dpi_bit_shift' so that 1653 * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096 1654 * containing 4 WIDs. 
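	 *
	 * Example (illustrative numbers, assuming 1kB WIDs and a 4kB
	 * OSAL_PAGE_SIZE): with n_cpus = 16, dpi_page_size_1 = 16kB and
	 * dpi_page_size_2 = 4kB, so dpi_page_size = 16kB (already a power
	 * of 2) and dpi_bit_shift = log2(16kB / 4kB) = 2.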
1655 */ 1656 dpi_page_size_1 = ECORE_WID_SIZE * n_cpus; 1657 dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE); 1658 dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2); 1659 dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size); 1660 dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096); 1661 1662 dpi_count = pwm_region_size / dpi_page_size; 1663 1664 min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; 1665 min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis); 1666 1667 /* Update hwfn */ 1668 p_hwfn->dpi_size = dpi_page_size; 1669 p_hwfn->dpi_count = dpi_count; 1670 1671 /* Update registers */ 1672 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); 1673 1674 if (dpi_count < min_dpis) 1675 return ECORE_NORESOURCES; 1676 1677 return ECORE_SUCCESS; 1678 } 1679 1680 enum ECORE_ROCE_EDPM_MODE { 1681 ECORE_ROCE_EDPM_MODE_ENABLE = 0, 1682 ECORE_ROCE_EDPM_MODE_FORCE_ON = 1, 1683 ECORE_ROCE_EDPM_MODE_DISABLE = 2, 1684 }; 1685 1686 static enum _ecore_status_t 1687 ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn, 1688 struct ecore_ptt *p_ptt) 1689 { 1690 u32 pwm_regsize, norm_regsize; 1691 u32 non_pwm_conn, min_addr_reg1; 1692 u32 db_bar_size, n_cpus = 1; 1693 u32 roce_edpm_mode; 1694 u32 pf_dems_shift; 1695 enum _ecore_status_t rc = ECORE_SUCCESS; 1696 u8 cond; 1697 1698 db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1); 1699 if (p_hwfn->p_dev->num_hwfns > 1) 1700 db_bar_size /= 2; 1701 1702 /* Calculate doorbell regions 1703 * ----------------------------------- 1704 * The doorbell BAR is made of two regions. The first is called normal 1705 * region and the second is called PWM region. In the normal region 1706 * each ICID has its own set of addresses so that writing to that 1707 * specific address identifies the ICID. In the Process Window Mode 1708 * region the ICID is given in the data written to the doorbell. The 1709 * above per PF register denotes the offset in the doorbell BAR in which 1710 * the PWM region begins. 1711 * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per 1712 * non-PWM connection. The calculation below computes the total non-PWM 1713 * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is 1714 * in units of 4,096 bytes. 1715 */ 1716 non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 1717 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 1718 OSAL_NULL) + 1719 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 1720 OSAL_NULL); 1721 norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096); 1722 min_addr_reg1 = norm_regsize / 4096; 1723 pwm_regsize = db_bar_size - norm_regsize; 1724 1725 /* Check that the normal and PWM sizes are valid */ 1726 if (db_bar_size < norm_regsize) { 1727 DP_ERR(p_hwfn->p_dev, "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", db_bar_size, norm_regsize); 1728 return ECORE_NORESOURCES; 1729 } 1730 if (pwm_regsize < ECORE_MIN_PWM_REGION) { 1731 DP_ERR(p_hwfn->p_dev, "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size, norm_regsize); 1732 return ECORE_NORESOURCES; 1733 } 1734 1735 /* Calculate number of DPIs */ 1736 roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; 1737 if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) || 1738 ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) { 1739 /* Either EDPM is mandatory, or we are attempting to allocate a 1740 * WID per CPU. 
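		 * E.g., a host reporting 32 active CPUs gets a first attempt
		 * at 32 WIDs here, before the single-WID fallback below.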
1741 */ 1742 n_cpus = OSAL_NUM_ACTIVE_CPU(); 1743 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1744 } 1745 1746 cond = ((rc != ECORE_SUCCESS) && 1747 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) || 1748 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE); 1749 if (cond || p_hwfn->dcbx_no_edpm) { 1750 /* Either EDPM is disabled from user configuration, or it is 1751 * disabled via DCBx, or it is not mandatory and we failed to 1752 * allocated a WID per CPU. 1753 */ 1754 n_cpus = 1; 1755 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1756 1757 #ifdef CONFIG_ECORE_ROCE 1758 /* If we entered this flow due to DCBX then the DPM register is 1759 * already configured. 1760 */ 1761 if (cond) 1762 ecore_rdma_dpm_bar(p_hwfn, p_ptt); 1763 #endif 1764 } 1765 1766 p_hwfn->wid_count = (u16)n_cpus; 1767 1768 DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", 1769 norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count, 1770 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 1771 "disabled" : "enabled"); 1772 1773 /* Check return codes from above calls */ 1774 if (rc != ECORE_SUCCESS) { 1775 DP_ERR(p_hwfn, 1776 "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d. You can try reducing this down to %d via user configuration n_dpi or by disabling EDPM via user configuration roce_edpm\n", 1777 p_hwfn->dpi_count, 1778 p_hwfn->pf_params.rdma_pf_params.min_dpis, 1779 ECORE_MIN_DPIS); 1780 return ECORE_NORESOURCES; 1781 } 1782 1783 /* Update hwfn */ 1784 p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to 1785 * calculate the doorbell 1786 * address 1787 */ 1788 1789 /* Update registers */ 1790 /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ 1791 pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4); 1792 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 1793 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 1794 1795 return ECORE_SUCCESS; 1796 } 1797 1798 static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn, 1799 struct ecore_ptt *p_ptt, 1800 int hw_mode) 1801 { 1802 enum _ecore_status_t rc = ECORE_SUCCESS; 1803 1804 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, 1805 hw_mode); 1806 if (rc != ECORE_SUCCESS) 1807 return rc; 1808 #if 0 1809 /* FW 8.10.5.0 requires us to configure PF_VECTOR and DUALMODE in LLH. 1810 * This would hopefully be moved to MFW. 1811 */ 1812 if (IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) { 1813 u8 pf_id = 0; 1814 1815 if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) == 1816 ECORE_SUCCESS) { 1817 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 1818 "PF[%08x] is first eth on engine\n", 1819 pf_id); 1820 1821 /* We should have configured BIT for ppfid, i.e., the 1822 * relative function number in the port. But there's a 1823 * bug in LLH in BB where the ppfid is actually engine 1824 * based, so we need to take this into account. 1825 */ 1826 if (!ECORE_IS_BB(p_hwfn->p_dev)) 1827 pf_id /= p_hwfn->p_dev->num_ports_in_engines; 1828 1829 ecore_wr(p_hwfn, p_ptt, 1830 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id); 1831 } 1832 1833 /* Take the protocol-based hit vector if there is a hit, 1834 * otherwise take the other vector. 
1835 */ 1836 ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2); 1837 } 1838 #endif 1839 #ifndef ASIC_ONLY 1840 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) 1841 return ECORE_SUCCESS; 1842 1843 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1844 if (ECORE_IS_AH(p_hwfn->p_dev)) 1845 return ECORE_SUCCESS; 1846 else if (ECORE_IS_BB(p_hwfn->p_dev)) 1847 ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id); 1848 else /* E5 */ 1849 ECORE_E5_MISSING_CODE; 1850 } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 1851 if (p_hwfn->p_dev->num_hwfns > 1) { 1852 /* Activate OPTE in CMT */ 1853 u32 val; 1854 1855 val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV); 1856 val |= 0x10; 1857 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val); 1858 ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1); 1859 ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1); 1860 ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1); 1861 ecore_wr(p_hwfn, p_ptt, 1862 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1); 1863 ecore_wr(p_hwfn, p_ptt, 1864 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555); 1865 ecore_wr(p_hwfn, p_ptt, 1866 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4, 1867 0x55555555); 1868 } 1869 1870 ecore_emul_link_init(p_hwfn, p_ptt); 1871 } else { 1872 DP_INFO(p_hwfn->p_dev, "link is not being configured\n"); 1873 } 1874 #endif 1875 1876 return rc; 1877 } 1878 1879 static enum _ecore_status_t ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, 1880 struct ecore_ptt *p_ptt, 1881 struct ecore_tunnel_info *p_tunn, 1882 int hw_mode, 1883 bool b_hw_start, 1884 enum ecore_int_mode int_mode, 1885 bool allow_npar_tx_switch) 1886 { 1887 u8 rel_pf_id = p_hwfn->rel_pf_id; 1888 u32 prs_reg; 1889 enum _ecore_status_t rc = ECORE_SUCCESS; 1890 u16 ctrl; 1891 int pos; 1892 1893 if (p_hwfn->mcp_info) { 1894 struct ecore_mcp_function_info *p_info; 1895 1896 p_info = &p_hwfn->mcp_info->func_info; 1897 if (p_info->bandwidth_min) 1898 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 1899 1900 /* Update rate limit once we'll actually have a link */ 1901 p_hwfn->qm_info.pf_rl = 100000; 1902 } 1903 ecore_cxt_hw_init_pf(p_hwfn); 1904 1905 ecore_int_igu_init_rt(p_hwfn); 1906 1907 /* Set VLAN in NIG if needed */ 1908 if (hw_mode & (1 << MODE_MF_SD)) { 1909 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n"); 1910 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 1911 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 1912 p_hwfn->hw_info.ovlan); 1913 } 1914 1915 /* Enable classification by MAC if needed */ 1916 if (hw_mode & (1 << MODE_MF_SI)) { 1917 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n"); 1918 STORE_RT_REG(p_hwfn, 1919 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); 1920 } 1921 1922 /* Protocl Configuration - @@@TBD - should we set 0 otherwise?*/ 1923 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 1924 (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0); 1925 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 1926 (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 
1 : 0); 1927 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 1928 1929 /* perform debug configuration when chip is out of reset */ 1930 OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); 1931 1932 /* Cleanup chip from previous driver if such remains exist */ 1933 rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); 1934 if (rc != ECORE_SUCCESS) { 1935 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL); 1936 return rc; 1937 } 1938 1939 /* PF Init sequence */ 1940 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); 1941 if (rc) 1942 return rc; 1943 1944 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ 1945 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); 1946 if (rc) 1947 return rc; 1948 1949 /* Pure runtime initializations - directly to the HW */ 1950 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 1951 1952 /* PCI relaxed ordering causes a decrease in the performance on some 1953 * systems. Till a root cause is found, disable this attribute in the 1954 * PCI config space. 1955 */ 1956 pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP); 1957 if (!pos) { 1958 DP_NOTICE(p_hwfn, true, 1959 "Failed to find the PCI Express Capability structure in the PCI config space\n"); 1960 return ECORE_IO; 1961 } 1962 OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl); 1963 ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN; 1964 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl); 1965 1966 rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); 1967 if (rc) 1968 return rc; 1969 #if 0 1970 /* FW 8.10.5.0 requires us to configure MSG_INFO in PRS. 1971 * This would hopefully be moved to MFW. 1972 */ 1973 if (IS_MF_SI(p_hwfn)) { 1974 u8 pf_id = 0; 1975 u32 val; 1976 1977 if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) == 1978 ECORE_SUCCESS) { 1979 if (p_hwfn->rel_pf_id == pf_id) { 1980 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 1981 "PF[%d] is first ETH on engine\n", 1982 pf_id); 1983 val = 1; 1984 } 1985 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val); 1986 } 1987 } 1988 #endif 1989 /* Add an LLH filter with the primary MAC address. 
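     * This is done only on CMT devices (num_hwfns > 1) and only by the
     * leading hwfn; a failure is reported below but does not fail the init
     * flow.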
*/ 1990 if (p_hwfn->p_dev->num_hwfns > 1 && IS_LEAD_HWFN(p_hwfn)) { 1991 rc = ecore_llh_add_mac_filter(p_hwfn, p_ptt, 1992 p_hwfn->hw_info.hw_mac_addr); 1993 if (rc != ECORE_SUCCESS) 1994 DP_NOTICE(p_hwfn, false, 1995 "Failed to add an LLH filter with the primary MAC\n"); 1996 } 1997 1998 if (b_hw_start) { 1999 /* enable interrupts */ 2000 rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode); 2001 if (rc != ECORE_SUCCESS) 2002 return rc; 2003 2004 /* send function start command */ 2005 rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode, 2006 allow_npar_tx_switch); 2007 if (rc) { 2008 DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n"); 2009 } else { 2010 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2011 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2012 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2013 2014 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 2015 { 2016 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, 2017 (1 << 2)); 2018 ecore_wr(p_hwfn, p_ptt, 2019 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 2020 0x100); 2021 } 2022 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2023 "PRS_REG_SEARCH registers after start PFn\n"); 2024 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); 2025 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2026 "PRS_REG_SEARCH_TCP: %x\n", prs_reg); 2027 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); 2028 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2029 "PRS_REG_SEARCH_UDP: %x\n", prs_reg); 2030 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); 2031 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2032 "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); 2033 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); 2034 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2035 "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); 2036 prs_reg = ecore_rd(p_hwfn, p_ptt, 2037 PRS_REG_SEARCH_TCP_FIRST_FRAG); 2038 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2039 "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", 2040 prs_reg); 2041 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2042 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2043 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2044 } 2045 } 2046 return rc; 2047 } 2048 2049 enum _ecore_status_t ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn, 2050 struct ecore_ptt *p_ptt, 2051 u8 enable) 2052 { 2053 u32 delay_idx = 0, val, set_val = enable ? 
1 : 0; 2054 2055 /* Change PF in PXP */ 2056 ecore_wr(p_hwfn, p_ptt, 2057 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 2058 2059 /* wait until value is set - try for 1 second every 50us */ 2060 for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 2061 val = ecore_rd(p_hwfn, p_ptt, 2062 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 2063 if (val == set_val) 2064 break; 2065 2066 OSAL_UDELAY(50); 2067 } 2068 2069 if (val != set_val) { 2070 DP_NOTICE(p_hwfn, true, 2071 "PFID_ENABLE_MASTER wasn't changed after a second\n"); 2072 return ECORE_UNKNOWN_ERROR; 2073 } 2074 2075 return ECORE_SUCCESS; 2076 } 2077 2078 static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, 2079 struct ecore_ptt *p_main_ptt) 2080 { 2081 /* Read shadow of current MFW mailbox */ 2082 ecore_mcp_read_mb(p_hwfn, p_main_ptt); 2083 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 2084 p_hwfn->mcp_info->mfw_mb_cur, 2085 p_hwfn->mcp_info->mfw_mb_length); 2086 } 2087 2088 static enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, 2089 struct ecore_hw_init_params *p_params) 2090 { 2091 if (p_params->p_tunn) { 2092 ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 2093 ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 2094 } 2095 2096 p_hwfn->b_int_enabled = 1; 2097 2098 return ECORE_SUCCESS; 2099 } 2100 2101 static void 2102 ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, 2103 struct ecore_drv_load_params *p_drv_load) 2104 { 2105 OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); 2106 2107 if (p_drv_load != OSAL_NULL) { 2108 p_load_req->drv_role = p_drv_load->is_crash_kernel ? 2109 ECORE_DRV_ROLE_KDUMP : 2110 ECORE_DRV_ROLE_OS; 2111 p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 2112 p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 2113 p_load_req->override_force_load = 2114 p_drv_load->override_force_load; 2115 } else { 2116 p_load_req->drv_role = ECORE_DRV_ROLE_OS; 2117 p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; 2118 p_load_req->avoid_eng_reset = false; 2119 p_load_req->override_force_load = 2120 ECORE_OVERRIDE_FORCE_LOAD_NONE; 2121 } 2122 } 2123 2124 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 2125 struct ecore_hw_init_params *p_params) 2126 { 2127 struct ecore_load_req_params load_req_params; 2128 u32 load_code, param, drv_mb_param; 2129 bool b_default_mtu = true; 2130 struct ecore_hwfn *p_hwfn; 2131 enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc; 2132 int i; 2133 2134 if ((p_params->int_mode == ECORE_INT_MODE_MSI) && (p_dev->num_hwfns > 1)) { 2135 DP_NOTICE(p_dev, false, 2136 "MSI mode is not supported for CMT devices\n"); 2137 return ECORE_INVAL; 2138 } 2139 2140 if (IS_PF(p_dev)) { 2141 rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data); 2142 if (rc != ECORE_SUCCESS) 2143 return rc; 2144 } 2145 2146 for_each_hwfn(p_dev, i) { 2147 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2148 2149 /* If management didn't provide a default, set one of our own */ 2150 if (!p_hwfn->hw_info.mtu) { 2151 p_hwfn->hw_info.mtu = 1500; 2152 b_default_mtu = false; 2153 } 2154 2155 if (IS_VF(p_dev)) { 2156 ecore_vf_start(p_hwfn, p_params); 2157 continue; 2158 } 2159 2160 /* Enable DMAE in PXP */ 2161 rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); 2162 if (rc != ECORE_SUCCESS) 2163 return rc; 2164 2165 rc = ecore_calc_hw_mode(p_hwfn); 2166 if (rc != ECORE_SUCCESS) 2167 return rc; 2168 2169 ecore_fill_load_req_params(&load_req_params, 2170 p_params->p_drv_load_params); 2171 rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 2172 
&load_req_params); 2173 if (rc != ECORE_SUCCESS) { 2174 DP_NOTICE(p_hwfn, true, 2175 "Failed sending a LOAD_REQ command\n"); 2176 return rc; 2177 } 2178 2179 load_code = load_req_params.load_code; 2180 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2181 "Load request was sent. Load code: 0x%x\n", 2182 load_code); 2183 2184 ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); 2185 2186 /* CQ75580: 2187 * When comming back from hiberbate state, the registers from 2188 * which shadow is read initially are not initialized. It turns 2189 * out that these registers get initialized during the call to 2190 * ecore_mcp_load_req request. So we need to reread them here 2191 * to get the proper shadow register value. 2192 * Note: This is a workaround for the missing MFW 2193 * initialization. It may be removed once the implementation 2194 * is done. 2195 */ 2196 ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 2197 2198 /* Only relevant for recovery: 2199 * Clear the indication after the LOAD_REQ command is responded 2200 * by the MFW. 2201 */ 2202 p_dev->recov_in_prog = false; 2203 2204 p_hwfn->first_on_engine = (load_code == 2205 FW_MSG_CODE_DRV_LOAD_ENGINE); 2206 2207 if (!qm_lock_init) { 2208 OSAL_SPIN_LOCK_INIT(&qm_lock); 2209 qm_lock_init = true; 2210 } 2211 2212 switch (load_code) { 2213 case FW_MSG_CODE_DRV_LOAD_ENGINE: 2214 rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, 2215 p_hwfn->hw_info.hw_mode); 2216 if (rc != ECORE_SUCCESS) 2217 break; 2218 /* Fall into */ 2219 case FW_MSG_CODE_DRV_LOAD_PORT: 2220 rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, 2221 p_hwfn->hw_info.hw_mode); 2222 if (rc != ECORE_SUCCESS) 2223 break; 2224 /* Fall into */ 2225 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 2226 rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, 2227 p_params->p_tunn, 2228 p_hwfn->hw_info.hw_mode, 2229 p_params->b_hw_start, 2230 p_params->int_mode, 2231 p_params->allow_npar_tx_switch); 2232 break; 2233 default: 2234 DP_NOTICE(p_hwfn, false, 2235 "Unexpected load code [0x%08x]", load_code); 2236 rc = ECORE_NOTIMPL; 2237 break; 2238 } 2239 2240 if (rc != ECORE_SUCCESS) 2241 DP_NOTICE(p_hwfn, true, 2242 "init phase failed for loadcode 0x%x (rc %d)\n", 2243 load_code, rc); 2244 2245 /* ACK mfw regardless of success or failure of initialization */ 2246 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2247 DRV_MSG_CODE_LOAD_DONE, 2248 0, &load_code, ¶m); 2249 2250 /* Check the return value of the ecore_hw_init_*() function */ 2251 if (rc != ECORE_SUCCESS) 2252 return rc; 2253 2254 /* Check the return value of the LOAD_DONE command */ 2255 if (mfw_rc != ECORE_SUCCESS) { 2256 DP_NOTICE(p_hwfn, true, 2257 "Failed sending a LOAD_DONE command\n"); 2258 return mfw_rc; 2259 } 2260 2261 /* Check if there is a DID mismatch between nvm-cfg/efuse */ 2262 if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) 2263 DP_NOTICE(p_hwfn, false, 2264 "warning: device configuration is not supported on this board type. 
The device may not function as expected.\n"); 2265 2266 /* send DCBX attention request command */ 2267 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 2268 "sending phony dcbx set command to trigger DCBx attention handling\n"); 2269 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2270 DRV_MSG_CODE_SET_DCBX, 2271 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, 2272 &load_code, ¶m); 2273 if (mfw_rc != ECORE_SUCCESS) { 2274 DP_NOTICE(p_hwfn, true, 2275 "Failed to send DCBX attention request\n"); 2276 return mfw_rc; 2277 } 2278 2279 p_hwfn->hw_init_done = true; 2280 } 2281 2282 if (IS_PF(p_dev)) { 2283 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2284 drv_mb_param = STORM_FW_VERSION; 2285 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2286 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 2287 drv_mb_param, &load_code, ¶m); 2288 if (rc != ECORE_SUCCESS) 2289 DP_INFO(p_hwfn, "Failed to update firmware version\n"); 2290 2291 if (!b_default_mtu) { 2292 rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, 2293 p_hwfn->hw_info.mtu); 2294 if (rc != ECORE_SUCCESS) 2295 DP_INFO(p_hwfn, "Failed to update default mtu\n"); 2296 } 2297 2298 rc = ecore_mcp_ov_update_driver_state(p_hwfn, 2299 p_hwfn->p_main_ptt, 2300 ECORE_OV_DRIVER_STATE_DISABLED); 2301 if (rc != ECORE_SUCCESS) 2302 DP_INFO(p_hwfn, "Failed to update driver state\n"); 2303 2304 rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, 2305 ECORE_OV_ESWITCH_VEB); 2306 if (rc != ECORE_SUCCESS) 2307 DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); 2308 } 2309 2310 return rc; 2311 } 2312 2313 #define ECORE_HW_STOP_RETRY_LIMIT (10) 2314 static void ecore_hw_timers_stop(struct ecore_dev *p_dev, 2315 struct ecore_hwfn *p_hwfn, 2316 struct ecore_ptt *p_ptt) 2317 { 2318 int i; 2319 2320 /* close timers */ 2321 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 2322 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 2323 for (i = 0; 2324 i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog; 2325 i++) { 2326 if ((!ecore_rd(p_hwfn, p_ptt, 2327 TM_REG_PF_SCAN_ACTIVE_CONN)) && 2328 (!ecore_rd(p_hwfn, p_ptt, 2329 TM_REG_PF_SCAN_ACTIVE_TASK))) 2330 break; 2331 2332 /* Dependent on number of connection/tasks, possibly 2333 * 1ms sleep is required between polls 2334 */ 2335 OSAL_MSLEEP(1); 2336 } 2337 2338 if (i < ECORE_HW_STOP_RETRY_LIMIT) 2339 return; 2340 2341 DP_NOTICE(p_hwfn, true, 2342 "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 2343 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 2344 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 2345 } 2346 2347 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev) 2348 { 2349 int j; 2350 2351 for_each_hwfn(p_dev, j) { 2352 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2353 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2354 2355 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2356 } 2357 } 2358 2359 static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn, 2360 struct ecore_ptt *p_ptt, 2361 u32 addr, u32 expected_val) 2362 { 2363 u32 val = ecore_rd(p_hwfn, p_ptt, addr); 2364 2365 if (val != expected_val) { 2366 DP_NOTICE(p_hwfn, true, 2367 "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n", 2368 addr, val, expected_val); 2369 return ECORE_UNKNOWN_ERROR; 2370 } 2371 2372 return ECORE_SUCCESS; 2373 } 2374 2375 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) 2376 { 2377 struct ecore_hwfn *p_hwfn; 2378 struct ecore_ptt *p_ptt; 2379 enum _ecore_status_t rc, rc2 = ECORE_SUCCESS; 2380 int j; 2381 2382 for_each_hwfn(p_dev, j) { 2383 p_hwfn = &p_dev->hwfns[j]; 
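        /* Per-hwfn stop sequence (summary of the steps below): send
         * UNLOAD_REQ to the MFW, stop the PF in FW, close the NIG and
         * parser, stop the timers, disable the IGU and doorbells, and
         * finally acknowledge with UNLOAD_DONE.
         */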
        p_ptt = p_hwfn->p_main_ptt;

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");

        if (IS_VF(p_dev)) {
            ecore_vf_pf_int_cleanup(p_hwfn);
            rc = ecore_vf_pf_reset(p_hwfn);
            if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true,
                          "ecore_vf_pf_reset failed. rc = %d.\n",
                          rc);
                rc2 = ECORE_UNKNOWN_ERROR;
            }
            continue;
        }

        /* mark the hw as uninitialized... */
        p_hwfn->hw_init_done = false;

        /* Send unload command to MCP */
        if (!p_dev->recov_in_prog) {
            rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
            if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true,
                          "Failed sending a UNLOAD_REQ command. rc = %d.\n",
                          rc);
                rc2 = ECORE_UNKNOWN_ERROR;
            }
        }

        OSAL_DPC_SYNC(p_hwfn);

        /* After this point no MFW attentions are expected, e.g. prevent
         * race between pf stop and dcbx pf update.
         */

        rc = ecore_sp_pf_stop(p_hwfn);
        if (rc != ECORE_SUCCESS) {
            DP_NOTICE(p_hwfn, true,
                      "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
                      rc);
            rc2 = ECORE_UNKNOWN_ERROR;
        }

        /* perform debug action after PF stop was sent */
        OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);

        /* close NIG to BRB gate */
        ecore_wr(p_hwfn, p_ptt,
                 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

        /* close parser */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
        ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
        ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
        ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
        ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

        /* @@@TBD - clean transmission queues (5.b) */
        /* @@@TBD - clean BTB (5.c) */

        ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);

        /* @@@TBD - verify DMAE requests are done (8) */

        /* Disable Attention Generation */
        ecore_int_igu_disable_int(p_hwfn, p_ptt);
        ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
        ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
        ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
        rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS) {
            DP_NOTICE(p_hwfn, true,
                      "Failed to return IGU CAM to default\n");
            rc2 = ECORE_UNKNOWN_ERROR;
        }

        /* Need to wait 1ms to guarantee SBs are cleared */
        OSAL_MSLEEP(1);

        if (!p_dev->recov_in_prog) {
            ecore_verify_reg_val(p_hwfn, p_ptt,
                                 QM_REG_USG_CNT_PF_TX, 0);
            ecore_verify_reg_val(p_hwfn, p_ptt,
                                 QM_REG_USG_CNT_PF_OTHER, 0);
            /* @@@TBD - assert on incorrect xCFC values (10.b) */
        }

        /* Disable PF in HW blocks */
        ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
        ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);

        if (!p_dev->recov_in_prog) {
            rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
            if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true,
                          "Failed sending a UNLOAD_DONE command.
rc = %d.\n", 2481 rc); 2482 rc2 = ECORE_UNKNOWN_ERROR; 2483 } 2484 } 2485 2486 /* remove the LLH filter with the primary MAC addres */ 2487 if (p_hwfn->p_dev->num_hwfns > 1 && IS_LEAD_HWFN(p_hwfn)) 2488 ecore_llh_remove_mac_filter(p_hwfn, p_ptt, 2489 p_hwfn->hw_info.hw_mac_addr); 2490 } /* hwfn loop */ 2491 2492 if (IS_PF(p_dev)) { 2493 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2494 p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt; 2495 2496 /* Disable DMAE in PXP - in CMT, this should only be done for 2497 * first hw-function, and only after all transactions have 2498 * stopped for all active hw-functions. 2499 */ 2500 rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false); 2501 if (rc != ECORE_SUCCESS) { 2502 DP_NOTICE(p_hwfn, true, 2503 "ecore_change_pci_hwfn failed. rc = %d.\n", 2504 rc); 2505 rc2 = ECORE_UNKNOWN_ERROR; 2506 } 2507 } 2508 2509 return rc2; 2510 } 2511 2512 void ecore_hw_stop_fastpath(struct ecore_dev *p_dev) 2513 { 2514 int j; 2515 2516 for_each_hwfn(p_dev, j) { 2517 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2518 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2519 2520 if (IS_VF(p_dev)) { 2521 ecore_vf_pf_int_cleanup(p_hwfn); 2522 continue; 2523 } 2524 2525 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n"); 2526 2527 ecore_wr(p_hwfn, p_ptt, 2528 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2529 2530 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2531 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2532 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2533 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2534 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2535 2536 /* @@@TBD - clean transmission queues (5.b) */ 2537 /* @@@TBD - clean BTB (5.c) */ 2538 2539 /* @@@TBD - verify DMAE requests are done (8) */ 2540 2541 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 2542 /* Need to wait 1ms to guarantee SBs are cleared */ 2543 OSAL_MSLEEP(1); 2544 } 2545 } 2546 2547 void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn) 2548 { 2549 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2550 2551 if (IS_VF(p_hwfn->p_dev)) 2552 return; 2553 2554 /* If roce info is allocated it means roce is initialized and should 2555 * be enabled in searcher. 2556 */ 2557 if (p_hwfn->p_rdma_info) { 2558 if (p_hwfn->b_rdma_enabled_in_prs) 2559 ecore_wr(p_hwfn, p_ptt, 2560 p_hwfn->rdma_prs_search_reg, 0x1); 2561 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1); 2562 } 2563 2564 /* Re-open incoming traffic */ 2565 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2566 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 2567 } 2568 /* TEMP macro to be removed when wol code revisted */ 2569 #define ECORE_WOL_WR(_p_hwfn, _p_ptt, _offset, _val) ECORE_IS_BB(_p_hwfn->p_dev) ? \ 2570 ecore_wr(_p_hwfn, _p_ptt, _offset, _val) : \ 2571 ecore_mcp_wol_wr(_p_hwfn, _p_ptt, _offset, _val); 2572 2573 enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev, 2574 const bool b_enable, 2575 u32 reg_idx, 2576 u32 pattern_size, 2577 u32 crc) 2578 { 2579 struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2580 u32 reg_len = 0; 2581 u32 reg_crc = 0; 2582 2583 /* Get length and CRC register offsets */ 2584 switch (reg_idx) 2585 { 2586 case 0: 2587 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_LEN_BB : 2588 WOL_REG_ACPI_PAT_0_LEN_K2_E5; 2589 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB : 2590 WOL_REG_ACPI_PAT_0_CRC_K2_E5; 2591 break; 2592 case 1: 2593 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB : 2594 WOL_REG_ACPI_PAT_1_LEN_K2_E5; 2595 reg_crc = ECORE_IS_BB(p_dev) ? 
NIG_REG_ACPI_PAT_1_CRC_BB : 2596 WOL_REG_ACPI_PAT_1_CRC_K2_E5; 2597 break; 2598 case 2: 2599 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB : 2600 WOL_REG_ACPI_PAT_2_LEN_K2_E5; 2601 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_CRC_BB : 2602 WOL_REG_ACPI_PAT_2_CRC_K2_E5; 2603 break; 2604 case 3: 2605 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB : 2606 WOL_REG_ACPI_PAT_3_LEN_K2_E5; 2607 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB : 2608 WOL_REG_ACPI_PAT_3_CRC_K2_E5; 2609 break; 2610 case 4: 2611 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_LEN_BB : 2612 WOL_REG_ACPI_PAT_4_LEN_K2_E5; 2613 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB : 2614 WOL_REG_ACPI_PAT_4_CRC_K2_E5; 2615 break; 2616 case 5: 2617 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB : 2618 WOL_REG_ACPI_PAT_5_LEN_K2_E5; 2619 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB : 2620 WOL_REG_ACPI_PAT_5_CRC_K2_E5; 2621 break; 2622 case 6: 2623 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB : 2624 WOL_REG_ACPI_PAT_6_LEN_K2_E5; 2625 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB : 2626 WOL_REG_ACPI_PAT_6_CRC_K2_E5; 2627 break; 2628 case 7: 2629 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB : 2630 WOL_REG_ACPI_PAT_7_LEN_K2_E5; 2631 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_CRC_BB : 2632 WOL_REG_ACPI_PAT_7_CRC_K2_E5; 2633 break; 2634 default: 2635 return ECORE_UNKNOWN_ERROR; 2636 } 2637 2638 /* Allign pattern size to 4 */ 2639 while (pattern_size % 4) 2640 { 2641 pattern_size++; 2642 } 2643 /* write pattern length */ 2644 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, reg_len, pattern_size); 2645 2646 /* write crc value*/ 2647 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, reg_crc, crc); 2648 2649 DP_INFO(p_dev, 2650 "ecore_set_nwuf_reg: idx[%d] reg_crc[0x%x=0x%08x] " 2651 "reg_len[0x%x=0x%x]\n", 2652 reg_idx, reg_crc, crc, reg_len, pattern_size); 2653 2654 return ECORE_SUCCESS; 2655 } 2656 2657 void ecore_wol_buffer_clear(struct ecore_dev *p_dev) 2658 { 2659 struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2660 const u32 wake_buffer_clear_offset = 2661 ECORE_IS_BB(p_dev) ? 2662 NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5; 2663 2664 DP_INFO(p_dev, 2665 "ecore_wol_buffer_clear: reset " 2666 "REG_WAKE_BUFFER_CLEAR offset=0x%08x\n", 2667 wake_buffer_clear_offset); 2668 2669 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, wake_buffer_clear_offset, 1); 2670 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, wake_buffer_clear_offset, 0); 2671 } 2672 2673 enum _ecore_status_t ecore_get_wake_info(struct ecore_dev *p_dev, 2674 struct ecore_wake_info *wake_info) 2675 { 2676 struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2677 u32 *buf = OSAL_NULL; 2678 u32 i = 0; 2679 const u32 reg_wake_buffer_offest = 2680 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB : 2681 WOL_REG_WAKE_BUFFER_K2_E5; 2682 2683 wake_info->wk_info = ecore_rd(hwfn, hwfn->p_main_ptt, 2684 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB : 2685 WOL_REG_WAKE_INFO_K2_E5); 2686 wake_info->wk_details = ecore_rd(hwfn, hwfn->p_main_ptt, 2687 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_DETAILS_BB : 2688 WOL_REG_WAKE_DETAILS_K2_E5); 2689 wake_info->wk_pkt_len = ecore_rd(hwfn, hwfn->p_main_ptt, 2690 ECORE_IS_BB(p_dev) ? 
NIG_REG_WAKE_PKT_LEN_BB : 2691 WOL_REG_WAKE_PKT_LEN_K2_E5); 2692 2693 DP_INFO(p_dev, 2694 "ecore_get_wake_info: REG_WAKE_INFO=0x%08x " 2695 "REG_WAKE_DETAILS=0x%08x " 2696 "REG_WAKE_PKT_LEN=0x%08x\n", 2697 wake_info->wk_info, 2698 wake_info->wk_details, 2699 wake_info->wk_pkt_len); 2700 2701 buf = (u32 *)wake_info->wk_buffer; 2702 2703 for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++) 2704 { 2705 if ((i*sizeof(u32)) >= sizeof(wake_info->wk_buffer)) 2706 { 2707 DP_INFO(p_dev, 2708 "ecore_get_wake_info: i index to 0 high=%d\n", 2709 i); 2710 break; 2711 } 2712 buf[i] = ecore_rd(hwfn, hwfn->p_main_ptt, 2713 reg_wake_buffer_offest + (i * sizeof(u32))); 2714 DP_INFO(p_dev, "ecore_get_wake_info: wk_buffer[%u]: 0x%08x\n", 2715 i, buf[i]); 2716 } 2717 2718 ecore_wol_buffer_clear(p_dev); 2719 2720 return ECORE_SUCCESS; 2721 } 2722 2723 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 2724 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn) 2725 { 2726 ecore_ptt_pool_free(p_hwfn); 2727 OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info); 2728 p_hwfn->hw_info.p_igu_info = OSAL_NULL; 2729 } 2730 2731 /* Setup bar access */ 2732 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn) 2733 { 2734 /* clear indirect access */ 2735 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) { 2736 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2737 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0); 2738 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2739 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0); 2740 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2741 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0); 2742 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2743 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0); 2744 } else { 2745 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2746 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); 2747 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2748 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); 2749 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2750 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); 2751 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2752 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); 2753 } 2754 2755 /* Clean Previous errors if such exist */ 2756 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2757 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 2758 1 << p_hwfn->abs_pf_id); 2759 2760 /* enable internal target-read */ 2761 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2762 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 2763 } 2764 2765 static void get_function_id(struct ecore_hwfn *p_hwfn) 2766 { 2767 /* ME Register */ 2768 p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, 2769 PXP_PF_ME_OPAQUE_ADDR); 2770 2771 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 2772 2773 /* Bits 16-19 from the ME registers are the pf_num */ 2774 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; 2775 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2776 PXP_CONCRETE_FID_PFID); 2777 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2778 PXP_CONCRETE_FID_PORT); 2779 2780 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 2781 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", 2782 p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); 2783 } 2784 2785 void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) 2786 { 2787 u32 *feat_num = p_hwfn->hw_info.feat_num; 2788 struct ecore_sb_cnt_info sb_cnt; 2789 u32 non_l2_sbs = 0; 2790 2791 OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt)); 2792 ecore_int_get_num_sbs(p_hwfn, &sb_cnt); 2793 2794 #ifdef CONFIG_ECORE_ROCE 2795 /* Roce CNQ require each: 1 status block. 
1 CNQ, we divide the 2796 * status blocks equally between L2 / RoCE but with consideration as 2797 * to how many l2 queues / cnqs we have 2798 */ 2799 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 2800 u32 max_cnqs; 2801 2802 feat_num[ECORE_RDMA_CNQ] = 2803 OSAL_MIN_T(u32, 2804 sb_cnt.cnt / 2, 2805 RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM)); 2806 2807 /* Upper layer might require less */ 2808 max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs; 2809 if (max_cnqs) { 2810 if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE) 2811 max_cnqs = 0; 2812 feat_num[ECORE_RDMA_CNQ] = 2813 OSAL_MIN_T(u32, 2814 feat_num[ECORE_RDMA_CNQ], 2815 max_cnqs); 2816 } 2817 2818 non_l2_sbs = feat_num[ECORE_RDMA_CNQ]; 2819 } 2820 #endif 2821 2822 /* L2 Queues require each: 1 status block. 1 L2 queue */ 2823 if (ECORE_IS_L2_PERSONALITY(p_hwfn)) { 2824 /* Start by allocating VF queues, then PF's */ 2825 feat_num[ECORE_VF_L2_QUE] = 2826 OSAL_MIN_T(u32, 2827 RESC_NUM(p_hwfn, ECORE_L2_QUEUE), 2828 sb_cnt.iov_cnt); 2829 feat_num[ECORE_PF_L2_QUE] = 2830 OSAL_MIN_T(u32, 2831 sb_cnt.cnt - non_l2_sbs, 2832 RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - 2833 FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE)); 2834 } 2835 2836 if (ECORE_IS_FCOE_PERSONALITY(p_hwfn)) 2837 feat_num[ECORE_FCOE_CQ] = 2838 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 2839 ECORE_CMDQS_CQS)); 2840 2841 if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) 2842 feat_num[ECORE_ISCSI_CQ] = 2843 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 2844 ECORE_CMDQS_CQS)); 2845 2846 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 2847 "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n", 2848 (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), 2849 (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE), 2850 (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ), 2851 (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ), 2852 (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ), 2853 (int)sb_cnt.cnt); 2854 } 2855 2856 const char *ecore_hw_get_resc_name(enum ecore_resources res_id) 2857 { 2858 switch (res_id) { 2859 case ECORE_L2_QUEUE: 2860 return "L2_QUEUE"; 2861 case ECORE_VPORT: 2862 return "VPORT"; 2863 case ECORE_RSS_ENG: 2864 return "RSS_ENG"; 2865 case ECORE_PQ: 2866 return "PQ"; 2867 case ECORE_RL: 2868 return "RL"; 2869 case ECORE_MAC: 2870 return "MAC"; 2871 case ECORE_VLAN: 2872 return "VLAN"; 2873 case ECORE_RDMA_CNQ_RAM: 2874 return "RDMA_CNQ_RAM"; 2875 case ECORE_ILT: 2876 return "ILT"; 2877 case ECORE_LL2_QUEUE: 2878 return "LL2_QUEUE"; 2879 case ECORE_CMDQS_CQS: 2880 return "CMDQS_CQS"; 2881 case ECORE_RDMA_STATS_QUEUE: 2882 return "RDMA_STATS_QUEUE"; 2883 case ECORE_BDQ: 2884 return "BDQ"; 2885 case ECORE_SB: 2886 return "SB"; 2887 default: 2888 return "UNKNOWN_RESOURCE"; 2889 } 2890 } 2891 2892 static enum _ecore_status_t 2893 __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 2894 enum ecore_resources res_id, u32 resc_max_val, 2895 u32 *p_mcp_resp) 2896 { 2897 enum _ecore_status_t rc; 2898 2899 rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id, 2900 resc_max_val, p_mcp_resp); 2901 if (rc != ECORE_SUCCESS) { 2902 DP_NOTICE(p_hwfn, true, 2903 "MFW response failure for a max value setting of resource %d [%s]\n", 2904 res_id, ecore_hw_get_resc_name(res_id)); 2905 return rc; 2906 } 2907 2908 if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 2909 DP_INFO(p_hwfn, 2910 "Failed to set the max value of resource %d [%s]. 
mcp_resp = 0x%08x.\n", 2911 res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp); 2912 2913 return ECORE_SUCCESS; 2914 } 2915 2916 static enum _ecore_status_t 2917 ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn) 2918 { 2919 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 2920 u32 resc_max_val, mcp_resp; 2921 u8 res_id; 2922 enum _ecore_status_t rc; 2923 2924 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 2925 switch (res_id) { 2926 case ECORE_LL2_QUEUE: 2927 resc_max_val = MAX_NUM_LL2_RX_QUEUES; 2928 break; 2929 case ECORE_RDMA_CNQ_RAM: 2930 /* No need for a case for ECORE_CMDQS_CQS since 2931 * CNQ/CMDQS are the same resource. 2932 */ 2933 resc_max_val = NUM_OF_GLOBAL_QUEUES; 2934 break; 2935 case ECORE_RDMA_STATS_QUEUE: 2936 resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 2937 : RDMA_NUM_STATISTIC_COUNTERS_BB; 2938 break; 2939 case ECORE_BDQ: 2940 resc_max_val = BDQ_NUM_RESOURCES; 2941 break; 2942 default: 2943 continue; 2944 } 2945 2946 rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id, 2947 resc_max_val, &mcp_resp); 2948 if (rc != ECORE_SUCCESS) 2949 return rc; 2950 2951 /* There's no point to continue to the next resource if the 2952 * command is not supported by the MFW. 2953 * We do continue if the command is supported but the resource 2954 * is unknown to the MFW. Such a resource will be later 2955 * configured with the default allocation values. 2956 */ 2957 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 2958 return ECORE_NOTIMPL; 2959 } 2960 2961 return ECORE_SUCCESS; 2962 } 2963 2964 static 2965 enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn, 2966 enum ecore_resources res_id, 2967 u32 *p_resc_num, u32 *p_resc_start) 2968 { 2969 u8 num_funcs = p_hwfn->num_funcs_on_engine; 2970 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 2971 2972 switch (res_id) { 2973 case ECORE_L2_QUEUE: 2974 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 : 2975 MAX_NUM_L2_QUEUES_BB) / num_funcs; 2976 break; 2977 case ECORE_VPORT: 2978 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 2979 MAX_NUM_VPORTS_BB) / num_funcs; 2980 break; 2981 case ECORE_RSS_ENG: 2982 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : 2983 ETH_RSS_ENGINE_NUM_BB) / num_funcs; 2984 break; 2985 case ECORE_PQ: 2986 *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 : 2987 MAX_QM_TX_QUEUES_BB) / num_funcs; 2988 *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ 2989 break; 2990 case ECORE_RL: 2991 *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; 2992 break; 2993 case ECORE_MAC: 2994 case ECORE_VLAN: 2995 /* Each VFC resource can accommodate both a MAC and a VLAN */ 2996 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 2997 break; 2998 case ECORE_ILT: 2999 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : 3000 PXP_NUM_ILT_RECORDS_BB) / num_funcs; 3001 break; 3002 case ECORE_LL2_QUEUE: 3003 *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 3004 break; 3005 case ECORE_RDMA_CNQ_RAM: 3006 case ECORE_CMDQS_CQS: 3007 /* CNQ/CMDQS are the same resource */ 3008 *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; 3009 break; 3010 case ECORE_RDMA_STATS_QUEUE: 3011 *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 : 3012 RDMA_NUM_STATISTIC_COUNTERS_BB) / 3013 num_funcs; 3014 break; 3015 case ECORE_BDQ: 3016 if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI && 3017 p_hwfn->hw_info.personality != ECORE_PCI_FCOE) 3018 *p_resc_num = 0; 3019 else 3020 *p_resc_num = 1; 3021 break; 3022 case ECORE_SB: 3023 /* Since we want its value to reflect whether MFW supports 3024 * the new scheme, have a default of 0. 
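         * (The other resources above default to an even split across the
         * enabled PFs; e.g., hypothetically, with 8 enabled functions each
         * PF would get MAX_NUM_VPORTS_BB / 8 vports by default. A 0 here is
         * the deliberate exception.)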
3025 */ 3026 *p_resc_num = 0; 3027 break; 3028 default: 3029 return ECORE_INVAL; 3030 } 3031 3032 switch (res_id) { 3033 case ECORE_BDQ: 3034 if (!*p_resc_num) 3035 *p_resc_start = 0; 3036 else if (p_hwfn->p_dev->num_ports_in_engines == 4) 3037 *p_resc_start = p_hwfn->port_id; 3038 else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) 3039 *p_resc_start = p_hwfn->port_id; 3040 else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 3041 *p_resc_start = p_hwfn->port_id + 2; 3042 break; 3043 default: 3044 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 3045 break; 3046 } 3047 3048 return ECORE_SUCCESS; 3049 } 3050 3051 static enum _ecore_status_t 3052 __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id, 3053 bool drv_resc_alloc) 3054 { 3055 u32 dflt_resc_num = 0, dflt_resc_start = 0; 3056 u32 mcp_resp, *p_resc_num, *p_resc_start; 3057 enum _ecore_status_t rc; 3058 3059 p_resc_num = &RESC_NUM(p_hwfn, res_id); 3060 p_resc_start = &RESC_START(p_hwfn, res_id); 3061 3062 rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 3063 &dflt_resc_start); 3064 if (rc != ECORE_SUCCESS) { 3065 DP_ERR(p_hwfn, 3066 "Failed to get default amount for resource %d [%s]\n", 3067 res_id, ecore_hw_get_resc_name(res_id)); 3068 return rc; 3069 } 3070 3071 #ifndef ASIC_ONLY 3072 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3073 *p_resc_num = dflt_resc_num; 3074 *p_resc_start = dflt_resc_start; 3075 goto out; 3076 } 3077 #endif 3078 3079 rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 3080 &mcp_resp, p_resc_num, p_resc_start); 3081 if (rc != ECORE_SUCCESS) { 3082 DP_NOTICE(p_hwfn, true, 3083 "MFW response failure for an allocation request for resource %d [%s]\n", 3084 res_id, ecore_hw_get_resc_name(res_id)); 3085 return rc; 3086 } 3087 3088 /* Default driver values are applied in the following cases: 3089 * - The resource allocation MB command is not supported by the MFW 3090 * - There is an internal error in the MFW while processing the request 3091 * - The resource ID is unknown to the MFW 3092 */ 3093 if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3094 DP_INFO(p_hwfn, 3095 "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n", 3096 res_id, ecore_hw_get_resc_name(res_id), mcp_resp, 3097 dflt_resc_num, dflt_resc_start); 3098 *p_resc_num = dflt_resc_num; 3099 *p_resc_start = dflt_resc_start; 3100 goto out; 3101 } 3102 3103 if ((*p_resc_num != dflt_resc_num || 3104 *p_resc_start != dflt_resc_start) && 3105 res_id != ECORE_SB) { 3106 DP_INFO(p_hwfn, 3107 "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n", 3108 res_id, ecore_hw_get_resc_name(res_id), *p_resc_num, 3109 *p_resc_start, dflt_resc_num, dflt_resc_start, 3110 drv_resc_alloc ? " - Applying default values" : ""); 3111 if (drv_resc_alloc) { 3112 *p_resc_num = dflt_resc_num; 3113 *p_resc_start = dflt_resc_start; 3114 } 3115 } 3116 out: 3117 /* PQs have to divide by 8 [that's the HW granularity]. 3118 * Reduce number so it would fit. 
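     * (Illustration with hypothetical values: Number 0x1a / Start 0x0b
     * would be reported and reduced to 0x18 / 0x08.)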
     */
    if ((res_id == ECORE_PQ) &&
        ((*p_resc_num % 8) || (*p_resc_start % 8))) {
        DP_INFO(p_hwfn,
                "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
                *p_resc_num, (*p_resc_num) & ~0x7,
                *p_resc_start, (*p_resc_start) & ~0x7);
        *p_resc_num &= ~0x7;
        *p_resc_start &= ~0x7;
    }

    return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
                                                   bool drv_resc_alloc)
{
    enum _ecore_status_t rc;
    u8 res_id;

    for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
        rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
        if (rc != ECORE_SUCCESS)
            return rc;
    }

    return ECORE_SUCCESS;
}

#define ECORE_RESC_ALLOC_LOCK_RETRY_CNT      10
#define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */

static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
                                              bool drv_resc_alloc)
{
    struct ecore_resc_unlock_params resc_unlock_params;
    struct ecore_resc_lock_params resc_lock_params;
    bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
    u8 res_id;
    enum _ecore_status_t rc;
#ifndef ASIC_ONLY
    u32 *resc_start = p_hwfn->hw_info.resc_start;
    u32 *resc_num = p_hwfn->hw_info.resc_num;
    /* For AH, an equal share of the ILT lines between the maximal number of
     * PFs is not enough for RoCE. This would be solved by the future
     * resource allocation scheme, but isn't currently present for
     * FPGA/emulation. For now we keep a number that is sufficient for RoCE
     * to work - the BB number of ILT lines divided by its max PFs number.
     */
    u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
#endif

    /* Setting the max values of the soft resources and the following
     * resources allocation queries should be atomic. Since several PFs can
     * run in parallel - a resource lock is needed.
     * If either the resource lock or resource set value commands are not
     * supported - skip the max values setting, release the lock if
     * needed, and proceed to the queries. Other failures, including a
     * failure to acquire the lock, will cause this function to fail.
     * Old drivers that don't acquire the lock can run in parallel, and
     * their allocation values won't be affected by the updated max values.
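     * With the retry parameters defined above (ECORE_RESC_ALLOC_LOCK_RETRY_CNT
     * attempts, ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US between them), a PF
     * waits roughly 10 x 10 msec = 100 msec at most for the lock.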
3180 */ 3181 OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params)); 3182 resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC; 3183 resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT; 3184 resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US; 3185 resc_lock_params.sleep_b4_retry = true; 3186 OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params)); 3187 resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC; 3188 3189 rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params); 3190 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3191 return rc; 3192 } else if (rc == ECORE_NOTIMPL) { 3193 DP_INFO(p_hwfn, 3194 "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 3195 } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) { 3196 DP_NOTICE(p_hwfn, false, 3197 "Failed to acquire the resource lock for the resource allocation commands\n"); 3198 return ECORE_BUSY; 3199 } else { 3200 rc = ecore_hw_set_soft_resc_size(p_hwfn); 3201 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3202 DP_NOTICE(p_hwfn, false, 3203 "Failed to set the max values of the soft resources\n"); 3204 goto unlock_and_exit; 3205 } else if (rc == ECORE_NOTIMPL) { 3206 DP_INFO(p_hwfn, 3207 "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 3208 rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3209 &resc_unlock_params); 3210 if (rc != ECORE_SUCCESS) 3211 DP_INFO(p_hwfn, 3212 "Failed to release the resource lock for the resource allocation commands\n"); 3213 } 3214 } 3215 3216 rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc); 3217 if (rc != ECORE_SUCCESS) 3218 goto unlock_and_exit; 3219 3220 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 3221 rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3222 &resc_unlock_params); 3223 if (rc != ECORE_SUCCESS) 3224 DP_INFO(p_hwfn, 3225 "Failed to release the resource lock for the resource allocation commands\n"); 3226 } 3227 3228 #ifndef ASIC_ONLY 3229 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3230 /* Reduced build contains less PQs */ 3231 if (!(p_hwfn->p_dev->b_is_emul_full)) { 3232 resc_num[ECORE_PQ] = 32; 3233 resc_start[ECORE_PQ] = resc_num[ECORE_PQ] * 3234 p_hwfn->enabled_func_idx; 3235 } 3236 3237 /* For AH emulation, since we have a possible maximal number of 3238 * 16 enabled PFs, in case there are not enough ILT lines - 3239 * allocate only first PF as RoCE and have all the other ETH 3240 * only with less ILT lines. 
3241 */ 3242 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full) 3243 resc_num[ECORE_ILT] = OSAL_MAX_T(u32, 3244 resc_num[ECORE_ILT], 3245 roce_min_ilt_lines); 3246 } 3247 3248 /* Correct the common ILT calculation if PF0 has more */ 3249 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && 3250 p_hwfn->p_dev->b_is_emul_full && 3251 p_hwfn->rel_pf_id && 3252 resc_num[ECORE_ILT] < roce_min_ilt_lines) 3253 resc_start[ECORE_ILT] += roce_min_ilt_lines - 3254 resc_num[ECORE_ILT]; 3255 #endif 3256 3257 /* Sanity for ILT */ 3258 if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 3259 (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 3260 DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n", 3261 RESC_START(p_hwfn, ECORE_ILT), 3262 RESC_END(p_hwfn, ECORE_ILT) - 1); 3263 return ECORE_INVAL; 3264 } 3265 3266 /* This will also learn the number of SBs from MFW */ 3267 if (ecore_int_igu_reset_cam(p_hwfn, p_hwfn->p_main_ptt)) 3268 return ECORE_INVAL; 3269 3270 ecore_hw_set_feat(p_hwfn); 3271 3272 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3273 "The numbers for each resource are:\n"); 3274 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) 3275 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n", 3276 ecore_hw_get_resc_name(res_id), 3277 RESC_NUM(p_hwfn, res_id), 3278 RESC_START(p_hwfn, res_id)); 3279 3280 return ECORE_SUCCESS; 3281 3282 unlock_and_exit: 3283 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) 3284 ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3285 &resc_unlock_params); 3286 return rc; 3287 } 3288 3289 static enum _ecore_status_t 3290 ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, 3291 struct ecore_ptt *p_ptt, 3292 struct ecore_hw_prepare_params *p_params) 3293 { 3294 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode; 3295 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; 3296 struct ecore_mcp_link_capabilities *p_caps; 3297 struct ecore_mcp_link_params *link; 3298 enum _ecore_status_t rc; 3299 3300 /* Read global nvm_cfg address */ 3301 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 3302 3303 /* Verify MCP has initialized it */ 3304 if (!nvm_cfg_addr) { 3305 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); 3306 if (p_params->b_relaxed_probe) 3307 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM; 3308 return ECORE_INVAL; 3309 } 3310 3311 /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ 3312 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 3313 3314 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3315 OFFSETOF(struct nvm_cfg1, glob) + 3316 OFFSETOF(struct nvm_cfg1_glob, core_cfg); 3317 3318 core_cfg = ecore_rd(p_hwfn, p_ptt, addr); 3319 3320 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 3321 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 3322 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: 3323 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G; 3324 break; 3325 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 3326 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G; 3327 break; 3328 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: 3329 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G; 3330 break; 3331 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: 3332 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F; 3333 break; 3334 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: 3335 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E; 3336 break; 3337 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: 3338 
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G; 3339 break; 3340 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: 3341 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G; 3342 break; 3343 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 3344 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G; 3345 break; 3346 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: 3347 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G; 3348 break; 3349 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 3350 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G; 3351 break; 3352 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: 3353 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G; 3354 break; 3355 default: 3356 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n", 3357 core_cfg); 3358 break; 3359 } 3360 3361 /* Read DCBX configuration */ 3362 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3363 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3364 dcbx_mode = ecore_rd(p_hwfn, p_ptt, 3365 port_cfg_addr + 3366 OFFSETOF(struct nvm_cfg1_port, generic_cont0)); 3367 dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK) 3368 >> NVM_CFG1_PORT_DCBX_MODE_OFFSET; 3369 switch (dcbx_mode) { 3370 case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC: 3371 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC; 3372 break; 3373 case NVM_CFG1_PORT_DCBX_MODE_CEE: 3374 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE; 3375 break; 3376 case NVM_CFG1_PORT_DCBX_MODE_IEEE: 3377 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE; 3378 break; 3379 default: 3380 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED; 3381 } 3382 3383 /* Read default link configuration */ 3384 link = &p_hwfn->mcp_info->link_input; 3385 p_caps = &p_hwfn->mcp_info->link_capabilities; 3386 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3387 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3388 link_temp = ecore_rd(p_hwfn, p_ptt, 3389 port_cfg_addr + 3390 OFFSETOF(struct nvm_cfg1_port, speed_cap_mask)); 3391 link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; 3392 link->speed.advertised_speeds = link_temp; 3393 p_caps->speed_capabilities = link->speed.advertised_speeds; 3394 3395 link_temp = ecore_rd(p_hwfn, p_ptt, 3396 port_cfg_addr + 3397 OFFSETOF(struct nvm_cfg1_port, link_settings)); 3398 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> 3399 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { 3400 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: 3401 link->speed.autoneg = true; 3402 break; 3403 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: 3404 link->speed.forced_speed = 1000; 3405 break; 3406 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: 3407 link->speed.forced_speed = 10000; 3408 break; 3409 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: 3410 link->speed.forced_speed = 25000; 3411 break; 3412 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: 3413 link->speed.forced_speed = 40000; 3414 break; 3415 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 3416 link->speed.forced_speed = 50000; 3417 break; 3418 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: 3419 link->speed.forced_speed = 100000; 3420 break; 3421 default: 3422 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", 3423 link_temp); 3424 } 3425 3426 p_caps->default_speed = link->speed.forced_speed; 3427 p_caps->default_speed_autoneg = link->speed.autoneg; 3428 3429 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; 3430 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; 3431 link->pause.autoneg = !!(link_temp & 3432 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); 3433 link->pause.forced_rx = !!(link_temp & 3434 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); 3435 
link->pause.forced_tx = !!(link_temp & 3436 NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); 3437 link->loopback_mode = 0; 3438 3439 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { 3440 link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr + 3441 OFFSETOF(struct nvm_cfg1_port, ext_phy)); 3442 link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; 3443 link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; 3444 p_caps->default_eee = ECORE_MCP_EEE_ENABLED; 3445 link->eee.enable = true; 3446 switch (link_temp) { 3447 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: 3448 p_caps->default_eee = ECORE_MCP_EEE_DISABLED; 3449 link->eee.enable = false; 3450 break; 3451 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: 3452 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; 3453 break; 3454 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: 3455 p_caps->eee_lpi_timer = 3456 EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; 3457 break; 3458 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: 3459 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; 3460 break; 3461 } 3462 link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; 3463 link->eee.tx_lpi_enable = link->eee.enable; 3464 if (link->eee.enable) 3465 link->eee.adv_caps = ECORE_EEE_1G_ADV | 3466 ECORE_EEE_10G_ADV; 3467 } else { 3468 p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED; 3469 } 3470 3471 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 3472 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", 3473 link->speed.forced_speed, link->speed.advertised_speeds, 3474 link->speed.autoneg, link->pause.autoneg, 3475 p_caps->default_eee, p_caps->eee_lpi_timer); 3476 3477 /* Read Multi-function information from shmem */ 3478 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3479 OFFSETOF(struct nvm_cfg1, glob) + 3480 OFFSETOF(struct nvm_cfg1_glob, generic_cont0); 3481 3482 generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr); 3483 3484 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 3485 NVM_CFG1_GLOB_MF_MODE_OFFSET; 3486 3487 switch (mf_mode) { 3488 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 3489 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; 3490 break; 3491 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 3492 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR; 3493 break; 3494 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 3495 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT; 3496 break; 3497 } 3498 DP_INFO(p_hwfn, "Multi function mode is %08x\n", 3499 p_hwfn->p_dev->mf_mode); 3500 3501 /* Read Multi-function information from shmem */ 3502 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3503 OFFSETOF(struct nvm_cfg1, glob) + 3504 OFFSETOF(struct nvm_cfg1_glob, device_capabilities); 3505 3506 device_capabilities = ecore_rd(p_hwfn, p_ptt, addr); 3507 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 3508 OSAL_SET_BIT(ECORE_DEV_CAP_ETH, 3509 &p_hwfn->hw_info.device_capabilities); 3510 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) 3511 OSAL_SET_BIT(ECORE_DEV_CAP_FCOE, 3512 &p_hwfn->hw_info.device_capabilities); 3513 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) 3514 OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI, 3515 &p_hwfn->hw_info.device_capabilities); 3516 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) 3517 OSAL_SET_BIT(ECORE_DEV_CAP_ROCE, 3518 &p_hwfn->hw_info.device_capabilities); 3519 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP) 3520 OSAL_SET_BIT(ECORE_DEV_CAP_IWARP, 3521 &p_hwfn->hw_info.device_capabilities); 3522 3523 rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 
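    /* In a relaxed probe, a failure to parse the function info from shmem
     * is not fatal; it is recorded in p_relaxed_res below so the caller can
     * decide how to proceed.
     */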
3524 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 3525 rc = ECORE_SUCCESS; 3526 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 3527 } 3528 3529 return rc; 3530 } 3531 3532 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn, 3533 struct ecore_ptt *p_ptt) 3534 { 3535 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 3536 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 3537 struct ecore_dev *p_dev = p_hwfn->p_dev; 3538 3539 num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; 3540 3541 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 3542 * in the other bits are selected. 3543 * Bits 1-15 are for functions 1-15, respectively, and their value is 3544 * '0' only for enabled functions (function 0 always exists and is 3545 * enabled). 3546 * In case of CMT in BB, only the "even" functions are enabled, and thus 3547 * the number of functions for both hwfns is learnt from the same bits. 3548 */ 3549 if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) { 3550 reg_function_hide = ecore_rd(p_hwfn, p_ptt, 3551 MISCS_REG_FUNCTION_HIDE_BB_K2); 3552 } else { /* E5 */ 3553 reg_function_hide = 0; 3554 ECORE_E5_MISSING_CODE; 3555 } 3556 3557 if (reg_function_hide & 0x1) { 3558 if (ECORE_IS_BB(p_dev)) { 3559 if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) { 3560 num_funcs = 0; 3561 eng_mask = 0xaaaa; 3562 } else { 3563 num_funcs = 1; 3564 eng_mask = 0x5554; 3565 } 3566 } else { 3567 num_funcs = 1; 3568 eng_mask = 0xfffe; 3569 } 3570 3571 /* Get the number of the enabled functions on the engine */ 3572 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; 3573 while (tmp) { 3574 if (tmp & 0x1) 3575 num_funcs++; 3576 tmp >>= 0x1; 3577 } 3578 3579 /* Get the PF index within the enabled functions */ 3580 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; 3581 tmp = reg_function_hide & eng_mask & low_pfs_mask; 3582 while (tmp) { 3583 if (tmp & 0x1) 3584 enabled_func_idx--; 3585 tmp >>= 0x1; 3586 } 3587 } 3588 3589 p_hwfn->num_funcs_on_engine = num_funcs; 3590 p_hwfn->enabled_func_idx = enabled_func_idx; 3591 3592 #ifndef ASIC_ONLY 3593 if (CHIP_REV_IS_FPGA(p_dev)) { 3594 DP_NOTICE(p_hwfn, false, 3595 "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n"); 3596 p_hwfn->num_funcs_on_engine = 4; 3597 } 3598 #endif 3599 3600 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3601 "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", 3602 p_hwfn->rel_pf_id, p_hwfn->abs_pf_id, 3603 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 3604 } 3605 3606 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn, 3607 struct ecore_ptt *p_ptt) 3608 { 3609 u32 port_mode; 3610 3611 #ifndef ASIC_ONLY 3612 /* Read the port mode */ 3613 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 3614 port_mode = 4; 3615 else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && 3616 (p_hwfn->p_dev->num_hwfns > 1)) 3617 /* In CMT on emulation, assume 1 port */ 3618 port_mode = 1; 3619 else 3620 #endif 3621 port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); 3622 3623 if (port_mode < 3) { 3624 p_hwfn->p_dev->num_ports_in_engines = 1; 3625 } else if (port_mode <= 5) { 3626 p_hwfn->p_dev->num_ports_in_engines = 2; 3627 } else { 3628 DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n", 3629 port_mode); 3630 3631 /* Default num_ports_in_engines to something */ 3632 p_hwfn->p_dev->num_ports_in_engines = 1; 3633 } 3634 } 3635 3636 static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn, 3637 struct ecore_ptt
*p_ptt) 3638 { 3639 u32 port; 3640 int i; 3641 3642 p_hwfn->p_dev->num_ports_in_engines = 0; 3643 3644 #ifndef ASIC_ONLY 3645 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 3646 port = ecore_rd(p_hwfn, p_ptt, 3647 MISCS_REG_ECO_RESERVED); 3648 switch ((port & 0xf000) >> 12) { 3649 case 1: 3650 p_hwfn->p_dev->num_ports_in_engines = 1; 3651 break; 3652 case 3: 3653 p_hwfn->p_dev->num_ports_in_engines = 2; 3654 break; 3655 case 0xf: 3656 p_hwfn->p_dev->num_ports_in_engines = 4; 3657 break; 3658 default: 3659 DP_NOTICE(p_hwfn, false, 3660 "Unknown port mode in ECO_RESERVED %08x\n", 3661 port); 3662 } 3663 } else 3664 #endif 3665 for (i = 0; i < MAX_NUM_PORTS_K2; i++) { 3666 port = ecore_rd(p_hwfn, p_ptt, 3667 CNIG_REG_NIG_PORT0_CONF_K2_E5 + (i * 4)); 3668 if (port & 1) 3669 p_hwfn->p_dev->num_ports_in_engines++; 3670 } 3671 } 3672 3673 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn, 3674 struct ecore_ptt *p_ptt) 3675 { 3676 if (ECORE_IS_BB(p_hwfn->p_dev)) 3677 ecore_hw_info_port_num_bb(p_hwfn, p_ptt); 3678 else 3679 ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt); 3680 } 3681 3682 static enum _ecore_status_t 3683 ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3684 enum ecore_pci_personality personality, 3685 struct ecore_hw_prepare_params *p_params) 3686 { 3687 bool drv_resc_alloc = p_params->drv_resc_alloc; 3688 enum _ecore_status_t rc; 3689 3690 /* Since all information is common, only the first hwfn should do this */ 3691 if (IS_LEAD_HWFN(p_hwfn)) { 3692 rc = ecore_iov_hw_info(p_hwfn); 3693 if (rc != ECORE_SUCCESS) { 3694 if (p_params->b_relaxed_probe) 3695 p_params->p_relaxed_res = 3696 ECORE_HW_PREPARE_BAD_IOV; 3697 else 3698 return rc; 3699 } 3700 } 3701 3702 /* TODO In get_hw_info, amongst others: 3703 * Get MCP FW revision and determine according to it the supported 3704 * features (e.g. DCB) 3705 * Get boot mode 3706 * ecore_get_pcie_width_speed, WOL capability.
3707 * Number of global CQ-s (for storage) 3708 */ 3709 ecore_hw_info_port_num(p_hwfn, p_ptt); 3710 3711 ecore_mcp_get_capabilities(p_hwfn, p_ptt); 3712 3713 #ifndef ASIC_ONLY 3714 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) { 3715 #endif 3716 rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params); 3717 if (rc != ECORE_SUCCESS) 3718 return rc; 3719 #ifndef ASIC_ONLY 3720 } 3721 #endif 3722 3723 rc = ecore_int_igu_read_cam(p_hwfn, p_ptt); 3724 if (rc != ECORE_SUCCESS) { 3725 if (p_params->b_relaxed_probe) 3726 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU; 3727 else 3728 return rc; 3729 } 3730 3731 #ifndef ASIC_ONLY 3732 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) { 3733 #endif 3734 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, 3735 p_hwfn->mcp_info->func_info.mac, ETH_ALEN); 3736 #ifndef ASIC_ONLY 3737 } else { 3738 static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6}; 3739 3740 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN); 3741 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id; 3742 } 3743 #endif 3744 3745 if (ecore_mcp_is_init(p_hwfn)) { 3746 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET) 3747 p_hwfn->hw_info.ovlan = 3748 p_hwfn->mcp_info->func_info.ovlan; 3749 3750 ecore_mcp_cmd_port_init(p_hwfn, p_ptt); 3751 } 3752 3753 if (personality != ECORE_PCI_DEFAULT) { 3754 p_hwfn->hw_info.personality = personality; 3755 } else if (ecore_mcp_is_init(p_hwfn)) { 3756 enum ecore_pci_personality protocol; 3757 3758 protocol = p_hwfn->mcp_info->func_info.protocol; 3759 p_hwfn->hw_info.personality = protocol; 3760 } 3761 3762 #ifndef ASIC_ONLY 3763 /* To overcome ILT lack for emulation, at least until we have 3764 * a definite answer from the system about it, allow only PF0 to be RoCE. 3765 */ 3766 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) { 3767 if (!p_hwfn->rel_pf_id) 3768 p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE; 3769 else 3770 p_hwfn->hw_info.personality = ECORE_PCI_ETH; 3771 } 3772 #endif 3773 3774 /* although in BB some constellations may support more than 4 tcs, 3775 * that can result in performance penalty in some cases. 4 3776 * represents a good tradeoff between performance and flexibility. 3777 */ 3778 p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; 3779 3780 /* start out with a single active tc. This can be increased either 3781 * by dcbx negotiation or by upper layer driver 3782 */ 3783 p_hwfn->hw_info.num_active_tc = 1; 3784 3785 ecore_get_num_funcs(p_hwfn, p_ptt); 3786 3787 if (ecore_mcp_is_init(p_hwfn)) 3788 p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; 3789 3790 /* In case of forcing the driver's default resource allocation, calling 3791 * ecore_hw_get_resc() should come after initializing the personality 3792 * and after getting the number of functions, since the calculation of 3793 * the resources/features depends on them. 3794 * This order is not harmful if not forcing.
3795 */ 3796 rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc); 3797 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 3798 rc = ECORE_SUCCESS; 3799 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 3800 } 3801 3802 return rc; 3803 } 3804 3805 static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev) 3806 { 3807 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3808 u16 device_id_mask; 3809 u32 tmp; 3810 3811 /* Read Vendor Id / Device Id */ 3812 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET, 3813 &p_dev->vendor_id); 3814 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET, 3815 &p_dev->device_id); 3816 3817 /* Determine type */ 3818 device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK; 3819 switch (device_id_mask) { 3820 case ECORE_DEV_ID_MASK_BB: 3821 p_dev->type = ECORE_DEV_TYPE_BB; 3822 break; 3823 case ECORE_DEV_ID_MASK_AH: 3824 p_dev->type = ECORE_DEV_TYPE_AH; 3825 break; 3826 default: 3827 DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n", 3828 p_dev->device_id); 3829 return ECORE_ABORTED; 3830 } 3831 3832 p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3833 MISCS_REG_CHIP_NUM); 3834 p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3835 MISCS_REG_CHIP_REV); 3836 3837 MASK_FIELD(CHIP_REV, p_dev->chip_rev); 3838 3839 /* Learn number of HW-functions */ 3840 tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3841 MISCS_REG_CMT_ENABLED_FOR_PAIR); 3842 3843 if (tmp & (1 << p_hwfn->rel_pf_id)) { 3844 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n"); 3845 p_dev->num_hwfns = 2; 3846 } else { 3847 p_dev->num_hwfns = 1; 3848 } 3849 3850 #ifndef ASIC_ONLY 3851 if (CHIP_REV_IS_EMUL(p_dev)) { 3852 /* For some reason we have problems with this register 3853 * in B0 emulation; Simply assume no CMT 3854 */ 3855 DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n"); 3856 p_dev->num_hwfns = 1; 3857 } 3858 #endif 3859 3860 p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3861 MISCS_REG_CHIP_TEST_REG) >> 4; 3862 MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id); 3863 p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3864 MISCS_REG_CHIP_METAL); 3865 MASK_FIELD(CHIP_METAL, p_dev->chip_metal); 3866 DP_INFO(p_dev->hwfns, 3867 "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 3868 ECORE_IS_BB(p_dev) ? 
"BB" : "AH", 3869 'A' + p_dev->chip_rev, (int)p_dev->chip_metal, 3870 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, 3871 p_dev->chip_metal); 3872 3873 if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) { 3874 DP_NOTICE(p_dev->hwfns, false, 3875 "The chip type/rev (BB A0) is not supported!\n"); 3876 return ECORE_ABORTED; 3877 } 3878 3879 #ifndef ASIC_ONLY 3880 if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev)) 3881 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3882 MISCS_REG_PLL_MAIN_CTRL_4, 0x1); 3883 3884 if (CHIP_REV_IS_EMUL(p_dev)) { 3885 tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3886 MISCS_REG_ECO_RESERVED); 3887 if (tmp & (1 << 29)) { 3888 DP_NOTICE(p_hwfn, false, "Emulation: Running on a FULL build\n"); 3889 p_dev->b_is_emul_full = true; 3890 } else { 3891 DP_NOTICE(p_hwfn, false, "Emulation: Running on a REDUCED build\n"); 3892 } 3893 } 3894 #endif 3895 3896 return ECORE_SUCCESS; 3897 } 3898 3899 void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev) 3900 { 3901 int j; 3902 3903 if (IS_VF(p_dev)) 3904 return; 3905 3906 for_each_hwfn(p_dev, j) { 3907 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 3908 3909 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n"); 3910 3911 p_hwfn->hw_init_done = false; 3912 p_hwfn->first_on_engine = false; 3913 3914 ecore_ptt_invalidate(p_hwfn); 3915 } 3916 } 3917 3918 void ecore_hw_hibernate_resume(struct ecore_dev *p_dev) 3919 { 3920 int j = 0; 3921 3922 if (IS_VF(p_dev)) 3923 return; 3924 3925 for_each_hwfn(p_dev, j) { 3926 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 3927 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 3928 3929 ecore_hw_hwfn_prepare(p_hwfn); 3930 3931 if (!p_ptt) 3932 DP_NOTICE(p_hwfn, true, "ptt acquire failed\n"); 3933 else { 3934 ecore_load_mcp_offsets(p_hwfn, p_ptt); 3935 ecore_ptt_release(p_hwfn, p_ptt); 3936 } 3937 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n"); 3938 } 3939 } 3940 3941 static enum _ecore_status_t ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, 3942 void OSAL_IOMEM *p_regview, 3943 void OSAL_IOMEM *p_doorbells, 3944 struct ecore_hw_prepare_params *p_params) 3945 { 3946 struct ecore_mdump_retain_data mdump_retain; 3947 struct ecore_dev *p_dev = p_hwfn->p_dev; 3948 struct ecore_mdump_info mdump_info; 3949 enum _ecore_status_t rc = ECORE_SUCCESS; 3950 3951 /* Split PCI bars evenly between hwfns */ 3952 p_hwfn->regview = p_regview; 3953 p_hwfn->doorbells = p_doorbells; 3954 3955 if (IS_VF(p_dev)) 3956 return ecore_vf_hw_prepare(p_hwfn); 3957 3958 /* Validate that chip access is feasible */ 3959 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 3960 DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n"); 3961 if (p_params->b_relaxed_probe) 3962 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME; 3963 return ECORE_INVAL; 3964 } 3965 3966 get_function_id(p_hwfn); 3967 3968 /* Allocate PTT pool */ 3969 rc = ecore_ptt_pool_alloc(p_hwfn); 3970 if (rc) { 3971 DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n"); 3972 if (p_params->b_relaxed_probe) 3973 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 3974 goto err0; 3975 } 3976 3977 /* Allocate the main PTT */ 3978 p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 3979 3980 /* First hwfn learns basic information, e.g., number of hwfns */ 3981 if (!p_hwfn->my_id) { 3982 rc = ecore_get_dev_info(p_dev); 3983 if (rc != ECORE_SUCCESS) { 3984 if (p_params->b_relaxed_probe) 3985 p_params->p_relaxed_res = 3986 ECORE_HW_PREPARE_FAILED_DEV; 3987 goto 
err1; 3988 } 3989 } 3990 3991 ecore_hw_hwfn_prepare(p_hwfn); 3992 3993 /* Initialize MCP structure */ 3994 rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 3995 if (rc) { 3996 DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n"); 3997 if (p_params->b_relaxed_probe) 3998 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 3999 goto err1; 4000 } 4001 4002 /* Read the device configuration information from the HW and SHMEM */ 4003 rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, 4004 p_params->personality, p_params); 4005 if (rc) { 4006 DP_NOTICE(p_hwfn, true, "Failed to get HW information\n"); 4007 goto err2; 4008 } 4009 4010 /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is 4011 * called, since among others it sets the ports number in an engine. 4012 */ 4013 if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) && 4014 !p_dev->recov_in_prog) { 4015 rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 4016 if (rc != ECORE_SUCCESS) 4017 DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n"); 4018 } 4019 4020 /* Check if mdump logs/data are present and update the epoch value */ 4021 if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) { 4022 rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt, 4023 &mdump_info); 4024 if (rc == ECORE_SUCCESS && mdump_info.num_of_logs) 4025 DP_NOTICE(p_hwfn, false, 4026 "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n"); 4027 4028 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt, 4029 &mdump_retain); 4030 if (rc == ECORE_SUCCESS && mdump_retain.valid) 4031 DP_NOTICE(p_hwfn, false, 4032 "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n", 4033 mdump_retain.epoch, mdump_retain.pf, 4034 mdump_retain.status); 4035 4036 ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, 4037 p_params->epoch); 4038 } 4039 4040 /* Allocate the init RT array and initialize the init-ops engine */ 4041 rc = ecore_init_alloc(p_hwfn); 4042 if (rc) { 4043 DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n"); 4044 if (p_params->b_relaxed_probe) 4045 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4046 goto err2; 4047 } 4048 4049 #ifndef ASIC_ONLY 4050 if (CHIP_REV_IS_FPGA(p_dev)) { 4051 DP_NOTICE(p_hwfn, false, 4052 "FPGA: workaround; Prevent DMAE parities\n"); 4053 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5, 4054 7); 4055 4056 DP_NOTICE(p_hwfn, false, 4057 "FPGA: workaround: Set VF bar0 size\n"); 4058 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4059 PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4); 4060 } 4061 #endif 4062 4063 return rc; 4064 err2: 4065 if (IS_LEAD_HWFN(p_hwfn)) 4066 ecore_iov_free_hw_info(p_dev); 4067 ecore_mcp_free(p_hwfn); 4068 err1: 4069 ecore_hw_hwfn_free(p_hwfn); 4070 err0: 4071 return rc; 4072 } 4073 4074 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, 4075 struct ecore_hw_prepare_params *p_params) 4076 { 4077 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4078 enum _ecore_status_t rc; 4079 4080 p_dev->chk_reg_fifo = p_params->chk_reg_fifo; 4081 p_dev->allow_mdump = p_params->allow_mdump; 4082 4083 if (p_params->b_relaxed_probe) 4084 p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; 4085 4086 /* Store the precompiled init data ptrs */ 4087 if (IS_PF(p_dev)) 4088 ecore_init_iro_array(p_dev); 4089 4090 /* Initialize the first hwfn - will learn number of hwfns */ 4091 rc = ecore_hw_prepare_single(p_hwfn, 4092 p_dev->regview, 4093 p_dev->doorbells, p_params); 4094 if (rc != ECORE_SUCCESS) 4095 return rc; 4096 4097 
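/* At this point the leading hwfn has learned the device-wide facts (number
 * of hwfns, personality). For CMT devices the single BAR pair is split
 * evenly below; for example, if ecore_hw_bar_size() were to report 512kB for
 * BAR0, the second hwfn's regview would start 256kB into the mapping (sizes
 * here are purely illustrative).
 */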
p_params->personality = p_hwfn->hw_info.personality; 4098 4099 /* initialize 2nd hwfn if necessary */ 4100 if (p_dev->num_hwfns > 1) { 4101 void OSAL_IOMEM *p_regview, *p_doorbell; 4102 u8 OSAL_IOMEM *addr; 4103 4104 /* adjust bar offset for second engine */ 4105 addr = (u8 OSAL_IOMEM *)p_dev->regview + 4106 ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2; 4107 p_regview = (void OSAL_IOMEM *)addr; 4108 4109 addr = (u8 OSAL_IOMEM *)p_dev->doorbells + 4110 ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2; 4111 p_doorbell = (void OSAL_IOMEM *)addr; 4112 4113 /* prepare second hw function */ 4114 rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, 4115 p_doorbell, p_params); 4116 4117 /* in case of error, need to free the previously 4118 * initialized hwfn 0. 4119 */ 4120 if (rc != ECORE_SUCCESS) { 4121 if (p_params->b_relaxed_probe) 4122 p_params->p_relaxed_res = 4123 ECORE_HW_PREPARE_FAILED_ENG2; 4124 4125 if (IS_PF(p_dev)) { 4126 ecore_init_free(p_hwfn); 4127 ecore_mcp_free(p_hwfn); 4128 ecore_hw_hwfn_free(p_hwfn); 4129 } else { 4130 DP_NOTICE(p_dev, true, "What do we need to free when VF hwfn1 init fails\n"); 4131 } 4132 return rc; 4133 } 4134 } 4135 4136 return rc; 4137 } 4138 4139 void ecore_hw_remove(struct ecore_dev *p_dev) 4140 { 4141 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4142 int i; 4143 4144 if (IS_PF(p_dev)) 4145 ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 4146 ECORE_OV_DRIVER_STATE_NOT_LOADED); 4147 4148 for_each_hwfn(p_dev, i) { 4149 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 4150 4151 if (IS_VF(p_dev)) { 4152 ecore_vf_pf_release(p_hwfn); 4153 continue; 4154 } 4155 4156 ecore_init_free(p_hwfn); 4157 ecore_hw_hwfn_free(p_hwfn); 4158 ecore_mcp_free(p_hwfn); 4159 4160 OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex); 4161 } 4162 4163 ecore_iov_free_hw_info(p_dev); 4164 } 4165 4166 static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev, 4167 struct ecore_chain *p_chain) 4168 { 4169 void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL; 4170 dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 4171 struct ecore_chain_next *p_next; 4172 u32 size, i; 4173 4174 if (!p_virt) 4175 return; 4176 4177 size = p_chain->elem_size * p_chain->usable_per_page; 4178 4179 for (i = 0; i < p_chain->page_cnt; i++) { 4180 if (!p_virt) 4181 break; 4182 4183 p_next = (struct ecore_chain_next *)((u8 *)p_virt + size); 4184 p_virt_next = p_next->next_virt; 4185 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 4186 4187 OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys, 4188 ECORE_CHAIN_PAGE_SIZE); 4189 4190 p_virt = p_virt_next; 4191 p_phys = p_phys_next; 4192 } 4193 } 4194 4195 static void ecore_chain_free_single(struct ecore_dev *p_dev, 4196 struct ecore_chain *p_chain) 4197 { 4198 if (!p_chain->p_virt_addr) 4199 return; 4200 4201 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr, 4202 p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE); 4203 } 4204 4205 static void ecore_chain_free_pbl(struct ecore_dev *p_dev, 4206 struct ecore_chain *p_chain) 4207 { 4208 void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; 4209 u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table; 4210 u32 page_cnt = p_chain->page_cnt, i, pbl_size; 4211 4212 if (!pp_virt_addr_tbl) 4213 return; 4214 4215 if (!p_pbl_virt) 4216 goto out; 4217 4218 for (i = 0; i < page_cnt; i++) { 4219 if (!pp_virt_addr_tbl[i]) 4220 break; 4221 4222 OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i], 4223 *(dma_addr_t *)p_pbl_virt, 4224 ECORE_CHAIN_PAGE_SIZE); 4225 4226 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4227 } 4228
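/* Each PBL entry is ECORE_CHAIN_PBL_ENTRY_SIZE bytes and holds the DMA
 * address of one chain page, so the table freed below spans
 * page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE bytes - unless it was supplied
 * externally (b_external_pbl), in which case it is left for its owner to
 * free and only the per-page buffers and the virtual-address table are
 * released here.
 */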
4229 pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4230 4231 if (!p_chain->b_external_pbl) { 4232 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table, 4233 p_chain->pbl_sp.p_phys_table, pbl_size); 4234 } 4235 out: 4236 OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl); 4237 p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL; 4238 } 4239 4240 void ecore_chain_free(struct ecore_dev *p_dev, 4241 struct ecore_chain *p_chain) 4242 { 4243 switch (p_chain->mode) { 4244 case ECORE_CHAIN_MODE_NEXT_PTR: 4245 ecore_chain_free_next_ptr(p_dev, p_chain); 4246 break; 4247 case ECORE_CHAIN_MODE_SINGLE: 4248 ecore_chain_free_single(p_dev, p_chain); 4249 break; 4250 case ECORE_CHAIN_MODE_PBL: 4251 ecore_chain_free_pbl(p_dev, p_chain); 4252 break; 4253 } 4254 } 4255 4256 static enum _ecore_status_t 4257 ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev, 4258 enum ecore_chain_cnt_type cnt_type, 4259 osal_size_t elem_size, u32 page_cnt) 4260 { 4261 u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 4262 4263 /* The actual chain size can be larger than the maximal possible value 4264 * after rounding up the requested number of elements to pages, and after 4265 * taking into account the unusable elements (next-ptr elements). 4266 * The size of a "u16" chain can be (U16_MAX + 1) since the chain 4267 * size/capacity fields are of a u32 type. 4268 */ 4269 if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 && 4270 chain_size > ((u32)ECORE_U16_MAX + 1)) || 4271 (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 && 4272 chain_size > ECORE_U32_MAX)) { 4273 DP_NOTICE(p_dev, true, 4274 "The actual chain size (0x%llx) is larger than the maximal possible value\n", 4275 (unsigned long long)chain_size); 4276 return ECORE_INVAL; 4277 } 4278 4279 return ECORE_SUCCESS; 4280 } 4281 4282 static enum _ecore_status_t 4283 ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4284 { 4285 void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL; 4286 dma_addr_t p_phys = 0; 4287 u32 i; 4288 4289 for (i = 0; i < p_chain->page_cnt; i++) { 4290 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4291 ECORE_CHAIN_PAGE_SIZE); 4292 if (!p_virt) { 4293 DP_NOTICE(p_dev, true, 4294 "Failed to allocate chain memory\n"); 4295 return ECORE_NOMEM; 4296 } 4297 4298 if (i == 0) { 4299 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4300 ecore_chain_reset(p_chain); 4301 } else { 4302 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4303 p_virt, p_phys); 4304 } 4305 4306 p_virt_prev = p_virt; 4307 } 4308 /* Last page's next element should point to the beginning of the 4309 * chain.
4310 */ 4311 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4312 p_chain->p_virt_addr, 4313 p_chain->p_phys_addr); 4314 4315 return ECORE_SUCCESS; 4316 } 4317 4318 static enum _ecore_status_t 4319 ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4320 { 4321 dma_addr_t p_phys = 0; 4322 void *p_virt = OSAL_NULL; 4323 4324 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); 4325 if (!p_virt) { 4326 DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n"); 4327 return ECORE_NOMEM; 4328 } 4329 4330 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4331 ecore_chain_reset(p_chain); 4332 4333 return ECORE_SUCCESS; 4334 } 4335 4336 static enum _ecore_status_t 4337 ecore_chain_alloc_pbl(struct ecore_dev *p_dev, 4338 struct ecore_chain *p_chain, 4339 struct ecore_chain_ext_pbl *ext_pbl) 4340 { 4341 void *p_virt = OSAL_NULL; 4342 u8 *p_pbl_virt = OSAL_NULL; 4343 void **pp_virt_addr_tbl = OSAL_NULL; 4344 dma_addr_t p_phys = 0, p_pbl_phys = 0; 4345 u32 page_cnt = p_chain->page_cnt, size, i; 4346 4347 size = page_cnt * sizeof(*pp_virt_addr_tbl); 4348 pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); 4349 if (!pp_virt_addr_tbl) { 4350 DP_NOTICE(p_dev, true, 4351 "Failed to allocate memory for the chain virtual addresses table\n"); 4352 return ECORE_NOMEM; 4353 } 4354 4355 /* The allocation of the PBL table is done with its full size, since it 4356 * is expected to be successive. 4357 * ecore_chain_init_pbl_mem() is called even in a case of an allocation 4358 * failure, since pp_virt_addr_tbl was previously allocated, and it 4359 * should be saved to allow its freeing during the error flow. 4360 */ 4361 size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4362 4363 if (ext_pbl == OSAL_NULL) { 4364 p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); 4365 } else { 4366 p_pbl_virt = ext_pbl->p_pbl_virt; 4367 p_pbl_phys = ext_pbl->p_pbl_phys; 4368 p_chain->b_external_pbl = true; 4369 } 4370 4371 ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, 4372 pp_virt_addr_tbl); 4373 if (!p_pbl_virt) { 4374 DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n"); 4375 return ECORE_NOMEM; 4376 } 4377 4378 for (i = 0; i < page_cnt; i++) { 4379 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4380 ECORE_CHAIN_PAGE_SIZE); 4381 if (!p_virt) { 4382 DP_NOTICE(p_dev, true, 4383 "Failed to allocate chain memory\n"); 4384 return ECORE_NOMEM; 4385 } 4386 4387 if (i == 0) { 4388 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4389 ecore_chain_reset(p_chain); 4390 } 4391 4392 /* Fill the PBL table with the physical address of the page */ 4393 *(dma_addr_t *)p_pbl_virt = p_phys; 4394 /* Keep the virtual address of the page */ 4395 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; 4396 4397 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4398 } 4399 4400 return ECORE_SUCCESS; 4401 } 4402 4403 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, 4404 enum ecore_chain_use_mode intended_use, 4405 enum ecore_chain_mode mode, 4406 enum ecore_chain_cnt_type cnt_type, 4407 u32 num_elems, osal_size_t elem_size, 4408 struct ecore_chain *p_chain, 4409 struct ecore_chain_ext_pbl *ext_pbl) 4410 { 4411 u32 page_cnt; 4412 enum _ecore_status_t rc = ECORE_SUCCESS; 4413 4414 if (mode == ECORE_CHAIN_MODE_SINGLE) 4415 page_cnt = 1; 4416 else 4417 page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 4418 4419 rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, 4420 page_cnt); 4421 if (rc) { 4422 DP_NOTICE(p_dev, true, 4423 "Cannot allocate a chain with the 
given arguments:\n" 4424 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 4425 intended_use, mode, cnt_type, num_elems, elem_size); 4426 return rc; 4427 } 4428 4429 ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use, 4430 mode, cnt_type, p_dev->dp_ctx); 4431 4432 switch (mode) { 4433 case ECORE_CHAIN_MODE_NEXT_PTR: 4434 rc = ecore_chain_alloc_next_ptr(p_dev, p_chain); 4435 break; 4436 case ECORE_CHAIN_MODE_SINGLE: 4437 rc = ecore_chain_alloc_single(p_dev, p_chain); 4438 break; 4439 case ECORE_CHAIN_MODE_PBL: 4440 rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl); 4441 break; 4442 } 4443 if (rc) 4444 goto nomem; 4445 4446 return ECORE_SUCCESS; 4447 4448 nomem: 4449 ecore_chain_free(p_dev, p_chain); 4450 return rc; 4451 } 4452 4453 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, 4454 u16 src_id, u16 *dst_id) 4455 { 4456 if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { 4457 u16 min, max; 4458 4459 min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE); 4460 max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 4461 DP_NOTICE(p_hwfn, true, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 4462 src_id, min, max); 4463 4464 return ECORE_INVAL; 4465 } 4466 4467 *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id; 4468 4469 return ECORE_SUCCESS; 4470 } 4471 4472 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, 4473 u8 src_id, u8 *dst_id) 4474 { 4475 if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { 4476 u8 min, max; 4477 4478 min = (u8)RESC_START(p_hwfn, ECORE_VPORT); 4479 max = min + RESC_NUM(p_hwfn, ECORE_VPORT); 4480 DP_NOTICE(p_hwfn, true, "vport id [%d] is not valid, available indices [%d - %d]\n", 4481 src_id, min, max); 4482 4483 return ECORE_INVAL; 4484 } 4485 4486 *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id; 4487 4488 return ECORE_SUCCESS; 4489 } 4490 4491 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, 4492 u8 src_id, u8 *dst_id) 4493 { 4494 if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) { 4495 u8 min, max; 4496 4497 min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG); 4498 max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG); 4499 DP_NOTICE(p_hwfn, true, "rss_eng id [%d] is not valid, available indices [%d - %d]\n", 4500 src_id, min, max); 4501 4502 return ECORE_INVAL; 4503 } 4504 4505 *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id; 4506 4507 return ECORE_SUCCESS; 4508 } 4509 4510 static enum _ecore_status_t 4511 ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4512 struct ecore_ptt *p_ptt, u32 high, u32 low, 4513 u32 *p_entry_num) 4514 { 4515 u32 en; 4516 int i; 4517 4518 /* Find a free entry and utilize it */ 4519 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4520 en = ecore_rd(p_hwfn, p_ptt, 4521 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4522 i * sizeof(u32)); 4523 if (en) 4524 continue; 4525 ecore_wr(p_hwfn, p_ptt, 4526 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4527 2 * i * sizeof(u32), low); 4528 ecore_wr(p_hwfn, p_ptt, 4529 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4530 (2 * i + 1) * sizeof(u32), high); 4531 ecore_wr(p_hwfn, p_ptt, 4532 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4533 i * sizeof(u32), 0); 4534 ecore_wr(p_hwfn, p_ptt, 4535 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4536 i * sizeof(u32), 0); 4537 ecore_wr(p_hwfn, p_ptt, 4538 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4539 i * sizeof(u32), 1); 4540 break; 4541 } 4542 4543 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4544 return ECORE_NORESOURCES; 4545 4546 *p_entry_num = i; 4547 4548 return ECORE_SUCCESS; 4549 } 4550 4551 static enum 
_ecore_status_t 4552 ecore_llh_add_mac_filter_e5(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 4553 u32 high, u32 low, u32 *p_entry_num) 4554 { 4555 ECORE_E5_MISSING_CODE; 4556 4557 return ECORE_NOTIMPL; 4558 } 4559 4560 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn, 4561 struct ecore_ptt *p_ptt, u8 *p_filter) 4562 { 4563 u32 high, low, entry_num; 4564 enum _ecore_status_t rc; 4565 4566 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4567 return ECORE_SUCCESS; 4568 4569 high = p_filter[1] | (p_filter[0] << 8); 4570 low = p_filter[5] | (p_filter[4] << 8) | 4571 (p_filter[3] << 16) | (p_filter[2] << 24); 4572 4573 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4574 rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low, 4575 &entry_num); 4576 else /* E5 */ 4577 rc = ecore_llh_add_mac_filter_e5(p_hwfn, p_ptt, high, low, 4578 &entry_num); 4579 if (rc != ECORE_SUCCESS) { 4580 DP_NOTICE(p_hwfn, false, 4581 "Failed to find an empty LLH filter to utilize\n"); 4582 return rc; 4583 } 4584 4585 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4586 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at LLH entry %d\n", 4587 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4588 p_filter[4], p_filter[5], entry_num); 4589 4590 return ECORE_SUCCESS; 4591 } 4592 4593 static enum _ecore_status_t 4594 ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4595 struct ecore_ptt *p_ptt, u32 high, u32 low, 4596 u32 *p_entry_num) 4597 { 4598 int i; 4599 4600 /* Find the entry and clean it */ 4601 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4602 if (ecore_rd(p_hwfn, p_ptt, 4603 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4604 2 * i * sizeof(u32)) != low) 4605 continue; 4606 if (ecore_rd(p_hwfn, p_ptt, 4607 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4608 (2 * i + 1) * sizeof(u32)) != high) 4609 continue; 4610 4611 ecore_wr(p_hwfn, p_ptt, 4612 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 4613 ecore_wr(p_hwfn, p_ptt, 4614 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4615 2 * i * sizeof(u32), 0); 4616 ecore_wr(p_hwfn, p_ptt, 4617 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4618 (2 * i + 1) * sizeof(u32), 0); 4619 break; 4620 } 4621 4622 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4623 return ECORE_INVAL; 4624 4625 *p_entry_num = i; 4626 4627 return ECORE_SUCCESS; 4628 } 4629 4630 static enum _ecore_status_t 4631 ecore_llh_remove_mac_filter_e5(struct ecore_hwfn *p_hwfn, 4632 struct ecore_ptt *p_ptt, u32 high, u32 low, 4633 u32 *p_entry_num) 4634 { 4635 ECORE_E5_MISSING_CODE; 4636 4637 return ECORE_NOTIMPL; 4638 } 4639 4640 void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn, 4641 struct ecore_ptt *p_ptt, u8 *p_filter) 4642 { 4643 u32 high, low, entry_num; 4644 enum _ecore_status_t rc; 4645 4646 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4647 return; 4648 4649 high = p_filter[1] | (p_filter[0] << 8); 4650 low = p_filter[5] | (p_filter[4] << 8) | 4651 (p_filter[3] << 16) | (p_filter[2] << 24); 4652 4653 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4654 rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high, 4655 low, &entry_num); 4656 else /* E5 */ 4657 rc = ecore_llh_remove_mac_filter_e5(p_hwfn, p_ptt, high, low, 4658 &entry_num); 4659 if (rc != ECORE_SUCCESS) { 4660 DP_NOTICE(p_hwfn, false, 4661 "Tried to remove a non-configured filter [MAC %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx]\n", 4662 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4663 p_filter[4], p_filter[5]); 4664 return; 4665 } 4666 4667 DP_VERBOSE(p_hwfn, 
ECORE_MSG_HW, 4668 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from LLH entry %d\n", 4669 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4670 p_filter[4], p_filter[5], entry_num); 4671 } 4672 4673 static enum _ecore_status_t 4674 ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4675 struct ecore_ptt *p_ptt, 4676 enum ecore_llh_port_filter_type_t type, 4677 u32 high, u32 low, u32 *p_entry_num) 4678 { 4679 u32 en; 4680 int i; 4681 4682 /* Find a free entry and utilize it */ 4683 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4684 en = ecore_rd(p_hwfn, p_ptt, 4685 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4686 i * sizeof(u32)); 4687 if (en) 4688 continue; 4689 ecore_wr(p_hwfn, p_ptt, 4690 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4691 2 * i * sizeof(u32), low); 4692 ecore_wr(p_hwfn, p_ptt, 4693 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4694 (2 * i + 1) * sizeof(u32), high); 4695 ecore_wr(p_hwfn, p_ptt, 4696 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4697 i * sizeof(u32), 1); 4698 ecore_wr(p_hwfn, p_ptt, 4699 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4700 i * sizeof(u32), 1 << type); 4701 ecore_wr(p_hwfn, p_ptt, 4702 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1); 4703 break; 4704 } 4705 4706 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4707 return ECORE_NORESOURCES; 4708 4709 *p_entry_num = i; 4710 4711 return ECORE_SUCCESS; 4712 } 4713 4714 static enum _ecore_status_t 4715 ecore_llh_add_protocol_filter_e5(struct ecore_hwfn *p_hwfn, 4716 struct ecore_ptt *p_ptt, 4717 enum ecore_llh_port_filter_type_t type, 4718 u32 high, u32 low, u32 *p_entry_num) 4719 { 4720 ECORE_E5_MISSING_CODE; 4721 4722 return ECORE_NOTIMPL; 4723 } 4724 4725 enum _ecore_status_t 4726 ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn, 4727 struct ecore_ptt *p_ptt, 4728 u16 source_port_or_eth_type, 4729 u16 dest_port, 4730 enum ecore_llh_port_filter_type_t type) 4731 { 4732 u32 high, low, entry_num; 4733 enum _ecore_status_t rc; 4734 4735 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4736 return ECORE_SUCCESS; 4737 4738 high = 0; 4739 low = 0; 4740 switch (type) { 4741 case ECORE_LLH_FILTER_ETHERTYPE: 4742 high = source_port_or_eth_type; 4743 break; 4744 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4745 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4746 low = source_port_or_eth_type << 16; 4747 break; 4748 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4749 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4750 low = dest_port; 4751 break; 4752 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4753 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4754 low = (source_port_or_eth_type << 16) | dest_port; 4755 break; 4756 default: 4757 DP_NOTICE(p_hwfn, true, 4758 "Non valid LLH protocol filter type %d\n", type); 4759 return ECORE_INVAL; 4760 } 4761 4762 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4763 rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 4764 high, low, &entry_num); 4765 else /* E5 */ 4766 rc = ecore_llh_add_protocol_filter_e5(p_hwfn, p_ptt, type, high, 4767 low, &entry_num); 4768 if (rc != ECORE_SUCCESS) { 4769 DP_NOTICE(p_hwfn, false, 4770 "Failed to find an empty LLH filter to utilize\n"); 4771 return rc; 4772 } 4773 4774 switch (type) { 4775 case ECORE_LLH_FILTER_ETHERTYPE: 4776 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4777 "ETH type %x is added at LLH entry %d\n", 4778 source_port_or_eth_type, entry_num); 4779 break; 4780 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4781 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4782 "TCP src port %x is added at LLH entry %d\n", 4783 source_port_or_eth_type, entry_num); 4784 
break; 4785 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4786 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4787 "UDP src port %x is added at LLH entry %d\n", 4788 source_port_or_eth_type, entry_num); 4789 break; 4790 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4791 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4792 "TCP dst port %x is added at LLH entry %d\n", 4793 dest_port, entry_num); 4794 break; 4795 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4796 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4797 "UDP dst port %x is added at LLH entry %d\n", 4798 dest_port, entry_num); 4799 break; 4800 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4801 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4802 "TCP src/dst ports %x/%x are added at LLH entry %d\n", 4803 source_port_or_eth_type, dest_port, entry_num); 4804 break; 4805 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4806 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4807 "UDP src/dst ports %x/%x are added at LLH entry %d\n", 4808 source_port_or_eth_type, dest_port, entry_num); 4809 break; 4810 } 4811 4812 return ECORE_SUCCESS; 4813 } 4814 4815 static enum _ecore_status_t 4816 ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4817 struct ecore_ptt *p_ptt, 4818 enum ecore_llh_port_filter_type_t type, 4819 u32 high, u32 low, u32 *p_entry_num) 4820 { 4821 int i; 4822 4823 /* Find the entry and clean it */ 4824 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4825 if (!ecore_rd(p_hwfn, p_ptt, 4826 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4827 i * sizeof(u32))) 4828 continue; 4829 if (!ecore_rd(p_hwfn, p_ptt, 4830 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4831 i * sizeof(u32))) 4832 continue; 4833 if (!(ecore_rd(p_hwfn, p_ptt, 4834 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4835 i * sizeof(u32)) & (1 << type))) 4836 continue; 4837 if (ecore_rd(p_hwfn, p_ptt, 4838 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4839 2 * i * sizeof(u32)) != low) 4840 continue; 4841 if (ecore_rd(p_hwfn, p_ptt, 4842 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4843 (2 * i + 1) * sizeof(u32)) != high) 4844 continue; 4845 4846 ecore_wr(p_hwfn, p_ptt, 4847 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 4848 ecore_wr(p_hwfn, p_ptt, 4849 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4850 i * sizeof(u32), 0); 4851 ecore_wr(p_hwfn, p_ptt, 4852 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4853 i * sizeof(u32), 0); 4854 ecore_wr(p_hwfn, p_ptt, 4855 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4856 2 * i * sizeof(u32), 0); 4857 ecore_wr(p_hwfn, p_ptt, 4858 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4859 (2 * i + 1) * sizeof(u32), 0); 4860 break; 4861 } 4862 4863 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4864 return ECORE_INVAL; 4865 4866 *p_entry_num = i; 4867 4868 return ECORE_SUCCESS; 4869 } 4870 4871 static enum _ecore_status_t 4872 ecore_llh_remove_protocol_filter_e5(struct ecore_hwfn *p_hwfn, 4873 struct ecore_ptt *p_ptt, 4874 enum ecore_llh_port_filter_type_t type, 4875 u32 high, u32 low, u32 *p_entry_num) 4876 { 4877 ECORE_E5_MISSING_CODE; 4878 4879 return ECORE_NOTIMPL; 4880 } 4881 4882 void 4883 ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn, 4884 struct ecore_ptt *p_ptt, 4885 u16 source_port_or_eth_type, 4886 u16 dest_port, 4887 enum ecore_llh_port_filter_type_t type) 4888 { 4889 u32 high, low, entry_num; 4890 enum _ecore_status_t rc; 4891 4892 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4893 return; 4894 4895 high = 0; 4896 low = 0; 4897 switch (type) { 4898 case ECORE_LLH_FILTER_ETHERTYPE: 4899 high = source_port_or_eth_type; 4900 break; 4901 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4902 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4903 low = source_port_or_eth_type 
<< 16; 4904 break; 4905 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4906 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4907 low = dest_port; 4908 break; 4909 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4910 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4911 low = (source_port_or_eth_type << 16) | dest_port; 4912 break; 4913 default: 4914 DP_NOTICE(p_hwfn, true, 4915 "Non valid LLH protocol filter type %d\n", type); 4916 return; 4917 } 4918 4919 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4920 rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 4921 high, low, 4922 &entry_num); 4923 else /* E5 */ 4924 rc = ecore_llh_remove_protocol_filter_e5(p_hwfn, p_ptt, type, 4925 high, low, &entry_num); 4926 if (rc != ECORE_SUCCESS) { 4927 DP_NOTICE(p_hwfn, false, 4928 "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n", 4929 type, source_port_or_eth_type, dest_port); 4930 return; 4931 } 4932 4933 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4934 "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from LLH entry %d\n", 4935 type, source_port_or_eth_type, dest_port, entry_num); 4936 } 4937 4938 static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn, 4939 struct ecore_ptt *p_ptt) 4940 { 4941 int i; 4942 4943 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4944 ecore_wr(p_hwfn, p_ptt, 4945 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4946 i * sizeof(u32), 0); 4947 ecore_wr(p_hwfn, p_ptt, 4948 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4949 2 * i * sizeof(u32), 0); 4950 ecore_wr(p_hwfn, p_ptt, 4951 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4952 (2 * i + 1) * sizeof(u32), 0); 4953 } 4954 } 4955 4956 static void ecore_llh_clear_all_filters_e5(struct ecore_hwfn *p_hwfn, 4957 struct ecore_ptt *p_ptt) 4958 { 4959 ECORE_E5_MISSING_CODE; 4960 } 4961 4962 void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn, 4963 struct ecore_ptt *p_ptt) 4964 { 4965 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4966 return; 4967 4968 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4969 ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt); 4970 else /* E5 */ 4971 ecore_llh_clear_all_filters_e5(p_hwfn, p_ptt); 4972 } 4973 4974 enum _ecore_status_t 4975 ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn, 4976 struct ecore_ptt *p_ptt) 4977 { 4978 if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) { 4979 ecore_wr(p_hwfn, p_ptt, 4980 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 4981 1 << p_hwfn->abs_pf_id / 2); 4982 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0); 4983 return ECORE_SUCCESS; 4984 } else { 4985 DP_NOTICE(p_hwfn, false, 4986 "This function can't be set as default\n"); 4987 return ECORE_INVAL; 4988 } 4989 } 4990 4991 static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn, 4992 struct ecore_ptt *p_ptt, 4993 u32 hw_addr, void *p_eth_qzone, 4994 osal_size_t eth_qzone_size, 4995 u8 timeset) 4996 { 4997 struct coalescing_timeset *p_coal_timeset; 4998 4999 if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) { 5000 DP_NOTICE(p_hwfn, true, 5001 "Coalescing configuration not enabled\n"); 5002 return ECORE_INVAL; 5003 } 5004 5005 p_coal_timeset = p_eth_qzone; 5006 OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size); 5007 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); 5008 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); 5009 ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); 5010 5011 return ECORE_SUCCESS; 5012 } 5013 5014 enum _ecore_status_t 
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, 5015 u16 rx_coal, u16 tx_coal, 5016 void *p_handle) 5017 { 5018 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 5019 enum _ecore_status_t rc = ECORE_SUCCESS; 5020 struct ecore_ptt *p_ptt; 5021 5022 /* TODO - Configuring a single queue's coalescing but 5023 * claiming all queues are abiding same configuration 5024 * for PF and VF both. 5025 */ 5026 5027 #ifdef CONFIG_ECORE_SRIOV 5028 if (IS_VF(p_hwfn->p_dev)) 5029 return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal, 5030 tx_coal, p_cid); 5031 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 5032 5033 p_ptt = ecore_ptt_acquire(p_hwfn); 5034 if (!p_ptt) 5035 return ECORE_AGAIN; 5036 5037 if (rx_coal) { 5038 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 5039 if (rc) 5040 goto out; 5041 p_hwfn->p_dev->rx_coalesce_usecs = rx_coal; 5042 } 5043 5044 if (tx_coal) { 5045 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); 5046 if (rc) 5047 goto out; 5048 p_hwfn->p_dev->tx_coalesce_usecs = tx_coal; 5049 } 5050 out: 5051 ecore_ptt_release(p_hwfn, p_ptt); 5052 5053 return rc; 5054 } 5055 5056 enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, 5057 struct ecore_ptt *p_ptt, 5058 u16 coalesce, 5059 struct ecore_queue_cid *p_cid) 5060 { 5061 struct ustorm_eth_queue_zone eth_qzone; 5062 u8 timeset, timer_res; 5063 u32 address; 5064 enum _ecore_status_t rc; 5065 5066 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5067 if (coalesce <= 0x7F) 5068 timer_res = 0; 5069 else if (coalesce <= 0xFF) 5070 timer_res = 1; 5071 else if (coalesce <= 0x1FF) 5072 timer_res = 2; 5073 else { 5074 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5075 return ECORE_INVAL; 5076 } 5077 timeset = (u8)(coalesce >> timer_res); 5078 5079 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5080 p_cid->sb_igu_id, false); 5081 if (rc != ECORE_SUCCESS) 5082 goto out; 5083 5084 address = BAR0_MAP_REG_USDM_RAM + 5085 USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5086 5087 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 5088 sizeof(struct ustorm_eth_queue_zone), timeset); 5089 if (rc != ECORE_SUCCESS) 5090 goto out; 5091 5092 out: 5093 return rc; 5094 } 5095 5096 enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, 5097 struct ecore_ptt *p_ptt, 5098 u16 coalesce, 5099 struct ecore_queue_cid *p_cid) 5100 { 5101 struct xstorm_eth_queue_zone eth_qzone; 5102 u8 timeset, timer_res; 5103 u32 address; 5104 enum _ecore_status_t rc; 5105 5106 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5107 if (coalesce <= 0x7F) 5108 timer_res = 0; 5109 else if (coalesce <= 0xFF) 5110 timer_res = 1; 5111 else if (coalesce <= 0x1FF) 5112 timer_res = 2; 5113 else { 5114 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5115 return ECORE_INVAL; 5116 } 5117 timeset = (u8)(coalesce >> timer_res); 5118 5119 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5120 p_cid->sb_igu_id, true); 5121 if (rc != ECORE_SUCCESS) 5122 goto out; 5123 5124 address = BAR0_MAP_REG_XSDM_RAM + 5125 XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5126 5127 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 5128 sizeof(struct xstorm_eth_queue_zone), timeset); 5129 out: 5130 return rc; 5131 } 5132 5133 /* Calculate final WFQ values for all vports and configure it. 
5134 * After this configuration each vport must have 5135 * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT 5136 */ 5137 static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5138 struct ecore_ptt *p_ptt, 5139 u32 min_pf_rate) 5140 { 5141 struct init_qm_vport_params *vport_params; 5142 int i; 5143 5144 vport_params = p_hwfn->qm_info.qm_vport_params; 5145 5146 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5147 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5148 5149 vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) / 5150 min_pf_rate; 5151 ecore_init_vport_wfq(p_hwfn, p_ptt, 5152 vport_params[i].first_tx_pq_id, 5153 vport_params[i].vport_wfq); 5154 } 5155 } 5156 5157 static void 5158 ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate) 5159 5160 { 5161 int i; 5162 5163 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) 5164 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; 5165 } 5166 5167 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5168 struct ecore_ptt *p_ptt, 5169 u32 min_pf_rate) 5170 { 5171 struct init_qm_vport_params *vport_params; 5172 int i; 5173 5174 vport_params = p_hwfn->qm_info.qm_vport_params; 5175 5176 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5177 ecore_init_wfq_default_param(p_hwfn, min_pf_rate); 5178 ecore_init_vport_wfq(p_hwfn, p_ptt, 5179 vport_params[i].first_tx_pq_id, 5180 vport_params[i].vport_wfq); 5181 } 5182 } 5183 5184 /* This function performs several validations for WFQ 5185 * configuration and required min rate for a given vport 5186 * 1. req_rate must be greater than one percent of min_pf_rate. 5187 * 2. req_rate should not cause other vports [not configured for WFQ explicitly] 5188 * rates to get less than one percent of min_pf_rate. 5189 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 
5190 */ 5191 static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn, 5192 u16 vport_id, u32 req_rate, 5193 u32 min_pf_rate) 5194 { 5195 u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 5196 int non_requested_count = 0, req_count = 0, i, num_vports; 5197 5198 num_vports = p_hwfn->qm_info.num_vports; 5199 5200 /* Accounting for the vports which are configured for WFQ explicitly */ 5201 for (i = 0; i < num_vports; i++) { 5202 u32 tmp_speed; 5203 5204 if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) { 5205 req_count++; 5206 tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5207 total_req_min_rate += tmp_speed; 5208 } 5209 } 5210 5211 /* Include current vport data as well */ 5212 req_count++; 5213 total_req_min_rate += req_rate; 5214 non_requested_count = num_vports - req_count; 5215 5216 /* validate possible error cases */ 5217 if (req_rate > min_pf_rate) { 5218 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5219 "Vport [%d] - Requested rate[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5220 vport_id, req_rate, min_pf_rate); 5221 return ECORE_INVAL; 5222 } 5223 5224 if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) { 5225 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5226 "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5227 vport_id, req_rate, min_pf_rate); 5228 return ECORE_INVAL; 5229 } 5230 5231 /* TBD - for number of vports greater than 100 */ 5232 if (num_vports > ECORE_WFQ_UNIT) { 5233 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5234 "Number of vports is greater than %d\n", 5235 ECORE_WFQ_UNIT); 5236 return ECORE_INVAL; 5237 } 5238 5239 if (total_req_min_rate > min_pf_rate) { 5240 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5241 "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5242 total_req_min_rate, min_pf_rate); 5243 return ECORE_INVAL; 5244 } 5245 5246 /* Data left for non requested vports */ 5247 total_left_rate = min_pf_rate - total_req_min_rate; 5248 left_rate_per_vp = total_left_rate / non_requested_count; 5249 5250 /* validate if non requested get < 1% of min bw */ 5251 if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) { 5252 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5253 "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5254 left_rate_per_vp, min_pf_rate); 5255 return ECORE_INVAL; 5256 } 5257 5258 /* now req_rate for given vport passes all scenarios. 5259 * assign final wfq rates to all vports. 
5260 */ 5261 p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; 5262 p_hwfn->qm_info.wfq_data[vport_id].configured = true; 5263 5264 for (i = 0; i < num_vports; i++) { 5265 if (p_hwfn->qm_info.wfq_data[i].configured) 5266 continue; 5267 5268 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; 5269 } 5270 5271 return ECORE_SUCCESS; 5272 } 5273 5274 static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn, 5275 struct ecore_ptt *p_ptt, 5276 u16 vp_id, u32 rate) 5277 { 5278 struct ecore_mcp_link_state *p_link; 5279 int rc = ECORE_SUCCESS; 5280 5281 p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output; 5282 5283 if (!p_link->min_pf_rate) { 5284 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; 5285 p_hwfn->qm_info.wfq_data[vp_id].configured = true; 5286 return rc; 5287 } 5288 5289 rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); 5290 5291 if (rc == ECORE_SUCCESS) 5292 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, 5293 p_link->min_pf_rate); 5294 else 5295 DP_NOTICE(p_hwfn, false, 5296 "Validation failed while configuring min rate\n"); 5297 5298 return rc; 5299 } 5300 5301 static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn, 5302 struct ecore_ptt *p_ptt, 5303 u32 min_pf_rate) 5304 { 5305 bool use_wfq = false; 5306 int rc = ECORE_SUCCESS; 5307 u16 i; 5308 5309 /* Validate all pre configured vports for wfq */ 5310 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5311 u32 rate; 5312 5313 if (!p_hwfn->qm_info.wfq_data[i].configured) 5314 continue; 5315 5316 rate = p_hwfn->qm_info.wfq_data[i].min_speed; 5317 use_wfq = true; 5318 5319 rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate); 5320 if (rc != ECORE_SUCCESS) { 5321 DP_NOTICE(p_hwfn, false, 5322 "WFQ validation failed while configuring min rate\n"); 5323 break; 5324 } 5325 } 5326 5327 if (rc == ECORE_SUCCESS && use_wfq) 5328 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5329 else 5330 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5331 5332 return rc; 5333 } 5334 5335 /* Main API for ecore clients to configure vport min rate. 5336 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] 5337 * rate - Speed in Mbps needs to be assigned to a given vport. 
5338 */ 5339 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate) 5340 { 5341 int i, rc = ECORE_INVAL; 5342 5343 /* TBD - for multiple hardware functions - that is 100 gig */ 5344 if (p_dev->num_hwfns > 1) { 5345 DP_NOTICE(p_dev, false, 5346 "WFQ configuration is not supported for this device\n"); 5347 return rc; 5348 } 5349 5350 for_each_hwfn(p_dev, i) { 5351 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5352 struct ecore_ptt *p_ptt; 5353 5354 p_ptt = ecore_ptt_acquire(p_hwfn); 5355 if (!p_ptt) 5356 return ECORE_TIMEOUT; 5357 5358 rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); 5359 5360 if (rc != ECORE_SUCCESS) { 5361 ecore_ptt_release(p_hwfn, p_ptt); 5362 return rc; 5363 } 5364 5365 ecore_ptt_release(p_hwfn, p_ptt); 5366 } 5367 5368 return rc; 5369 } 5370 5371 /* API to configure WFQ from mcp link change */ 5372 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, 5373 struct ecore_ptt *p_ptt, 5374 u32 min_pf_rate) 5375 { 5376 int i; 5377 5378 /* TBD - for multiple hardware functions - that is 100 gig */ 5379 if (p_dev->num_hwfns > 1) { 5380 DP_VERBOSE(p_dev, ECORE_MSG_LINK, 5381 "WFQ configuration is not supported for this device\n"); 5382 return; 5383 } 5384 5385 for_each_hwfn(p_dev, i) { 5386 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5387 5388 __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, 5389 min_pf_rate); 5390 } 5391 } 5392 5393 int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, 5394 struct ecore_ptt *p_ptt, 5395 struct ecore_mcp_link_state *p_link, 5396 u8 max_bw) 5397 { 5398 int rc = ECORE_SUCCESS; 5399 5400 p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; 5401 5402 if (!p_link->line_speed && (max_bw != 100)) 5403 return rc; 5404 5405 p_link->speed = (p_link->line_speed * max_bw) / 100; 5406 p_hwfn->qm_info.pf_rl = p_link->speed; 5407 5408 /* Since the limiter also affects Tx-switched traffic, we don't want it 5409 * to limit such traffic in case there's no actual limit. 5410 * In that case, set limit to imaginary high boundary. 
int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 max_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			      p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
{
	int i, rc = ECORE_INVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
							p_link, max_bw);

		ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 min_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
{
	int i, rc = ECORE_INVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
							p_link, min_bw);
		if (rc != ECORE_SUCCESS) {
			ecore_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
								     p_ptt,
								     min_rate);
		}

		ecore_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);

	OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
		    sizeof(*p_hwfn->qm_info.wfq_data) *
		    p_hwfn->qm_info.num_vports);
}
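#if 0
/* Illustrative usage sketch only (not compiled, hypothetical values):
 * guarantee a PF 30% of the line rate and cap it at 70%, using the two
 * APIs above. On a 25000 Mbps link the arithmetic in the helpers yields
 * min_pf_rate = 25000 * 30 / 100 = 7500 Mbps and a rate limit of
 * 25000 * 70 / 100 = 17500 Mbps.
 */
static int example_set_pf_bandwidth_limits(struct ecore_dev *p_dev)
{
	int rc;

	rc = ecore_configure_pf_min_bandwidth(p_dev, 30 /* percent */);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_configure_pf_max_bandwidth(p_dev, 70 /* percent */);
}
#endif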
int ecore_device_num_engines(struct ecore_dev *p_dev)
{
	return ECORE_IS_BB(p_dev) ? 2 : 1;
}

int ecore_device_num_ports(struct ecore_dev *p_dev)
{
	/* in CMT always only one port */
	if (p_dev->num_hwfns > 1)
		return 1;

	return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
}

void ecore_set_fw_mac_addr(__le16 *fw_msb,
			   __le16 *fw_mid,
			   __le16 *fw_lsb,
			   u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
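#if 0
/* Illustrative sketch only (not compiled): for MAC 00:0e:1e:aa:bb:cc,
 * ecore_set_fw_mac_addr() above swaps each byte pair into the 16-bit
 * words the FW expects: msb = {0x0e, 0x00}, mid = {0xaa, 0x1e},
 * lsb = {0xcc, 0xbb} when viewed byte by byte.
 */
static void example_fw_mac_words(void)
{
	u8 mac[6] = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };
	__le16 msb, mid, lsb;

	ecore_set_fw_mac_addr(&msb, &mid, &lsb, mac);
	/* ((u8 *)&msb)[0] == 0x0e, ((u8 *)&msb)[1] == 0x00, and so on. */
}
#endif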