1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File : ecore_dev.c 30 */ 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 35 #include "bcm_osal.h" 36 #include "reg_addr.h" 37 #include "ecore_gtt_reg_addr.h" 38 #include "ecore.h" 39 #include "ecore_chain.h" 40 #include "ecore_status.h" 41 #include "ecore_hw.h" 42 #include "ecore_rt_defs.h" 43 #include "ecore_init_ops.h" 44 #include "ecore_int.h" 45 #include "ecore_cxt.h" 46 #include "ecore_spq.h" 47 #include "ecore_init_fw_funcs.h" 48 #include "ecore_sp_commands.h" 49 #include "ecore_dev_api.h" 50 #include "ecore_sriov.h" 51 #include "ecore_vf.h" 52 #include "ecore_ll2.h" 53 #include "ecore_fcoe.h" 54 #include "ecore_iscsi.h" 55 #include "ecore_ooo.h" 56 #include "ecore_mcp.h" 57 #include "ecore_hw_defs.h" 58 #include "mcp_public.h" 59 #include "ecore_roce.h" 60 #include "ecore_iro.h" 61 #include "nvm_cfg.h" 62 #include "ecore_dev_api.h" 63 #include "ecore_dcbx.h" 64 #include "pcics_reg_driver.h" 65 #include "ecore_l2.h" 66 67 /* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM 68 * registers involved are not split and thus configuration is a race where 69 * some of the PFs configuration might be lost. 70 * Eventually, this needs to move into a MFW-covered HW-lock as arbitration 71 * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where 72 * there's more than a single compiled ecore component in system]. 73 */ 74 static osal_spinlock_t qm_lock; 75 static bool qm_lock_init = false; 76 77 /* Configurable */ 78 #define ECORE_MIN_DPIS (4) /* The minimal number of DPIs required to 79 * load the driver. The number was 80 * arbitrarily set. 81 */ 82 83 /* Derived */ 84 #define ECORE_MIN_PWM_REGION ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS)) 85 86 enum BAR_ID { 87 BAR_ID_0, /* used for GRC */ 88 BAR_ID_1 /* Used for doorbells */ 89 }; 90 91 static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id) 92 { 93 u32 bar_reg = (bar_id == BAR_ID_0 ? 
94 PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); 95 u32 val; 96 97 if (IS_VF(p_hwfn->p_dev)) { 98 /* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be 99 * read from actual register, but we're currently not using 100 * it for actual doorbelling. 101 */ 102 return 1 << 17; 103 } 104 105 val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); 106 if (val) 107 return 1 << (val + 15); 108 109 /* The above registers were updated in the past only in CMT mode. Since 110 * they were found to be useful MFW started updating them from 8.7.7.0. 111 * In older MFW versions they are set to 0 which means disabled. 112 */ 113 if (p_hwfn->p_dev->num_hwfns > 1) { 114 DP_NOTICE(p_hwfn, false, 115 "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); 116 return BAR_ID_0 ? 256 * 1024 : 512 * 1024; 117 } else { 118 DP_NOTICE(p_hwfn, false, 119 "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n"); 120 return 512 * 1024; 121 } 122 } 123 124 void ecore_init_dp(struct ecore_dev *p_dev, 125 u32 dp_module, 126 u8 dp_level, 127 void *dp_ctx) 128 { 129 u32 i; 130 131 p_dev->dp_level = dp_level; 132 p_dev->dp_module = dp_module; 133 p_dev->dp_ctx = dp_ctx; 134 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 135 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 136 137 p_hwfn->dp_level = dp_level; 138 p_hwfn->dp_module = dp_module; 139 p_hwfn->dp_ctx = dp_ctx; 140 } 141 } 142 143 void ecore_init_struct(struct ecore_dev *p_dev) 144 { 145 u8 i; 146 147 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 148 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 149 150 p_hwfn->p_dev = p_dev; 151 p_hwfn->my_id = i; 152 p_hwfn->b_active = false; 153 154 OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex); 155 OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex); 156 } 157 158 /* hwfn 0 is always active */ 159 p_dev->hwfns[0].b_active = true; 160 161 /* set the default cache alignment to 128 (may be overridden later) */ 162 p_dev->cache_shift = 7; 163 } 164 165 static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) 166 { 167 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 168 169 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params); 170 qm_info->qm_pq_params = OSAL_NULL; 171 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params); 172 qm_info->qm_vport_params = OSAL_NULL; 173 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params); 174 qm_info->qm_port_params = OSAL_NULL; 175 OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data); 176 qm_info->wfq_data = OSAL_NULL; 177 } 178 179 void ecore_resc_free(struct ecore_dev *p_dev) 180 { 181 int i; 182 183 if (IS_VF(p_dev)) { 184 for_each_hwfn(p_dev, i) 185 ecore_l2_free(&p_dev->hwfns[i]); 186 return; 187 } 188 189 OSAL_FREE(p_dev, p_dev->fw_data); 190 p_dev->fw_data = OSAL_NULL; 191 192 OSAL_FREE(p_dev, p_dev->reset_stats); 193 p_dev->reset_stats = OSAL_NULL; 194 195 for_each_hwfn(p_dev, i) { 196 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 197 198 ecore_cxt_mngr_free(p_hwfn); 199 ecore_qm_info_free(p_hwfn); 200 ecore_spq_free(p_hwfn); 201 ecore_eq_free(p_hwfn); 202 ecore_consq_free(p_hwfn); 203 ecore_int_free(p_hwfn); 204 #ifdef CONFIG_ECORE_LL2 205 ecore_ll2_free(p_hwfn); 206 #endif 207 #ifdef CONFIG_ECORE_FCOE 208 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 209 ecore_fcoe_free(p_hwfn); 210 #endif 211 #ifdef CONFIG_ECORE_ISCSI 212 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 213 ecore_iscsi_free(p_hwfn); 214 ecore_ooo_free(p_hwfn); 215 } 216 #endif 217 ecore_iov_free(p_hwfn); 218 ecore_l2_free(p_hwfn); 219 ecore_dmae_info_free(p_hwfn); 220 
ecore_dcbx_info_free(p_hwfn); 221 /* @@@TBD Flush work-queue ?*/ 222 } 223 } 224 225 /******************** QM initialization *******************/ 226 227 /* bitmaps for indicating active traffic classes. Special case for Arrowhead 4 port */ 228 #define ACTIVE_TCS_BMAP 0x9f /* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */ 229 #define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */ 230 231 /* determines the physical queue flags for a given PF. */ 232 static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) 233 { 234 u32 flags; 235 236 /* common flags */ 237 flags = PQ_FLAGS_LB; 238 239 /* feature flags */ 240 if (IS_ECORE_SRIOV(p_hwfn->p_dev)) 241 flags |= PQ_FLAGS_VFS; 242 if (IS_ECORE_DCQCN(p_hwfn)) 243 flags |= PQ_FLAGS_RLS; 244 245 /* protocol flags */ 246 switch (p_hwfn->hw_info.personality) { 247 case ECORE_PCI_ETH: 248 flags |= PQ_FLAGS_MCOS; 249 break; 250 case ECORE_PCI_FCOE: 251 flags |= PQ_FLAGS_OFLD; 252 break; 253 case ECORE_PCI_ISCSI: 254 flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 255 break; 256 case ECORE_PCI_ETH_ROCE: 257 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; 258 break; 259 case ECORE_PCI_ETH_IWARP: 260 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 261 break; 262 default: 263 DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality); 264 return 0; 265 } 266 267 return flags; 268 } 269 270 271 /* Getters for resource amounts necessary for qm initialization */ 272 u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn) 273 { 274 return p_hwfn->hw_info.num_hw_tc; 275 } 276 277 u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn) 278 { 279 return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0; 280 } 281 282 #define NUM_DEFAULT_RLS 1 283 284 u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn) 285 { 286 u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 287 288 /* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */ 289 num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), 290 (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_VPORT), 291 ROCE_DCQCN_RP_MAX_QPS)); 292 293 /* make sure after we reserve the default and VF rls we'll have something left */ 294 if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) { 295 if (IS_ECORE_DCQCN(p_hwfn)) 296 DP_NOTICE(p_hwfn, false, "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs); 297 return 0; 298 } 299 300 /* subtract rls necessary for VFs and one default one for the PF */ 301 num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 302 303 return num_pf_rls; 304 } 305 306 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn) 307 { 308 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 309 310 /* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */ 311 return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 312 (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1; 313 } 314 315 /* calc amount of PQs according to the requested flags */ 316 u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn) 317 { 318 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 319 320 return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 321 (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) + 322 (!!(PQ_FLAGS_LB & pq_flags)) + 323 (!!(PQ_FLAGS_OOO & pq_flags)) + 324 (!!(PQ_FLAGS_ACK & pq_flags)) + 325 (!!(PQ_FLAGS_OFLD & pq_flags)) + 326 
(!!(PQ_FLAGS_LLT & pq_flags)) + 327 (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn); 328 } 329 330 /* initialize the top level QM params */ 331 static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn) 332 { 333 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 334 bool four_port; 335 336 /* pq and vport bases for this PF */ 337 qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ); 338 qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT); 339 340 /* rate limiting and weighted fair queueing are always enabled */ 341 qm_info->vport_rl_en = 1; 342 qm_info->vport_wfq_en = 1; 343 344 /* TC config is different for AH 4 port */ 345 four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2; 346 347 /* in AH 4 port we have fewer TCs per port */ 348 qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS; 349 350 /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */ 351 if (!qm_info->ooo_tc) 352 qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC; 353 } 354 355 /* initialize qm vport params */ 356 static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn) 357 { 358 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 359 u8 i; 360 361 /* all vports participate in weighted fair queueing */ 362 for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++) 363 qm_info->qm_vport_params[i].vport_wfq = 1; 364 } 365 366 /* initialize qm port params */ 367 static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn) 368 { 369 /* Initialize qm port parameters */ 370 u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines; 371 372 /* indicate how ooo and high pri traffic is dealt with */ 373 active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 374 ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP; 375 376 for (i = 0; i < num_ports; i++) { 377 struct init_qm_port_params *p_qm_port = 378 &p_hwfn->qm_info.qm_port_params[i]; 379 380 p_qm_port->active = 1; 381 p_qm_port->active_phys_tcs = active_phys_tcs; 382 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; 383 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 384 } 385 } 386 387 /* Reset the params which must be reset for qm init. QM init may be called as 388 * a result of flows other than driver load (e.g. dcbx renegotiation). Other 389 * params may be affected by the init but would simply recalculate to the same 390 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 391 * affected as these amounts stay the same. 392 */ 393 static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn) 394 { 395 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 396 397 qm_info->num_pqs = 0; 398 qm_info->num_vports = 0; 399 qm_info->num_pf_rls = 0; 400 qm_info->num_vf_pqs = 0; 401 qm_info->first_vf_pq = 0; 402 qm_info->first_mcos_pq = 0; 403 qm_info->first_rl_pq = 0; 404 } 405 406 static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn) 407 { 408 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 409 410 qm_info->num_vports++; 411 412 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 413 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 414 } 415 416 /* initialize a single pq and manage qm_info resources accounting. 417 * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF) 418 * and whether a new vport is allocated to the pq or not (i.e. 
vport will be shared) 419 */ 420 421 /* flags for pq init */ 422 #define PQ_INIT_SHARE_VPORT (1 << 0) 423 #define PQ_INIT_PF_RL (1 << 1) 424 #define PQ_INIT_VF_RL (1 << 2) 425 426 /* defines for pq init */ 427 #define PQ_INIT_DEFAULT_WRR_GROUP 1 428 #define PQ_INIT_DEFAULT_TC 0 429 #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 430 431 static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, 432 struct ecore_qm_info *qm_info, 433 u8 tc, u32 pq_init_flags) 434 { 435 u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn); 436 437 if (pq_idx > max_pq) 438 DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); 439 440 /* init pq params */ 441 qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; 442 qm_info->qm_pq_params[pq_idx].tc_id = tc; 443 qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 444 qm_info->qm_pq_params[pq_idx].rl_valid = 445 (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); 446 447 /* qm params accounting */ 448 qm_info->num_pqs++; 449 if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 450 qm_info->num_vports++; 451 452 if (pq_init_flags & PQ_INIT_PF_RL) 453 qm_info->num_pf_rls++; 454 455 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 456 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 457 458 if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) 459 DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn)); 460 } 461 462 /* get pq index according to PQ_FLAGS */ 463 static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn, 464 u32 pq_flags) 465 { 466 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 467 468 /* Can't have multiple flags set here */ 469 if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 470 goto err; 471 472 switch (pq_flags) { 473 case PQ_FLAGS_RLS: 474 return &qm_info->first_rl_pq; 475 case PQ_FLAGS_MCOS: 476 return &qm_info->first_mcos_pq; 477 case PQ_FLAGS_LB: 478 return &qm_info->pure_lb_pq; 479 case PQ_FLAGS_OOO: 480 return &qm_info->ooo_pq; 481 case PQ_FLAGS_ACK: 482 return &qm_info->pure_ack_pq; 483 case PQ_FLAGS_OFLD: 484 return &qm_info->offload_pq; 485 case PQ_FLAGS_LLT: 486 return &qm_info->low_latency_pq; 487 case PQ_FLAGS_VFS: 488 return &qm_info->first_vf_pq; 489 default: 490 goto err; 491 } 492 493 err: 494 DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 495 return OSAL_NULL; 496 } 497 498 /* save pq index in qm info */ 499 static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn, 500 u32 pq_flags, u16 pq_val) 501 { 502 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 503 504 *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 505 } 506 507 /* get tx pq index, with the PQ TX base already set (ready for context init) */ 508 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags) 509 { 510 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 511 512 return *base_pq_idx + CM_TX_PQ_BASE; 513 } 514 515 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc) 516 { 517 u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn); 518 519 if (tc > max_tc) 520 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 521 522 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 523 } 524 525 u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) 526 { 527 u16 
max_vf = ecore_init_qm_get_num_vfs(p_hwfn); 528 529 if (vf > max_vf) 530 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 531 532 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 533 } 534 535 u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) 536 { 537 u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); 538 539 if (rl > max_rl) 540 DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); 541 542 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 543 } 544 545 /* Functions for creating specific types of pqs */ 546 static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) 547 { 548 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 549 550 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 551 return; 552 553 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 554 ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 555 } 556 557 static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn) 558 { 559 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 560 561 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 562 return; 563 564 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 565 ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 566 } 567 568 static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn) 569 { 570 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 571 572 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 573 return; 574 575 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 576 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 577 } 578 579 static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn) 580 { 581 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 582 583 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 584 return; 585 586 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 587 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 588 } 589 590 static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn) 591 { 592 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 593 594 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) 595 return; 596 597 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); 598 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 599 } 600 601 static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn) 602 { 603 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 604 u8 tc_idx; 605 606 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 607 return; 608 609 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 610 for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++) 611 ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 612 } 613 614 static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn) 615 { 616 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 617 u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 618 619 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 620 return; 621 622 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 623 qm_info->num_vf_pqs = num_vfs; 624 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 625 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); 626 } 627 628 static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn) 629 { 630 u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn); 631 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 632 633 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 634 return; 635 636 
ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 637 for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 638 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); 639 } 640 641 static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn) 642 { 643 /* rate limited pqs, must come first (FW assumption) */ 644 ecore_init_qm_rl_pqs(p_hwfn); 645 646 /* pqs for multi cos */ 647 ecore_init_qm_mcos_pqs(p_hwfn); 648 649 /* pure loopback pq */ 650 ecore_init_qm_lb_pq(p_hwfn); 651 652 /* out of order pq */ 653 ecore_init_qm_ooo_pq(p_hwfn); 654 655 /* pure ack pq */ 656 ecore_init_qm_pure_ack_pq(p_hwfn); 657 658 /* pq for offloaded protocol */ 659 ecore_init_qm_offload_pq(p_hwfn); 660 661 /* low latency pq */ 662 ecore_init_qm_low_latency_pq(p_hwfn); 663 664 /* done sharing vports */ 665 ecore_init_qm_advance_vport(p_hwfn); 666 667 /* pqs for vfs */ 668 ecore_init_qm_vf_pqs(p_hwfn); 669 } 670 671 /* compare values of getters against resources amounts */ 672 static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn) 673 { 674 if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) { 675 DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 676 return ECORE_INVAL; 677 } 678 679 if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) { 680 DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 681 return ECORE_INVAL; 682 } 683 684 return ECORE_SUCCESS; 685 } 686 687 /* 688 * Function for verbose printing of the qm initialization results 689 */ 690 static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) 691 { 692 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 693 struct init_qm_vport_params *vport; 694 struct init_qm_port_params *port; 695 struct init_qm_pq_params *pq; 696 int i, tc; 697 698 /* top level params */ 699 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 700 qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->offload_pq, qm_info->pure_ack_pq); 701 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", 702 qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port); 703 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 704 qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn)); 705 706 /* port table */ 707 for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) { 708 port = &(qm_info->qm_port_params[i]); 709 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", 710 i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved); 711 } 712 713 /* vport table */ 714 for (i = 0; i < qm_info->num_vports; i++) { 715 vport = &(qm_info->qm_vport_params[i]); 716 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", 717 qm_info->start_vport + i, vport->vport_rl, vport->vport_wfq); 718 for (tc = 0; tc < NUM_OF_TCS; tc++) 719 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]); 720 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n"); 721 } 722 723 /* pq table */ 724 for (i = 0; i < 
qm_info->num_pqs; i++) { 725 pq = &(qm_info->qm_pq_params[i]); 726 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", 727 qm_info->start_pq + i, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid); 728 } 729 } 730 731 static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn) 732 { 733 /* reset params required for init run */ 734 ecore_init_qm_reset_params(p_hwfn); 735 736 /* init QM top level params */ 737 ecore_init_qm_params(p_hwfn); 738 739 /* init QM port params */ 740 ecore_init_qm_port_params(p_hwfn); 741 742 /* init QM vport params */ 743 ecore_init_qm_vport_params(p_hwfn); 744 745 /* init QM physical queue params */ 746 ecore_init_qm_pq_params(p_hwfn); 747 748 /* display all that init */ 749 ecore_dp_init_qm_params(p_hwfn); 750 } 751 752 /* This function reconfigures the QM pf on the fly. 753 * For this purpose we: 754 * 1. reconfigure the QM database 755 * 2. set new values to runtime array 756 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 757 * 4. activate init tool in QM_PF stage 758 * 5. send an sdm_qm_cmd through rbc interface to release the QM 759 */ 760 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, 761 struct ecore_ptt *p_ptt) 762 { 763 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 764 bool b_rc; 765 enum _ecore_status_t rc; 766 767 /* initialize ecore's qm data structure */ 768 ecore_init_qm_info(p_hwfn); 769 770 /* stop PF's qm queues */ 771 OSAL_SPIN_LOCK(&qm_lock); 772 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 773 qm_info->start_pq, qm_info->num_pqs); 774 OSAL_SPIN_UNLOCK(&qm_lock); 775 if (!b_rc) 776 return ECORE_INVAL; 777 778 /* clear the QM_PF runtime phase leftovers from previous init */ 779 ecore_init_clear_rt_data(p_hwfn); 780 781 /* prepare QM portion of runtime array */ 782 ecore_qm_init_pf(p_hwfn); 783 784 /* activate init tool on runtime array */ 785 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 786 p_hwfn->hw_info.hw_mode); 787 if (rc != ECORE_SUCCESS) 788 return rc; 789 790 /* start PF's qm queues */ 791 OSAL_SPIN_LOCK(&qm_lock); 792 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 793 qm_info->start_pq, qm_info->num_pqs); 794 OSAL_SPIN_UNLOCK(&qm_lock); 795 if (!b_rc) 796 return ECORE_INVAL; 797 798 return ECORE_SUCCESS; 799 } 800 801 static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn) 802 { 803 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 804 enum _ecore_status_t rc; 805 806 rc = ecore_init_qm_sanity(p_hwfn); 807 if (rc != ECORE_SUCCESS) 808 goto alloc_err; 809 810 qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 811 sizeof(struct init_qm_pq_params) * 812 ecore_init_qm_get_num_pqs(p_hwfn)); 813 if (!qm_info->qm_pq_params) 814 goto alloc_err; 815 816 qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 817 sizeof(struct init_qm_vport_params) * 818 ecore_init_qm_get_num_vports(p_hwfn)); 819 if (!qm_info->qm_vport_params) 820 goto alloc_err; 821 822 qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 823 sizeof(struct init_qm_port_params) * 824 p_hwfn->p_dev->num_ports_in_engines); 825 if (!qm_info->qm_port_params) 826 goto alloc_err; 827 828 qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 829 sizeof(struct ecore_wfq_data) * 830 ecore_init_qm_get_num_vports(p_hwfn)); 831 if (!qm_info->wfq_data) 832 goto alloc_err; 833 834 return ECORE_SUCCESS; 835 836 alloc_err: 837 DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n"); 838 
ecore_qm_info_free(p_hwfn); 839 return ECORE_NOMEM; 840 } 841 /******************** End QM initialization ***************/ 842 843 enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) 844 { 845 enum _ecore_status_t rc = ECORE_SUCCESS; 846 u32 rdma_tasks, excess_tasks; 847 u32 line_count; 848 int i; 849 850 if (IS_VF(p_dev)) { 851 for_each_hwfn(p_dev, i) { 852 rc = ecore_l2_alloc(&p_dev->hwfns[i]); 853 if (rc != ECORE_SUCCESS) 854 return rc; 855 } 856 return rc; 857 } 858 859 p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL, 860 sizeof(*p_dev->fw_data)); 861 if (!p_dev->fw_data) 862 return ECORE_NOMEM; 863 864 for_each_hwfn(p_dev, i) { 865 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 866 u32 n_eqes, num_cons; 867 868 /* First allocate the context manager structure */ 869 rc = ecore_cxt_mngr_alloc(p_hwfn); 870 if (rc) 871 goto alloc_err; 872 873 /* Set the HW cid/tid numbers (in the contest manager) 874 * Must be done prior to any further computations. 875 */ 876 rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); 877 if (rc) 878 goto alloc_err; 879 880 rc = ecore_alloc_qm_data(p_hwfn); 881 if (rc) 882 goto alloc_err; 883 884 /* init qm info */ 885 ecore_init_qm_info(p_hwfn); 886 887 /* Compute the ILT client partition */ 888 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 889 if (rc) { 890 DP_NOTICE(p_hwfn, false, "too many ILT lines; re-computing with less lines\n"); 891 /* In case there are not enough ILT lines we reduce the 892 * number of RDMA tasks and re-compute. 893 */ 894 excess_tasks = ecore_cxt_cfg_ilt_compute_excess( 895 p_hwfn, line_count); 896 if (!excess_tasks) 897 goto alloc_err; 898 899 rdma_tasks = RDMA_MAX_TIDS - excess_tasks; 900 rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks); 901 if (rc) 902 goto alloc_err; 903 904 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 905 if (rc) { 906 DP_ERR(p_hwfn, "failed ILT compute. Requested too many lines: %u\n", 907 line_count); 908 909 goto alloc_err; 910 } 911 } 912 913 /* CID map / ILT shadow table / T2 914 * The talbes sizes are determined by the computations above 915 */ 916 rc = ecore_cxt_tables_alloc(p_hwfn); 917 if (rc) 918 goto alloc_err; 919 920 /* SPQ, must follow ILT because initializes SPQ context */ 921 rc = ecore_spq_alloc(p_hwfn); 922 if (rc) 923 goto alloc_err; 924 925 /* SP status block allocation */ 926 p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, 927 RESERVED_PTT_DPC); 928 929 rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 930 if (rc) 931 goto alloc_err; 932 933 rc = ecore_iov_alloc(p_hwfn); 934 if (rc) 935 goto alloc_err; 936 937 /* EQ */ 938 n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain); 939 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 940 /* Calculate the EQ size 941 * --------------------- 942 * Each ICID may generate up to one event at a time i.e. 943 * the event must be handled/cleared before a new one 944 * can be generated. We calculate the sum of events per 945 * protocol and create an EQ deep enough to handle the 946 * worst case: 947 * - Core - according to SPQ. 948 * - RoCE - per QP there are a couple of ICIDs, one 949 * responder and one requester, each can 950 * generate an EQE => n_eqes_qp = 2 * n_qp. 951 * Each CQ can generate an EQE. There are 2 CQs 952 * per QP => n_eqes_cq = 2 * n_qp. 953 * Hence the RoCE total is 4 * n_qp or 954 * 2 * num_cons. 955 * - ENet - There can be up to two events per VF. One 956 * for VF-PF channel and another for VF FLR 957 * initial cleanup. 
The number of VFs is 958 * bounded by MAX_NUM_VFS_BB, and is much 959 * smaller than RoCE's so we avoid exact 960 * calculation. 961 */ 962 if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) { 963 num_cons = ecore_cxt_get_proto_cid_count( 964 p_hwfn, PROTOCOLID_ROCE, OSAL_NULL); 965 num_cons *= 2; 966 } else { 967 num_cons = ecore_cxt_get_proto_cid_count( 968 p_hwfn, PROTOCOLID_IWARP, 969 OSAL_NULL); 970 } 971 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 972 } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 973 num_cons = ecore_cxt_get_proto_cid_count( 974 p_hwfn, PROTOCOLID_ISCSI, OSAL_NULL); 975 n_eqes += 2 * num_cons; 976 } 977 978 if (n_eqes > 0xFFFF) { 979 DP_ERR(p_hwfn, 980 "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n", 981 n_eqes, 0xFFFF); 982 goto alloc_no_mem; 983 } 984 985 rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes); 986 if (rc) 987 goto alloc_err; 988 989 rc = ecore_consq_alloc(p_hwfn); 990 if (rc) 991 goto alloc_err; 992 993 rc = ecore_l2_alloc(p_hwfn); 994 if (rc != ECORE_SUCCESS) 995 goto alloc_err; 996 997 #ifdef CONFIG_ECORE_LL2 998 if (p_hwfn->using_ll2) { 999 rc = ecore_ll2_alloc(p_hwfn); 1000 if (rc) 1001 goto alloc_err; 1002 } 1003 #endif 1004 #ifdef CONFIG_ECORE_FCOE 1005 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { 1006 rc = ecore_fcoe_alloc(p_hwfn); 1007 if (rc) 1008 goto alloc_err; 1009 } 1010 #endif 1011 #ifdef CONFIG_ECORE_ISCSI 1012 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1013 rc = ecore_iscsi_alloc(p_hwfn); 1014 if (rc) 1015 goto alloc_err; 1016 rc = ecore_ooo_alloc(p_hwfn); 1017 if (rc) 1018 goto alloc_err; 1019 } 1020 #endif 1021 1022 /* DMA info initialization */ 1023 rc = ecore_dmae_info_alloc(p_hwfn); 1024 if (rc) { 1025 DP_NOTICE(p_hwfn, true, 1026 "Failed to allocate memory for dmae_info structure\n"); 1027 goto alloc_err; 1028 } 1029 1030 /* DCBX initialization */ 1031 rc = ecore_dcbx_info_alloc(p_hwfn); 1032 if (rc) { 1033 DP_NOTICE(p_hwfn, true, 1034 "Failed to allocate memory for dcbx structure\n"); 1035 goto alloc_err; 1036 } 1037 } 1038 1039 p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, 1040 sizeof(*p_dev->reset_stats)); 1041 if (!p_dev->reset_stats) { 1042 DP_NOTICE(p_dev, true, 1043 "Failed to allocate reset statistics\n"); 1044 goto alloc_no_mem; 1045 } 1046 1047 return ECORE_SUCCESS; 1048 1049 alloc_no_mem: 1050 rc = ECORE_NOMEM; 1051 alloc_err: 1052 ecore_resc_free(p_dev); 1053 return rc; 1054 } 1055 1056 void ecore_resc_setup(struct ecore_dev *p_dev) 1057 { 1058 int i; 1059 1060 if (IS_VF(p_dev)) { 1061 for_each_hwfn(p_dev, i) 1062 ecore_l2_setup(&p_dev->hwfns[i]); 1063 return; 1064 } 1065 1066 for_each_hwfn(p_dev, i) { 1067 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1068 1069 ecore_cxt_mngr_setup(p_hwfn); 1070 ecore_spq_setup(p_hwfn); 1071 ecore_eq_setup(p_hwfn); 1072 ecore_consq_setup(p_hwfn); 1073 1074 /* Read shadow of current MFW mailbox */ 1075 ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 1076 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 1077 p_hwfn->mcp_info->mfw_mb_cur, 1078 p_hwfn->mcp_info->mfw_mb_length); 1079 1080 ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt); 1081 1082 ecore_l2_setup(p_hwfn); 1083 ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt); 1084 #ifdef CONFIG_ECORE_LL2 1085 if (p_hwfn->using_ll2) 1086 ecore_ll2_setup(p_hwfn); 1087 #endif 1088 #ifdef CONFIG_ECORE_FCOE 1089 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 1090 ecore_fcoe_setup(p_hwfn); 1091 #endif 1092 #ifdef CONFIG_ECORE_ISCSI 1093 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1094 
ecore_iscsi_setup(p_hwfn); 1095 ecore_ooo_setup(p_hwfn); 1096 } 1097 #endif 1098 } 1099 } 1100 1101 #define FINAL_CLEANUP_POLL_CNT (100) 1102 #define FINAL_CLEANUP_POLL_TIME (10) 1103 enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, 1104 struct ecore_ptt *p_ptt, 1105 u16 id, bool is_vf) 1106 { 1107 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 1108 enum _ecore_status_t rc = ECORE_TIMEOUT; 1109 1110 #ifndef ASIC_ONLY 1111 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) || 1112 CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1113 DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n"); 1114 return ECORE_SUCCESS; 1115 } 1116 #endif 1117 1118 addr = GTT_BAR0_MAP_REG_USDM_RAM + 1119 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 1120 1121 if (is_vf) 1122 id += 0x10; 1123 1124 command |= X_FINAL_CLEANUP_AGG_INT << 1125 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 1126 command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 1127 command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 1128 command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; 1129 1130 /* Make sure notification is not set before initiating final cleanup */ 1131 if (REG_RD(p_hwfn, addr)) { 1132 DP_NOTICE(p_hwfn, false, 1133 "Unexpected; Found final cleanup notification before initiating final cleanup\n"); 1134 REG_WR(p_hwfn, addr, 0); 1135 } 1136 1137 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1138 "Sending final cleanup for PFVF[%d] [Command %08x\n]", 1139 id, command); 1140 1141 ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 1142 1143 /* Poll until completion */ 1144 while (!REG_RD(p_hwfn, addr) && count--) 1145 OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME); 1146 1147 if (REG_RD(p_hwfn, addr)) 1148 rc = ECORE_SUCCESS; 1149 else 1150 DP_NOTICE(p_hwfn, true, "Failed to receive FW final cleanup notification\n"); 1151 1152 /* Cleanup afterwards */ 1153 REG_WR(p_hwfn, addr, 0); 1154 1155 return rc; 1156 } 1157 1158 static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) 1159 { 1160 int hw_mode = 0; 1161 1162 if (ECORE_IS_BB_B0(p_hwfn->p_dev)) { 1163 hw_mode |= 1 << MODE_BB; 1164 } else if (ECORE_IS_AH(p_hwfn->p_dev)) { 1165 hw_mode |= 1 << MODE_K2; 1166 } else { 1167 DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n", 1168 p_hwfn->p_dev->type); 1169 return ECORE_INVAL; 1170 } 1171 1172 /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE*/ 1173 switch (p_hwfn->p_dev->num_ports_in_engines) { 1174 case 1: 1175 hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 1176 break; 1177 case 2: 1178 hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 1179 break; 1180 case 4: 1181 hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 1182 break; 1183 default: 1184 DP_NOTICE(p_hwfn, true, "num_ports_in_engine = %d not supported\n", 1185 p_hwfn->p_dev->num_ports_in_engines); 1186 return ECORE_INVAL; 1187 } 1188 1189 switch (p_hwfn->p_dev->mf_mode) { 1190 case ECORE_MF_DEFAULT: 1191 case ECORE_MF_NPAR: 1192 hw_mode |= 1 << MODE_MF_SI; 1193 break; 1194 case ECORE_MF_OVLAN: 1195 hw_mode |= 1 << MODE_MF_SD; 1196 break; 1197 default: 1198 DP_NOTICE(p_hwfn, true, "Unsupported MF mode, init as DEFAULT\n"); 1199 hw_mode |= 1 << MODE_MF_SI; 1200 } 1201 1202 #ifndef ASIC_ONLY 1203 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1204 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1205 hw_mode |= 1 << MODE_FPGA; 1206 } else { 1207 if (p_hwfn->p_dev->b_is_emul_full) 1208 hw_mode |= 1 << MODE_EMUL_FULL; 1209 else 1210 hw_mode |= 1 << MODE_EMUL_REDUCED; 1211 } 1212 } else 1213 #endif 1214 hw_mode |= 1 << MODE_ASIC; 1215 1216 if (p_hwfn->p_dev->num_hwfns > 1) 1217 
hw_mode |= 1 << MODE_100G; 1218 1219 p_hwfn->hw_info.hw_mode = hw_mode; 1220 1221 DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP), 1222 "Configuring function for hw_mode: 0x%08x\n", 1223 p_hwfn->hw_info.hw_mode); 1224 1225 return ECORE_SUCCESS; 1226 } 1227 1228 #ifndef ASIC_ONLY 1229 /* MFW-replacement initializations for non-ASIC */ 1230 static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn, 1231 struct ecore_ptt *p_ptt) 1232 { 1233 struct ecore_dev *p_dev = p_hwfn->p_dev; 1234 u32 pl_hv = 1; 1235 int i; 1236 1237 if (CHIP_REV_IS_EMUL(p_dev)) { 1238 if (ECORE_IS_AH(p_dev)) 1239 pl_hv |= 0x600; 1240 else if (ECORE_IS_E5(p_dev)) 1241 ECORE_E5_MISSING_CODE; 1242 } 1243 1244 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv); 1245 1246 if (CHIP_REV_IS_EMUL(p_dev) && 1247 (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev))) 1248 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5, 1249 0x3ffffff); 1250 1251 /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */ 1252 /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */ 1253 if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev)) 1254 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4); 1255 1256 if (CHIP_REV_IS_EMUL(p_dev)) { 1257 if (ECORE_IS_AH(p_dev)) { 1258 /* 2 for 4-port, 1 for 2-port, 0 for 1-port */ 1259 ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE, 1260 (p_dev->num_ports_in_engines >> 1)); 1261 1262 ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN, 1263 p_dev->num_ports_in_engines == 4 ? 0 : 3); 1264 } else if (ECORE_IS_E5(p_dev)) { 1265 ECORE_E5_MISSING_CODE; 1266 } 1267 } 1268 1269 /* Poll on RBC */ 1270 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1); 1271 for (i = 0; i < 100; i++) { 1272 OSAL_UDELAY(50); 1273 if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1) 1274 break; 1275 } 1276 if (i == 100) 1277 DP_NOTICE(p_hwfn, true, "RBC done failed to complete in PSWRQ2\n"); 1278 1279 return ECORE_SUCCESS; 1280 } 1281 #endif 1282 1283 /* Init run time data for all PFs and their VFs on an engine. 1284 * TBD - for VFs - Once we have parent PF info for each VF in 1285 * shmem available as CAU requires knowledge of parent PF for each VF. 1286 */ 1287 static void ecore_init_cau_rt_data(struct ecore_dev *p_dev) 1288 { 1289 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 1290 int i, igu_sb_id; 1291 1292 for_each_hwfn(p_dev, i) { 1293 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1294 struct ecore_igu_info *p_igu_info; 1295 struct ecore_igu_block *p_block; 1296 struct cau_sb_entry sb_entry; 1297 1298 p_igu_info = p_hwfn->hw_info.p_igu_info; 1299 1300 for (igu_sb_id = 0; 1301 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev); 1302 igu_sb_id++) { 1303 p_block = &p_igu_info->entry[igu_sb_id]; 1304 1305 if (!p_block->is_pf) 1306 continue; 1307 1308 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, 1309 p_block->function_id, 1310 0, 0); 1311 STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, 1312 sb_entry); 1313 } 1314 } 1315 } 1316 1317 static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn, 1318 struct ecore_ptt *p_ptt) 1319 { 1320 u32 val, wr_mbs, cache_line_size; 1321 1322 val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 1323 switch (val) { 1324 case 0: 1325 wr_mbs = 128; 1326 break; 1327 case 1: 1328 wr_mbs = 256; 1329 break; 1330 case 2: 1331 wr_mbs = 512; 1332 break; 1333 default: 1334 DP_INFO(p_hwfn, 1335 "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. 
Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1336 val); 1337 return; 1338 } 1339 1340 cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs); 1341 switch (cache_line_size) { 1342 case 32: 1343 val = 0; 1344 break; 1345 case 64: 1346 val = 1; 1347 break; 1348 case 128: 1349 val = 2; 1350 break; 1351 case 256: 1352 val = 3; 1353 break; 1354 default: 1355 DP_INFO(p_hwfn, 1356 "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1357 cache_line_size); 1358 } 1359 1360 if (OSAL_CACHE_LINE_SIZE > wr_mbs) 1361 DP_INFO(p_hwfn, 1362 "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 1363 OSAL_CACHE_LINE_SIZE, wr_mbs); 1364 1365 STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 1366 } 1367 1368 static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, 1369 struct ecore_ptt *p_ptt, 1370 int hw_mode) 1371 { 1372 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1373 struct ecore_dev *p_dev = p_hwfn->p_dev; 1374 u8 vf_id, max_num_vfs; 1375 u16 num_pfs, pf_id; 1376 u32 concrete_fid; 1377 enum _ecore_status_t rc = ECORE_SUCCESS; 1378 1379 ecore_init_cau_rt_data(p_dev); 1380 1381 /* Program GTT windows */ 1382 ecore_gtt_init(p_hwfn); 1383 1384 #ifndef ASIC_ONLY 1385 if (CHIP_REV_IS_EMUL(p_dev)) { 1386 rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt); 1387 if (rc != ECORE_SUCCESS) 1388 return rc; 1389 } 1390 #endif 1391 1392 if (p_hwfn->mcp_info) { 1393 if (p_hwfn->mcp_info->func_info.bandwidth_max) 1394 qm_info->pf_rl_en = 1; 1395 if (p_hwfn->mcp_info->func_info.bandwidth_min) 1396 qm_info->pf_wfq_en = 1; 1397 } 1398 1399 ecore_qm_common_rt_init(p_hwfn, 1400 p_dev->num_ports_in_engines, 1401 qm_info->max_phys_tcs_per_port, 1402 qm_info->pf_rl_en, qm_info->pf_wfq_en, 1403 qm_info->vport_rl_en, qm_info->vport_wfq_en, 1404 qm_info->qm_port_params); 1405 1406 ecore_cxt_hw_init_common(p_hwfn); 1407 1408 ecore_init_cache_line_size(p_hwfn, p_ptt); 1409 1410 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); 1411 if (rc != ECORE_SUCCESS) 1412 return rc; 1413 1414 /* @@TBD MichalK - should add VALIDATE_VFID to init tool... 1415 * need to decide with which value, maybe runtime 1416 */ 1417 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 1418 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 1419 1420 if (ECORE_IS_BB(p_dev)) { 1421 /* Workaround clears ROCE search for all functions to prevent 1422 * involving non intialized function in processing ROCE packet. 1423 */ 1424 num_pfs = NUM_OF_ENG_PFS(p_dev); 1425 for (pf_id = 0; pf_id < num_pfs; pf_id++) { 1426 ecore_fid_pretend(p_hwfn, p_ptt, pf_id); 1427 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1428 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1429 } 1430 /* pretend to original PF */ 1431 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1432 } 1433 1434 /* Workaround for avoiding CCFC execution error when getting packets 1435 * with CRC errors, and allowing instead the invoking of the FW error 1436 * handler. 1437 * This is not done inside the init tool since it currently can't 1438 * perform a pretending to VFs. 1439 */ 1440 max_num_vfs = ECORE_IS_AH(p_dev) ? 
MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; 1441 for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { 1442 concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id); 1443 ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); 1444 ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 1445 ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); 1446 ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); 1447 ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); 1448 } 1449 /* pretend to original PF */ 1450 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1451 1452 return rc; 1453 } 1454 1455 #ifndef ASIC_ONLY 1456 #define MISC_REG_RESET_REG_2_XMAC_BIT (1<<4) 1457 #define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1<<5) 1458 1459 #define PMEG_IF_BYTE_COUNT 8 1460 1461 static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn, 1462 struct ecore_ptt *p_ptt, 1463 u32 addr, 1464 u64 data, 1465 u8 reg_type, 1466 u8 port) 1467 { 1468 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 1469 "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n", 1470 ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) | 1471 (8 << PMEG_IF_BYTE_COUNT), 1472 (reg_type << 25) | (addr << 8) | port, 1473 (u32)((data >> 32) & 0xffffffff), 1474 (u32)(data & 0xffffffff)); 1475 1476 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB, 1477 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) & 1478 0xffff00fe) | 1479 (8 << PMEG_IF_BYTE_COUNT)); 1480 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB, 1481 (reg_type << 25) | (addr << 8) | port); 1482 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff); 1483 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, 1484 (data >> 32) & 0xffffffff); 1485 } 1486 1487 #define XLPORT_MODE_REG (0x20a) 1488 #define XLPORT_MAC_CONTROL (0x210) 1489 #define XLPORT_FLOW_CONTROL_CONFIG (0x207) 1490 #define XLPORT_ENABLE_REG (0x20b) 1491 1492 #define XLMAC_CTRL (0x600) 1493 #define XLMAC_MODE (0x601) 1494 #define XLMAC_RX_MAX_SIZE (0x608) 1495 #define XLMAC_TX_CTRL (0x604) 1496 #define XLMAC_PAUSE_CTRL (0x60d) 1497 #define XLMAC_PFC_CTRL (0x60e) 1498 1499 static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn, 1500 struct ecore_ptt *p_ptt) 1501 { 1502 u8 loopback = 0, port = p_hwfn->port_id * 2; 1503 1504 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 1505 1506 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, 1507 (0x4 << 4) | 0x4, 1, port); /* XLPORT MAC MODE */ /* 0 Quad, 4 Single... 
*/ 1508 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port); 1509 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 1510 0x40, 0, port); /*XLMAC: SOFT RESET */ 1511 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 1512 0x40, 0, port); /*XLMAC: Port Speed >= 10Gbps */ 1513 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 1514 0x3fff, 0, port); /* XLMAC: Max Size */ 1515 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL, 1516 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38), 1517 0, port); 1518 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 1519 0x7c000, 0, port); 1520 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL, 1521 0x30ffffc000ULL, 0, port); 1522 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 1523 0, port); /* XLMAC: TX_EN, RX_EN */ 1524 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2), 1525 0, port); /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */ 1526 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1527 1, 0, port); /* Enabled Parallel PFC interface */ 1528 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 1529 0xf, 1, port); /* XLPORT port enable */ 1530 } 1531 1532 static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn, 1533 struct ecore_ptt *p_ptt) 1534 { 1535 u8 port = p_hwfn->port_id; 1536 u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE; 1537 1538 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 1539 1540 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2), 1541 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) | 1542 (port << 1543 CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) | 1544 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT)); 1545 1546 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5, 1547 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT); 1548 1549 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5, 1550 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT); 1551 1552 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5, 1553 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT); 1554 1555 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5, 1556 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT); 1557 1558 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5, 1559 (0xA << 1560 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) | 1561 (8 << 1562 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT)); 1563 1564 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5, 1565 0xa853); 1566 } 1567 1568 static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn, 1569 struct ecore_ptt *p_ptt) 1570 { 1571 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) 1572 ecore_emul_link_init_ah_e5(p_hwfn, p_ptt); 1573 else /* BB */ 1574 ecore_emul_link_init_bb(p_hwfn, p_ptt); 1575 1576 return; 1577 } 1578 1579 static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn, 1580 struct ecore_ptt *p_ptt, u8 port) 1581 { 1582 int port_offset = port ? 
0x800 : 0; 1583 u32 xmac_rxctrl = 0; 1584 1585 /* Reset of XMAC */ 1586 /* FIXME: move to common start */ 1587 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2*sizeof(u32), 1588 MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */ 1589 OSAL_MSLEEP(1); 1590 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 1591 MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */ 1592 1593 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1); 1594 1595 /* Set the number of ports on the Warp Core to 10G */ 1596 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3); 1597 1598 /* Soft reset of XMAC */ 1599 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), 1600 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 1601 OSAL_MSLEEP(1); 1602 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 1603 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 1604 1605 /* FIXME: move to common end */ 1606 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 1607 ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20); 1608 1609 /* Set Max packet size: initialize XMAC block register for port 0 */ 1610 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710); 1611 1612 /* CRC append for Tx packets: init XMAC block register for port 1 */ 1613 ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800); 1614 1615 /* Enable TX and RX: initialize XMAC block register for port 1 */ 1616 ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset, 1617 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB); 1618 xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, 1619 XMAC_REG_RX_CTRL_BB + port_offset); 1620 xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB; 1621 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl); 1622 } 1623 #endif 1624 1625 static enum _ecore_status_t 1626 ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn, 1627 struct ecore_ptt *p_ptt, 1628 u32 pwm_region_size, 1629 u32 n_cpus) 1630 { 1631 u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size; 1632 u32 dpi_bit_shift, dpi_count; 1633 u32 min_dpis; 1634 1635 /* Calculate DPI size 1636 * ------------------ 1637 * The PWM region contains Doorbell Pages. The first is reserverd for 1638 * the kernel for, e.g, L2. The others are free to be used by non- 1639 * trusted applications, typically from user space. Each page, called a 1640 * doorbell page is sectioned into windows that allow doorbells to be 1641 * issued in parallel by the kernel/application. The size of such a 1642 * window (a.k.a. WID) is 1kB. 1643 * Summary: 1644 * 1kB WID x N WIDS = DPI page size 1645 * DPI page size x N DPIs = PWM region size 1646 * Notes: 1647 * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE 1648 * in order to ensure that two applications won't share the same page. 1649 * It also must contain at least one WID per CPU to allow parallelism. 1650 * It also must be a power of 2, since it is stored as a bit shift. 1651 * 1652 * The DPI page size is stored in a register as 'dpi_bit_shift' so that 1653 * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096 1654 * containing 4 WIDs. 
1655 */ 1656 dpi_page_size_1 = ECORE_WID_SIZE * n_cpus; 1657 dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE); 1658 dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2); 1659 dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size); 1660 dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096); 1661 1662 dpi_count = pwm_region_size / dpi_page_size; 1663 1664 min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; 1665 min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis); 1666 1667 /* Update hwfn */ 1668 p_hwfn->dpi_size = dpi_page_size; 1669 p_hwfn->dpi_count = dpi_count; 1670 1671 /* Update registers */ 1672 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); 1673 1674 if (dpi_count < min_dpis) 1675 return ECORE_NORESOURCES; 1676 1677 return ECORE_SUCCESS; 1678 } 1679 1680 enum ECORE_ROCE_EDPM_MODE { 1681 ECORE_ROCE_EDPM_MODE_ENABLE = 0, 1682 ECORE_ROCE_EDPM_MODE_FORCE_ON = 1, 1683 ECORE_ROCE_EDPM_MODE_DISABLE = 2, 1684 }; 1685 1686 static enum _ecore_status_t 1687 ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn, 1688 struct ecore_ptt *p_ptt) 1689 { 1690 u32 pwm_regsize, norm_regsize; 1691 u32 non_pwm_conn, min_addr_reg1; 1692 u32 db_bar_size, n_cpus = 1; 1693 u32 roce_edpm_mode; 1694 u32 pf_dems_shift; 1695 enum _ecore_status_t rc = ECORE_SUCCESS; 1696 u8 cond; 1697 1698 db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1); 1699 if (p_hwfn->p_dev->num_hwfns > 1) 1700 db_bar_size /= 2; 1701 1702 /* Calculate doorbell regions 1703 * ----------------------------------- 1704 * The doorbell BAR is made of two regions. The first is called normal 1705 * region and the second is called PWM region. In the normal region 1706 * each ICID has its own set of addresses so that writing to that 1707 * specific address identifies the ICID. In the Process Window Mode 1708 * region the ICID is given in the data written to the doorbell. The 1709 * above per PF register denotes the offset in the doorbell BAR in which 1710 * the PWM region begins. 1711 * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per 1712 * non-PWM connection. The calculation below computes the total non-PWM 1713 * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is 1714 * in units of 4,096 bytes. 1715 */ 1716 non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 1717 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 1718 OSAL_NULL) + 1719 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 1720 OSAL_NULL); 1721 norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096); 1722 min_addr_reg1 = norm_regsize / 4096; 1723 pwm_regsize = db_bar_size - norm_regsize; 1724 1725 /* Check that the normal and PWM sizes are valid */ 1726 if (db_bar_size < norm_regsize) { 1727 DP_ERR(p_hwfn->p_dev, "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", db_bar_size, norm_regsize); 1728 return ECORE_NORESOURCES; 1729 } 1730 if (pwm_regsize < ECORE_MIN_PWM_REGION) { 1731 DP_ERR(p_hwfn->p_dev, "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size, norm_regsize); 1732 return ECORE_NORESOURCES; 1733 } 1734 1735 /* Calculate number of DPIs */ 1736 roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; 1737 if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) || 1738 ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) { 1739 /* Either EDPM is mandatory, or we are attempting to allocate a 1740 * WID per CPU. 
1741 */ 1742 n_cpus = OSAL_NUM_ACTIVE_CPU(); 1743 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1744 } 1745 1746 cond = ((rc != ECORE_SUCCESS) && 1747 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) || 1748 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE); 1749 if (cond || p_hwfn->dcbx_no_edpm) { 1750 /* Either EDPM is disabled from user configuration, or it is 1751 * disabled via DCBx, or it is not mandatory and we failed to 1752 * allocated a WID per CPU. 1753 */ 1754 n_cpus = 1; 1755 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1756 1757 #ifdef CONFIG_ECORE_ROCE 1758 /* If we entered this flow due to DCBX then the DPM register is 1759 * already configured. 1760 */ 1761 if (cond) 1762 ecore_rdma_dpm_bar(p_hwfn, p_ptt); 1763 #endif 1764 } 1765 1766 p_hwfn->wid_count = (u16)n_cpus; 1767 1768 DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", 1769 norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count, 1770 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 1771 "disabled" : "enabled"); 1772 1773 /* Check return codes from above calls */ 1774 if (rc != ECORE_SUCCESS) { 1775 DP_ERR(p_hwfn, 1776 "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d. You can try reducing this down to %d via user configuration n_dpi or by disabling EDPM via user configuration roce_edpm\n", 1777 p_hwfn->dpi_count, 1778 p_hwfn->pf_params.rdma_pf_params.min_dpis, 1779 ECORE_MIN_DPIS); 1780 return ECORE_NORESOURCES; 1781 } 1782 1783 /* Update hwfn */ 1784 p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to 1785 * calculate the doorbell 1786 * address 1787 */ 1788 1789 /* Update registers */ 1790 /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ 1791 pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4); 1792 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 1793 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 1794 1795 return ECORE_SUCCESS; 1796 } 1797 1798 static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn, 1799 struct ecore_ptt *p_ptt, 1800 int hw_mode) 1801 { 1802 enum _ecore_status_t rc = ECORE_SUCCESS; 1803 1804 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, 1805 hw_mode); 1806 if (rc != ECORE_SUCCESS) 1807 return rc; 1808 #if 0 1809 /* FW 8.10.5.0 requires us to configure PF_VECTOR and DUALMODE in LLH. 1810 * This would hopefully be moved to MFW. 1811 */ 1812 if (IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) { 1813 u8 pf_id = 0; 1814 1815 if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) == 1816 ECORE_SUCCESS) { 1817 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 1818 "PF[%08x] is first eth on engine\n", 1819 pf_id); 1820 1821 /* We should have configured BIT for ppfid, i.e., the 1822 * relative function number in the port. But there's a 1823 * bug in LLH in BB where the ppfid is actually engine 1824 * based, so we need to take this into account. 1825 */ 1826 if (!ECORE_IS_BB(p_hwfn->p_dev)) 1827 pf_id /= p_hwfn->p_dev->num_ports_in_engines; 1828 1829 ecore_wr(p_hwfn, p_ptt, 1830 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id); 1831 } 1832 1833 /* Take the protocol-based hit vector if there is a hit, 1834 * otherwise take the other vector. 
1835 */ 1836 ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2); 1837 } 1838 #endif 1839 #ifndef ASIC_ONLY 1840 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) 1841 return ECORE_SUCCESS; 1842 1843 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1844 if (ECORE_IS_AH(p_hwfn->p_dev)) 1845 return ECORE_SUCCESS; 1846 else if (ECORE_IS_BB(p_hwfn->p_dev)) 1847 ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id); 1848 else /* E5 */ 1849 ECORE_E5_MISSING_CODE; 1850 } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 1851 if (p_hwfn->p_dev->num_hwfns > 1) { 1852 /* Activate OPTE in CMT */ 1853 u32 val; 1854 1855 val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV); 1856 val |= 0x10; 1857 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val); 1858 ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1); 1859 ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1); 1860 ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1); 1861 ecore_wr(p_hwfn, p_ptt, 1862 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1); 1863 ecore_wr(p_hwfn, p_ptt, 1864 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555); 1865 ecore_wr(p_hwfn, p_ptt, 1866 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4, 1867 0x55555555); 1868 } 1869 1870 ecore_emul_link_init(p_hwfn, p_ptt); 1871 } else { 1872 DP_INFO(p_hwfn->p_dev, "link is not being configured\n"); 1873 } 1874 #endif 1875 1876 return rc; 1877 } 1878 1879 static enum _ecore_status_t ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, 1880 struct ecore_ptt *p_ptt, 1881 struct ecore_tunnel_info *p_tunn, 1882 int hw_mode, 1883 bool b_hw_start, 1884 enum ecore_int_mode int_mode, 1885 bool allow_npar_tx_switch) 1886 { 1887 u8 rel_pf_id = p_hwfn->rel_pf_id; 1888 u32 prs_reg; 1889 enum _ecore_status_t rc = ECORE_SUCCESS; 1890 u16 ctrl; 1891 int pos; 1892 1893 if (p_hwfn->mcp_info) { 1894 struct ecore_mcp_function_info *p_info; 1895 1896 p_info = &p_hwfn->mcp_info->func_info; 1897 if (p_info->bandwidth_min) 1898 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 1899 1900 /* Update rate limit once we'll actually have a link */ 1901 p_hwfn->qm_info.pf_rl = 100000; 1902 } 1903 ecore_cxt_hw_init_pf(p_hwfn); 1904 1905 ecore_int_igu_init_rt(p_hwfn); 1906 1907 /* Set VLAN in NIG if needed */ 1908 if (hw_mode & (1 << MODE_MF_SD)) { 1909 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n"); 1910 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 1911 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 1912 p_hwfn->hw_info.ovlan); 1913 } 1914 1915 /* Enable classification by MAC if needed */ 1916 if (hw_mode & (1 << MODE_MF_SI)) { 1917 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n"); 1918 STORE_RT_REG(p_hwfn, 1919 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); 1920 } 1921 1922 /* Protocl Configuration - @@@TBD - should we set 0 otherwise?*/ 1923 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 1924 (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0); 1925 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 1926 (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 
1 : 0); 1927 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 1928 1929 /* perform debug configuration when chip is out of reset */ 1930 OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); 1931 1932 /* Cleanup chip from previous driver if such remains exist */ 1933 rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); 1934 if (rc != ECORE_SUCCESS) { 1935 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL); 1936 return rc; 1937 } 1938 1939 /* PF Init sequence */ 1940 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); 1941 if (rc) 1942 return rc; 1943 1944 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ 1945 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); 1946 if (rc) 1947 return rc; 1948 1949 /* Pure runtime initializations - directly to the HW */ 1950 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 1951 1952 /* PCI relaxed ordering causes a decrease in the performance on some 1953 * systems. Till a root cause is found, disable this attribute in the 1954 * PCI config space. 1955 */ 1956 pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP); 1957 if (!pos) { 1958 DP_NOTICE(p_hwfn, true, 1959 "Failed to find the PCI Express Capability structure in the PCI config space\n"); 1960 return ECORE_IO; 1961 } 1962 OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl); 1963 ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN; 1964 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl); 1965 1966 rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); 1967 if (rc) 1968 return rc; 1969 #if 0 1970 /* FW 8.10.5.0 requires us to configure MSG_INFO in PRS. 1971 * This would hopefully be moved to MFW. 1972 */ 1973 if (IS_MF_SI(p_hwfn)) { 1974 u8 pf_id = 0; 1975 u32 val; 1976 1977 if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) == 1978 ECORE_SUCCESS) { 1979 if (p_hwfn->rel_pf_id == pf_id) { 1980 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 1981 "PF[%d] is first ETH on engine\n", 1982 pf_id); 1983 val = 1; 1984 } 1985 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val); 1986 } 1987 } 1988 #endif 1989 if (b_hw_start) { 1990 /* enable interrupts */ 1991 rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode); 1992 if (rc != ECORE_SUCCESS) 1993 return rc; 1994 1995 /* send function start command */ 1996 rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode, 1997 allow_npar_tx_switch); 1998 if (rc) { 1999 DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n"); 2000 } else { 2001 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2002 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2003 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2004 2005 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 2006 { 2007 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, 2008 (1 << 2)); 2009 ecore_wr(p_hwfn, p_ptt, 2010 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 2011 0x100); 2012 } 2013 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2014 "PRS_REG_SEARCH registers after start PFn\n"); 2015 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); 2016 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2017 "PRS_REG_SEARCH_TCP: %x\n", prs_reg); 2018 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); 2019 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2020 "PRS_REG_SEARCH_UDP: %x\n", prs_reg); 2021 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); 2022 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2023 "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); 2024 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); 2025 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2026 "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); 2027 
prs_reg = ecore_rd(p_hwfn, p_ptt, 2028 PRS_REG_SEARCH_TCP_FIRST_FRAG); 2029 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2030 "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", 2031 prs_reg); 2032 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2033 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2034 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2035 } 2036 } 2037 return rc; 2038 } 2039 2040 enum _ecore_status_t ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn, 2041 struct ecore_ptt *p_ptt, 2042 u8 enable) 2043 { 2044 u32 delay_idx = 0, val, set_val = enable ? 1 : 0; 2045 2046 /* Change PF in PXP */ 2047 ecore_wr(p_hwfn, p_ptt, 2048 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 2049 2050 /* wait until value is set - try for 1 second every 50us */ 2051 for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 2052 val = ecore_rd(p_hwfn, p_ptt, 2053 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 2054 if (val == set_val) 2055 break; 2056 2057 OSAL_UDELAY(50); 2058 } 2059 2060 if (val != set_val) { 2061 DP_NOTICE(p_hwfn, true, 2062 "PFID_ENABLE_MASTER wasn't changed after a second\n"); 2063 return ECORE_UNKNOWN_ERROR; 2064 } 2065 2066 return ECORE_SUCCESS; 2067 } 2068 2069 static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, 2070 struct ecore_ptt *p_main_ptt) 2071 { 2072 /* Read shadow of current MFW mailbox */ 2073 ecore_mcp_read_mb(p_hwfn, p_main_ptt); 2074 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 2075 p_hwfn->mcp_info->mfw_mb_cur, 2076 p_hwfn->mcp_info->mfw_mb_length); 2077 } 2078 2079 static enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, 2080 struct ecore_hw_init_params *p_params) 2081 { 2082 if (p_params->p_tunn) { 2083 ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 2084 ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 2085 } 2086 2087 p_hwfn->b_int_enabled = 1; 2088 2089 return ECORE_SUCCESS; 2090 } 2091 2092 static void 2093 ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, 2094 struct ecore_drv_load_params *p_drv_load) 2095 { 2096 OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); 2097 2098 if (p_drv_load != OSAL_NULL) { 2099 p_load_req->drv_role = p_drv_load->is_crash_kernel ? 
2100 ECORE_DRV_ROLE_KDUMP : 2101 ECORE_DRV_ROLE_OS; 2102 p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 2103 p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 2104 p_load_req->override_force_load = 2105 p_drv_load->override_force_load; 2106 } else { 2107 p_load_req->drv_role = ECORE_DRV_ROLE_OS; 2108 p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; 2109 p_load_req->avoid_eng_reset = false; 2110 p_load_req->override_force_load = 2111 ECORE_OVERRIDE_FORCE_LOAD_NONE; 2112 } 2113 } 2114 2115 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 2116 struct ecore_hw_init_params *p_params) 2117 { 2118 struct ecore_load_req_params load_req_params; 2119 u32 load_code, param, drv_mb_param; 2120 bool b_default_mtu = true; 2121 struct ecore_hwfn *p_hwfn; 2122 enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc; 2123 int i; 2124 2125 if ((p_params->int_mode == ECORE_INT_MODE_MSI) && (p_dev->num_hwfns > 1)) { 2126 DP_NOTICE(p_dev, false, 2127 "MSI mode is not supported for CMT devices\n"); 2128 return ECORE_INVAL; 2129 } 2130 2131 if (IS_PF(p_dev)) { 2132 rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data); 2133 if (rc != ECORE_SUCCESS) 2134 return rc; 2135 } 2136 2137 for_each_hwfn(p_dev, i) { 2138 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2139 2140 /* If management didn't provide a default, set one of our own */ 2141 if (!p_hwfn->hw_info.mtu) { 2142 p_hwfn->hw_info.mtu = 1500; 2143 b_default_mtu = false; 2144 } 2145 2146 if (IS_VF(p_dev)) { 2147 ecore_vf_start(p_hwfn, p_params); 2148 continue; 2149 } 2150 2151 /* Enable DMAE in PXP */ 2152 rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); 2153 if (rc != ECORE_SUCCESS) 2154 return rc; 2155 2156 rc = ecore_calc_hw_mode(p_hwfn); 2157 if (rc != ECORE_SUCCESS) 2158 return rc; 2159 2160 ecore_fill_load_req_params(&load_req_params, 2161 p_params->p_drv_load_params); 2162 rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 2163 &load_req_params); 2164 if (rc != ECORE_SUCCESS) { 2165 DP_NOTICE(p_hwfn, true, 2166 "Failed sending a LOAD_REQ command\n"); 2167 return rc; 2168 } 2169 2170 load_code = load_req_params.load_code; 2171 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2172 "Load request was sent. Load code: 0x%x\n", 2173 load_code); 2174 2175 ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); 2176 2177 /* CQ75580: 2178 * When comming back from hiberbate state, the registers from 2179 * which shadow is read initially are not initialized. It turns 2180 * out that these registers get initialized during the call to 2181 * ecore_mcp_load_req request. So we need to reread them here 2182 * to get the proper shadow register value. 2183 * Note: This is a workaround for the missing MFW 2184 * initialization. It may be removed once the implementation 2185 * is done. 2186 */ 2187 ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 2188 2189 /* Only relevant for recovery: 2190 * Clear the indication after the LOAD_REQ command is responded 2191 * by the MFW. 
2192 */
2193 p_dev->recov_in_prog = false;
2194
2195 p_hwfn->first_on_engine = (load_code ==
2196 FW_MSG_CODE_DRV_LOAD_ENGINE);
2197
2198 if (!qm_lock_init) {
2199 OSAL_SPIN_LOCK_INIT(&qm_lock);
2200 qm_lock_init = true;
2201 }
2202
2203 switch (load_code) {
2204 case FW_MSG_CODE_DRV_LOAD_ENGINE:
2205 rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
2206 p_hwfn->hw_info.hw_mode);
2207 if (rc != ECORE_SUCCESS)
2208 break;
2209 /* Fall into */
2210 case FW_MSG_CODE_DRV_LOAD_PORT:
2211 rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
2212 p_hwfn->hw_info.hw_mode);
2213 if (rc != ECORE_SUCCESS)
2214 break;
2215 /* Fall into */
2216 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
2217 rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
2218 p_params->p_tunn,
2219 p_hwfn->hw_info.hw_mode,
2220 p_params->b_hw_start,
2221 p_params->int_mode,
2222 p_params->allow_npar_tx_switch);
2223 break;
2224 default:
2225 DP_NOTICE(p_hwfn, false,
2226 "Unexpected load code [0x%08x]", load_code);
2227 rc = ECORE_NOTIMPL;
2228 break;
2229 }
2230
2231 if (rc != ECORE_SUCCESS)
2232 DP_NOTICE(p_hwfn, true,
2233 "init phase failed for loadcode 0x%x (rc %d)\n",
2234 load_code, rc);
2235
2236 /* ACK mfw regardless of success or failure of initialization */
2237 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
2238 DRV_MSG_CODE_LOAD_DONE,
2239 0, &load_code, &param);
2240
2241 /* Check the return value of the ecore_hw_init_*() function */
2242 if (rc != ECORE_SUCCESS)
2243 return rc;
2244
2245 /* Check the return value of the LOAD_DONE command */
2246 if (mfw_rc != ECORE_SUCCESS) {
2247 DP_NOTICE(p_hwfn, true,
2248 "Failed sending a LOAD_DONE command\n");
2249 return mfw_rc;
2250 }
2251
2252 /* Check if there is a DID mismatch between nvm-cfg/efuse */
2253 if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
2254 DP_NOTICE(p_hwfn, false,
2255 "warning: device configuration is not supported on this board type.
The device may not function as expected.\n");
2256
2257 /* send DCBX attention request command */
2258 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
2259 "sending phony dcbx set command to trigger DCBx attention handling\n");
2260 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
2261 DRV_MSG_CODE_SET_DCBX,
2262 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
2263 &load_code, &param);
2264 if (mfw_rc != ECORE_SUCCESS) {
2265 DP_NOTICE(p_hwfn, true,
2266 "Failed to send DCBX attention request\n");
2267 return mfw_rc;
2268 }
2269
2270 p_hwfn->hw_init_done = true;
2271 }
2272
2273 if (IS_PF(p_dev)) {
2274 p_hwfn = ECORE_LEADING_HWFN(p_dev);
2275 drv_mb_param = STORM_FW_VERSION;
2276 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
2277 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
2278 drv_mb_param, &load_code, &param);
2279 if (rc != ECORE_SUCCESS)
2280 DP_INFO(p_hwfn, "Failed to update firmware version\n");
2281
2282 if (!b_default_mtu) {
2283 rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
2284 p_hwfn->hw_info.mtu);
2285 if (rc != ECORE_SUCCESS)
2286 DP_INFO(p_hwfn, "Failed to update default mtu\n");
2287 }
2288
2289 rc = ecore_mcp_ov_update_driver_state(p_hwfn,
2290 p_hwfn->p_main_ptt,
2291 ECORE_OV_DRIVER_STATE_DISABLED);
2292 if (rc != ECORE_SUCCESS)
2293 DP_INFO(p_hwfn, "Failed to update driver state\n");
2294
2295 rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
2296 ECORE_OV_ESWITCH_VEB);
2297 if (rc != ECORE_SUCCESS)
2298 DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
2299 }
2300
2301 return rc;
2302 }
2303
2304 #define ECORE_HW_STOP_RETRY_LIMIT (10)
2305 static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
2306 struct ecore_hwfn *p_hwfn,
2307 struct ecore_ptt *p_ptt)
2308 {
2309 int i;
2310
2311 /* close timers */
2312 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
2313 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
2314 for (i = 0;
2315 i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog;
2316 i++) {
2317 if ((!ecore_rd(p_hwfn, p_ptt,
2318 TM_REG_PF_SCAN_ACTIVE_CONN)) &&
2319 (!ecore_rd(p_hwfn, p_ptt,
2320 TM_REG_PF_SCAN_ACTIVE_TASK)))
2321 break;
2322
2323 /* Dependent on number of connection/tasks, possibly
2324 * 1ms sleep is required between polls
2325 */
2326 OSAL_MSLEEP(1);
2327 }
2328
2329 if (i < ECORE_HW_STOP_RETRY_LIMIT)
2330 return;
2331
2332 DP_NOTICE(p_hwfn, true,
2333 "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
2334 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
2335 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
2336 }
2337
2338 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
2339 {
2340 int j;
2341
2342 for_each_hwfn(p_dev, j) {
2343 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
2344 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
2345
2346 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
2347 }
2348 }
2349
2350 static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn,
2351 struct ecore_ptt *p_ptt,
2352 u32 addr, u32 expected_val)
2353 {
2354 u32 val = ecore_rd(p_hwfn, p_ptt, addr);
2355
2356 if (val != expected_val) {
2357 DP_NOTICE(p_hwfn, true,
2358 "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n",
2359 addr, val, expected_val);
2360 return ECORE_UNKNOWN_ERROR;
2361 }
2362
2363 return ECORE_SUCCESS;
2364 }
2365
2366 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
2367 {
2368 struct ecore_hwfn *p_hwfn;
2369 struct ecore_ptt *p_ptt;
2370 enum _ecore_status_t rc, rc2 = ECORE_SUCCESS;
2371 int j;
2372
2373 for_each_hwfn(p_dev, j) {
2374 p_hwfn = &p_dev->hwfns[j];
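/* For a PF, the per-hwfn teardown that follows is ordered as: send
 * UNLOAD_REQ to the MFW, stop the PF in FW, close the NIG/PRS gates,
 * stop the timers, disable IGU attentions, and finally disable the
 * doorbells and QM before acking the MFW with UNLOAD_DONE.
 */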
2375 p_ptt = p_hwfn->p_main_ptt;
2376
2377 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
2378
2379 if (IS_VF(p_dev)) {
2380 ecore_vf_pf_int_cleanup(p_hwfn);
2381 rc = ecore_vf_pf_reset(p_hwfn);
2382 if (rc != ECORE_SUCCESS) {
2383 DP_NOTICE(p_hwfn, true,
2384 "ecore_vf_pf_reset failed. rc = %d.\n",
2385 rc);
2386 rc2 = ECORE_UNKNOWN_ERROR;
2387 }
2388 continue;
2389 }
2390
2391 /* mark the hw as uninitialized... */
2392 p_hwfn->hw_init_done = false;
2393
2394 /* Send unload command to MCP */
2395 if (!p_dev->recov_in_prog) {
2396 rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
2397 if (rc != ECORE_SUCCESS) {
2398 DP_NOTICE(p_hwfn, true,
2399 "Failed sending a UNLOAD_REQ command. rc = %d.\n",
2400 rc);
2401 rc2 = ECORE_UNKNOWN_ERROR;
2402 }
2403 }
2404
2405 OSAL_DPC_SYNC(p_hwfn);
2406
2407 /* After this point no MFW attentions are expected, e.g. prevent
2408 * race between pf stop and dcbx pf update.
2409 */
2410
2411 rc = ecore_sp_pf_stop(p_hwfn);
2412 if (rc != ECORE_SUCCESS) {
2413 DP_NOTICE(p_hwfn, true,
2414 "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
2415 rc);
2416 rc2 = ECORE_UNKNOWN_ERROR;
2417 }
2418
2419 /* perform debug action after PF stop was sent */
2420 OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
2421
2422 /* close NIG to BRB gate */
2423 ecore_wr(p_hwfn, p_ptt,
2424 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
2425
2426 /* close parser */
2427 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
2428 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
2429 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
2430 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
2431 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
2432
2433 /* @@@TBD - clean transmission queues (5.b) */
2434 /* @@@TBD - clean BTB (5.c) */
2435
2436 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
2437
2438 /* @@@TBD - verify DMAE requests are done (8) */
2439
2440 /* Disable Attention Generation */
2441 ecore_int_igu_disable_int(p_hwfn, p_ptt);
2442 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
2443 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
2444 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
2445 rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
2446 if (rc != ECORE_SUCCESS) {
2447 DP_NOTICE(p_hwfn, true,
2448 "Failed to return IGU CAM to default\n");
2449 rc2 = ECORE_UNKNOWN_ERROR;
2450 }
2451
2452 /* Need to wait 1ms to guarantee SBs are cleared */
2453 OSAL_MSLEEP(1);
2454
2455 if (!p_dev->recov_in_prog) {
2456 ecore_verify_reg_val(p_hwfn, p_ptt,
2457 QM_REG_USG_CNT_PF_TX, 0);
2458 ecore_verify_reg_val(p_hwfn, p_ptt,
2459 QM_REG_USG_CNT_PF_OTHER, 0);
2460 /* @@@TBD - assert on incorrect xCFC values (10.b) */
2461 }
2462
2463 /* Disable PF in HW blocks */
2464 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
2465 ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
2466
2467 if (!p_dev->recov_in_prog) {
2468 rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
2469 if (rc != ECORE_SUCCESS) {
2470 DP_NOTICE(p_hwfn, true,
2471 "Failed sending a UNLOAD_DONE command. rc = %d.\n",
2472 rc);
2473 rc2 = ECORE_UNKNOWN_ERROR;
2474 }
2475 }
2476 } /* hwfn loop */
2477
2478 if (IS_PF(p_dev)) {
2479 p_hwfn = ECORE_LEADING_HWFN(p_dev);
2480 p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
2481
2482 /* Disable DMAE in PXP - in CMT, this should only be done for
2483 * first hw-function, and only after all transactions have
2484 * stopped for all active hw-functions.
2485 */ 2486 rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false); 2487 if (rc != ECORE_SUCCESS) { 2488 DP_NOTICE(p_hwfn, true, 2489 "ecore_change_pci_hwfn failed. rc = %d.\n", 2490 rc); 2491 rc2 = ECORE_UNKNOWN_ERROR; 2492 } 2493 } 2494 2495 return rc2; 2496 } 2497 2498 void ecore_hw_stop_fastpath(struct ecore_dev *p_dev) 2499 { 2500 int j; 2501 2502 for_each_hwfn(p_dev, j) { 2503 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2504 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2505 2506 if (IS_VF(p_dev)) { 2507 ecore_vf_pf_int_cleanup(p_hwfn); 2508 continue; 2509 } 2510 2511 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n"); 2512 2513 ecore_wr(p_hwfn, p_ptt, 2514 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2515 2516 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2517 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2518 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2519 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2520 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2521 2522 /* @@@TBD - clean transmission queues (5.b) */ 2523 /* @@@TBD - clean BTB (5.c) */ 2524 2525 /* @@@TBD - verify DMAE requests are done (8) */ 2526 2527 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 2528 /* Need to wait 1ms to guarantee SBs are cleared */ 2529 OSAL_MSLEEP(1); 2530 } 2531 } 2532 2533 void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn) 2534 { 2535 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2536 2537 if (IS_VF(p_hwfn->p_dev)) 2538 return; 2539 2540 /* If roce info is allocated it means roce is initialized and should 2541 * be enabled in searcher. 2542 */ 2543 if (p_hwfn->p_rdma_info) { 2544 if (p_hwfn->b_rdma_enabled_in_prs) 2545 ecore_wr(p_hwfn, p_ptt, 2546 p_hwfn->rdma_prs_search_reg, 0x1); 2547 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1); 2548 } 2549 2550 /* Re-open incoming traffic */ 2551 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2552 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 2553 } 2554 /* TEMP macro to be removed when wol code revisted */ 2555 #define ECORE_WOL_WR(_p_hwfn, _p_ptt, _offset, _val) ECORE_IS_BB(_p_hwfn->p_dev) ? \ 2556 ecore_wr(_p_hwfn, _p_ptt, _offset, _val) : \ 2557 ecore_mcp_wol_wr(_p_hwfn, _p_ptt, _offset, _val); 2558 2559 enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev, 2560 const bool b_enable, 2561 u32 reg_idx, 2562 u32 pattern_size, 2563 u32 crc) 2564 { 2565 struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2566 u32 reg_len = 0; 2567 u32 reg_crc = 0; 2568 2569 /* Get length and CRC register offsets */ 2570 switch (reg_idx) 2571 { 2572 case 0: 2573 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_LEN_BB : 2574 WOL_REG_ACPI_PAT_0_LEN_K2_E5; 2575 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB : 2576 WOL_REG_ACPI_PAT_0_CRC_K2_E5; 2577 break; 2578 case 1: 2579 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB : 2580 WOL_REG_ACPI_PAT_1_LEN_K2_E5; 2581 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_CRC_BB : 2582 WOL_REG_ACPI_PAT_1_CRC_K2_E5; 2583 break; 2584 case 2: 2585 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB : 2586 WOL_REG_ACPI_PAT_2_LEN_K2_E5; 2587 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_CRC_BB : 2588 WOL_REG_ACPI_PAT_2_CRC_K2_E5; 2589 break; 2590 case 3: 2591 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB : 2592 WOL_REG_ACPI_PAT_3_LEN_K2_E5; 2593 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB : 2594 WOL_REG_ACPI_PAT_3_CRC_K2_E5; 2595 break; 2596 case 4: 2597 reg_len = ECORE_IS_BB(p_dev) ? 
NIG_REG_ACPI_PAT_4_LEN_BB : 2598 WOL_REG_ACPI_PAT_4_LEN_K2_E5; 2599 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB : 2600 WOL_REG_ACPI_PAT_4_CRC_K2_E5; 2601 break; 2602 case 5: 2603 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB : 2604 WOL_REG_ACPI_PAT_5_LEN_K2_E5; 2605 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB : 2606 WOL_REG_ACPI_PAT_5_CRC_K2_E5; 2607 break; 2608 case 6: 2609 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB : 2610 WOL_REG_ACPI_PAT_6_LEN_K2_E5; 2611 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB : 2612 WOL_REG_ACPI_PAT_6_CRC_K2_E5; 2613 break; 2614 case 7: 2615 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB : 2616 WOL_REG_ACPI_PAT_7_LEN_K2_E5; 2617 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_CRC_BB : 2618 WOL_REG_ACPI_PAT_7_CRC_K2_E5; 2619 break; 2620 default: 2621 return ECORE_UNKNOWN_ERROR; 2622 } 2623 2624 /* Allign pattern size to 4 */ 2625 while (pattern_size % 4) 2626 { 2627 pattern_size++; 2628 } 2629 /* write pattern length */ 2630 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, reg_len, pattern_size); 2631 2632 /* write crc value*/ 2633 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, reg_crc, crc); 2634 2635 DP_INFO(p_dev, 2636 "ecore_set_nwuf_reg: idx[%d] reg_crc[0x%x=0x%08x] " 2637 "reg_len[0x%x=0x%x]\n", 2638 reg_idx, reg_crc, crc, reg_len, pattern_size); 2639 2640 return ECORE_SUCCESS; 2641 } 2642 2643 void ecore_wol_buffer_clear(struct ecore_dev *p_dev) 2644 { 2645 struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2646 const u32 wake_buffer_clear_offset = 2647 ECORE_IS_BB(p_dev) ? 2648 NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5; 2649 2650 DP_INFO(p_dev, 2651 "ecore_wol_buffer_clear: reset " 2652 "REG_WAKE_BUFFER_CLEAR offset=0x%08x\n", 2653 wake_buffer_clear_offset); 2654 2655 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, wake_buffer_clear_offset, 1); 2656 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, wake_buffer_clear_offset, 0); 2657 } 2658 2659 enum _ecore_status_t ecore_get_wake_info(struct ecore_dev *p_dev, 2660 struct ecore_wake_info *wake_info) 2661 { 2662 struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2663 u32 *buf = OSAL_NULL; 2664 u32 i = 0; 2665 const u32 reg_wake_buffer_offest = 2666 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB : 2667 WOL_REG_WAKE_BUFFER_K2_E5; 2668 2669 wake_info->wk_info = ecore_rd(hwfn, hwfn->p_main_ptt, 2670 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB : 2671 WOL_REG_WAKE_INFO_K2_E5); 2672 wake_info->wk_details = ecore_rd(hwfn, hwfn->p_main_ptt, 2673 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_DETAILS_BB : 2674 WOL_REG_WAKE_DETAILS_K2_E5); 2675 wake_info->wk_pkt_len = ecore_rd(hwfn, hwfn->p_main_ptt, 2676 ECORE_IS_BB(p_dev) ? 
NIG_REG_WAKE_PKT_LEN_BB : 2677 WOL_REG_WAKE_PKT_LEN_K2_E5); 2678 2679 DP_INFO(p_dev, 2680 "ecore_get_wake_info: REG_WAKE_INFO=0x%08x " 2681 "REG_WAKE_DETAILS=0x%08x " 2682 "REG_WAKE_PKT_LEN=0x%08x\n", 2683 wake_info->wk_info, 2684 wake_info->wk_details, 2685 wake_info->wk_pkt_len); 2686 2687 buf = (u32 *)wake_info->wk_buffer; 2688 2689 for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++) 2690 { 2691 if ((i*sizeof(u32)) >= sizeof(wake_info->wk_buffer)) 2692 { 2693 DP_INFO(p_dev, 2694 "ecore_get_wake_info: i index to 0 high=%d\n", 2695 i); 2696 break; 2697 } 2698 buf[i] = ecore_rd(hwfn, hwfn->p_main_ptt, 2699 reg_wake_buffer_offest + (i * sizeof(u32))); 2700 DP_INFO(p_dev, "ecore_get_wake_info: wk_buffer[%u]: 0x%08x\n", 2701 i, buf[i]); 2702 } 2703 2704 ecore_wol_buffer_clear(p_dev); 2705 2706 return ECORE_SUCCESS; 2707 } 2708 2709 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 2710 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn) 2711 { 2712 ecore_ptt_pool_free(p_hwfn); 2713 OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info); 2714 p_hwfn->hw_info.p_igu_info = OSAL_NULL; 2715 } 2716 2717 /* Setup bar access */ 2718 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn) 2719 { 2720 /* clear indirect access */ 2721 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) { 2722 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2723 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0); 2724 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2725 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0); 2726 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2727 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0); 2728 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2729 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0); 2730 } else { 2731 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2732 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); 2733 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2734 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); 2735 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2736 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); 2737 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2738 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); 2739 } 2740 2741 /* Clean Previous errors if such exist */ 2742 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2743 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 2744 1 << p_hwfn->abs_pf_id); 2745 2746 /* enable internal target-read */ 2747 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 2748 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 2749 } 2750 2751 static void get_function_id(struct ecore_hwfn *p_hwfn) 2752 { 2753 /* ME Register */ 2754 p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, 2755 PXP_PF_ME_OPAQUE_ADDR); 2756 2757 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 2758 2759 /* Bits 16-19 from the ME registers are the pf_num */ 2760 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; 2761 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2762 PXP_CONCRETE_FID_PFID); 2763 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2764 PXP_CONCRETE_FID_PORT); 2765 2766 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 2767 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", 2768 p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); 2769 } 2770 2771 void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) 2772 { 2773 u32 *feat_num = p_hwfn->hw_info.feat_num; 2774 struct ecore_sb_cnt_info sb_cnt; 2775 u32 non_l2_sbs = 0; 2776 2777 OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt)); 2778 ecore_int_get_num_sbs(p_hwfn, &sb_cnt); 2779 2780 #ifdef CONFIG_ECORE_ROCE 2781 /* Roce CNQ require each: 1 status block. 
1 CNQ, we divide the 2782 * status blocks equally between L2 / RoCE but with consideration as 2783 * to how many l2 queues / cnqs we have 2784 */ 2785 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 2786 u32 max_cnqs; 2787 2788 feat_num[ECORE_RDMA_CNQ] = 2789 OSAL_MIN_T(u32, 2790 sb_cnt.cnt / 2, 2791 RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM)); 2792 2793 /* Upper layer might require less */ 2794 max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs; 2795 if (max_cnqs) { 2796 if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE) 2797 max_cnqs = 0; 2798 feat_num[ECORE_RDMA_CNQ] = 2799 OSAL_MIN_T(u32, 2800 feat_num[ECORE_RDMA_CNQ], 2801 max_cnqs); 2802 } 2803 2804 non_l2_sbs = feat_num[ECORE_RDMA_CNQ]; 2805 } 2806 #endif 2807 2808 /* L2 Queues require each: 1 status block. 1 L2 queue */ 2809 if (ECORE_IS_L2_PERSONALITY(p_hwfn)) { 2810 /* Start by allocating VF queues, then PF's */ 2811 feat_num[ECORE_VF_L2_QUE] = 2812 OSAL_MIN_T(u32, 2813 RESC_NUM(p_hwfn, ECORE_L2_QUEUE), 2814 sb_cnt.iov_cnt); 2815 feat_num[ECORE_PF_L2_QUE] = 2816 OSAL_MIN_T(u32, 2817 sb_cnt.cnt - non_l2_sbs, 2818 RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - 2819 FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE)); 2820 } 2821 2822 if (ECORE_IS_FCOE_PERSONALITY(p_hwfn)) 2823 feat_num[ECORE_FCOE_CQ] = 2824 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 2825 ECORE_CMDQS_CQS)); 2826 2827 if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) 2828 feat_num[ECORE_ISCSI_CQ] = 2829 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 2830 ECORE_CMDQS_CQS)); 2831 2832 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 2833 "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n", 2834 (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), 2835 (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE), 2836 (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ), 2837 (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ), 2838 (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ), 2839 (int)sb_cnt.cnt); 2840 } 2841 2842 const char *ecore_hw_get_resc_name(enum ecore_resources res_id) 2843 { 2844 switch (res_id) { 2845 case ECORE_L2_QUEUE: 2846 return "L2_QUEUE"; 2847 case ECORE_VPORT: 2848 return "VPORT"; 2849 case ECORE_RSS_ENG: 2850 return "RSS_ENG"; 2851 case ECORE_PQ: 2852 return "PQ"; 2853 case ECORE_RL: 2854 return "RL"; 2855 case ECORE_MAC: 2856 return "MAC"; 2857 case ECORE_VLAN: 2858 return "VLAN"; 2859 case ECORE_RDMA_CNQ_RAM: 2860 return "RDMA_CNQ_RAM"; 2861 case ECORE_ILT: 2862 return "ILT"; 2863 case ECORE_LL2_QUEUE: 2864 return "LL2_QUEUE"; 2865 case ECORE_CMDQS_CQS: 2866 return "CMDQS_CQS"; 2867 case ECORE_RDMA_STATS_QUEUE: 2868 return "RDMA_STATS_QUEUE"; 2869 case ECORE_BDQ: 2870 return "BDQ"; 2871 case ECORE_SB: 2872 return "SB"; 2873 default: 2874 return "UNKNOWN_RESOURCE"; 2875 } 2876 } 2877 2878 static enum _ecore_status_t 2879 __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 2880 enum ecore_resources res_id, u32 resc_max_val, 2881 u32 *p_mcp_resp) 2882 { 2883 enum _ecore_status_t rc; 2884 2885 rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id, 2886 resc_max_val, p_mcp_resp); 2887 if (rc != ECORE_SUCCESS) { 2888 DP_NOTICE(p_hwfn, true, 2889 "MFW response failure for a max value setting of resource %d [%s]\n", 2890 res_id, ecore_hw_get_resc_name(res_id)); 2891 return rc; 2892 } 2893 2894 if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 2895 DP_INFO(p_hwfn, 2896 "Failed to set the max value of resource %d [%s]. 
mcp_resp = 0x%08x.\n", 2897 res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp); 2898 2899 return ECORE_SUCCESS; 2900 } 2901 2902 static enum _ecore_status_t 2903 ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn) 2904 { 2905 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 2906 u32 resc_max_val, mcp_resp; 2907 u8 res_id; 2908 enum _ecore_status_t rc; 2909 2910 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 2911 switch (res_id) { 2912 case ECORE_LL2_QUEUE: 2913 resc_max_val = MAX_NUM_LL2_RX_QUEUES; 2914 break; 2915 case ECORE_RDMA_CNQ_RAM: 2916 /* No need for a case for ECORE_CMDQS_CQS since 2917 * CNQ/CMDQS are the same resource. 2918 */ 2919 resc_max_val = NUM_OF_GLOBAL_QUEUES; 2920 break; 2921 case ECORE_RDMA_STATS_QUEUE: 2922 resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 2923 : RDMA_NUM_STATISTIC_COUNTERS_BB; 2924 break; 2925 case ECORE_BDQ: 2926 resc_max_val = BDQ_NUM_RESOURCES; 2927 break; 2928 default: 2929 continue; 2930 } 2931 2932 rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id, 2933 resc_max_val, &mcp_resp); 2934 if (rc != ECORE_SUCCESS) 2935 return rc; 2936 2937 /* There's no point to continue to the next resource if the 2938 * command is not supported by the MFW. 2939 * We do continue if the command is supported but the resource 2940 * is unknown to the MFW. Such a resource will be later 2941 * configured with the default allocation values. 2942 */ 2943 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 2944 return ECORE_NOTIMPL; 2945 } 2946 2947 return ECORE_SUCCESS; 2948 } 2949 2950 static 2951 enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn, 2952 enum ecore_resources res_id, 2953 u32 *p_resc_num, u32 *p_resc_start) 2954 { 2955 u8 num_funcs = p_hwfn->num_funcs_on_engine; 2956 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 2957 2958 switch (res_id) { 2959 case ECORE_L2_QUEUE: 2960 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 : 2961 MAX_NUM_L2_QUEUES_BB) / num_funcs; 2962 break; 2963 case ECORE_VPORT: 2964 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 2965 MAX_NUM_VPORTS_BB) / num_funcs; 2966 break; 2967 case ECORE_RSS_ENG: 2968 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : 2969 ETH_RSS_ENGINE_NUM_BB) / num_funcs; 2970 break; 2971 case ECORE_PQ: 2972 *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 : 2973 MAX_QM_TX_QUEUES_BB) / num_funcs; 2974 *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ 2975 break; 2976 case ECORE_RL: 2977 *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; 2978 break; 2979 case ECORE_MAC: 2980 case ECORE_VLAN: 2981 /* Each VFC resource can accommodate both a MAC and a VLAN */ 2982 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 2983 break; 2984 case ECORE_ILT: 2985 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : 2986 PXP_NUM_ILT_RECORDS_BB) / num_funcs; 2987 break; 2988 case ECORE_LL2_QUEUE: 2989 *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 2990 break; 2991 case ECORE_RDMA_CNQ_RAM: 2992 case ECORE_CMDQS_CQS: 2993 /* CNQ/CMDQS are the same resource */ 2994 *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; 2995 break; 2996 case ECORE_RDMA_STATS_QUEUE: 2997 *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 : 2998 RDMA_NUM_STATISTIC_COUNTERS_BB) / 2999 num_funcs; 3000 break; 3001 case ECORE_BDQ: 3002 if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI && 3003 p_hwfn->hw_info.personality != ECORE_PCI_FCOE) 3004 *p_resc_num = 0; 3005 else 3006 *p_resc_num = 1; 3007 break; 3008 case ECORE_SB: 3009 /* Since we want its value to reflect whether MFW supports 3010 * the new scheme, have a default of 0. 
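 * The actual number of SBs is learnt from the MFW later on, when
 * ecore_hw_get_resc() calls ecore_int_igu_reset_cam().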
3011 */ 3012 *p_resc_num = 0; 3013 break; 3014 default: 3015 return ECORE_INVAL; 3016 } 3017 3018 switch (res_id) { 3019 case ECORE_BDQ: 3020 if (!*p_resc_num) 3021 *p_resc_start = 0; 3022 else if (p_hwfn->p_dev->num_ports_in_engines == 4) 3023 *p_resc_start = p_hwfn->port_id; 3024 else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) 3025 *p_resc_start = p_hwfn->port_id; 3026 else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 3027 *p_resc_start = p_hwfn->port_id + 2; 3028 break; 3029 default: 3030 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 3031 break; 3032 } 3033 3034 return ECORE_SUCCESS; 3035 } 3036 3037 static enum _ecore_status_t 3038 __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id, 3039 bool drv_resc_alloc) 3040 { 3041 u32 dflt_resc_num = 0, dflt_resc_start = 0; 3042 u32 mcp_resp, *p_resc_num, *p_resc_start; 3043 enum _ecore_status_t rc; 3044 3045 p_resc_num = &RESC_NUM(p_hwfn, res_id); 3046 p_resc_start = &RESC_START(p_hwfn, res_id); 3047 3048 rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 3049 &dflt_resc_start); 3050 if (rc != ECORE_SUCCESS) { 3051 DP_ERR(p_hwfn, 3052 "Failed to get default amount for resource %d [%s]\n", 3053 res_id, ecore_hw_get_resc_name(res_id)); 3054 return rc; 3055 } 3056 3057 #ifndef ASIC_ONLY 3058 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3059 *p_resc_num = dflt_resc_num; 3060 *p_resc_start = dflt_resc_start; 3061 goto out; 3062 } 3063 #endif 3064 3065 rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 3066 &mcp_resp, p_resc_num, p_resc_start); 3067 if (rc != ECORE_SUCCESS) { 3068 DP_NOTICE(p_hwfn, true, 3069 "MFW response failure for an allocation request for resource %d [%s]\n", 3070 res_id, ecore_hw_get_resc_name(res_id)); 3071 return rc; 3072 } 3073 3074 /* Default driver values are applied in the following cases: 3075 * - The resource allocation MB command is not supported by the MFW 3076 * - There is an internal error in the MFW while processing the request 3077 * - The resource ID is unknown to the MFW 3078 */ 3079 if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3080 DP_INFO(p_hwfn, 3081 "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n", 3082 res_id, ecore_hw_get_resc_name(res_id), mcp_resp, 3083 dflt_resc_num, dflt_resc_start); 3084 *p_resc_num = dflt_resc_num; 3085 *p_resc_start = dflt_resc_start; 3086 goto out; 3087 } 3088 3089 if ((*p_resc_num != dflt_resc_num || 3090 *p_resc_start != dflt_resc_start) && 3091 res_id != ECORE_SB) { 3092 DP_INFO(p_hwfn, 3093 "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n", 3094 res_id, ecore_hw_get_resc_name(res_id), *p_resc_num, 3095 *p_resc_start, dflt_resc_num, dflt_resc_start, 3096 drv_resc_alloc ? " - Applying default values" : ""); 3097 if (drv_resc_alloc) { 3098 *p_resc_num = dflt_resc_num; 3099 *p_resc_start = dflt_resc_start; 3100 } 3101 } 3102 out: 3103 /* PQs have to divide by 8 [that's the HW granularity]. 3104 * Reduce number so it would fit. 
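 * E.g., a (hypothetical) MFW answer of Number 26 / Start 12 would be
 * trimmed by the masking below to Number 24 / Start 8.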
3105 */ 3106 if ((res_id == ECORE_PQ) && 3107 ((*p_resc_num % 8) || (*p_resc_start % 8))) { 3108 DP_INFO(p_hwfn, 3109 "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", 3110 *p_resc_num, (*p_resc_num) & ~0x7, 3111 *p_resc_start, (*p_resc_start) & ~0x7); 3112 *p_resc_num &= ~0x7; 3113 *p_resc_start &= ~0x7; 3114 } 3115 3116 return ECORE_SUCCESS; 3117 } 3118 3119 static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, 3120 bool drv_resc_alloc) 3121 { 3122 enum _ecore_status_t rc; 3123 u8 res_id; 3124 3125 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 3126 rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc); 3127 if (rc != ECORE_SUCCESS) 3128 return rc; 3129 } 3130 3131 return ECORE_SUCCESS; 3132 } 3133 3134 #define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10 3135 #define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */ 3136 3137 static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn, 3138 bool drv_resc_alloc) 3139 { 3140 struct ecore_resc_unlock_params resc_unlock_params; 3141 struct ecore_resc_lock_params resc_lock_params; 3142 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3143 u8 res_id; 3144 enum _ecore_status_t rc; 3145 #ifndef ASIC_ONLY 3146 u32 *resc_start = p_hwfn->hw_info.resc_start; 3147 u32 *resc_num = p_hwfn->hw_info.resc_num; 3148 /* For AH, an equal share of the ILT lines between the maximal number of 3149 * PFs is not enough for RoCE. This would be solved by the future 3150 * resource allocation scheme, but isn't currently present for 3151 * FPGA/emulation. For now we keep a number that is sufficient for RoCE 3152 * to work - the BB number of ILT lines divided by its max PFs number. 3153 */ 3154 u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB; 3155 #endif 3156 3157 /* Setting the max values of the soft resources and the following 3158 * resources allocation queries should be atomic. Since several PFs can 3159 * run in parallel - a resource lock is needed. 3160 * If either the resource lock or resource set value commands are not 3161 * supported - skip the the max values setting, release the lock if 3162 * needed, and proceed to the queries. Other failures, including a 3163 * failure to acquire the lock, will cause this function to fail. 3164 * Old drivers that don't acquire the lock can run in parallel, and 3165 * their allocation values won't be affected by the updated max values. 
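 * With the lock parameters set below (ECORE_RESC_ALLOC_LOCK_RETRY_CNT
 * retries, ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US apart, sleeping between
 * retries), acquisition is attempted for roughly 10 x 10 msec = 100 msec
 * before giving up.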
3166 */ 3167 OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params)); 3168 resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC; 3169 resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT; 3170 resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US; 3171 resc_lock_params.sleep_b4_retry = true; 3172 OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params)); 3173 resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC; 3174 3175 rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params); 3176 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3177 return rc; 3178 } else if (rc == ECORE_NOTIMPL) { 3179 DP_INFO(p_hwfn, 3180 "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 3181 } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) { 3182 DP_NOTICE(p_hwfn, false, 3183 "Failed to acquire the resource lock for the resource allocation commands\n"); 3184 return ECORE_BUSY; 3185 } else { 3186 rc = ecore_hw_set_soft_resc_size(p_hwfn); 3187 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3188 DP_NOTICE(p_hwfn, false, 3189 "Failed to set the max values of the soft resources\n"); 3190 goto unlock_and_exit; 3191 } else if (rc == ECORE_NOTIMPL) { 3192 DP_INFO(p_hwfn, 3193 "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 3194 rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3195 &resc_unlock_params); 3196 if (rc != ECORE_SUCCESS) 3197 DP_INFO(p_hwfn, 3198 "Failed to release the resource lock for the resource allocation commands\n"); 3199 } 3200 } 3201 3202 rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc); 3203 if (rc != ECORE_SUCCESS) 3204 goto unlock_and_exit; 3205 3206 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 3207 rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3208 &resc_unlock_params); 3209 if (rc != ECORE_SUCCESS) 3210 DP_INFO(p_hwfn, 3211 "Failed to release the resource lock for the resource allocation commands\n"); 3212 } 3213 3214 #ifndef ASIC_ONLY 3215 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3216 /* Reduced build contains less PQs */ 3217 if (!(p_hwfn->p_dev->b_is_emul_full)) { 3218 resc_num[ECORE_PQ] = 32; 3219 resc_start[ECORE_PQ] = resc_num[ECORE_PQ] * 3220 p_hwfn->enabled_func_idx; 3221 } 3222 3223 /* For AH emulation, since we have a possible maximal number of 3224 * 16 enabled PFs, in case there are not enough ILT lines - 3225 * allocate only first PF as RoCE and have all the other ETH 3226 * only with less ILT lines. 
3227 */ 3228 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full) 3229 resc_num[ECORE_ILT] = OSAL_MAX_T(u32, 3230 resc_num[ECORE_ILT], 3231 roce_min_ilt_lines); 3232 } 3233 3234 /* Correct the common ILT calculation if PF0 has more */ 3235 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && 3236 p_hwfn->p_dev->b_is_emul_full && 3237 p_hwfn->rel_pf_id && 3238 resc_num[ECORE_ILT] < roce_min_ilt_lines) 3239 resc_start[ECORE_ILT] += roce_min_ilt_lines - 3240 resc_num[ECORE_ILT]; 3241 #endif 3242 3243 /* Sanity for ILT */ 3244 if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 3245 (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 3246 DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n", 3247 RESC_START(p_hwfn, ECORE_ILT), 3248 RESC_END(p_hwfn, ECORE_ILT) - 1); 3249 return ECORE_INVAL; 3250 } 3251 3252 /* This will also learn the number of SBs from MFW */ 3253 if (ecore_int_igu_reset_cam(p_hwfn, p_hwfn->p_main_ptt)) 3254 return ECORE_INVAL; 3255 3256 ecore_hw_set_feat(p_hwfn); 3257 3258 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3259 "The numbers for each resource are:\n"); 3260 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) 3261 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n", 3262 ecore_hw_get_resc_name(res_id), 3263 RESC_NUM(p_hwfn, res_id), 3264 RESC_START(p_hwfn, res_id)); 3265 3266 return ECORE_SUCCESS; 3267 3268 unlock_and_exit: 3269 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) 3270 ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, 3271 &resc_unlock_params); 3272 return rc; 3273 } 3274 3275 static enum _ecore_status_t 3276 ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, 3277 struct ecore_ptt *p_ptt, 3278 struct ecore_hw_prepare_params *p_params) 3279 { 3280 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode; 3281 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; 3282 struct ecore_mcp_link_capabilities *p_caps; 3283 struct ecore_mcp_link_params *link; 3284 enum _ecore_status_t rc; 3285 3286 /* Read global nvm_cfg address */ 3287 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 3288 3289 /* Verify MCP has initialized it */ 3290 if (!nvm_cfg_addr) { 3291 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); 3292 if (p_params->b_relaxed_probe) 3293 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM; 3294 return ECORE_INVAL; 3295 } 3296 3297 /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ 3298 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 3299 3300 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3301 OFFSETOF(struct nvm_cfg1, glob) + 3302 OFFSETOF(struct nvm_cfg1_glob, core_cfg); 3303 3304 core_cfg = ecore_rd(p_hwfn, p_ptt, addr); 3305 3306 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 3307 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 3308 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: 3309 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G; 3310 break; 3311 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 3312 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G; 3313 break; 3314 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: 3315 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G; 3316 break; 3317 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: 3318 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F; 3319 break; 3320 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: 3321 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E; 3322 break; 3323 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: 3324 
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G; 3325 break; 3326 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: 3327 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G; 3328 break; 3329 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 3330 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G; 3331 break; 3332 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: 3333 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G; 3334 break; 3335 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 3336 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G; 3337 break; 3338 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: 3339 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G; 3340 break; 3341 default: 3342 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n", 3343 core_cfg); 3344 break; 3345 } 3346 3347 /* Read DCBX configuration */ 3348 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3349 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3350 dcbx_mode = ecore_rd(p_hwfn, p_ptt, 3351 port_cfg_addr + 3352 OFFSETOF(struct nvm_cfg1_port, generic_cont0)); 3353 dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK) 3354 >> NVM_CFG1_PORT_DCBX_MODE_OFFSET; 3355 switch (dcbx_mode) { 3356 case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC: 3357 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC; 3358 break; 3359 case NVM_CFG1_PORT_DCBX_MODE_CEE: 3360 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE; 3361 break; 3362 case NVM_CFG1_PORT_DCBX_MODE_IEEE: 3363 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE; 3364 break; 3365 default: 3366 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED; 3367 } 3368 3369 /* Read default link configuration */ 3370 link = &p_hwfn->mcp_info->link_input; 3371 p_caps = &p_hwfn->mcp_info->link_capabilities; 3372 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3373 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3374 link_temp = ecore_rd(p_hwfn, p_ptt, 3375 port_cfg_addr + 3376 OFFSETOF(struct nvm_cfg1_port, speed_cap_mask)); 3377 link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; 3378 link->speed.advertised_speeds = link_temp; 3379 p_caps->speed_capabilities = link->speed.advertised_speeds; 3380 3381 link_temp = ecore_rd(p_hwfn, p_ptt, 3382 port_cfg_addr + 3383 OFFSETOF(struct nvm_cfg1_port, link_settings)); 3384 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> 3385 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { 3386 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: 3387 link->speed.autoneg = true; 3388 break; 3389 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: 3390 link->speed.forced_speed = 1000; 3391 break; 3392 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: 3393 link->speed.forced_speed = 10000; 3394 break; 3395 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: 3396 link->speed.forced_speed = 25000; 3397 break; 3398 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: 3399 link->speed.forced_speed = 40000; 3400 break; 3401 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 3402 link->speed.forced_speed = 50000; 3403 break; 3404 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: 3405 link->speed.forced_speed = 100000; 3406 break; 3407 default: 3408 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", 3409 link_temp); 3410 } 3411 3412 p_caps->default_speed = link->speed.forced_speed; 3413 p_caps->default_speed_autoneg = link->speed.autoneg; 3414 3415 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; 3416 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; 3417 link->pause.autoneg = !!(link_temp & 3418 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); 3419 link->pause.forced_rx = !!(link_temp & 3420 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); 3421 
link->pause.forced_tx = !!(link_temp & 3422 NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); 3423 link->loopback_mode = 0; 3424 3425 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { 3426 link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr + 3427 OFFSETOF(struct nvm_cfg1_port, ext_phy)); 3428 link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; 3429 link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; 3430 p_caps->default_eee = ECORE_MCP_EEE_ENABLED; 3431 link->eee.enable = true; 3432 switch (link_temp) { 3433 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: 3434 p_caps->default_eee = ECORE_MCP_EEE_DISABLED; 3435 link->eee.enable = false; 3436 break; 3437 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: 3438 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; 3439 break; 3440 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: 3441 p_caps->eee_lpi_timer = 3442 EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; 3443 break; 3444 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: 3445 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; 3446 break; 3447 } 3448 link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; 3449 link->eee.tx_lpi_enable = link->eee.enable; 3450 if (link->eee.enable) 3451 link->eee.adv_caps = ECORE_EEE_1G_ADV | 3452 ECORE_EEE_10G_ADV; 3453 } else { 3454 p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED; 3455 } 3456 3457 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 3458 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", 3459 link->speed.forced_speed, link->speed.advertised_speeds, 3460 link->speed.autoneg, link->pause.autoneg, 3461 p_caps->default_eee, p_caps->eee_lpi_timer); 3462 3463 /* Read Multi-function information from shmem */ 3464 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3465 OFFSETOF(struct nvm_cfg1, glob) + 3466 OFFSETOF(struct nvm_cfg1_glob, generic_cont0); 3467 3468 generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr); 3469 3470 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 3471 NVM_CFG1_GLOB_MF_MODE_OFFSET; 3472 3473 switch (mf_mode) { 3474 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 3475 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; 3476 break; 3477 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 3478 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR; 3479 break; 3480 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 3481 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT; 3482 break; 3483 } 3484 DP_INFO(p_hwfn, "Multi function mode is %08x\n", 3485 p_hwfn->p_dev->mf_mode); 3486 3487 /* Read Multi-function information from shmem */ 3488 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3489 OFFSETOF(struct nvm_cfg1, glob) + 3490 OFFSETOF(struct nvm_cfg1_glob, device_capabilities); 3491 3492 device_capabilities = ecore_rd(p_hwfn, p_ptt, addr); 3493 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 3494 OSAL_SET_BIT(ECORE_DEV_CAP_ETH, 3495 &p_hwfn->hw_info.device_capabilities); 3496 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) 3497 OSAL_SET_BIT(ECORE_DEV_CAP_FCOE, 3498 &p_hwfn->hw_info.device_capabilities); 3499 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) 3500 OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI, 3501 &p_hwfn->hw_info.device_capabilities); 3502 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) 3503 OSAL_SET_BIT(ECORE_DEV_CAP_ROCE, 3504 &p_hwfn->hw_info.device_capabilities); 3505 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP) 3506 OSAL_SET_BIT(ECORE_DEV_CAP_IWARP, 3507 &p_hwfn->hw_info.device_capabilities); 3508 3509 rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 
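/* Under a relaxed probe, a failure to read the shmem function info is
 * recorded in p_relaxed_res rather than failing the probe.
 */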
3510 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 3511 rc = ECORE_SUCCESS; 3512 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 3513 } 3514 3515 return rc; 3516 } 3517 3518 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn, 3519 struct ecore_ptt *p_ptt) 3520 { 3521 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 3522 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 3523 struct ecore_dev *p_dev = p_hwfn->p_dev; 3524 3525 num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; 3526 3527 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 3528 * in the other bits are selected. 3529 * Bits 1-15 are for functions 1-15, respectively, and their value is 3530 * '0' only for enabled functions (function 0 always exists and 3531 * is enabled). 3532 * In case of CMT in BB, only the "even" functions are enabled, and thus 3533 * the number of functions for both hwfns is learnt from the same bits. 3534 */ 3535 if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) { 3536 reg_function_hide = ecore_rd(p_hwfn, p_ptt, 3537 MISCS_REG_FUNCTION_HIDE_BB_K2); 3538 } else { /* E5 */ 3539 reg_function_hide = 0; 3540 ECORE_E5_MISSING_CODE; 3541 } 3542 3543 if (reg_function_hide & 0x1) { 3544 if (ECORE_IS_BB(p_dev)) { 3545 if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) { 3546 num_funcs = 0; 3547 eng_mask = 0xaaaa; 3548 } else { 3549 num_funcs = 1; 3550 eng_mask = 0x5554; 3551 } 3552 } else { 3553 num_funcs = 1; 3554 eng_mask = 0xfffe; 3555 } 3556 3557 /* Get the number of the enabled functions on the engine */ 3558 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; 3559 while (tmp) { 3560 if (tmp & 0x1) 3561 num_funcs++; 3562 tmp >>= 0x1; 3563 } 3564 3565 /* Get the PF index within the enabled functions */ 3566 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; 3567 tmp = reg_function_hide & eng_mask & low_pfs_mask; 3568 while (tmp) { 3569 if (tmp & 0x1) 3570 enabled_func_idx--; 3571 tmp >>= 0x1; 3572 } 3573 } 3574 3575 p_hwfn->num_funcs_on_engine = num_funcs; 3576 p_hwfn->enabled_func_idx = enabled_func_idx; 3577 3578 #ifndef ASIC_ONLY 3579 if (CHIP_REV_IS_FPGA(p_dev)) { 3580 DP_NOTICE(p_hwfn, false, 3581 "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n"); 3582 p_hwfn->num_funcs_on_engine = 4; 3583 } 3584 #endif 3585 3586 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3587 "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", 3588 p_hwfn->rel_pf_id, p_hwfn->abs_pf_id, 3589 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 3590 } 3591 3592 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn, 3593 struct ecore_ptt *p_ptt) 3594 { 3595 u32 port_mode; 3596 3597 #ifndef ASIC_ONLY 3598 /* Read the port mode */ 3599 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 3600 port_mode = 4; 3601 else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && 3602 (p_hwfn->p_dev->num_hwfns > 1)) 3603 /* In CMT on emulation, assume 1 port */ 3604 port_mode = 1; 3605 else 3606 #endif 3607 port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); 3608 3609 if (port_mode < 3) { 3610 p_hwfn->p_dev->num_ports_in_engines = 1; 3611 } else if (port_mode <= 5) { 3612 p_hwfn->p_dev->num_ports_in_engines = 2; 3613 } else { 3614 DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n", 3615 port_mode); 3616 3617 /* Default num_ports_in_engines to something */ 3618 p_hwfn->p_dev->num_ports_in_engines = 1; 3619 } 3620 } 3621 3622 static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn, 3623 struct ecore_ptt
*p_ptt) 3624 { 3625 u32 port; 3626 int i; 3627 3628 p_hwfn->p_dev->num_ports_in_engines = 0; 3629 3630 #ifndef ASIC_ONLY 3631 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 3632 port = ecore_rd(p_hwfn, p_ptt, 3633 MISCS_REG_ECO_RESERVED); 3634 switch ((port & 0xf000) >> 12) { 3635 case 1: 3636 p_hwfn->p_dev->num_ports_in_engines = 1; 3637 break; 3638 case 3: 3639 p_hwfn->p_dev->num_ports_in_engines = 2; 3640 break; 3641 case 0xf: 3642 p_hwfn->p_dev->num_ports_in_engines = 4; 3643 break; 3644 default: 3645 DP_NOTICE(p_hwfn, false, 3646 "Unknown port mode in ECO_RESERVED %08x\n", 3647 port); 3648 } 3649 } else 3650 #endif 3651 for (i = 0; i < MAX_NUM_PORTS_K2; i++) { 3652 port = ecore_rd(p_hwfn, p_ptt, 3653 CNIG_REG_NIG_PORT0_CONF_K2_E5 + (i * 4)); 3654 if (port & 1) 3655 p_hwfn->p_dev->num_ports_in_engines++; 3656 } 3657 } 3658 3659 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn, 3660 struct ecore_ptt *p_ptt) 3661 { 3662 if (ECORE_IS_BB(p_hwfn->p_dev)) 3663 ecore_hw_info_port_num_bb(p_hwfn, p_ptt); 3664 else 3665 ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt); 3666 } 3667 3668 static enum _ecore_status_t 3669 ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3670 enum ecore_pci_personality personality, 3671 struct ecore_hw_prepare_params *p_params) 3672 { 3673 bool drv_resc_alloc = p_params->drv_resc_alloc; 3674 enum _ecore_status_t rc; 3675 3676 /* Since all information is common, only the first hwfn should do this */ 3677 if (IS_LEAD_HWFN(p_hwfn)) { 3678 rc = ecore_iov_hw_info(p_hwfn); 3679 if (rc != ECORE_SUCCESS) { 3680 if (p_params->b_relaxed_probe) 3681 p_params->p_relaxed_res = 3682 ECORE_HW_PREPARE_BAD_IOV; 3683 else 3684 return rc; 3685 } 3686 } 3687 3688 /* TODO In get_hw_info, amongst others: 3689 * Get MCP FW revision and determine according to it the supported 3690 * features (e.g. DCB) 3691 * Get boot mode 3692 * ecore_get_pcie_width_speed, WOL capability.
3693 * Number of global CQ-s (for storage) 3694 */ 3695 ecore_hw_info_port_num(p_hwfn, p_ptt); 3696 3697 ecore_mcp_get_capabilities(p_hwfn, p_ptt); 3698 3699 #ifndef ASIC_ONLY 3700 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) { 3701 #endif 3702 rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params); 3703 if (rc != ECORE_SUCCESS) 3704 return rc; 3705 #ifndef ASIC_ONLY 3706 } 3707 #endif 3708 3709 rc = ecore_int_igu_read_cam(p_hwfn, p_ptt); 3710 if (rc != ECORE_SUCCESS) { 3711 if (p_params->b_relaxed_probe) 3712 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU; 3713 else 3714 return rc; 3715 } 3716 3717 #ifndef ASIC_ONLY 3718 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) { 3719 #endif 3720 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, 3721 p_hwfn->mcp_info->func_info.mac, ETH_ALEN); 3722 #ifndef ASIC_ONLY 3723 } else { 3724 static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6}; 3725 3726 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN); 3727 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id; 3728 } 3729 #endif 3730 3731 if (ecore_mcp_is_init(p_hwfn)) { 3732 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET) 3733 p_hwfn->hw_info.ovlan = 3734 p_hwfn->mcp_info->func_info.ovlan; 3735 3736 ecore_mcp_cmd_port_init(p_hwfn, p_ptt); 3737 } 3738 3739 if (personality != ECORE_PCI_DEFAULT) { 3740 p_hwfn->hw_info.personality = personality; 3741 } else if (ecore_mcp_is_init(p_hwfn)) { 3742 enum ecore_pci_personality protocol; 3743 3744 protocol = p_hwfn->mcp_info->func_info.protocol; 3745 p_hwfn->hw_info.personality = protocol; 3746 } 3747 3748 #ifndef ASIC_ONLY 3749 /* To overcome ILT lack for emulation, at least until we'll have 3750 * a definite answer from the system about it, allow only PF0 to be RoCE. 3751 */ 3752 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) { 3753 if (!p_hwfn->rel_pf_id) 3754 p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE; 3755 else 3756 p_hwfn->hw_info.personality = ECORE_PCI_ETH; 3757 } 3758 #endif 3759 3760 /* although in BB some constellations may support more than 4 tcs, 3761 * that can result in performance penalty in some cases. 4 3762 * represents a good tradeoff between performance and flexibility. 3763 */ 3764 p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; 3765 3766 /* start out with a single active tc. This can be increased either 3767 * by dcbx negotiation or by upper layer driver 3768 */ 3769 p_hwfn->hw_info.num_active_tc = 1; 3770 3771 ecore_get_num_funcs(p_hwfn, p_ptt); 3772 3773 if (ecore_mcp_is_init(p_hwfn)) 3774 p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; 3775 3776 /* In case of forcing the driver's default resource allocation, calling 3777 * ecore_hw_get_resc() should come after initializing the personality 3778 * and after getting the number of functions, since the calculation of 3779 * the resources/features depends on them. 3780 * This order is not harmful if not forcing.
3781 */ 3782 rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc); 3783 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 3784 rc = ECORE_SUCCESS; 3785 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 3786 } 3787 3788 return rc; 3789 } 3790 3791 static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev) 3792 { 3793 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3794 u16 device_id_mask; 3795 u32 tmp; 3796 3797 /* Read Vendor Id / Device Id */ 3798 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET, 3799 &p_dev->vendor_id); 3800 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET, 3801 &p_dev->device_id); 3802 3803 /* Determine type */ 3804 device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK; 3805 switch (device_id_mask) { 3806 case ECORE_DEV_ID_MASK_BB: 3807 p_dev->type = ECORE_DEV_TYPE_BB; 3808 break; 3809 case ECORE_DEV_ID_MASK_AH: 3810 p_dev->type = ECORE_DEV_TYPE_AH; 3811 break; 3812 default: 3813 DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n", 3814 p_dev->device_id); 3815 return ECORE_ABORTED; 3816 } 3817 3818 p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3819 MISCS_REG_CHIP_NUM); 3820 p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3821 MISCS_REG_CHIP_REV); 3822 3823 MASK_FIELD(CHIP_REV, p_dev->chip_rev); 3824 3825 /* Learn number of HW-functions */ 3826 tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3827 MISCS_REG_CMT_ENABLED_FOR_PAIR); 3828 3829 if (tmp & (1 << p_hwfn->rel_pf_id)) { 3830 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n"); 3831 p_dev->num_hwfns = 2; 3832 } else { 3833 p_dev->num_hwfns = 1; 3834 } 3835 3836 #ifndef ASIC_ONLY 3837 if (CHIP_REV_IS_EMUL(p_dev)) { 3838 /* For some reason we have problems with this register 3839 * in B0 emulation; Simply assume no CMT 3840 */ 3841 DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n"); 3842 p_dev->num_hwfns = 1; 3843 } 3844 #endif 3845 3846 p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3847 MISCS_REG_CHIP_TEST_REG) >> 4; 3848 MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id); 3849 p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3850 MISCS_REG_CHIP_METAL); 3851 MASK_FIELD(CHIP_METAL, p_dev->chip_metal); 3852 DP_INFO(p_dev->hwfns, 3853 "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 3854 ECORE_IS_BB(p_dev) ? 
"BB" : "AH", 3855 'A' + p_dev->chip_rev, (int)p_dev->chip_metal, 3856 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, 3857 p_dev->chip_metal); 3858 3859 if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) { 3860 DP_NOTICE(p_dev->hwfns, false, 3861 "The chip type/rev (BB A0) is not supported!\n"); 3862 return ECORE_ABORTED; 3863 } 3864 3865 #ifndef ASIC_ONLY 3866 if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev)) 3867 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3868 MISCS_REG_PLL_MAIN_CTRL_4, 0x1); 3869 3870 if (CHIP_REV_IS_EMUL(p_dev)) { 3871 tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, 3872 MISCS_REG_ECO_RESERVED); 3873 if (tmp & (1 << 29)) { 3874 DP_NOTICE(p_hwfn, false, "Emulation: Running on a FULL build\n"); 3875 p_dev->b_is_emul_full = true; 3876 } else { 3877 DP_NOTICE(p_hwfn, false, "Emulation: Running on a REDUCED build\n"); 3878 } 3879 } 3880 #endif 3881 3882 return ECORE_SUCCESS; 3883 } 3884 3885 void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev) 3886 { 3887 int j; 3888 3889 if (IS_VF(p_dev)) 3890 return; 3891 3892 for_each_hwfn(p_dev, j) { 3893 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 3894 3895 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n"); 3896 3897 p_hwfn->hw_init_done = false; 3898 p_hwfn->first_on_engine = false; 3899 3900 ecore_ptt_invalidate(p_hwfn); 3901 } 3902 } 3903 3904 void ecore_hw_hibernate_resume(struct ecore_dev *p_dev) 3905 { 3906 int j = 0; 3907 3908 if (IS_VF(p_dev)) 3909 return; 3910 3911 for_each_hwfn(p_dev, j) { 3912 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 3913 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 3914 3915 ecore_hw_hwfn_prepare(p_hwfn); 3916 3917 if (!p_ptt) 3918 DP_NOTICE(p_hwfn, true, "ptt acquire failed\n"); 3919 else { 3920 ecore_load_mcp_offsets(p_hwfn, p_ptt); 3921 ecore_ptt_release(p_hwfn, p_ptt); 3922 } 3923 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n"); 3924 } 3925 } 3926 3927 static enum _ecore_status_t ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, 3928 void OSAL_IOMEM *p_regview, 3929 void OSAL_IOMEM *p_doorbells, 3930 struct ecore_hw_prepare_params *p_params) 3931 { 3932 struct ecore_mdump_retain_data mdump_retain; 3933 struct ecore_dev *p_dev = p_hwfn->p_dev; 3934 struct ecore_mdump_info mdump_info; 3935 enum _ecore_status_t rc = ECORE_SUCCESS; 3936 3937 /* Split PCI bars evenly between hwfns */ 3938 p_hwfn->regview = p_regview; 3939 p_hwfn->doorbells = p_doorbells; 3940 3941 if (IS_VF(p_dev)) 3942 return ecore_vf_hw_prepare(p_hwfn); 3943 3944 /* Validate that chip access is feasible */ 3945 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 3946 DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n"); 3947 if (p_params->b_relaxed_probe) 3948 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME; 3949 return ECORE_INVAL; 3950 } 3951 3952 get_function_id(p_hwfn); 3953 3954 /* Allocate PTT pool */ 3955 rc = ecore_ptt_pool_alloc(p_hwfn); 3956 if (rc) { 3957 DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n"); 3958 if (p_params->b_relaxed_probe) 3959 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 3960 goto err0; 3961 } 3962 3963 /* Allocate the main PTT */ 3964 p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 3965 3966 /* First hwfn learns basic information, e.g., number of hwfns */ 3967 if (!p_hwfn->my_id) { 3968 rc = ecore_get_dev_info(p_dev); 3969 if (rc != ECORE_SUCCESS) { 3970 if (p_params->b_relaxed_probe) 3971 p_params->p_relaxed_res = 3972 ECORE_HW_PREPARE_FAILED_DEV; 3973 goto 
err1; 3974 } 3975 } 3976 3977 ecore_hw_hwfn_prepare(p_hwfn); 3978 3979 /* Initialize MCP structure */ 3980 rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 3981 if (rc) { 3982 DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n"); 3983 if (p_params->b_relaxed_probe) 3984 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 3985 goto err1; 3986 } 3987 3988 /* Read the device configuration information from the HW and SHMEM */ 3989 rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, 3990 p_params->personality, p_params); 3991 if (rc) { 3992 DP_NOTICE(p_hwfn, true, "Failed to get HW information\n"); 3993 goto err2; 3994 } 3995 3996 /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is 3997 * called, since among others it sets the ports number in an engine. 3998 */ 3999 if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) && 4000 !p_dev->recov_in_prog) { 4001 rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 4002 if (rc != ECORE_SUCCESS) 4003 DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n"); 4004 } 4005 4006 /* Check if mdump logs/data are present and update the epoch value */ 4007 if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) { 4008 rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt, 4009 &mdump_info); 4010 if (rc == ECORE_SUCCESS && mdump_info.num_of_logs) 4011 DP_NOTICE(p_hwfn, false, 4012 "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n"); 4013 4014 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt, 4015 &mdump_retain); 4016 if (rc == ECORE_SUCCESS && mdump_retain.valid) 4017 DP_NOTICE(p_hwfn, false, 4018 "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n", 4019 mdump_retain.epoch, mdump_retain.pf, 4020 mdump_retain.status); 4021 4022 ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, 4023 p_params->epoch); 4024 } 4025 4026 /* Allocate the init RT array and initialize the init-ops engine */ 4027 rc = ecore_init_alloc(p_hwfn); 4028 if (rc) { 4029 DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n"); 4030 if (p_params->b_relaxed_probe) 4031 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4032 goto err2; 4033 } 4034 4035 #ifndef ASIC_ONLY 4036 if (CHIP_REV_IS_FPGA(p_dev)) { 4037 DP_NOTICE(p_hwfn, false, 4038 "FPGA: workaround; Prevent DMAE parities\n"); 4039 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5, 4040 7); 4041 4042 DP_NOTICE(p_hwfn, false, 4043 "FPGA: workaround: Set VF bar0 size\n"); 4044 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4045 PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4); 4046 } 4047 #endif 4048 4049 return rc; 4050 err2: 4051 if (IS_LEAD_HWFN(p_hwfn)) 4052 ecore_iov_free_hw_info(p_dev); 4053 ecore_mcp_free(p_hwfn); 4054 err1: 4055 ecore_hw_hwfn_free(p_hwfn); 4056 err0: 4057 return rc; 4058 } 4059 4060 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, 4061 struct ecore_hw_prepare_params *p_params) 4062 { 4063 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4064 enum _ecore_status_t rc; 4065 4066 p_dev->chk_reg_fifo = p_params->chk_reg_fifo; 4067 p_dev->allow_mdump = p_params->allow_mdump; 4068 4069 if (p_params->b_relaxed_probe) 4070 p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; 4071 4072 /* Store the precompiled init data ptrs */ 4073 if (IS_PF(p_dev)) 4074 ecore_init_iro_array(p_dev); 4075 4076 /* Initialize the first hwfn - will learn number of hwfns */ 4077 rc = ecore_hw_prepare_single(p_hwfn, 4078 p_dev->regview, 4079 p_dev->doorbells, p_params); 4080 if (rc != ECORE_SUCCESS) 4081 return rc; 4082 4083 
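/* Propagate the personality learnt by the leading hwfn, so that a possible
 * second hwfn (CMT devices) below is prepared with the same personality.
 */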
p_params->personality = p_hwfn->hw_info.personality; 4084 4085 /* initialize 2nd hwfn if necessary */ 4086 if (p_dev->num_hwfns > 1) { 4087 void OSAL_IOMEM *p_regview, *p_doorbell; 4088 u8 OSAL_IOMEM *addr; 4089 4090 /* adjust bar offset for second engine */ 4091 addr = (u8 OSAL_IOMEM *)p_dev->regview + 4092 ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2; 4093 p_regview = (void OSAL_IOMEM *)addr; 4094 4095 addr = (u8 OSAL_IOMEM *)p_dev->doorbells + 4096 ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2; 4097 p_doorbell = (void OSAL_IOMEM *)addr; 4098 4099 /* prepare second hw function */ 4100 rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, 4101 p_doorbell, p_params); 4102 4103 /* in case of error, need to free the previously 4104 * initialized hwfn 0. 4105 */ 4106 if (rc != ECORE_SUCCESS) { 4107 if (p_params->b_relaxed_probe) 4108 p_params->p_relaxed_res = 4109 ECORE_HW_PREPARE_FAILED_ENG2; 4110 4111 if (IS_PF(p_dev)) { 4112 ecore_init_free(p_hwfn); 4113 ecore_mcp_free(p_hwfn); 4114 ecore_hw_hwfn_free(p_hwfn); 4115 } else { 4116 DP_NOTICE(p_dev, true, "What do we need to free when VF hwfn1 init fails\n"); 4117 } 4118 return rc; 4119 } 4120 } 4121 4122 return rc; 4123 } 4124 4125 void ecore_hw_remove(struct ecore_dev *p_dev) 4126 { 4127 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4128 int i; 4129 4130 if (IS_PF(p_dev)) 4131 ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 4132 ECORE_OV_DRIVER_STATE_NOT_LOADED); 4133 4134 for_each_hwfn(p_dev, i) { 4135 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 4136 4137 if (IS_VF(p_dev)) { 4138 ecore_vf_pf_release(p_hwfn); 4139 continue; 4140 } 4141 4142 ecore_init_free(p_hwfn); 4143 ecore_hw_hwfn_free(p_hwfn); 4144 ecore_mcp_free(p_hwfn); 4145 4146 OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex); 4147 } 4148 4149 ecore_iov_free_hw_info(p_dev); 4150 } 4151 4152 static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev, 4153 struct ecore_chain *p_chain) 4154 { 4155 void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL; 4156 dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 4157 struct ecore_chain_next *p_next; 4158 u32 size, i; 4159 4160 if (!p_virt) 4161 return; 4162 4163 size = p_chain->elem_size * p_chain->usable_per_page; 4164 4165 for (i = 0; i < p_chain->page_cnt; i++) { 4166 if (!p_virt) 4167 break; 4168 4169 p_next = (struct ecore_chain_next *)((u8 *)p_virt + size); 4170 p_virt_next = p_next->next_virt; 4171 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 4172 4173 OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys, 4174 ECORE_CHAIN_PAGE_SIZE); 4175 4176 p_virt = p_virt_next; 4177 p_phys = p_phys_next; 4178 } 4179 } 4180 4181 static void ecore_chain_free_single(struct ecore_dev *p_dev, 4182 struct ecore_chain *p_chain) 4183 { 4184 if (!p_chain->p_virt_addr) 4185 return; 4186 4187 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr, 4188 p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE); 4189 } 4190 4191 static void ecore_chain_free_pbl(struct ecore_dev *p_dev, 4192 struct ecore_chain *p_chain) 4193 { 4194 void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; 4195 u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table; 4196 u32 page_cnt = p_chain->page_cnt, i, pbl_size; 4197 4198 if (!pp_virt_addr_tbl) 4199 return; 4200 4201 if (!p_pbl_virt) 4202 goto out; 4203 4204 for (i = 0; i < page_cnt; i++) { 4205 if (!pp_virt_addr_tbl[i]) 4206 break; 4207 4208 OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i], 4209 *(dma_addr_t *)p_pbl_virt, 4210 ECORE_CHAIN_PAGE_SIZE); 4211 4212 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4213 } 4214
4215 pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4216 4217 if (!p_chain->b_external_pbl) { 4218 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table, 4219 p_chain->pbl_sp.p_phys_table, pbl_size); 4220 } 4221 out: 4222 OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl); 4223 p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL; 4224 } 4225 4226 void ecore_chain_free(struct ecore_dev *p_dev, 4227 struct ecore_chain *p_chain) 4228 { 4229 switch (p_chain->mode) { 4230 case ECORE_CHAIN_MODE_NEXT_PTR: 4231 ecore_chain_free_next_ptr(p_dev, p_chain); 4232 break; 4233 case ECORE_CHAIN_MODE_SINGLE: 4234 ecore_chain_free_single(p_dev, p_chain); 4235 break; 4236 case ECORE_CHAIN_MODE_PBL: 4237 ecore_chain_free_pbl(p_dev, p_chain); 4238 break; 4239 } 4240 } 4241 4242 static enum _ecore_status_t 4243 ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev, 4244 enum ecore_chain_cnt_type cnt_type, 4245 osal_size_t elem_size, u32 page_cnt) 4246 { 4247 u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 4248 4249 /* The actual chain size can be larger than the maximal possible value 4250 * after rounding up the requested elements number to pages, and after 4251 * taking into account the unusable elements (next-ptr elements). 4252 * The size of a "u16" chain can be (U16_MAX + 1) since the chain 4253 * size/capacity fields are of a u32 type. 4254 */ 4255 if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 && 4256 chain_size > ((u32)ECORE_U16_MAX + 1)) || 4257 (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 && 4258 chain_size > ECORE_U32_MAX)) { 4259 DP_NOTICE(p_dev, true, 4260 "The actual chain size (0x%llx) is larger than the maximal possible value\n", 4261 (unsigned long long)chain_size); 4262 return ECORE_INVAL; 4263 } 4264 4265 return ECORE_SUCCESS; 4266 } 4267 4268 static enum _ecore_status_t 4269 ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4270 { 4271 void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL; 4272 dma_addr_t p_phys = 0; 4273 u32 i; 4274 4275 for (i = 0; i < p_chain->page_cnt; i++) { 4276 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4277 ECORE_CHAIN_PAGE_SIZE); 4278 if (!p_virt) { 4279 DP_NOTICE(p_dev, true, 4280 "Failed to allocate chain memory\n"); 4281 return ECORE_NOMEM; 4282 } 4283 4284 if (i == 0) { 4285 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4286 ecore_chain_reset(p_chain); 4287 } else { 4288 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4289 p_virt, p_phys); 4290 } 4291 4292 p_virt_prev = p_virt; 4293 } 4294 /* Last page's next element should point to the beginning of the 4295 * chain.
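 * This makes a next-ptr chain circular: a producer/consumer that advances
 * past the last usable element of the final page wraps back to page 0.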
4296 */ 4297 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4298 p_chain->p_virt_addr, 4299 p_chain->p_phys_addr); 4300 4301 return ECORE_SUCCESS; 4302 } 4303 4304 static enum _ecore_status_t 4305 ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4306 { 4307 dma_addr_t p_phys = 0; 4308 void *p_virt = OSAL_NULL; 4309 4310 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); 4311 if (!p_virt) { 4312 DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n"); 4313 return ECORE_NOMEM; 4314 } 4315 4316 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4317 ecore_chain_reset(p_chain); 4318 4319 return ECORE_SUCCESS; 4320 } 4321 4322 static enum _ecore_status_t 4323 ecore_chain_alloc_pbl(struct ecore_dev *p_dev, 4324 struct ecore_chain *p_chain, 4325 struct ecore_chain_ext_pbl *ext_pbl) 4326 { 4327 void *p_virt = OSAL_NULL; 4328 u8 *p_pbl_virt = OSAL_NULL; 4329 void **pp_virt_addr_tbl = OSAL_NULL; 4330 dma_addr_t p_phys = 0, p_pbl_phys = 0; 4331 u32 page_cnt = p_chain->page_cnt, size, i; 4332 4333 size = page_cnt * sizeof(*pp_virt_addr_tbl); 4334 pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); 4335 if (!pp_virt_addr_tbl) { 4336 DP_NOTICE(p_dev, true, 4337 "Failed to allocate memory for the chain virtual addresses table\n"); 4338 return ECORE_NOMEM; 4339 } 4340 4341 /* The allocation of the PBL table is done with its full size, since it 4342 * is expected to be successive. 4343 * ecore_chain_init_pbl_mem() is called even in a case of an allocation 4344 * failure, since pp_virt_addr_tbl was previously allocated, and it 4345 * should be saved to allow its freeing during the error flow. 4346 */ 4347 size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4348 4349 if (ext_pbl == OSAL_NULL) { 4350 p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); 4351 } else { 4352 p_pbl_virt = ext_pbl->p_pbl_virt; 4353 p_pbl_phys = ext_pbl->p_pbl_phys; 4354 p_chain->b_external_pbl = true; 4355 } 4356 4357 ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, 4358 pp_virt_addr_tbl); 4359 if (!p_pbl_virt) { 4360 DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n"); 4361 return ECORE_NOMEM; 4362 } 4363 4364 for (i = 0; i < page_cnt; i++) { 4365 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4366 ECORE_CHAIN_PAGE_SIZE); 4367 if (!p_virt) { 4368 DP_NOTICE(p_dev, true, 4369 "Failed to allocate chain memory\n"); 4370 return ECORE_NOMEM; 4371 } 4372 4373 if (i == 0) { 4374 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4375 ecore_chain_reset(p_chain); 4376 } 4377 4378 /* Fill the PBL table with the physical address of the page */ 4379 *(dma_addr_t *)p_pbl_virt = p_phys; 4380 /* Keep the virtual address of the page */ 4381 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; 4382 4383 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4384 } 4385 4386 return ECORE_SUCCESS; 4387 } 4388 4389 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, 4390 enum ecore_chain_use_mode intended_use, 4391 enum ecore_chain_mode mode, 4392 enum ecore_chain_cnt_type cnt_type, 4393 u32 num_elems, osal_size_t elem_size, 4394 struct ecore_chain *p_chain, 4395 struct ecore_chain_ext_pbl *ext_pbl) 4396 { 4397 u32 page_cnt; 4398 enum _ecore_status_t rc = ECORE_SUCCESS; 4399 4400 if (mode == ECORE_CHAIN_MODE_SINGLE) 4401 page_cnt = 1; 4402 else 4403 page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 4404 4405 rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, 4406 page_cnt); 4407 if (rc) { 4408 DP_NOTICE(p_dev, true, 4409 "Cannot allocate a chain with the 
given arguments:\n" 4410 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 4411 intended_use, mode, cnt_type, num_elems, elem_size); 4412 return rc; 4413 } 4414 4415 ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use, 4416 mode, cnt_type, p_dev->dp_ctx); 4417 4418 switch (mode) { 4419 case ECORE_CHAIN_MODE_NEXT_PTR: 4420 rc = ecore_chain_alloc_next_ptr(p_dev, p_chain); 4421 break; 4422 case ECORE_CHAIN_MODE_SINGLE: 4423 rc = ecore_chain_alloc_single(p_dev, p_chain); 4424 break; 4425 case ECORE_CHAIN_MODE_PBL: 4426 rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl); 4427 break; 4428 } 4429 if (rc) 4430 goto nomem; 4431 4432 return ECORE_SUCCESS; 4433 4434 nomem: 4435 ecore_chain_free(p_dev, p_chain); 4436 return rc; 4437 } 4438 4439 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, 4440 u16 src_id, u16 *dst_id) 4441 { 4442 if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { 4443 u16 min, max; 4444 4445 min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE); 4446 max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 4447 DP_NOTICE(p_hwfn, true, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 4448 src_id, min, max); 4449 4450 return ECORE_INVAL; 4451 } 4452 4453 *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id; 4454 4455 return ECORE_SUCCESS; 4456 } 4457 4458 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, 4459 u8 src_id, u8 *dst_id) 4460 { 4461 if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { 4462 u8 min, max; 4463 4464 min = (u8)RESC_START(p_hwfn, ECORE_VPORT); 4465 max = min + RESC_NUM(p_hwfn, ECORE_VPORT); 4466 DP_NOTICE(p_hwfn, true, "vport id [%d] is not valid, available indices [%d - %d]\n", 4467 src_id, min, max); 4468 4469 return ECORE_INVAL; 4470 } 4471 4472 *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id; 4473 4474 return ECORE_SUCCESS; 4475 } 4476 4477 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, 4478 u8 src_id, u8 *dst_id) 4479 { 4480 if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) { 4481 u8 min, max; 4482 4483 min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG); 4484 max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG); 4485 DP_NOTICE(p_hwfn, true, "rss_eng id [%d] is not valid, available indices [%d - %d]\n", 4486 src_id, min, max); 4487 4488 return ECORE_INVAL; 4489 } 4490 4491 *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id; 4492 4493 return ECORE_SUCCESS; 4494 } 4495 4496 static enum _ecore_status_t 4497 ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4498 struct ecore_ptt *p_ptt, u32 high, u32 low, 4499 u32 *p_entry_num) 4500 { 4501 u32 en; 4502 int i; 4503 4504 /* Find a free entry and utilize it */ 4505 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4506 en = ecore_rd(p_hwfn, p_ptt, 4507 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4508 i * sizeof(u32)); 4509 if (en) 4510 continue; 4511 ecore_wr(p_hwfn, p_ptt, 4512 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4513 2 * i * sizeof(u32), low); 4514 ecore_wr(p_hwfn, p_ptt, 4515 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4516 (2 * i + 1) * sizeof(u32), high); 4517 ecore_wr(p_hwfn, p_ptt, 4518 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4519 i * sizeof(u32), 0); 4520 ecore_wr(p_hwfn, p_ptt, 4521 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4522 i * sizeof(u32), 0); 4523 ecore_wr(p_hwfn, p_ptt, 4524 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4525 i * sizeof(u32), 1); 4526 break; 4527 } 4528 4529 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4530 return ECORE_NORESOURCES; 4531 4532 *p_entry_num = i; 4533 4534 return ECORE_SUCCESS; 4535 } 4536 4537 static enum 
_ecore_status_t 4538 ecore_llh_add_mac_filter_e5(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 4539 u32 high, u32 low, u32 *p_entry_num) 4540 { 4541 ECORE_E5_MISSING_CODE; 4542 4543 return ECORE_NOTIMPL; 4544 } 4545 4546 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn, 4547 struct ecore_ptt *p_ptt, u8 *p_filter) 4548 { 4549 u32 high, low, entry_num; 4550 enum _ecore_status_t rc; 4551 4552 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4553 return ECORE_SUCCESS; 4554 4555 high = p_filter[1] | (p_filter[0] << 8); 4556 low = p_filter[5] | (p_filter[4] << 8) | 4557 (p_filter[3] << 16) | (p_filter[2] << 24); 4558 4559 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4560 rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low, 4561 &entry_num); 4562 else /* E5 */ 4563 rc = ecore_llh_add_mac_filter_e5(p_hwfn, p_ptt, high, low, 4564 &entry_num); 4565 if (rc != ECORE_SUCCESS) { 4566 DP_NOTICE(p_hwfn, false, 4567 "Failed to find an empty LLH filter to utilize\n"); 4568 return rc; 4569 } 4570 4571 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4572 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n", 4573 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4574 p_filter[4], p_filter[5], entry_num); 4575 4576 return ECORE_SUCCESS; 4577 } 4578 4579 static enum _ecore_status_t 4580 ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4581 struct ecore_ptt *p_ptt, u32 high, u32 low, 4582 u32 *p_entry_num) 4583 { 4584 int i; 4585 4586 /* Find the entry and clean it */ 4587 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4588 if (ecore_rd(p_hwfn, p_ptt, 4589 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4590 2 * i * sizeof(u32)) != low) 4591 continue; 4592 if (ecore_rd(p_hwfn, p_ptt, 4593 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4594 (2 * i + 1) * sizeof(u32)) != high) 4595 continue; 4596 4597 ecore_wr(p_hwfn, p_ptt, 4598 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 4599 ecore_wr(p_hwfn, p_ptt, 4600 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4601 2 * i * sizeof(u32), 0); 4602 ecore_wr(p_hwfn, p_ptt, 4603 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4604 (2 * i + 1) * sizeof(u32), 0); 4605 break; 4606 } 4607 4608 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4609 return ECORE_INVAL; 4610 4611 *p_entry_num = i; 4612 4613 return ECORE_SUCCESS; 4614 } 4615 4616 static enum _ecore_status_t 4617 ecore_llh_remove_mac_filter_e5(struct ecore_hwfn *p_hwfn, 4618 struct ecore_ptt *p_ptt, u32 high, u32 low, 4619 u32 *p_entry_num) 4620 { 4621 ECORE_E5_MISSING_CODE; 4622 4623 return ECORE_NOTIMPL; 4624 } 4625 4626 void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn, 4627 struct ecore_ptt *p_ptt, u8 *p_filter) 4628 { 4629 u32 high, low, entry_num; 4630 enum _ecore_status_t rc; 4631 4632 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4633 return; 4634 4635 high = p_filter[1] | (p_filter[0] << 8); 4636 low = p_filter[5] | (p_filter[4] << 8) | 4637 (p_filter[3] << 16) | (p_filter[2] << 24); 4638 4639 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4640 rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high, 4641 low, &entry_num); 4642 else /* E5 */ 4643 rc = ecore_llh_remove_mac_filter_e5(p_hwfn, p_ptt, high, low, 4644 &entry_num); 4645 if (rc != ECORE_SUCCESS) { 4646 DP_NOTICE(p_hwfn, false, 4647 "Tried to remove a non-configured filter [MAC %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx]\n", 4648 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4649 p_filter[4], p_filter[5]); 4650 return; 4651 } 4652 4653 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4654 
"MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n", 4655 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4656 p_filter[4], p_filter[5], entry_num); 4657 } 4658 4659 static enum _ecore_status_t 4660 ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4661 struct ecore_ptt *p_ptt, 4662 enum ecore_llh_port_filter_type_t type, 4663 u32 high, u32 low, u32 *p_entry_num) 4664 { 4665 u32 en; 4666 int i; 4667 4668 /* Find a free entry and utilize it */ 4669 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4670 en = ecore_rd(p_hwfn, p_ptt, 4671 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4672 i * sizeof(u32)); 4673 if (en) 4674 continue; 4675 ecore_wr(p_hwfn, p_ptt, 4676 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4677 2 * i * sizeof(u32), low); 4678 ecore_wr(p_hwfn, p_ptt, 4679 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4680 (2 * i + 1) * sizeof(u32), high); 4681 ecore_wr(p_hwfn, p_ptt, 4682 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4683 i * sizeof(u32), 1); 4684 ecore_wr(p_hwfn, p_ptt, 4685 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4686 i * sizeof(u32), 1 << type); 4687 ecore_wr(p_hwfn, p_ptt, 4688 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1); 4689 break; 4690 } 4691 4692 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4693 return ECORE_NORESOURCES; 4694 4695 *p_entry_num = i; 4696 4697 return ECORE_SUCCESS; 4698 } 4699 4700 static enum _ecore_status_t 4701 ecore_llh_add_protocol_filter_e5(struct ecore_hwfn *p_hwfn, 4702 struct ecore_ptt *p_ptt, 4703 enum ecore_llh_port_filter_type_t type, 4704 u32 high, u32 low, u32 *p_entry_num) 4705 { 4706 ECORE_E5_MISSING_CODE; 4707 4708 return ECORE_NOTIMPL; 4709 } 4710 4711 enum _ecore_status_t 4712 ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn, 4713 struct ecore_ptt *p_ptt, 4714 u16 source_port_or_eth_type, 4715 u16 dest_port, 4716 enum ecore_llh_port_filter_type_t type) 4717 { 4718 u32 high, low, entry_num; 4719 enum _ecore_status_t rc; 4720 4721 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4722 return ECORE_SUCCESS; 4723 4724 high = 0; 4725 low = 0; 4726 switch (type) { 4727 case ECORE_LLH_FILTER_ETHERTYPE: 4728 high = source_port_or_eth_type; 4729 break; 4730 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4731 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4732 low = source_port_or_eth_type << 16; 4733 break; 4734 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4735 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4736 low = dest_port; 4737 break; 4738 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4739 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4740 low = (source_port_or_eth_type << 16) | dest_port; 4741 break; 4742 default: 4743 DP_NOTICE(p_hwfn, true, 4744 "Non valid LLH protocol filter type %d\n", type); 4745 return ECORE_INVAL; 4746 } 4747 4748 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4749 rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 4750 high, low, &entry_num); 4751 else /* E5 */ 4752 rc = ecore_llh_add_protocol_filter_e5(p_hwfn, p_ptt, type, high, 4753 low, &entry_num); 4754 if (rc != ECORE_SUCCESS) { 4755 DP_NOTICE(p_hwfn, false, 4756 "Failed to find an empty LLH filter to utilize\n"); 4757 return rc; 4758 } 4759 4760 switch (type) { 4761 case ECORE_LLH_FILTER_ETHERTYPE: 4762 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4763 "ETH type %x is added at %d\n", 4764 source_port_or_eth_type, entry_num); 4765 break; 4766 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4767 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4768 "TCP src port %x is added at %d\n", 4769 source_port_or_eth_type, entry_num); 4770 break; 4771 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4772 
DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4773 "UDP src port %x is added at %d\n", 4774 source_port_or_eth_type, entry_num); 4775 break; 4776 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4777 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4778 "TCP dst port %x is added at %d\n", 4779 dest_port, entry_num); 4780 break; 4781 case ECORE_LLH_FILTER_UDP_DEST_PORT: 4782 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4783 "UDP dst port %x is added at %d\n", 4784 dest_port, entry_num); 4785 break; 4786 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4787 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4788 "TCP src/dst ports %x/%x are added at %d\n", 4789 source_port_or_eth_type, dest_port, entry_num); 4790 break; 4791 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4792 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4793 "UDP src/dst ports %x/%x are added at %d\n", 4794 source_port_or_eth_type, dest_port, entry_num); 4795 break; 4796 } 4797 4798 return ECORE_SUCCESS; 4799 } 4800 4801 static enum _ecore_status_t 4802 ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4803 struct ecore_ptt *p_ptt, 4804 enum ecore_llh_port_filter_type_t type, 4805 u32 high, u32 low, u32 *p_entry_num) 4806 { 4807 int i; 4808 4809 /* Find the entry and clean it */ 4810 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4811 if (!ecore_rd(p_hwfn, p_ptt, 4812 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4813 i * sizeof(u32))) 4814 continue; 4815 if (!ecore_rd(p_hwfn, p_ptt, 4816 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4817 i * sizeof(u32))) 4818 continue; 4819 if (!(ecore_rd(p_hwfn, p_ptt, 4820 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4821 i * sizeof(u32)) & (1 << type))) 4822 continue; 4823 if (ecore_rd(p_hwfn, p_ptt, 4824 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4825 2 * i * sizeof(u32)) != low) 4826 continue; 4827 if (ecore_rd(p_hwfn, p_ptt, 4828 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4829 (2 * i + 1) * sizeof(u32)) != high) 4830 continue; 4831 4832 ecore_wr(p_hwfn, p_ptt, 4833 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 4834 ecore_wr(p_hwfn, p_ptt, 4835 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4836 i * sizeof(u32), 0); 4837 ecore_wr(p_hwfn, p_ptt, 4838 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4839 i * sizeof(u32), 0); 4840 ecore_wr(p_hwfn, p_ptt, 4841 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4842 2 * i * sizeof(u32), 0); 4843 ecore_wr(p_hwfn, p_ptt, 4844 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4845 (2 * i + 1) * sizeof(u32), 0); 4846 break; 4847 } 4848 4849 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4850 return ECORE_INVAL; 4851 4852 *p_entry_num = i; 4853 4854 return ECORE_SUCCESS; 4855 } 4856 4857 static enum _ecore_status_t 4858 ecore_llh_remove_protocol_filter_e5(struct ecore_hwfn *p_hwfn, 4859 struct ecore_ptt *p_ptt, 4860 enum ecore_llh_port_filter_type_t type, 4861 u32 high, u32 low, u32 *p_entry_num) 4862 { 4863 ECORE_E5_MISSING_CODE; 4864 4865 return ECORE_NOTIMPL; 4866 } 4867 4868 void 4869 ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn, 4870 struct ecore_ptt *p_ptt, 4871 u16 source_port_or_eth_type, 4872 u16 dest_port, 4873 enum ecore_llh_port_filter_type_t type) 4874 { 4875 u32 high, low, entry_num; 4876 enum _ecore_status_t rc; 4877 4878 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4879 return; 4880 4881 high = 0; 4882 low = 0; 4883 switch (type) { 4884 case ECORE_LLH_FILTER_ETHERTYPE: 4885 high = source_port_or_eth_type; 4886 break; 4887 case ECORE_LLH_FILTER_TCP_SRC_PORT: 4888 case ECORE_LLH_FILTER_UDP_SRC_PORT: 4889 low = source_port_or_eth_type << 16; 4890 break; 4891 case ECORE_LLH_FILTER_TCP_DEST_PORT: 4892 case ECORE_LLH_FILTER_UDP_DEST_PORT: 
4893 low = dest_port; 4894 break; 4895 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 4896 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 4897 low = (source_port_or_eth_type << 16) | dest_port; 4898 break; 4899 default: 4900 DP_NOTICE(p_hwfn, true, 4901 "Non valid LLH protocol filter type %d\n", type); 4902 return; 4903 } 4904 4905 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4906 rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 4907 high, low, 4908 &entry_num); 4909 else /* E5 */ 4910 rc = ecore_llh_remove_protocol_filter_e5(p_hwfn, p_ptt, type, 4911 high, low, &entry_num); 4912 if (rc != ECORE_SUCCESS) { 4913 DP_NOTICE(p_hwfn, false, 4914 "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n", 4915 type, source_port_or_eth_type, dest_port); 4916 return; 4917 } 4918 4919 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4920 "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n", 4921 type, source_port_or_eth_type, dest_port, entry_num); 4922 } 4923 4924 static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn, 4925 struct ecore_ptt *p_ptt) 4926 { 4927 int i; 4928 4929 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4930 ecore_wr(p_hwfn, p_ptt, 4931 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4932 i * sizeof(u32), 0); 4933 ecore_wr(p_hwfn, p_ptt, 4934 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4935 2 * i * sizeof(u32), 0); 4936 ecore_wr(p_hwfn, p_ptt, 4937 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4938 (2 * i + 1) * sizeof(u32), 0); 4939 } 4940 } 4941 4942 static void ecore_llh_clear_all_filters_e5(struct ecore_hwfn *p_hwfn, 4943 struct ecore_ptt *p_ptt) 4944 { 4945 ECORE_E5_MISSING_CODE; 4946 } 4947 4948 void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn, 4949 struct ecore_ptt *p_ptt) 4950 { 4951 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4952 return; 4953 4954 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4955 ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt); 4956 else /* E5 */ 4957 ecore_llh_clear_all_filters_e5(p_hwfn, p_ptt); 4958 } 4959 4960 enum _ecore_status_t 4961 ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn, 4962 struct ecore_ptt *p_ptt) 4963 { 4964 if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) { 4965 ecore_wr(p_hwfn, p_ptt, 4966 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 4967 1 << p_hwfn->abs_pf_id / 2); 4968 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0); 4969 return ECORE_SUCCESS; 4970 } else { 4971 DP_NOTICE(p_hwfn, false, 4972 "This function can't be set as default\n"); 4973 return ECORE_INVAL; 4974 } 4975 } 4976 4977 static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn, 4978 struct ecore_ptt *p_ptt, 4979 u32 hw_addr, void *p_eth_qzone, 4980 osal_size_t eth_qzone_size, 4981 u8 timeset) 4982 { 4983 struct coalescing_timeset *p_coal_timeset; 4984 4985 if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) { 4986 DP_NOTICE(p_hwfn, true, 4987 "Coalescing configuration not enabled\n"); 4988 return ECORE_INVAL; 4989 } 4990 4991 p_coal_timeset = p_eth_qzone; 4992 OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size); 4993 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); 4994 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); 4995 ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); 4996 4997 return ECORE_SUCCESS; 4998 } 4999 5000 enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, 5001 u16 rx_coal, u16 tx_coal, 5002 void *p_handle) 5003 { 5004 
struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 5005 enum _ecore_status_t rc = ECORE_SUCCESS; 5006 struct ecore_ptt *p_ptt; 5007 5008 /* TODO - Configuring a single queue's coalescing but 5009 * claiming all queues are abiding same configuration 5010 * for PF and VF both. 5011 */ 5012 5013 #ifdef CONFIG_ECORE_SRIOV 5014 if (IS_VF(p_hwfn->p_dev)) 5015 return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal, 5016 tx_coal, p_cid); 5017 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 5018 5019 p_ptt = ecore_ptt_acquire(p_hwfn); 5020 if (!p_ptt) 5021 return ECORE_AGAIN; 5022 5023 if (rx_coal) { 5024 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 5025 if (rc) 5026 goto out; 5027 p_hwfn->p_dev->rx_coalesce_usecs = rx_coal; 5028 } 5029 5030 if (tx_coal) { 5031 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); 5032 if (rc) 5033 goto out; 5034 p_hwfn->p_dev->tx_coalesce_usecs = tx_coal; 5035 } 5036 out: 5037 ecore_ptt_release(p_hwfn, p_ptt); 5038 5039 return rc; 5040 } 5041 5042 enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, 5043 struct ecore_ptt *p_ptt, 5044 u16 coalesce, 5045 struct ecore_queue_cid *p_cid) 5046 { 5047 struct ustorm_eth_queue_zone eth_qzone; 5048 u8 timeset, timer_res; 5049 u32 address; 5050 enum _ecore_status_t rc; 5051 5052 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5053 if (coalesce <= 0x7F) 5054 timer_res = 0; 5055 else if (coalesce <= 0xFF) 5056 timer_res = 1; 5057 else if (coalesce <= 0x1FF) 5058 timer_res = 2; 5059 else { 5060 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5061 return ECORE_INVAL; 5062 } 5063 timeset = (u8)(coalesce >> timer_res); 5064 5065 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5066 p_cid->sb_igu_id, false); 5067 if (rc != ECORE_SUCCESS) 5068 goto out; 5069 5070 address = BAR0_MAP_REG_USDM_RAM + 5071 USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5072 5073 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, 5074 sizeof(struct ustorm_eth_queue_zone), timeset); 5075 if (rc != ECORE_SUCCESS) 5076 goto out; 5077 5078 out: 5079 return rc; 5080 } 5081 5082 enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, 5083 struct ecore_ptt *p_ptt, 5084 u16 coalesce, 5085 struct ecore_queue_cid *p_cid) 5086 { 5087 struct xstorm_eth_queue_zone eth_qzone; 5088 u8 timeset, timer_res; 5089 u32 address; 5090 enum _ecore_status_t rc; 5091 5092 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5093 if (coalesce <= 0x7F) 5094 timer_res = 0; 5095 else if (coalesce <= 0xFF) 5096 timer_res = 1; 5097 else if (coalesce <= 0x1FF) 5098 timer_res = 2; 5099 else { 5100 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5101 return ECORE_INVAL; 5102 } 5103 timeset = (u8)(coalesce >> timer_res); 5104 5105 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5106 p_cid->sb_igu_id, true); 5107 if (rc != ECORE_SUCCESS) 5108 goto out; 5109 5110 address = BAR0_MAP_REG_XSDM_RAM + 5111 XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5112 5113 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, 5114 sizeof(struct xstorm_eth_queue_zone), timeset); 5115 out: 5116 return rc; 5117 } 5118 5119 /* Calculate final WFQ values for all vports and configure it.
5120 * After this configuration each vport must have 5121 * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT 5122 */ 5123 static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5124 struct ecore_ptt *p_ptt, 5125 u32 min_pf_rate) 5126 { 5127 struct init_qm_vport_params *vport_params; 5128 int i; 5129 5130 vport_params = p_hwfn->qm_info.qm_vport_params; 5131 5132 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5133 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5134 5135 vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) / 5136 min_pf_rate; 5137 ecore_init_vport_wfq(p_hwfn, p_ptt, 5138 vport_params[i].first_tx_pq_id, 5139 vport_params[i].vport_wfq); 5140 } 5141 } 5142 5143 static void 5144 ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate) 5145 5146 { 5147 int i; 5148 5149 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) 5150 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; 5151 } 5152 5153 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 5154 struct ecore_ptt *p_ptt, 5155 u32 min_pf_rate) 5156 { 5157 struct init_qm_vport_params *vport_params; 5158 int i; 5159 5160 vport_params = p_hwfn->qm_info.qm_vport_params; 5161 5162 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5163 ecore_init_wfq_default_param(p_hwfn, min_pf_rate); 5164 ecore_init_vport_wfq(p_hwfn, p_ptt, 5165 vport_params[i].first_tx_pq_id, 5166 vport_params[i].vport_wfq); 5167 } 5168 } 5169 5170 /* This function performs several validations for WFQ 5171 * configuration and required min rate for a given vport 5172 * 1. req_rate must be greater than one percent of min_pf_rate. 5173 * 2. req_rate should not cause other vports [not configured for WFQ explicitly] 5174 * rates to get less than one percent of min_pf_rate. 5175 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 
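 *
 * Illustrative example (hypothetical numbers): with min_pf_rate = 10000 Mbps
 * and 4 vports, requesting 4000 Mbps for one vport passes all three checks -
 * it exceeds 1% of 10000 (100 Mbps), does not exceed 10000, and the remaining
 * 6000 Mbps still leaves each of the 3 unconfigured vports 2000 Mbps, which
 * is also above the 1% floor.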
5176 */ 5177 static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn, 5178 u16 vport_id, u32 req_rate, 5179 u32 min_pf_rate) 5180 { 5181 u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 5182 int non_requested_count = 0, req_count = 0, i, num_vports; 5183 5184 num_vports = p_hwfn->qm_info.num_vports; 5185 5186 /* Accounting for the vports which are configured for WFQ explicitly */ 5187 for (i = 0; i < num_vports; i++) { 5188 u32 tmp_speed; 5189 5190 if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) { 5191 req_count++; 5192 tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5193 total_req_min_rate += tmp_speed; 5194 } 5195 } 5196 5197 /* Include current vport data as well */ 5198 req_count++; 5199 total_req_min_rate += req_rate; 5200 non_requested_count = num_vports - req_count; 5201 5202 /* validate possible error cases */ 5203 if (req_rate > min_pf_rate) { 5204 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5205 "Vport [%d] - Requested rate[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5206 vport_id, req_rate, min_pf_rate); 5207 return ECORE_INVAL; 5208 } 5209 5210 if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) { 5211 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5212 "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5213 vport_id, req_rate, min_pf_rate); 5214 return ECORE_INVAL; 5215 } 5216 5217 /* TBD - for number of vports greater than 100 */ 5218 if (num_vports > ECORE_WFQ_UNIT) { 5219 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5220 "Number of vports is greater than %d\n", 5221 ECORE_WFQ_UNIT); 5222 return ECORE_INVAL; 5223 } 5224 5225 if (total_req_min_rate > min_pf_rate) { 5226 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5227 "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5228 total_req_min_rate, min_pf_rate); 5229 return ECORE_INVAL; 5230 } 5231 5232 /* Data left for non requested vports */ 5233 total_left_rate = min_pf_rate - total_req_min_rate; 5234 left_rate_per_vp = total_left_rate / non_requested_count; 5235 5236 /* validate if non requested get < 1% of min bw */ 5237 if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) { 5238 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5239 "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5240 left_rate_per_vp, min_pf_rate); 5241 return ECORE_INVAL; 5242 } 5243 5244 /* now req_rate for given vport passes all scenarios. 5245 * assign final wfq rates to all vports. 
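 * The requested vport keeps req_rate, while every vport that was not
 * explicitly configured receives an equal share (left_rate_per_vp) of the
 * bandwidth left under min_pf_rate.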
5246 */ 5247 p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; 5248 p_hwfn->qm_info.wfq_data[vport_id].configured = true; 5249 5250 for (i = 0; i < num_vports; i++) { 5251 if (p_hwfn->qm_info.wfq_data[i].configured) 5252 continue; 5253 5254 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; 5255 } 5256 5257 return ECORE_SUCCESS; 5258 } 5259 5260 static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn, 5261 struct ecore_ptt *p_ptt, 5262 u16 vp_id, u32 rate) 5263 { 5264 struct ecore_mcp_link_state *p_link; 5265 int rc = ECORE_SUCCESS; 5266 5267 p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output; 5268 5269 if (!p_link->min_pf_rate) { 5270 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; 5271 p_hwfn->qm_info.wfq_data[vp_id].configured = true; 5272 return rc; 5273 } 5274 5275 rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); 5276 5277 if (rc == ECORE_SUCCESS) 5278 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, 5279 p_link->min_pf_rate); 5280 else 5281 DP_NOTICE(p_hwfn, false, 5282 "Validation failed while configuring min rate\n"); 5283 5284 return rc; 5285 } 5286 5287 static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn, 5288 struct ecore_ptt *p_ptt, 5289 u32 min_pf_rate) 5290 { 5291 bool use_wfq = false; 5292 int rc = ECORE_SUCCESS; 5293 u16 i; 5294 5295 /* Validate all pre configured vports for wfq */ 5296 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5297 u32 rate; 5298 5299 if (!p_hwfn->qm_info.wfq_data[i].configured) 5300 continue; 5301 5302 rate = p_hwfn->qm_info.wfq_data[i].min_speed; 5303 use_wfq = true; 5304 5305 rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate); 5306 if (rc != ECORE_SUCCESS) { 5307 DP_NOTICE(p_hwfn, false, 5308 "WFQ validation failed while configuring min rate\n"); 5309 break; 5310 } 5311 } 5312 5313 if (rc == ECORE_SUCCESS && use_wfq) 5314 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5315 else 5316 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5317 5318 return rc; 5319 } 5320 5321 /* Main API for ecore clients to configure vport min rate. 5322 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] 5323 * rate - Speed in Mbps needs to be assigned to a given vport. 
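 *
 * Hypothetical usage (not taken from an actual consumer):
 * ecore_configure_vport_wfq(p_dev, 2, 2500) requests a minimum of ~2.5 Gbps
 * for vport 2; note that CMT (multi-hwfn) devices are rejected below.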
5324 */ 5325 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate) 5326 { 5327 int i, rc = ECORE_INVAL; 5328 5329 /* TBD - for multiple hardware functions - that is 100 gig */ 5330 if (p_dev->num_hwfns > 1) { 5331 DP_NOTICE(p_dev, false, 5332 "WFQ configuration is not supported for this device\n"); 5333 return rc; 5334 } 5335 5336 for_each_hwfn(p_dev, i) { 5337 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5338 struct ecore_ptt *p_ptt; 5339 5340 p_ptt = ecore_ptt_acquire(p_hwfn); 5341 if (!p_ptt) 5342 return ECORE_TIMEOUT; 5343 5344 rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); 5345 5346 if (rc != ECORE_SUCCESS) { 5347 ecore_ptt_release(p_hwfn, p_ptt); 5348 return rc; 5349 } 5350 5351 ecore_ptt_release(p_hwfn, p_ptt); 5352 } 5353 5354 return rc; 5355 } 5356 5357 /* API to configure WFQ from mcp link change */ 5358 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, 5359 struct ecore_ptt *p_ptt, 5360 u32 min_pf_rate) 5361 { 5362 int i; 5363 5364 /* TBD - for multiple hardware functions - that is 100 gig */ 5365 if (p_dev->num_hwfns > 1) { 5366 DP_VERBOSE(p_dev, ECORE_MSG_LINK, 5367 "WFQ configuration is not supported for this device\n"); 5368 return; 5369 } 5370 5371 for_each_hwfn(p_dev, i) { 5372 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 5373 5374 __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, 5375 min_pf_rate); 5376 } 5377 } 5378 5379 int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, 5380 struct ecore_ptt *p_ptt, 5381 struct ecore_mcp_link_state *p_link, 5382 u8 max_bw) 5383 { 5384 int rc = ECORE_SUCCESS; 5385 5386 p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; 5387 5388 if (!p_link->line_speed && (max_bw != 100)) 5389 return rc; 5390 5391 p_link->speed = (p_link->line_speed * max_bw) / 100; 5392 p_hwfn->qm_info.pf_rl = p_link->speed; 5393 5394 /* Since the limiter also affects Tx-switched traffic, we don't want it 5395 * to limit such traffic in case there's no actual limit. 5396 * In that case, set limit to imaginary high boundary. 
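 * (100000 Mbps below, i.e. 100G). With an actual limit - illustrative
 * numbers - a line_speed of 25000 Mbps and max_bw of 40 program the rate
 * limiter to 25000 * 40 / 100 = 10000 Mbps.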
/* API to configure WFQ from mcp link change */
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
					   struct ecore_ptt *p_ptt,
					   u32 min_pf_rate)
{
	int i;

	/* TBD - for multiple hardware functions - that is 100 gig */
	if (p_dev->num_hwfns > 1) {
		DP_VERBOSE(p_dev, ECORE_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		__ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
							min_pf_rate);
	}
}

int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 max_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set the limit to an arbitrarily high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			      p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
{
	int i, rc = ECORE_INVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
							p_link, max_bw);

		ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 min_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
{
	int i, rc = ECORE_INVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
							p_link, min_bw);
		if (rc != ECORE_SUCCESS) {
			ecore_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
								     p_ptt,
								     min_rate);
		}

		ecore_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
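/* Illustrative usage sketch, kept out of the build on purpose. The helper
 * name, the p_dev pointer and the percentages are assumptions made for the
 * example. Per the (line_speed * bw) / 100 calculations above, on a
 * 25000 Mbps link the values below resolve to a 15000 Mbps rate limit and a
 * 5000 Mbps min_pf_rate.
 */
#if 0
static void example_set_pf_bandwidth(struct ecore_dev *p_dev)
{
	int rc;

	/* Cap the PF at 60% of the link, guarantee it 20% */
	rc = ecore_configure_pf_max_bandwidth(p_dev, 60 /* percent */);
	if (rc == ECORE_SUCCESS)
		rc = ecore_configure_pf_min_bandwidth(p_dev, 20 /* percent */);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "PF bandwidth configuration failed\n");
}
#endif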
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);

	OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
		    sizeof(*p_hwfn->qm_info.wfq_data) *
		    p_hwfn->qm_info.num_vports);
}

int ecore_device_num_engines(struct ecore_dev *p_dev)
{
	return ECORE_IS_BB(p_dev) ? 2 : 1;
}

int ecore_device_num_ports(struct ecore_dev *p_dev)
{
	/* in CMT always only one port */
	if (p_dev->num_hwfns > 1)
		return 1;

	return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
}

void ecore_set_fw_mac_addr(__le16 *fw_msb,
			   __le16 *fw_mid,
			   __le16 *fw_lsb,
			   u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
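/* Byte-order illustration for ecore_set_fw_mac_addr() above (example values
 * only): for mac = 00:11:22:33:44:55 the bytes are swapped pairwise, so the
 * resulting little-endian words are fw_msb = 0x0011, fw_mid = 0x2233 and
 * fw_lsb = 0x4455, i.e. each __le16 holds one byte-swapped pair of MAC
 * bytes.
 */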