1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File : ecore_dev.c 30 */ 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "bcm_osal.h" 35 #include "reg_addr.h" 36 #include "ecore_gtt_reg_addr.h" 37 #include "ecore.h" 38 #include "ecore_chain.h" 39 #include "ecore_status.h" 40 #include "ecore_hw.h" 41 #include "ecore_rt_defs.h" 42 #include "ecore_init_ops.h" 43 #include "ecore_int.h" 44 #include "ecore_cxt.h" 45 #include "ecore_spq.h" 46 #include "ecore_init_fw_funcs.h" 47 #include "ecore_sp_commands.h" 48 #include "ecore_dev_api.h" 49 #include "ecore_sriov.h" 50 #include "ecore_vf.h" 51 #include "ecore_ll2.h" 52 #include "ecore_fcoe.h" 53 #include "ecore_iscsi.h" 54 #include "ecore_ooo.h" 55 #include "ecore_mcp.h" 56 #include "ecore_hw_defs.h" 57 #include "mcp_public.h" 58 #include "ecore_roce.h" 59 #include "ecore_iro.h" 60 #include "nvm_cfg.h" 61 #include "ecore_dev_api.h" 62 #include "ecore_dcbx.h" 63 #include "pcics_reg_driver.h" 64 #include "ecore_l2.h" 65 66 /* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM 67 * registers involved are not split and thus configuration is a race where 68 * some of the PFs configuration might be lost. 69 * Eventually, this needs to move into a MFW-covered HW-lock as arbitration 70 * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where 71 * there's more than a single compiled ecore component in system]. 72 */ 73 static osal_spinlock_t qm_lock; 74 static bool qm_lock_init = false; 75 76 /******************** Doorbell Recovery *******************/ 77 /* The doorbell recovery mechanism consists of a list of entries which represent 78 * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each 79 * entity needs to register with the mechanism and provide the parameters 80 * describing it's doorbell, including a location where last used doorbell data 81 * can be found. The doorbell execute function will traverse the list and 82 * doorbell all of the registered entries. 
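 *
 * Illustrative usage (a sketch of a typical client flow, not a verbatim
 * caller from this file; the kernel-space enumerator is assumed to be
 * DB_REC_KERNEL):
 *	rc = ecore_db_recovery_add(p_dev, db_addr, &db_data,
 *				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	... update db_data and ring db_addr as usual ...
 *	ecore_db_recovery_del(p_dev, db_addr, &db_data);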
83 */ 84 struct ecore_db_recovery_entry { 85 osal_list_entry_t list_entry; 86 void OSAL_IOMEM *db_addr; 87 void *db_data; 88 enum ecore_db_rec_width db_width; 89 enum ecore_db_rec_space db_space; 90 u8 hwfn_idx; 91 }; 92 93 /* display a single doorbell recovery entry */ 94 static void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn, 95 struct ecore_db_recovery_entry *db_entry, 96 char *action) 97 { 98 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n", 99 action, db_entry, db_entry->db_addr, db_entry->db_data, 100 db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b", 101 db_entry->db_space == DB_REC_USER ? "user" : "kernel", 102 db_entry->hwfn_idx); 103 } 104 105 /* doorbell address sanity (address within doorbell bar range) */ 106 static bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr) 107 { 108 if (db_addr < p_dev->doorbells || db_addr > 109 (void OSAL_IOMEM *)((uint8_t *)p_dev->doorbells + p_dev->db_size)) { 110 OSAL_WARN(true, 111 "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", 112 db_addr, p_dev->doorbells, 113 (void *)((uint8_t *)p_dev->doorbells + p_dev->db_size)); 114 return false; 115 } else { 116 return true; 117 } 118 } 119 120 /* find hwfn according to the doorbell address */ 121 static struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev, 122 void OSAL_IOMEM *db_addr) 123 { 124 struct ecore_hwfn *p_hwfn; 125 126 /* in CMT doorbell bar is split down the middle between engine 0 and enigne 1 */ 127 if (p_dev->num_hwfns > 1) 128 p_hwfn = db_addr < p_dev->hwfns[1].doorbells ? 129 &p_dev->hwfns[0] : &p_dev->hwfns[1]; 130 else 131 p_hwfn = ECORE_LEADING_HWFN(p_dev); 132 133 return p_hwfn; 134 } 135 136 /* add a new entry to the doorbell recovery mechanism */ 137 enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev, 138 void OSAL_IOMEM *db_addr, 139 void *db_data, 140 enum ecore_db_rec_width db_width, 141 enum ecore_db_rec_space db_space) 142 { 143 struct ecore_db_recovery_entry *db_entry; 144 struct ecore_hwfn *p_hwfn; 145 146 /* shortcircuit VFs, for now */ 147 if (IS_VF(p_dev)) { 148 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n"); 149 return ECORE_SUCCESS; 150 } 151 152 /* sanitize doorbell address */ 153 if (!ecore_db_rec_sanity(p_dev, db_addr)) 154 return ECORE_INVAL; 155 156 /* obtain hwfn from doorbell address */ 157 p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr); 158 159 /* create entry */ 160 db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry)); 161 if (!db_entry) { 162 DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n"); 163 return ECORE_NOMEM; 164 } 165 166 /* populate entry */ 167 db_entry->db_addr = db_addr; 168 db_entry->db_data = db_data; 169 db_entry->db_width = db_width; 170 db_entry->db_space = db_space; 171 db_entry->hwfn_idx = p_hwfn->my_id; 172 173 /* display */ 174 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding"); 175 176 /* protect the list */ 177 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 178 OSAL_LIST_PUSH_TAIL(&db_entry->list_entry, 179 &p_hwfn->db_recovery_info.list); 180 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 181 182 return ECORE_SUCCESS; 183 } 184 185 /* remove an entry from the doorbell recovery mechanism */ 186 enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev, 187 void OSAL_IOMEM *db_addr, 188 void *db_data) 189 { 190 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 191 enum _ecore_status_t rc = ECORE_INVAL; 
192 struct ecore_hwfn *p_hwfn; 193 194 /* shortcircuit VFs, for now */ 195 if (IS_VF(p_dev)) { 196 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n"); 197 return ECORE_SUCCESS; 198 } 199 200 /* sanitize doorbell address */ 201 if (!ecore_db_rec_sanity(p_dev, db_addr)) 202 return ECORE_INVAL; 203 204 /* obtain hwfn from doorbell address */ 205 p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr); 206 207 /* protect the list */ 208 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 209 OSAL_LIST_FOR_EACH_ENTRY(db_entry, 210 &p_hwfn->db_recovery_info.list, 211 list_entry, 212 struct ecore_db_recovery_entry) { 213 214 /* search according to db_data addr since db_addr is not unique (roce) */ 215 if (db_entry->db_data == db_data) { 216 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting"); 217 OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry, 218 &p_hwfn->db_recovery_info.list); 219 rc = ECORE_SUCCESS; 220 break; 221 } 222 } 223 224 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 225 226 if (rc == ECORE_INVAL) { 227 OSAL_WARN(true, "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n", 228 db_data, db_addr); 229 } else { 230 OSAL_FREE(p_dev, db_entry); 231 } 232 233 return rc; 234 } 235 236 /* initialize the doorbell recovery mechanism */ 237 static void ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn) 238 { 239 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n"); 240 OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list); 241 #ifdef CONFIG_ECORE_LOCK_ALLOC 242 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock); 243 #endif 244 OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock); 245 p_hwfn->db_recovery_info.db_recovery_counter = 0; 246 } 247 248 /* destroy the doorbell recovery mechanism */ 249 static void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn) 250 { 251 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 252 253 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n"); 254 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) { 255 DP_VERBOSE(p_hwfn, false, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n"); 256 while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) { 257 db_entry = OSAL_LIST_FIRST_ENTRY(&p_hwfn->db_recovery_info.list, 258 struct ecore_db_recovery_entry, 259 list_entry); 260 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging"); 261 OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry, 262 &p_hwfn->db_recovery_info.list); 263 OSAL_FREE(p_hwfn->p_dev, db_entry); 264 } 265 } 266 #ifdef CONFIG_ECORE_LOCK_ALLOC 267 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock); 268 #endif 269 p_hwfn->db_recovery_info.db_recovery_counter = 0; 270 } 271 272 /* print the content of the doorbell recovery mechanism */ 273 void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn) 274 { 275 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 276 277 DP_NOTICE(p_hwfn, false, 278 "Displaying doorbell recovery database. 
Counter was %d\n", 279 p_hwfn->db_recovery_info.db_recovery_counter); 280 281 /* protect the list */ 282 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 283 OSAL_LIST_FOR_EACH_ENTRY(db_entry, 284 &p_hwfn->db_recovery_info.list, 285 list_entry, 286 struct ecore_db_recovery_entry) { 287 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing"); 288 } 289 290 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 291 } 292 293 /* ring the doorbell of a single doorbell recovery entry */ 294 static void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn, 295 struct ecore_db_recovery_entry *db_entry, 296 enum ecore_db_rec_exec db_exec) 297 { 298 /* Print according to width */ 299 if (db_entry->db_width == DB_REC_WIDTH_32B) 300 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %x\n", 301 db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing", 302 db_entry->db_addr, *(u32 *)db_entry->db_data); 303 else 304 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %llx\n", 305 db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing", 306 db_entry->db_addr, (unsigned long long)*(u64 *)(db_entry->db_data)); 307 308 /* Sanity */ 309 if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr)) 310 return; 311 312 /* Flush the write combined buffer. Since there are multiple doorbelling 313 * entities using the same address, if we don't flush, a transaction 314 * could be lost. 315 */ 316 OSAL_WMB(p_hwfn->p_dev); 317 318 /* Ring the doorbell */ 319 if (db_exec == DB_REC_REAL_DEAL) { 320 if (db_entry->db_width == DB_REC_WIDTH_32B) 321 DIRECT_REG_WR(p_hwfn, db_entry->db_addr, *(u32 *)(db_entry->db_data)); 322 else 323 DIRECT_REG_WR64(p_hwfn, db_entry->db_addr, *(u64 *)(db_entry->db_data)); 324 } 325 326 /* Flush the write combined buffer. Next doorbell may come from a 327 * different entity to the same address... 328 */ 329 OSAL_WMB(p_hwfn->p_dev); 330 } 331 332 /* traverse the doorbell recovery entry list and ring all the doorbells */ 333 void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn, 334 enum ecore_db_rec_exec db_exec) 335 { 336 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 337 338 DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n", 339 p_hwfn->db_recovery_info.db_recovery_counter); 340 341 /* protect the list */ 342 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 343 OSAL_LIST_FOR_EACH_ENTRY(db_entry, 344 &p_hwfn->db_recovery_info.list, 345 list_entry, 346 struct ecore_db_recovery_entry) 347 ecore_db_recovery_ring(p_hwfn, db_entry, db_exec); 348 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 349 350 /* track amount of times recovery was executed */ 351 p_hwfn->db_recovery_info.db_recovery_counter++; 352 } 353 /******************** Doorbell Recovery end ****************/ 354 355 /* Configurable */ 356 #define ECORE_MIN_DPIS (4) /* The minimal number of DPIs required to 357 * load the driver. The number was 358 * arbitrarily set. 359 */ 360 361 /* Derived */ 362 #define ECORE_MIN_PWM_REGION (ECORE_WID_SIZE * ECORE_MIN_DPIS) 363 364 enum BAR_ID { 365 BAR_ID_0, /* used for GRC */ 366 BAR_ID_1 /* Used for doorbells */ 367 }; 368 369 static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, 370 struct ecore_ptt *p_ptt, 371 enum BAR_ID bar_id) 372 { 373 u32 bar_reg = (bar_id == BAR_ID_0 ? 374 PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); 375 u32 val; 376 377 if (IS_VF(p_hwfn->p_dev)) { 378 /* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be 379 * read from actual register, but we're currently not using 380 * it for actual doorbelling. 
*/ 382 return 1 << 17; 383 } 384 385 val = ecore_rd(p_hwfn, p_ptt, bar_reg); 386 if (val) 387 return 1 << (val + 15); 388 389 /* The above registers were updated in the past only in CMT mode. Since 390 * they were found to be useful MFW started updating them from 8.7.7.0. 391 * In older MFW versions they are set to 0 which means disabled. 392 */ 393 if (p_hwfn->p_dev->num_hwfns > 1) { 394 DP_INFO(p_hwfn, 395 "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); 396 return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024; 397 } else { 398 DP_INFO(p_hwfn, 399 "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n"); 400 return 512 * 1024; 401 } 402 } 403 404 void ecore_init_dp(struct ecore_dev *p_dev, 405 u32 dp_module, 406 u8 dp_level, 407 void *dp_ctx) 408 { 409 u32 i; 410 411 p_dev->dp_level = dp_level; 412 p_dev->dp_module = dp_module; 413 p_dev->dp_ctx = dp_ctx; 414 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 415 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 416 417 p_hwfn->dp_level = dp_level; 418 p_hwfn->dp_module = dp_module; 419 p_hwfn->dp_ctx = dp_ctx; 420 } 421 } 422 423 void ecore_init_struct(struct ecore_dev *p_dev) 424 { 425 u8 i; 426 427 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 428 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 429 430 p_hwfn->p_dev = p_dev; 431 p_hwfn->my_id = i; 432 p_hwfn->b_active = false; 433 434 #ifdef CONFIG_ECORE_LOCK_ALLOC 435 OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex); 436 #endif 437 OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex); 438 } 439 440 /* hwfn 0 is always active */ 441 p_dev->hwfns[0].b_active = true; 442 443 /* set the default cache alignment to 128 (may be overridden later) */ 444 p_dev->cache_shift = 7; 445 } 446 447 static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) 448 { 449 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 450 451 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params); 452 qm_info->qm_pq_params = OSAL_NULL; 453 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params); 454 qm_info->qm_vport_params = OSAL_NULL; 455 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params); 456 qm_info->qm_port_params = OSAL_NULL; 457 OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data); 458 qm_info->wfq_data = OSAL_NULL; 459 } 460 461 void ecore_resc_free(struct ecore_dev *p_dev) 462 { 463 int i; 464 465 if (IS_VF(p_dev)) { 466 for_each_hwfn(p_dev, i) 467 ecore_l2_free(&p_dev->hwfns[i]); 468 return; 469 } 470 471 OSAL_FREE(p_dev, p_dev->fw_data); 472 p_dev->fw_data = OSAL_NULL; 473 474 OSAL_FREE(p_dev, p_dev->reset_stats); 475 p_dev->reset_stats = OSAL_NULL; 476 477 for_each_hwfn(p_dev, i) { 478 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 479 480 ecore_cxt_mngr_free(p_hwfn); 481 ecore_qm_info_free(p_hwfn); 482 ecore_spq_free(p_hwfn); 483 ecore_eq_free(p_hwfn); 484 ecore_consq_free(p_hwfn); 485 ecore_int_free(p_hwfn); 486 #ifdef CONFIG_ECORE_LL2 487 ecore_ll2_free(p_hwfn); 488 #endif 489 #ifdef CONFIG_ECORE_FCOE 490 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 491 ecore_fcoe_free(p_hwfn); 492 #endif 493 #ifdef CONFIG_ECORE_ISCSI 494 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 495 ecore_iscsi_free(p_hwfn); 496 ecore_ooo_free(p_hwfn); 497 } 498 #endif 499 ecore_iov_free(p_hwfn); 500 ecore_l2_free(p_hwfn); 501 ecore_dmae_info_free(p_hwfn); 502 ecore_dcbx_info_free(p_hwfn); 503 /* @@@TBD Flush work-queue ?*/ 504 505 /* destroy doorbell recovery mechanism */ 506 ecore_db_recovery_teardown(p_hwfn); 507 } 508 } 509 510 /******************** QM initialization *******************/ 511 /* bitmaps for 
indicating active traffic classes. Special case for Arrowhead 4 port */ 512 #define ACTIVE_TCS_BMAP 0x9f /* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */ 513 #define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */ 514 515 /* determines the physical queue flags for a given PF. */ 516 static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) 517 { 518 u32 flags; 519 520 /* common flags */ 521 flags = PQ_FLAGS_LB; 522 523 /* feature flags */ 524 if (IS_ECORE_SRIOV(p_hwfn->p_dev)) 525 flags |= PQ_FLAGS_VFS; 526 if (IS_ECORE_DCQCN(p_hwfn)) 527 flags |= PQ_FLAGS_RLS; 528 529 /* protocol flags */ 530 switch (p_hwfn->hw_info.personality) { 531 case ECORE_PCI_ETH: 532 flags |= PQ_FLAGS_MCOS; 533 break; 534 case ECORE_PCI_FCOE: 535 flags |= PQ_FLAGS_OFLD; 536 break; 537 case ECORE_PCI_ISCSI: 538 flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 539 break; 540 case ECORE_PCI_ETH_ROCE: 541 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; 542 break; 543 case ECORE_PCI_ETH_IWARP: 544 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 545 break; 546 default: 547 DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality); 548 return 0; 549 } 550 551 return flags; 552 } 553 554 555 /* Getters for resource amounts necessary for qm initialization */ 556 u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn) 557 { 558 return p_hwfn->hw_info.num_hw_tc; 559 } 560 561 u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn) 562 { 563 return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0; 564 } 565 566 #define NUM_DEFAULT_RLS 1 567 568 u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn) 569 { 570 u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 571 572 /* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */ 573 num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), 574 (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_VPORT), 575 ROCE_DCQCN_RP_MAX_QPS)); 576 577 /* make sure after we reserve the default and VF rls we'll have something left */ 578 if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) { 579 if (IS_ECORE_DCQCN(p_hwfn)) 580 DP_NOTICE(p_hwfn, false, "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs); 581 return 0; 582 } 583 584 /* subtract rls necessary for VFs and one default one for the PF */ 585 num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 586 587 return num_pf_rls; 588 } 589 590 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn) 591 { 592 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 593 594 /* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */ 595 return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 596 (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1; 597 } 598 599 /* calc amount of PQs according to the requested flags */ 600 u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn) 601 { 602 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 603 604 return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 605 (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) + 606 (!!(PQ_FLAGS_LB & pq_flags)) + 607 (!!(PQ_FLAGS_OOO & pq_flags)) + 608 (!!(PQ_FLAGS_ACK & pq_flags)) + 609 (!!(PQ_FLAGS_OFLD & pq_flags)) + 610 (!!(PQ_FLAGS_LLT & pq_flags)) + 611 (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn); 612 } 613 614 /* initialize the top level QM params */ 615 static 
void ecore_init_qm_params(struct ecore_hwfn *p_hwfn) 616 { 617 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 618 bool four_port; 619 620 /* pq and vport bases for this PF */ 621 qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ); 622 qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT); 623 624 /* rate limiting and weighted fair queueing are always enabled */ 625 qm_info->vport_rl_en = 1; 626 qm_info->vport_wfq_en = 1; 627 628 /* TC config is different for AH 4 port */ 629 four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2; 630 631 /* in AH 4 port we have fewer TCs per port */ 632 qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS; 633 634 /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */ 635 if (!qm_info->ooo_tc) 636 qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC; 637 } 638 639 /* initialize qm vport params */ 640 static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn) 641 { 642 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 643 u8 i; 644 645 /* all vports participate in weighted fair queueing */ 646 for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++) 647 qm_info->qm_vport_params[i].vport_wfq = 1; 648 } 649 650 /* initialize qm port params */ 651 static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn) 652 { 653 /* Initialize qm port parameters */ 654 u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine; 655 656 /* indicate how ooo and high pri traffic is dealt with */ 657 active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 658 ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP; 659 660 for (i = 0; i < num_ports; i++) { 661 struct init_qm_port_params *p_qm_port = 662 &p_hwfn->qm_info.qm_port_params[i]; 663 664 p_qm_port->active = 1; 665 p_qm_port->active_phys_tcs = active_phys_tcs; 666 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports; 667 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 668 } 669 } 670 671 /* Reset the params which must be reset for qm init. QM init may be called as 672 * a result of flows other than driver load (e.g. dcbx renegotiation). Other 673 * params may be affected by the init but would simply recalculate to the same 674 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 675 * affected as these amounts stay the same. 676 */ 677 static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn) 678 { 679 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 680 681 qm_info->num_pqs = 0; 682 qm_info->num_vports = 0; 683 qm_info->num_pf_rls = 0; 684 qm_info->num_vf_pqs = 0; 685 qm_info->first_vf_pq = 0; 686 qm_info->first_mcos_pq = 0; 687 qm_info->first_rl_pq = 0; 688 } 689 690 static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn) 691 { 692 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 693 694 qm_info->num_vports++; 695 696 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 697 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 698 } 699 700 /* initialize a single pq and manage qm_info resources accounting. 701 * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF) 702 * and whether a new vport is allocated to the pq or not (i.e. 
vport will be shared) 703 */ 704 705 /* flags for pq init */ 706 #define PQ_INIT_SHARE_VPORT (1 << 0) 707 #define PQ_INIT_PF_RL (1 << 1) 708 #define PQ_INIT_VF_RL (1 << 2) 709 710 /* defines for pq init */ 711 #define PQ_INIT_DEFAULT_WRR_GROUP 1 712 #define PQ_INIT_DEFAULT_TC 0 713 #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 714 715 static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, 716 struct ecore_qm_info *qm_info, 717 u8 tc, u32 pq_init_flags) 718 { 719 u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn); 720 721 if (pq_idx > max_pq) 722 DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); 723 724 /* init pq params */ 725 qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; 726 qm_info->qm_pq_params[pq_idx].tc_id = tc; 727 qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 728 qm_info->qm_pq_params[pq_idx].rl_valid = 729 (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); 730 731 /* qm params accounting */ 732 qm_info->num_pqs++; 733 if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 734 qm_info->num_vports++; 735 736 if (pq_init_flags & PQ_INIT_PF_RL) 737 qm_info->num_pf_rls++; 738 739 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 740 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 741 742 if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) 743 DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn)); 744 } 745 746 /* get pq index according to PQ_FLAGS */ 747 static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn, 748 u32 pq_flags) 749 { 750 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 751 752 /* Can't have multiple flags set here */ 753 if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 754 goto err; 755 756 switch (pq_flags) { 757 case PQ_FLAGS_RLS: 758 return &qm_info->first_rl_pq; 759 case PQ_FLAGS_MCOS: 760 return &qm_info->first_mcos_pq; 761 case PQ_FLAGS_LB: 762 return &qm_info->pure_lb_pq; 763 case PQ_FLAGS_OOO: 764 return &qm_info->ooo_pq; 765 case PQ_FLAGS_ACK: 766 return &qm_info->pure_ack_pq; 767 case PQ_FLAGS_OFLD: 768 return &qm_info->offload_pq; 769 case PQ_FLAGS_LLT: 770 return &qm_info->low_latency_pq; 771 case PQ_FLAGS_VFS: 772 return &qm_info->first_vf_pq; 773 default: 774 goto err; 775 } 776 777 err: 778 DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 779 return OSAL_NULL; 780 } 781 782 /* save pq index in qm info */ 783 static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn, 784 u32 pq_flags, u16 pq_val) 785 { 786 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 787 788 *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 789 } 790 791 /* get tx pq index, with the PQ TX base already set (ready for context init) */ 792 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags) 793 { 794 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 795 796 return *base_pq_idx + CM_TX_PQ_BASE; 797 } 798 799 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc) 800 { 801 u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn); 802 803 if (tc > max_tc) 804 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 805 806 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 807 } 808 809 u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) 810 { 811 u16 
max_vf = ecore_init_qm_get_num_vfs(p_hwfn); 812 813 if (vf > max_vf) 814 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 815 816 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 817 } 818 819 u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) 820 { 821 u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); 822 823 if (rl > max_rl) 824 DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); 825 826 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 827 } 828 829 /* Functions for creating specific types of pqs */ 830 static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) 831 { 832 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 833 834 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 835 return; 836 837 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 838 ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 839 } 840 841 static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn) 842 { 843 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 844 845 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 846 return; 847 848 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 849 ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 850 } 851 852 static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn) 853 { 854 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 855 856 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 857 return; 858 859 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 860 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 861 } 862 863 static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn) 864 { 865 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 866 867 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 868 return; 869 870 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 871 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 872 } 873 874 static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn) 875 { 876 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 877 878 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) 879 return; 880 881 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); 882 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 883 } 884 885 static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn) 886 { 887 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 888 u8 tc_idx; 889 890 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 891 return; 892 893 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 894 for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++) 895 ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 896 } 897 898 static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn) 899 { 900 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 901 u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 902 903 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 904 return; 905 906 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 907 qm_info->num_vf_pqs = num_vfs; 908 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 909 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); 910 } 911 912 static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn) 913 { 914 u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn); 915 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 916 917 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 918 return; 919 920 
ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 921 for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 922 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); 923 } 924 925 static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn) 926 { 927 /* rate limited pqs, must come first (FW assumption) */ 928 ecore_init_qm_rl_pqs(p_hwfn); 929 930 /* pqs for multi cos */ 931 ecore_init_qm_mcos_pqs(p_hwfn); 932 933 /* pure loopback pq */ 934 ecore_init_qm_lb_pq(p_hwfn); 935 936 /* out of order pq */ 937 ecore_init_qm_ooo_pq(p_hwfn); 938 939 /* pure ack pq */ 940 ecore_init_qm_pure_ack_pq(p_hwfn); 941 942 /* pq for offloaded protocol */ 943 ecore_init_qm_offload_pq(p_hwfn); 944 945 /* low latency pq */ 946 ecore_init_qm_low_latency_pq(p_hwfn); 947 948 /* done sharing vports */ 949 ecore_init_qm_advance_vport(p_hwfn); 950 951 /* pqs for vfs */ 952 ecore_init_qm_vf_pqs(p_hwfn); 953 } 954 955 /* compare values of getters against resources amounts */ 956 static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn) 957 { 958 if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) { 959 DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 960 return ECORE_INVAL; 961 } 962 963 if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) { 964 DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 965 return ECORE_INVAL; 966 } 967 968 return ECORE_SUCCESS; 969 } 970 971 /* 972 * Function for verbose printing of the qm initialization results 973 */ 974 static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) 975 { 976 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 977 struct init_qm_vport_params *vport; 978 struct init_qm_port_params *port; 979 struct init_qm_pq_params *pq; 980 int i, tc; 981 982 /* top level params */ 983 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 984 qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->offload_pq, qm_info->pure_ack_pq); 985 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", 986 qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port); 987 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 988 qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn)); 989 990 /* port table */ 991 for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) { 992 port = &(qm_info->qm_port_params[i]); 993 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", 994 i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved); 995 } 996 997 /* vport table */ 998 for (i = 0; i < qm_info->num_vports; i++) { 999 vport = &(qm_info->qm_vport_params[i]); 1000 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", 1001 qm_info->start_vport + i, vport->vport_rl, vport->vport_wfq); 1002 for (tc = 0; tc < NUM_OF_TCS; tc++) 1003 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]); 1004 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n"); 1005 } 1006 1007 /* pq table */ 1008 for (i = 0; 
i < qm_info->num_pqs; i++) { 1009 pq = &(qm_info->qm_pq_params[i]); 1010 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", 1011 qm_info->start_pq + i, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid); 1012 } 1013 } 1014 1015 static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn) 1016 { 1017 /* reset params required for init run */ 1018 ecore_init_qm_reset_params(p_hwfn); 1019 1020 /* init QM top level params */ 1021 ecore_init_qm_params(p_hwfn); 1022 1023 /* init QM port params */ 1024 ecore_init_qm_port_params(p_hwfn); 1025 1026 /* init QM vport params */ 1027 ecore_init_qm_vport_params(p_hwfn); 1028 1029 /* init QM physical queue params */ 1030 ecore_init_qm_pq_params(p_hwfn); 1031 1032 /* display all that init */ 1033 ecore_dp_init_qm_params(p_hwfn); 1034 } 1035 1036 /* This function reconfigures the QM pf on the fly. 1037 * For this purpose we: 1038 * 1. reconfigure the QM database 1039 * 2. set new values to runtime array 1040 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 1041 * 4. activate init tool in QM_PF stage 1042 * 5. send an sdm_qm_cmd through rbc interface to release the QM 1043 */ 1044 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, 1045 struct ecore_ptt *p_ptt) 1046 { 1047 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1048 bool b_rc; 1049 enum _ecore_status_t rc; 1050 1051 /* initialize ecore's qm data structure */ 1052 ecore_init_qm_info(p_hwfn); 1053 1054 /* stop PF's qm queues */ 1055 OSAL_SPIN_LOCK(&qm_lock); 1056 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 1057 qm_info->start_pq, qm_info->num_pqs); 1058 OSAL_SPIN_UNLOCK(&qm_lock); 1059 if (!b_rc) 1060 return ECORE_INVAL; 1061 1062 /* clear the QM_PF runtime phase leftovers from previous init */ 1063 ecore_init_clear_rt_data(p_hwfn); 1064 1065 /* prepare QM portion of runtime array */ 1066 ecore_qm_init_pf(p_hwfn, p_ptt); 1067 1068 /* activate init tool on runtime array */ 1069 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 1070 p_hwfn->hw_info.hw_mode); 1071 if (rc != ECORE_SUCCESS) 1072 return rc; 1073 1074 /* start PF's qm queues */ 1075 OSAL_SPIN_LOCK(&qm_lock); 1076 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 1077 qm_info->start_pq, qm_info->num_pqs); 1078 OSAL_SPIN_UNLOCK(&qm_lock); 1079 if (!b_rc) 1080 return ECORE_INVAL; 1081 1082 return ECORE_SUCCESS; 1083 } 1084 1085 static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn) 1086 { 1087 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1088 enum _ecore_status_t rc; 1089 1090 rc = ecore_init_qm_sanity(p_hwfn); 1091 if (rc != ECORE_SUCCESS) 1092 goto alloc_err; 1093 1094 qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1095 sizeof(struct init_qm_pq_params) * 1096 ecore_init_qm_get_num_pqs(p_hwfn)); 1097 if (!qm_info->qm_pq_params) 1098 goto alloc_err; 1099 1100 qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1101 sizeof(struct init_qm_vport_params) * 1102 ecore_init_qm_get_num_vports(p_hwfn)); 1103 if (!qm_info->qm_vport_params) 1104 goto alloc_err; 1105 1106 qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1107 sizeof(struct init_qm_port_params) * 1108 p_hwfn->p_dev->num_ports_in_engine); 1109 if (!qm_info->qm_port_params) 1110 goto alloc_err; 1111 1112 qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1113 sizeof(struct ecore_wfq_data) * 1114 ecore_init_qm_get_num_vports(p_hwfn)); 1115 if (!qm_info->wfq_data) 1116 goto alloc_err; 1117 1118 return 
ECORE_SUCCESS; 1119 1120 alloc_err: 1121 DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n"); 1122 ecore_qm_info_free(p_hwfn); 1123 return ECORE_NOMEM; 1124 } 1125 /******************** End QM initialization ***************/ 1126 1127 enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) 1128 { 1129 u32 rdma_tasks, excess_tasks; 1130 u32 line_count; 1131 enum _ecore_status_t rc = ECORE_SUCCESS; 1132 int i; 1133 1134 if (IS_VF(p_dev)) { 1135 for_each_hwfn(p_dev, i) { 1136 rc = ecore_l2_alloc(&p_dev->hwfns[i]); 1137 if (rc != ECORE_SUCCESS) 1138 return rc; 1139 } 1140 return rc; 1141 } 1142 1143 p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL, 1144 sizeof(*p_dev->fw_data)); 1145 if (!p_dev->fw_data) 1146 return ECORE_NOMEM; 1147 1148 for_each_hwfn(p_dev, i) { 1149 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1150 u32 n_eqes, num_cons; 1151 1152 /* First allocate the context manager structure */ 1153 rc = ecore_cxt_mngr_alloc(p_hwfn); 1154 if (rc) 1155 goto alloc_err; 1156 1157 /* Set the HW cid/tid numbers (in the context manager) 1158 * Must be done prior to any further computations. 1159 */ 1160 rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); 1161 if (rc) 1162 goto alloc_err; 1163 1164 rc = ecore_alloc_qm_data(p_hwfn); 1165 if (rc) 1166 goto alloc_err; 1167 1168 /* init qm info */ 1169 ecore_init_qm_info(p_hwfn); 1170 1171 /* Compute the ILT client partition */ 1172 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 1173 if (rc) { 1174 DP_NOTICE(p_hwfn, false, "too many ILT lines; re-computing with fewer lines\n"); 1175 /* In case there are not enough ILT lines we reduce the 1176 * number of RDMA tasks and re-compute. 1177 */ 1178 excess_tasks = ecore_cxt_cfg_ilt_compute_excess( 1179 p_hwfn, line_count); 1180 if (!excess_tasks) 1181 goto alloc_err; 1182 1183 rdma_tasks = RDMA_MAX_TIDS - excess_tasks; 1184 rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks); 1185 if (rc) 1186 goto alloc_err; 1187 1188 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 1189 if (rc) { 1190 DP_ERR(p_hwfn, "failed ILT compute. Requested too many lines: %u\n", 1191 line_count); 1192 1193 goto alloc_err; 1194 } 1195 } 1196 1197 /* CID map / ILT shadow table / T2 1198 * The table sizes are determined by the computations above 1199 */ 1200 rc = ecore_cxt_tables_alloc(p_hwfn); 1201 if (rc) 1202 goto alloc_err; 1203 1204 /* SPQ, must follow ILT because it initializes SPQ context */ 1205 rc = ecore_spq_alloc(p_hwfn); 1206 if (rc) 1207 goto alloc_err; 1208 1209 /* SP status block allocation */ 1210 p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, 1211 RESERVED_PTT_DPC); 1212 1213 rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 1214 if (rc) 1215 goto alloc_err; 1216 1217 rc = ecore_iov_alloc(p_hwfn); 1218 if (rc) 1219 goto alloc_err; 1220 1221 /* EQ */ 1222 n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain); 1223 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 1224 /* Calculate the EQ size 1225 * --------------------- 1226 * Each ICID may generate up to one event at a time i.e. 1227 * the event must be handled/cleared before a new one 1228 * can be generated. We calculate the sum of events per 1229 * protocol and create an EQ deep enough to handle the 1230 * worst case: 1231 * - Core - according to SPQ. 1232 * - RoCE - per QP there are a couple of ICIDs, one 1233 * responder and one requester, each can 1234 * generate an EQE => n_eqes_qp = 2 * n_qp. 1235 * Each CQ can generate an EQE. There are 2 CQs 1236 * per QP => n_eqes_cq = 2 * n_qp. 
1237 * Hence the RoCE total is 4 * n_qp or 1238 * 2 * num_cons. 1239 * - ENet - There can be up to two events per VF. One 1240 * for VF-PF channel and another for VF FLR 1241 * initial cleanup. The number of VFs is 1242 * bounded by MAX_NUM_VFS_BB, and is much 1243 * smaller than RoCE's so we avoid exact 1244 * calculation. 1245 */ 1246 if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) { 1247 num_cons = ecore_cxt_get_proto_cid_count( 1248 p_hwfn, PROTOCOLID_ROCE, OSAL_NULL); 1249 num_cons *= 2; 1250 } else { 1251 num_cons = ecore_cxt_get_proto_cid_count( 1252 p_hwfn, PROTOCOLID_IWARP, 1253 OSAL_NULL); 1254 } 1255 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 1256 } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1257 num_cons = ecore_cxt_get_proto_cid_count( 1258 p_hwfn, PROTOCOLID_ISCSI, OSAL_NULL); 1259 n_eqes += 2 * num_cons; 1260 } 1261 1262 if (n_eqes > 0xFFFF) { 1263 DP_ERR(p_hwfn, 1264 "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n", 1265 n_eqes, 0xFFFF); 1266 goto alloc_no_mem; 1267 } 1268 1269 rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes); 1270 if (rc) 1271 goto alloc_err; 1272 1273 rc = ecore_consq_alloc(p_hwfn); 1274 if (rc) 1275 goto alloc_err; 1276 1277 rc = ecore_l2_alloc(p_hwfn); 1278 if (rc != ECORE_SUCCESS) 1279 goto alloc_err; 1280 1281 #ifdef CONFIG_ECORE_LL2 1282 if (p_hwfn->using_ll2) { 1283 rc = ecore_ll2_alloc(p_hwfn); 1284 if (rc) 1285 goto alloc_err; 1286 } 1287 #endif 1288 #ifdef CONFIG_ECORE_FCOE 1289 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { 1290 rc = ecore_fcoe_alloc(p_hwfn); 1291 if (rc) 1292 goto alloc_err; 1293 } 1294 #endif 1295 #ifdef CONFIG_ECORE_ISCSI 1296 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1297 rc = ecore_iscsi_alloc(p_hwfn); 1298 if (rc) 1299 goto alloc_err; 1300 rc = ecore_ooo_alloc(p_hwfn); 1301 if (rc) 1302 goto alloc_err; 1303 } 1304 #endif 1305 1306 /* DMA info initialization */ 1307 rc = ecore_dmae_info_alloc(p_hwfn); 1308 if (rc) { 1309 DP_NOTICE(p_hwfn, true, 1310 "Failed to allocate memory for dmae_info structure\n"); 1311 goto alloc_err; 1312 } 1313 1314 /* DCBX initialization */ 1315 rc = ecore_dcbx_info_alloc(p_hwfn); 1316 if (rc) { 1317 DP_NOTICE(p_hwfn, true, 1318 "Failed to allocate memory for dcbx structure\n"); 1319 goto alloc_err; 1320 } 1321 1322 /* initialize the doorbell recovery mechanism */ 1323 ecore_db_recovery_setup(p_hwfn); 1324 } 1325 1326 p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, 1327 sizeof(*p_dev->reset_stats)); 1328 if (!p_dev->reset_stats) { 1329 DP_NOTICE(p_dev, true, 1330 "Failed to allocate reset statistics\n"); 1331 goto alloc_no_mem; 1332 } 1333 1334 return ECORE_SUCCESS; 1335 1336 alloc_no_mem: 1337 rc = ECORE_NOMEM; 1338 alloc_err: 1339 ecore_resc_free(p_dev); 1340 return rc; 1341 } 1342 1343 void ecore_resc_setup(struct ecore_dev *p_dev) 1344 { 1345 int i; 1346 1347 if (IS_VF(p_dev)) { 1348 for_each_hwfn(p_dev, i) 1349 ecore_l2_setup(&p_dev->hwfns[i]); 1350 return; 1351 } 1352 1353 for_each_hwfn(p_dev, i) { 1354 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1355 1356 ecore_cxt_mngr_setup(p_hwfn); 1357 ecore_spq_setup(p_hwfn); 1358 ecore_eq_setup(p_hwfn); 1359 ecore_consq_setup(p_hwfn); 1360 1361 /* Read shadow of current MFW mailbox */ 1362 ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 1363 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 1364 p_hwfn->mcp_info->mfw_mb_cur, 1365 p_hwfn->mcp_info->mfw_mb_length); 1366 1367 ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt); 1368 1369 ecore_l2_setup(p_hwfn); 1370 ecore_iov_setup(p_hwfn); 1371 #ifdef 
CONFIG_ECORE_LL2 1372 if (p_hwfn->using_ll2) 1373 ecore_ll2_setup(p_hwfn); 1374 #endif 1375 #ifdef CONFIG_ECORE_FCOE 1376 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 1377 ecore_fcoe_setup(p_hwfn); 1378 #endif 1379 #ifdef CONFIG_ECORE_ISCSI 1380 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1381 ecore_iscsi_setup(p_hwfn); 1382 ecore_ooo_setup(p_hwfn); 1383 } 1384 #endif 1385 } 1386 } 1387 1388 #define FINAL_CLEANUP_POLL_CNT (100) 1389 #define FINAL_CLEANUP_POLL_TIME (10) 1390 enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, 1391 struct ecore_ptt *p_ptt, 1392 u16 id, bool is_vf) 1393 { 1394 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 1395 enum _ecore_status_t rc = ECORE_TIMEOUT; 1396 1397 #ifndef ASIC_ONLY 1398 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) || 1399 CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1400 DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n"); 1401 return ECORE_SUCCESS; 1402 } 1403 #endif 1404 1405 addr = GTT_BAR0_MAP_REG_USDM_RAM + 1406 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 1407 1408 if (is_vf) 1409 id += 0x10; 1410 1411 command |= X_FINAL_CLEANUP_AGG_INT << 1412 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 1413 command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 1414 command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 1415 command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; 1416 1417 /* Make sure notification is not set before initiating final cleanup */ 1418 if (REG_RD(p_hwfn, addr)) { 1419 DP_NOTICE(p_hwfn, false, 1420 "Unexpected; Found final cleanup notification before initiating final cleanup\n"); 1421 REG_WR(p_hwfn, addr, 0); 1422 } 1423 1424 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1425 "Sending final cleanup for PFVF[%d] [Command %08x\n]", 1426 id, command); 1427 1428 ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 1429 1430 /* Poll until completion */ 1431 while (!REG_RD(p_hwfn, addr) && count--) 1432 OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME); 1433 1434 if (REG_RD(p_hwfn, addr)) 1435 rc = ECORE_SUCCESS; 1436 else 1437 DP_NOTICE(p_hwfn, true, "Failed to receive FW final cleanup notification\n"); 1438 1439 /* Cleanup afterwards */ 1440 REG_WR(p_hwfn, addr, 0); 1441 1442 return rc; 1443 } 1444 1445 static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) 1446 { 1447 int hw_mode = 0; 1448 1449 if (ECORE_IS_BB_B0(p_hwfn->p_dev)) { 1450 hw_mode |= 1 << MODE_BB; 1451 } else if (ECORE_IS_AH(p_hwfn->p_dev)) { 1452 hw_mode |= 1 << MODE_K2; 1453 } else if (ECORE_IS_E5(p_hwfn->p_dev)) { 1454 hw_mode |= 1 << MODE_E5; 1455 } else { 1456 DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n", 1457 p_hwfn->p_dev->type); 1458 return ECORE_INVAL; 1459 } 1460 1461 /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE*/ 1462 switch (p_hwfn->p_dev->num_ports_in_engine) { 1463 case 1: 1464 hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 1465 break; 1466 case 2: 1467 hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 1468 break; 1469 case 4: 1470 hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 1471 break; 1472 default: 1473 DP_NOTICE(p_hwfn, true, "num_ports_in_engine = %d not supported\n", 1474 p_hwfn->p_dev->num_ports_in_engine); 1475 return ECORE_INVAL; 1476 } 1477 1478 switch (p_hwfn->p_dev->mf_mode) { 1479 case ECORE_MF_DEFAULT: 1480 case ECORE_MF_NPAR: 1481 hw_mode |= 1 << MODE_MF_SI; 1482 break; 1483 case ECORE_MF_OVLAN: 1484 hw_mode |= 1 << MODE_MF_SD; 1485 break; 1486 default: 1487 DP_NOTICE(p_hwfn, true, "Unsupported MF mode, init as DEFAULT\n"); 1488 hw_mode |= 1 << MODE_MF_SI; 1489 } 1490 1491 
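	/* Illustrative example (not an exhaustive list of mode bits): a 2-port
	 * BB ASIC running in NPAR mode would end up with
	 *	hw_mode = (1 << MODE_BB) | (1 << MODE_PORTS_PER_ENG_2) |
	 *		  (1 << MODE_MF_SI) | (1 << MODE_ASIC),
	 * which is then stored in p_hwfn->hw_info.hw_mode and later handed to
	 * the init tool.
	 */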
#ifndef ASIC_ONLY 1492 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1493 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1494 hw_mode |= 1 << MODE_FPGA; 1495 } else { 1496 if (p_hwfn->p_dev->b_is_emul_full) 1497 hw_mode |= 1 << MODE_EMUL_FULL; 1498 else 1499 hw_mode |= 1 << MODE_EMUL_REDUCED; 1500 } 1501 } else 1502 #endif 1503 hw_mode |= 1 << MODE_ASIC; 1504 1505 if (p_hwfn->p_dev->num_hwfns > 1) 1506 hw_mode |= 1 << MODE_100G; 1507 1508 p_hwfn->hw_info.hw_mode = hw_mode; 1509 1510 DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP), 1511 "Configuring function for hw_mode: 0x%08x\n", 1512 p_hwfn->hw_info.hw_mode); 1513 1514 return ECORE_SUCCESS; 1515 } 1516 1517 #ifndef ASIC_ONLY 1518 /* MFW-replacement initializations for non-ASIC */ 1519 static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn, 1520 struct ecore_ptt *p_ptt) 1521 { 1522 struct ecore_dev *p_dev = p_hwfn->p_dev; 1523 u32 pl_hv = 1; 1524 int i; 1525 1526 if (CHIP_REV_IS_EMUL(p_dev)) { 1527 if (ECORE_IS_AH(p_dev)) 1528 pl_hv |= 0x600; 1529 else if (ECORE_IS_E5(p_dev)) 1530 ECORE_E5_MISSING_CODE; 1531 } 1532 1533 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv); 1534 1535 if (CHIP_REV_IS_EMUL(p_dev) && 1536 (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev))) 1537 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5, 1538 0x3ffffff); 1539 1540 /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */ 1541 /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */ 1542 if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev)) 1543 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4); 1544 1545 if (CHIP_REV_IS_EMUL(p_dev)) { 1546 if (ECORE_IS_AH(p_dev)) { 1547 /* 2 for 4-port, 1 for 2-port, 0 for 1-port */ 1548 ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE, 1549 (p_dev->num_ports_in_engine >> 1)); 1550 1551 ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN, 1552 p_dev->num_ports_in_engine == 4 ? 0 : 3); 1553 } else if (ECORE_IS_E5(p_dev)) { 1554 ECORE_E5_MISSING_CODE; 1555 } 1556 1557 /* Poll on RBC */ 1558 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1); 1559 for (i = 0; i < 100; i++) { 1560 OSAL_UDELAY(50); 1561 if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1) 1562 break; 1563 } 1564 if (i == 100) 1565 DP_NOTICE(p_hwfn, true, 1566 "RBC done failed to complete in PSWRQ2\n"); 1567 } 1568 1569 return ECORE_SUCCESS; 1570 } 1571 #endif 1572 1573 /* Init run time data for all PFs and their VFs on an engine. 1574 * TBD - for VFs - Once we have parent PF info for each VF in 1575 * shmem available as CAU requires knowledge of parent PF for each VF. 
1576 */ 1577 static void ecore_init_cau_rt_data(struct ecore_dev *p_dev) 1578 { 1579 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 1580 int i, igu_sb_id; 1581 1582 for_each_hwfn(p_dev, i) { 1583 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1584 struct ecore_igu_info *p_igu_info; 1585 struct ecore_igu_block *p_block; 1586 struct cau_sb_entry sb_entry; 1587 1588 p_igu_info = p_hwfn->hw_info.p_igu_info; 1589 1590 for (igu_sb_id = 0; 1591 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev); 1592 igu_sb_id++) { 1593 p_block = &p_igu_info->entry[igu_sb_id]; 1594 1595 if (!p_block->is_pf) 1596 continue; 1597 1598 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, 1599 p_block->function_id, 1600 0, 0); 1601 STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, 1602 sb_entry); 1603 } 1604 } 1605 } 1606 1607 static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn, 1608 struct ecore_ptt *p_ptt) 1609 { 1610 u32 val, wr_mbs, cache_line_size; 1611 1612 val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 1613 switch (val) { 1614 case 0: 1615 wr_mbs = 128; 1616 break; 1617 case 1: 1618 wr_mbs = 256; 1619 break; 1620 case 2: 1621 wr_mbs = 512; 1622 break; 1623 default: 1624 DP_INFO(p_hwfn, 1625 "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1626 val); 1627 return; 1628 } 1629 1630 cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs); 1631 switch (cache_line_size) { 1632 case 32: 1633 val = 0; 1634 break; 1635 case 64: 1636 val = 1; 1637 break; 1638 case 128: 1639 val = 2; 1640 break; 1641 case 256: 1642 val = 3; 1643 break; 1644 default: 1645 DP_INFO(p_hwfn, 1646 "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1647 cache_line_size); 1648 } 1649 1650 if (OSAL_CACHE_LINE_SIZE > wr_mbs) 1651 DP_INFO(p_hwfn, 1652 "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 1653 OSAL_CACHE_LINE_SIZE, wr_mbs); 1654 1655 STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 1656 if (val > 0) { 1657 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val); 1658 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val); 1659 } 1660 } 1661 1662 static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, 1663 struct ecore_ptt *p_ptt, 1664 int hw_mode) 1665 { 1666 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1667 struct ecore_dev *p_dev = p_hwfn->p_dev; 1668 u8 vf_id, max_num_vfs; 1669 u16 num_pfs, pf_id; 1670 u32 concrete_fid; 1671 enum _ecore_status_t rc = ECORE_SUCCESS; 1672 1673 ecore_init_cau_rt_data(p_dev); 1674 1675 /* Program GTT windows */ 1676 ecore_gtt_init(p_hwfn, p_ptt); 1677 1678 #ifndef ASIC_ONLY 1679 if (CHIP_REV_IS_EMUL(p_dev)) { 1680 rc = ecore_hw_init_chip(p_hwfn, p_ptt); 1681 if (rc != ECORE_SUCCESS) 1682 return rc; 1683 } 1684 #endif 1685 1686 if (p_hwfn->mcp_info) { 1687 if (p_hwfn->mcp_info->func_info.bandwidth_max) 1688 qm_info->pf_rl_en = 1; 1689 if (p_hwfn->mcp_info->func_info.bandwidth_min) 1690 qm_info->pf_wfq_en = 1; 1691 } 1692 1693 ecore_qm_common_rt_init(p_hwfn, 1694 p_dev->num_ports_in_engine, 1695 qm_info->max_phys_tcs_per_port, 1696 qm_info->pf_rl_en, qm_info->pf_wfq_en, 1697 qm_info->vport_rl_en, qm_info->vport_wfq_en, 1698 qm_info->qm_port_params); 1699 1700 ecore_cxt_hw_init_common(p_hwfn); 1701 1702 ecore_init_cache_line_size(p_hwfn, p_ptt); 1703 1704 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); 1705 if (rc != ECORE_SUCCESS) 1706 return rc; 1707 1708 /* @@@TBD mask 
DORQ afull as it is now benign. Init tool should do this */ 1709 ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_MASK, 1710 DORQ_REG_INT_STS_DORQ_FIFO_AFULL); 1711 1712 /* @@TBD MichalK - should add VALIDATE_VFID to init tool... 1713 * need to decide with which value, maybe runtime 1714 */ 1715 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 1716 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 1717 1718 if (ECORE_IS_BB(p_dev)) { 1719 /* Workaround clears ROCE search for all functions to prevent 1720 * involving non intialized function in processing ROCE packet. 1721 */ 1722 num_pfs = NUM_OF_ENG_PFS(p_dev); 1723 for (pf_id = 0; pf_id < num_pfs; pf_id++) { 1724 ecore_fid_pretend(p_hwfn, p_ptt, pf_id); 1725 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1726 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1727 } 1728 /* pretend to original PF */ 1729 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1730 } 1731 1732 /* Workaround for avoiding CCFC execution error when getting packets 1733 * with CRC errors, and allowing instead the invoking of the FW error 1734 * handler. 1735 * This is not done inside the init tool since it currently can't 1736 * perform a pretending to VFs. 1737 */ 1738 max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; 1739 for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { 1740 concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id); 1741 ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); 1742 ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 1743 ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); 1744 ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); 1745 ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); 1746 } 1747 /* pretend to original PF */ 1748 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1749 1750 return rc; 1751 } 1752 1753 #ifndef ASIC_ONLY 1754 #define MISC_REG_RESET_REG_2_XMAC_BIT (1<<4) 1755 #define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1<<5) 1756 1757 #define PMEG_IF_BYTE_COUNT 8 1758 1759 static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn, 1760 struct ecore_ptt *p_ptt, 1761 u32 addr, 1762 u64 data, 1763 u8 reg_type, 1764 u8 port) 1765 { 1766 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 1767 "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n", 1768 ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) | 1769 (8 << PMEG_IF_BYTE_COUNT), 1770 (reg_type << 25) | (addr << 8) | port, 1771 (u32)((data >> 32) & 0xffffffff), 1772 (u32)(data & 0xffffffff)); 1773 1774 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB, 1775 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) & 1776 0xffff00fe) | 1777 (8 << PMEG_IF_BYTE_COUNT)); 1778 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB, 1779 (reg_type << 25) | (addr << 8) | port); 1780 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff); 1781 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, 1782 (data >> 32) & 0xffffffff); 1783 } 1784 1785 #define XLPORT_MODE_REG (0x20a) 1786 #define XLPORT_MAC_CONTROL (0x210) 1787 #define XLPORT_FLOW_CONTROL_CONFIG (0x207) 1788 #define XLPORT_ENABLE_REG (0x20b) 1789 1790 #define XLMAC_CTRL (0x600) 1791 #define XLMAC_MODE (0x601) 1792 #define XLMAC_RX_MAX_SIZE (0x608) 1793 #define XLMAC_TX_CTRL (0x604) 1794 #define XLMAC_PAUSE_CTRL (0x60d) 1795 #define XLMAC_PFC_CTRL (0x60e) 1796 1797 static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn, 1798 struct ecore_ptt *p_ptt) 1799 { 1800 u8 loopback = 0, port = p_hwfn->port_id * 2; 1801 1802 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 1803 1804 
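	/* Descriptive summary of the write sequence below (the values are the
	 * emulation bring-up defaults used here, not general tuning advice):
	 * program the XLPORT MAC mode, take the XLMAC through a soft reset,
	 * set its speed mode and max RX frame size, configure TX/pause/PFC
	 * controls, then enable TX/RX plus the parallel PFC interface and
	 * finally the port itself.
	 */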
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, 1805 (0x4 << 4) | 0x4, 1, port); /* XLPORT MAC MODE */ /* 0 Quad, 4 Single... */ 1806 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port); 1807 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 1808 0x40, 0, port); /*XLMAC: SOFT RESET */ 1809 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 1810 0x40, 0, port); /*XLMAC: Port Speed >= 10Gbps */ 1811 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 1812 0x3fff, 0, port); /* XLMAC: Max Size */ 1813 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL, 1814 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38), 1815 0, port); 1816 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 1817 0x7c000, 0, port); 1818 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL, 1819 0x30ffffc000ULL, 0, port); 1820 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 1821 0, port); /* XLMAC: TX_EN, RX_EN */ 1822 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2), 1823 0, port); /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */ 1824 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1825 1, 0, port); /* Enabled Parallel PFC interface */ 1826 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 1827 0xf, 1, port); /* XLPORT port enable */ 1828 } 1829 1830 static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn, 1831 struct ecore_ptt *p_ptt) 1832 { 1833 u8 port = p_hwfn->port_id; 1834 u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE; 1835 1836 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 1837 1838 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2), 1839 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) | 1840 (port << 1841 CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) | 1842 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT)); 1843 1844 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5, 1845 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT); 1846 1847 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5, 1848 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT); 1849 1850 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5, 1851 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT); 1852 1853 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5, 1854 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT); 1855 1856 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5, 1857 (0xA << 1858 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) | 1859 (8 << 1860 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT)); 1861 1862 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5, 1863 0xa853); 1864 } 1865 1866 static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn, 1867 struct ecore_ptt *p_ptt) 1868 { 1869 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) 1870 ecore_emul_link_init_ah_e5(p_hwfn, p_ptt); 1871 else /* BB */ 1872 ecore_emul_link_init_bb(p_hwfn, p_ptt); 1873 1874 return; 1875 } 1876 1877 static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn, 1878 struct ecore_ptt *p_ptt, u8 port) 1879 { 1880 int port_offset = port ? 
0x800 : 0; 1881 u32 xmac_rxctrl = 0; 1882 1883 /* Reset of XMAC */ 1884 /* FIXME: move to common start */ 1885 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2*sizeof(u32), 1886 MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */ 1887 OSAL_MSLEEP(1); 1888 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 1889 MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */ 1890 1891 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1); 1892 1893 /* Set the number of ports on the Warp Core to 10G */ 1894 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3); 1895 1896 /* Soft reset of XMAC */ 1897 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), 1898 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 1899 OSAL_MSLEEP(1); 1900 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 1901 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 1902 1903 /* FIXME: move to common end */ 1904 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 1905 ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20); 1906 1907 /* Set Max packet size: initialize XMAC block register for port 0 */ 1908 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710); 1909 1910 /* CRC append for Tx packets: init XMAC block register for port 1 */ 1911 ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800); 1912 1913 /* Enable TX and RX: initialize XMAC block register for port 1 */ 1914 ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset, 1915 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB); 1916 xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, 1917 XMAC_REG_RX_CTRL_BB + port_offset); 1918 xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB; 1919 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl); 1920 } 1921 #endif 1922 1923 static enum _ecore_status_t 1924 ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn, 1925 struct ecore_ptt *p_ptt, 1926 u32 pwm_region_size, 1927 u32 n_cpus) 1928 { 1929 u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size; 1930 u32 dpi_bit_shift, dpi_count; 1931 u32 min_dpis; 1932 1933 /* Calculate DPI size 1934 * ------------------ 1935 * The PWM region contains Doorbell Pages. The first is reserverd for 1936 * the kernel for, e.g, L2. The others are free to be used by non- 1937 * trusted applications, typically from user space. Each page, called a 1938 * doorbell page is sectioned into windows that allow doorbells to be 1939 * issued in parallel by the kernel/application. The size of such a 1940 * window (a.k.a. WID) is 1kB. 1941 * Summary: 1942 * 1kB WID x N WIDS = DPI page size 1943 * DPI page size x N DPIs = PWM region size 1944 * Notes: 1945 * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE 1946 * in order to ensure that two applications won't share the same page. 1947 * It also must contain at least one WID per CPU to allow parallelism. 1948 * It also must be a power of 2, since it is stored as a bit shift. 1949 * 1950 * The DPI page size is stored in a register as 'dpi_bit_shift' so that 1951 * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096 1952 * containing 4 WIDs. 
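	 * Example (illustrative numbers only, assuming a 1kB WID and a 4kB
	 * OSAL_PAGE_SIZE): with n_cpus = 8, dpi_page_size_1 = 8kB and
	 * dpi_page_size_2 = 4kB, so the DPI page size is 8kB (already a page
	 * multiple) and dpi_bit_shift = 1. A 2MB PWM region would then yield
	 * dpi_count = 256.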
1953 */ 1954 dpi_page_size_1 = ECORE_WID_SIZE * n_cpus; 1955 dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE); 1956 dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2); 1957 dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) & ~(OSAL_PAGE_SIZE - 1); 1958 dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096); 1959 1960 dpi_count = pwm_region_size / dpi_page_size; 1961 1962 min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; 1963 min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis); 1964 1965 /* Update hwfn */ 1966 p_hwfn->dpi_size = dpi_page_size; 1967 p_hwfn->dpi_count = dpi_count; 1968 1969 /* Update registers */ 1970 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); 1971 1972 if (dpi_count < min_dpis) 1973 return ECORE_NORESOURCES; 1974 1975 return ECORE_SUCCESS; 1976 } 1977 1978 enum ECORE_ROCE_EDPM_MODE { 1979 ECORE_ROCE_EDPM_MODE_ENABLE = 0, 1980 ECORE_ROCE_EDPM_MODE_FORCE_ON = 1, 1981 ECORE_ROCE_EDPM_MODE_DISABLE = 2, 1982 }; 1983 1984 static enum _ecore_status_t 1985 ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn, 1986 struct ecore_ptt *p_ptt) 1987 { 1988 u32 pwm_regsize, norm_regsize; 1989 u32 non_pwm_conn, min_addr_reg1; 1990 u32 db_bar_size, n_cpus = 1; 1991 u32 roce_edpm_mode; 1992 u32 pf_dems_shift; 1993 enum _ecore_status_t rc = ECORE_SUCCESS; 1994 u8 cond; 1995 1996 db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); 1997 if (p_hwfn->p_dev->num_hwfns > 1) 1998 db_bar_size /= 2; 1999 2000 /* Calculate doorbell regions 2001 * ----------------------------------- 2002 * The doorbell BAR is made of two regions. The first is called normal 2003 * region and the second is called PWM region. In the normal region 2004 * each ICID has its own set of addresses so that writing to that 2005 * specific address identifies the ICID. In the Process Window Mode 2006 * region the ICID is given in the data written to the doorbell. The 2007 * above per PF register denotes the offset in the doorbell BAR in which 2008 * the PWM region begins. 2009 * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per 2010 * non-PWM connection. The calculation below computes the total non-PWM 2011 * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is 2012 * in units of 4,096 bytes. 2013 */ 2014 non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 2015 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 2016 OSAL_NULL) + 2017 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 2018 OSAL_NULL); 2019 norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, OSAL_PAGE_SIZE); 2020 min_addr_reg1 = norm_regsize / 4096; 2021 pwm_regsize = db_bar_size - norm_regsize; 2022 2023 /* Check that the normal and PWM sizes are valid */ 2024 if (db_bar_size < norm_regsize) { 2025 DP_ERR(p_hwfn->p_dev, "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", db_bar_size, norm_regsize); 2026 return ECORE_NORESOURCES; 2027 } 2028 if (pwm_regsize < ECORE_MIN_PWM_REGION) { 2029 DP_ERR(p_hwfn->p_dev, "PWM region size 0x%0x is too small. 
Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size, norm_regsize); 2030 return ECORE_NORESOURCES; 2031 } 2032 2033 /* Calculate number of DPIs */ 2034 roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; 2035 if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) || 2036 ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) { 2037 /* Either EDPM is mandatory, or we are attempting to allocate a 2038 * WID per CPU. 2039 */ 2040 n_cpus = OSAL_NUM_CPUS(); 2041 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 2042 } 2043 2044 cond = ((rc != ECORE_SUCCESS) && 2045 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) || 2046 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE); 2047 if (cond || p_hwfn->dcbx_no_edpm) { 2048 /* Either EDPM is disabled from user configuration, or it is 2049 * disabled via DCBx, or it is not mandatory and we failed to 2050 * allocated a WID per CPU. 2051 */ 2052 n_cpus = 1; 2053 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 2054 2055 #ifdef CONFIG_ECORE_ROCE 2056 /* If we entered this flow due to DCBX then the DPM register is 2057 * already configured. 2058 */ 2059 if (cond) 2060 ecore_rdma_dpm_bar(p_hwfn, p_ptt); 2061 #endif 2062 } 2063 2064 p_hwfn->wid_count = (u16)n_cpus; 2065 2066 DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", 2067 norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count, 2068 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 2069 "disabled" : "enabled"); 2070 2071 /* Check return codes from above calls */ 2072 if (rc != ECORE_SUCCESS) { 2073 #ifndef LINUX_REMOVE 2074 DP_ERR(p_hwfn, 2075 "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d. You can try reducing this down to %d via user configuration n_dpi or by disabling EDPM via user configuration roce_edpm\n", 2076 p_hwfn->dpi_count, 2077 p_hwfn->pf_params.rdma_pf_params.min_dpis, 2078 ECORE_MIN_DPIS); 2079 #else 2080 DP_ERR(p_hwfn, 2081 "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d. You can try reducing this down to %d via the module parameter min_roce_dpis or by disabling EDPM via the module parameter roce_edpm\n", 2082 p_hwfn->dpi_count, 2083 p_hwfn->pf_params.rdma_pf_params.min_dpis, 2084 ECORE_MIN_DPIS); 2085 #endif 2086 return ECORE_NORESOURCES; 2087 } 2088 2089 /* Update hwfn */ 2090 p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to 2091 * calculate the doorbell 2092 * address 2093 */ 2094 2095 /* Update registers */ 2096 /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ 2097 pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4); 2098 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 2099 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 2100 2101 return ECORE_SUCCESS; 2102 } 2103 2104 static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn, 2105 struct ecore_ptt *p_ptt, 2106 int hw_mode) 2107 { 2108 enum _ecore_status_t rc = ECORE_SUCCESS; 2109 2110 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, 2111 hw_mode); 2112 if (rc != ECORE_SUCCESS) 2113 return rc; 2114 2115 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0); 2116 2117 #if 0 2118 /* FW 8.10.5.0 requires us to configure PF_VECTOR and DUALMODE in LLH. 2119 * This would hopefully be moved to MFW. 
2120 */ 2121 if (IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) { 2122 u8 pf_id = 0; 2123 2124 if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) == 2125 ECORE_SUCCESS) { 2126 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 2127 "PF[%08x] is first eth on engine\n", 2128 pf_id); 2129 2130 /* We should have configured BIT for ppfid, i.e., the 2131 * relative function number in the port. But there's a 2132 * bug in LLH in BB where the ppfid is actually engine 2133 * based, so we need to take this into account. 2134 */ 2135 if (!ECORE_IS_BB(p_hwfn->p_dev)) 2136 pf_id /= p_hwfn->p_dev->num_ports_in_engine; 2137 2138 ecore_wr(p_hwfn, p_ptt, 2139 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id); 2140 } 2141 2142 /* Take the protocol-based hit vector if there is a hit, 2143 * otherwise take the other vector. 2144 */ 2145 ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2); 2146 } 2147 #endif 2148 #ifndef ASIC_ONLY 2149 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) 2150 return ECORE_SUCCESS; 2151 2152 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 2153 if (ECORE_IS_AH(p_hwfn->p_dev)) 2154 return ECORE_SUCCESS; 2155 else if (ECORE_IS_BB(p_hwfn->p_dev)) 2156 ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id); 2157 else /* E5 */ 2158 ECORE_E5_MISSING_CODE; 2159 } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 2160 if (p_hwfn->p_dev->num_hwfns > 1) { 2161 /* Activate OPTE in CMT */ 2162 u32 val; 2163 2164 val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV); 2165 val |= 0x10; 2166 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val); 2167 ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1); 2168 ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1); 2169 ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1); 2170 ecore_wr(p_hwfn, p_ptt, 2171 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1); 2172 ecore_wr(p_hwfn, p_ptt, 2173 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555); 2174 ecore_wr(p_hwfn, p_ptt, 2175 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4, 2176 0x55555555); 2177 } 2178 2179 ecore_emul_link_init(p_hwfn, p_ptt); 2180 } else { 2181 DP_INFO(p_hwfn->p_dev, "link is not being configured\n"); 2182 } 2183 #endif 2184 2185 return rc; 2186 } 2187 2188 static enum _ecore_status_t ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, 2189 struct ecore_ptt *p_ptt, 2190 struct ecore_tunnel_info *p_tunn, 2191 int hw_mode, 2192 bool b_hw_start, 2193 enum ecore_int_mode int_mode, 2194 bool allow_npar_tx_switch) 2195 { 2196 u8 rel_pf_id = p_hwfn->rel_pf_id; 2197 u32 prs_reg; 2198 enum _ecore_status_t rc = ECORE_SUCCESS; 2199 u16 ctrl; 2200 int pos; 2201 2202 if (p_hwfn->mcp_info) { 2203 struct ecore_mcp_function_info *p_info; 2204 2205 p_info = &p_hwfn->mcp_info->func_info; 2206 if (p_info->bandwidth_min) 2207 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 2208 2209 /* Update rate limit once we'll actually have a link */ 2210 p_hwfn->qm_info.pf_rl = 100000; 2211 } 2212 ecore_cxt_hw_init_pf(p_hwfn, p_ptt); 2213 2214 ecore_int_igu_init_rt(p_hwfn); 2215 2216 /* Set VLAN in NIG if needed */ 2217 if (hw_mode & (1 << MODE_MF_SD)) { 2218 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n"); 2219 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 2220 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 2221 p_hwfn->hw_info.ovlan); 2222 } 2223 2224 /* Enable classification by MAC if needed */ 2225 if (hw_mode & (1 << MODE_MF_SI)) { 2226 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n"); 2227 STORE_RT_REG(p_hwfn, 2228 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); 2229 } 2230 2231 /* Protocl Configuration - @@@TBD - should we set 0 otherwise?*/ 
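	/* Note: only the searcher matching this PF's personality is enabled
	 * here; RoCE search is left disabled and is only enabled later, once
	 * RDMA is actually initialized (see ecore_hw_start_fastpath).
	 */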
2232 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 2233 (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0); 2234 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 2235 (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0); 2236 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 2237 2238 /* perform debug configuration when chip is out of reset */ 2239 OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); 2240 2241 /* Cleanup chip from previous driver if such remains exist */ 2242 rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); 2243 if (rc != ECORE_SUCCESS) { 2244 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL); 2245 return rc; 2246 } 2247 2248 /* PF Init sequence */ 2249 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); 2250 if (rc) 2251 return rc; 2252 2253 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ 2254 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); 2255 if (rc) 2256 return rc; 2257 2258 /* Pure runtime initializations - directly to the HW */ 2259 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 2260 2261 /* PCI relaxed ordering causes a decrease in the performance on some 2262 * systems. Till a root cause is found, disable this attribute in the 2263 * PCI config space. 2264 */ 2265 pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP); 2266 if (!pos) { 2267 DP_NOTICE(p_hwfn, true, 2268 "Failed to find the PCI Express Capability structure in the PCI config space\n"); 2269 return ECORE_IO; 2270 } 2271 OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl); 2272 ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN; 2273 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl); 2274 2275 rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); 2276 if (rc) 2277 return rc; 2278 #if 0 2279 /* FW 8.10.5.0 requires us to configure MSG_INFO in PRS. 2280 * This would hopefully be moved to MFW. 2281 */ 2282 if (IS_MF_SI(p_hwfn)) { 2283 u8 pf_id = 0; 2284 u32 val; 2285 2286 if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) == 2287 ECORE_SUCCESS) { 2288 if (p_hwfn->rel_pf_id == pf_id) { 2289 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 2290 "PF[%d] is first ETH on engine\n", 2291 pf_id); 2292 val = 1; 2293 } 2294 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val); 2295 } 2296 } 2297 #endif 2298 /* No default PF is configured in 100G NPAR mode, so need add an LLH 2299 * filter with the primary MAC address. 
2300 */ 2301 if (p_hwfn->p_dev->num_hwfns > 1 && IS_LEAD_HWFN(p_hwfn)) { 2302 rc = ecore_llh_add_mac_filter(p_hwfn, p_ptt, 2303 p_hwfn->hw_info.hw_mac_addr); 2304 if (rc != ECORE_SUCCESS) 2305 DP_NOTICE(p_hwfn, false, 2306 "Failed to add an LLH filter with the primary MAC in 100 NPAR mode\n"); 2307 } 2308 2309 if (b_hw_start) { 2310 /* enable interrupts */ 2311 rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode); 2312 if (rc != ECORE_SUCCESS) 2313 return rc; 2314 2315 /* send function start command */ 2316 rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn, 2317 p_hwfn->p_dev->mf_mode, 2318 allow_npar_tx_switch); 2319 if (rc) { 2320 DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n"); 2321 } else { 2322 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2323 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2324 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2325 2326 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 2327 { 2328 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, 2329 (1 << 2)); 2330 ecore_wr(p_hwfn, p_ptt, 2331 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 2332 0x100); 2333 } 2334 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2335 "PRS_REG_SEARCH registers after start PFn\n"); 2336 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); 2337 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2338 "PRS_REG_SEARCH_TCP: %x\n", prs_reg); 2339 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); 2340 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2341 "PRS_REG_SEARCH_UDP: %x\n", prs_reg); 2342 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); 2343 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2344 "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); 2345 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); 2346 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2347 "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); 2348 prs_reg = ecore_rd(p_hwfn, p_ptt, 2349 PRS_REG_SEARCH_TCP_FIRST_FRAG); 2350 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2351 "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", 2352 prs_reg); 2353 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 2354 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 2355 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 2356 } 2357 } 2358 return rc; 2359 } 2360 2361 enum _ecore_status_t ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn, 2362 struct ecore_ptt *p_ptt, 2363 u8 enable) 2364 { 2365 u32 delay_idx = 0, val, set_val = enable ? 
1 : 0; 2366 2367 /* Change PF in PXP */ 2368 ecore_wr(p_hwfn, p_ptt, 2369 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 2370 2371 /* wait until value is set - try for 1 second every 50us */ 2372 for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 2373 val = ecore_rd(p_hwfn, p_ptt, 2374 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 2375 if (val == set_val) 2376 break; 2377 2378 OSAL_UDELAY(50); 2379 } 2380 2381 if (val != set_val) { 2382 DP_NOTICE(p_hwfn, true, 2383 "PFID_ENABLE_MASTER wasn't changed after a second\n"); 2384 return ECORE_UNKNOWN_ERROR; 2385 } 2386 2387 return ECORE_SUCCESS; 2388 } 2389 2390 static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, 2391 struct ecore_ptt *p_main_ptt) 2392 { 2393 /* Read shadow of current MFW mailbox */ 2394 ecore_mcp_read_mb(p_hwfn, p_main_ptt); 2395 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 2396 p_hwfn->mcp_info->mfw_mb_cur, 2397 p_hwfn->mcp_info->mfw_mb_length); 2398 } 2399 2400 static enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, 2401 struct ecore_hw_init_params *p_params) 2402 { 2403 if (p_params->p_tunn) { 2404 ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 2405 ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 2406 } 2407 2408 p_hwfn->b_int_enabled = 1; 2409 2410 return ECORE_SUCCESS; 2411 } 2412 2413 static void 2414 ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, 2415 struct ecore_drv_load_params *p_drv_load) 2416 { 2417 /* Make sure that if ecore-client didn't provide inputs, all the 2418 * expected defaults are indeed zero. 2419 */ 2420 OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0); 2421 OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0); 2422 OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0); 2423 2424 OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); 2425 2426 if (p_drv_load != OSAL_NULL) { 2427 p_load_req->drv_role = p_drv_load->is_crash_kernel ? 
2428 ECORE_DRV_ROLE_KDUMP : 2429 ECORE_DRV_ROLE_OS; 2430 p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 2431 p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 2432 p_load_req->override_force_load = 2433 p_drv_load->override_force_load; 2434 } 2435 } 2436 2437 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 2438 struct ecore_hw_init_params *p_params) 2439 { 2440 struct ecore_load_req_params load_req_params; 2441 u32 load_code, param, drv_mb_param; 2442 bool b_default_mtu = true; 2443 struct ecore_hwfn *p_hwfn; 2444 enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc; 2445 int i; 2446 2447 if ((p_params->int_mode == ECORE_INT_MODE_MSI) && (p_dev->num_hwfns > 1)) { 2448 DP_NOTICE(p_dev, false, 2449 "MSI mode is not supported for CMT devices\n"); 2450 return ECORE_INVAL; 2451 } 2452 2453 if (IS_PF(p_dev)) { 2454 rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data); 2455 if (rc != ECORE_SUCCESS) 2456 return rc; 2457 } 2458 2459 for_each_hwfn(p_dev, i) { 2460 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2461 2462 /* If management didn't provide a default, set one of our own */ 2463 if (!p_hwfn->hw_info.mtu) { 2464 p_hwfn->hw_info.mtu = 1500; 2465 b_default_mtu = false; 2466 } 2467 2468 if (IS_VF(p_dev)) { 2469 ecore_vf_start(p_hwfn, p_params); 2470 continue; 2471 } 2472 2473 /* Enable DMAE in PXP */ 2474 rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); 2475 if (rc != ECORE_SUCCESS) 2476 return rc; 2477 2478 rc = ecore_calc_hw_mode(p_hwfn); 2479 if (rc != ECORE_SUCCESS) 2480 return rc; 2481 2482 ecore_fill_load_req_params(&load_req_params, 2483 p_params->p_drv_load_params); 2484 rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 2485 &load_req_params); 2486 if (rc != ECORE_SUCCESS) { 2487 DP_NOTICE(p_hwfn, true, 2488 "Failed sending a LOAD_REQ command\n"); 2489 return rc; 2490 } 2491 2492 load_code = load_req_params.load_code; 2493 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2494 "Load request was sent. Load code: 0x%x\n", 2495 load_code); 2496 2497 ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); 2498 2499 /* CQ75580: 2500 * When comming back from hiberbate state, the registers from 2501 * which shadow is read initially are not initialized. It turns 2502 * out that these registers get initialized during the call to 2503 * ecore_mcp_load_req request. So we need to reread them here 2504 * to get the proper shadow register value. 2505 * Note: This is a workaround for the missing MFW 2506 * initialization. It may be removed once the implementation 2507 * is done. 2508 */ 2509 ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 2510 2511 /* Only relevant for recovery: 2512 * Clear the indication after the LOAD_REQ command is responded 2513 * by the MFW. 
2514 */ 2515 p_dev->recov_in_prog = false; 2516 2517 p_hwfn->first_on_engine = (load_code == 2518 FW_MSG_CODE_DRV_LOAD_ENGINE); 2519 2520 if (!qm_lock_init) { 2521 OSAL_SPIN_LOCK_INIT(&qm_lock); 2522 qm_lock_init = true; 2523 } 2524 2525 switch (load_code) { 2526 case FW_MSG_CODE_DRV_LOAD_ENGINE: 2527 rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, 2528 p_hwfn->hw_info.hw_mode); 2529 if (rc != ECORE_SUCCESS) 2530 break; 2531 /* Fall into */ 2532 case FW_MSG_CODE_DRV_LOAD_PORT: 2533 rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, 2534 p_hwfn->hw_info.hw_mode); 2535 if (rc != ECORE_SUCCESS) 2536 break; 2537 /* Fall into */ 2538 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 2539 rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, 2540 p_params->p_tunn, 2541 p_hwfn->hw_info.hw_mode, 2542 p_params->b_hw_start, 2543 p_params->int_mode, 2544 p_params->allow_npar_tx_switch); 2545 break; 2546 default: 2547 DP_NOTICE(p_hwfn, false, 2548 "Unexpected load code [0x%08x]", load_code); 2549 rc = ECORE_NOTIMPL; 2550 break; 2551 } 2552 2553 if (rc != ECORE_SUCCESS) 2554 DP_NOTICE(p_hwfn, true, 2555 "init phase failed for loadcode 0x%x (rc %d)\n", 2556 load_code, rc); 2557 2558 /* ACK mfw regardless of success or failure of initialization */ 2559 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2560 DRV_MSG_CODE_LOAD_DONE, 2561 0, &load_code, ¶m); 2562 2563 /* Check the return value of the ecore_hw_init_*() function */ 2564 if (rc != ECORE_SUCCESS) 2565 return rc; 2566 2567 /* Check the return value of the LOAD_DONE command */ 2568 if (mfw_rc != ECORE_SUCCESS) { 2569 DP_NOTICE(p_hwfn, true, 2570 "Failed sending a LOAD_DONE command\n"); 2571 return mfw_rc; 2572 } 2573 2574 /* Check if there is a DID mismatch between nvm-cfg/efuse */ 2575 if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) 2576 DP_NOTICE(p_hwfn, false, 2577 "warning: device configuration is not supported on this board type. 
The device may not function as expected.\n"); 2578 2579 /* send DCBX attention request command */ 2580 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 2581 "sending phony dcbx set command to trigger DCBx attention handling\n"); 2582 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2583 DRV_MSG_CODE_SET_DCBX, 2584 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, 2585 &load_code, ¶m); 2586 if (mfw_rc != ECORE_SUCCESS) { 2587 DP_NOTICE(p_hwfn, true, 2588 "Failed to send DCBX attention request\n"); 2589 return mfw_rc; 2590 } 2591 2592 p_hwfn->hw_init_done = true; 2593 } 2594 2595 if (IS_PF(p_dev)) { 2596 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2597 drv_mb_param = STORM_FW_VERSION; 2598 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 2599 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 2600 drv_mb_param, &load_code, ¶m); 2601 if (rc != ECORE_SUCCESS) 2602 DP_INFO(p_hwfn, "Failed to update firmware version\n"); 2603 2604 if (!b_default_mtu) { 2605 rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, 2606 p_hwfn->hw_info.mtu); 2607 if (rc != ECORE_SUCCESS) 2608 DP_INFO(p_hwfn, "Failed to update default mtu\n"); 2609 } 2610 2611 rc = ecore_mcp_ov_update_driver_state(p_hwfn, 2612 p_hwfn->p_main_ptt, 2613 ECORE_OV_DRIVER_STATE_DISABLED); 2614 if (rc != ECORE_SUCCESS) 2615 DP_INFO(p_hwfn, "Failed to update driver state\n"); 2616 2617 rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, 2618 ECORE_OV_ESWITCH_VEB); 2619 if (rc != ECORE_SUCCESS) 2620 DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); 2621 } 2622 2623 return rc; 2624 } 2625 2626 #define ECORE_HW_STOP_RETRY_LIMIT (10) 2627 static void ecore_hw_timers_stop(struct ecore_dev *p_dev, 2628 struct ecore_hwfn *p_hwfn, 2629 struct ecore_ptt *p_ptt) 2630 { 2631 int i; 2632 2633 /* close timers */ 2634 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 2635 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 2636 for (i = 0; 2637 i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog; 2638 i++) { 2639 if ((!ecore_rd(p_hwfn, p_ptt, 2640 TM_REG_PF_SCAN_ACTIVE_CONN)) && 2641 (!ecore_rd(p_hwfn, p_ptt, 2642 TM_REG_PF_SCAN_ACTIVE_TASK))) 2643 break; 2644 2645 /* Dependent on number of connection/tasks, possibly 2646 * 1ms sleep is required between polls 2647 */ 2648 OSAL_MSLEEP(1); 2649 } 2650 2651 if (i < ECORE_HW_STOP_RETRY_LIMIT) 2652 return; 2653 2654 DP_NOTICE(p_hwfn, true, 2655 "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 2656 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 2657 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 2658 } 2659 2660 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev) 2661 { 2662 int j; 2663 2664 for_each_hwfn(p_dev, j) { 2665 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2666 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2667 2668 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2669 } 2670 } 2671 2672 static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn, 2673 struct ecore_ptt *p_ptt, 2674 u32 addr, u32 expected_val) 2675 { 2676 u32 val = ecore_rd(p_hwfn, p_ptt, addr); 2677 2678 if (val != expected_val) { 2679 DP_NOTICE(p_hwfn, true, 2680 "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n", 2681 addr, val, expected_val); 2682 return ECORE_UNKNOWN_ERROR; 2683 } 2684 2685 return ECORE_SUCCESS; 2686 } 2687 2688 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) 2689 { 2690 struct ecore_hwfn *p_hwfn; 2691 struct ecore_ptt *p_ptt; 2692 enum _ecore_status_t rc, rc2 = ECORE_SUCCESS; 2693 int j; 2694 2695 for_each_hwfn(p_dev, j) { 2696 p_hwfn = &p_dev->hwfns[j]; 
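		/* Teardown order for each PF below: request unload from the
		 * MFW, stop the PF in FW (PF-stop ramrod), close the
		 * NIG-to-BRB gate and the parser searches, stop the timers,
		 * disable IGU attentions, verify the QM usage counters are
		 * clear, disable doorbells/QM for this PF and finally send
		 * UNLOAD_DONE.
		 */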
2697 p_ptt = p_hwfn->p_main_ptt; 2698 2699 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n"); 2700 2701 if (IS_VF(p_dev)) { 2702 ecore_vf_pf_int_cleanup(p_hwfn); 2703 rc = ecore_vf_pf_reset(p_hwfn); 2704 if (rc != ECORE_SUCCESS) { 2705 DP_NOTICE(p_hwfn, true, 2706 "ecore_vf_pf_reset failed. rc = %d.\n", 2707 rc); 2708 rc2 = ECORE_UNKNOWN_ERROR; 2709 } 2710 continue; 2711 } 2712 2713 /* mark the hw as uninitialized... */ 2714 p_hwfn->hw_init_done = false; 2715 2716 /* Send unload command to MCP */ 2717 if (!p_dev->recov_in_prog) { 2718 rc = ecore_mcp_unload_req(p_hwfn, p_ptt); 2719 if (rc != ECORE_SUCCESS) { 2720 DP_NOTICE(p_hwfn, true, 2721 "Failed sending a UNLOAD_REQ command. rc = %d.\n", 2722 rc); 2723 rc2 = ECORE_UNKNOWN_ERROR; 2724 } 2725 } 2726 2727 OSAL_DPC_SYNC(p_hwfn); 2728 2729 /* After this point no MFW attentions are expected, e.g. prevent 2730 * race between pf stop and dcbx pf update. 2731 */ 2732 2733 rc = ecore_sp_pf_stop(p_hwfn); 2734 if (rc != ECORE_SUCCESS) { 2735 DP_NOTICE(p_hwfn, true, 2736 "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n", 2737 rc); 2738 rc2 = ECORE_UNKNOWN_ERROR; 2739 } 2740 2741 /* perform debug action after PF stop was sent */ 2742 OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id); 2743 2744 /* close NIG to BRB gate */ 2745 ecore_wr(p_hwfn, p_ptt, 2746 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2747 2748 /* close parser */ 2749 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2750 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2751 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2752 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2753 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2754 2755 /* @@@TBD - clean transmission queues (5.b) */ 2756 /* @@@TBD - clean BTB (5.c) */ 2757 2758 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 2759 2760 /* @@@TBD - verify DMAE requests are done (8) */ 2761 2762 /* Disable Attention Generation */ 2763 ecore_int_igu_disable_int(p_hwfn, p_ptt); 2764 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 2765 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 2766 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); 2767 rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt); 2768 if (rc != ECORE_SUCCESS) { 2769 DP_NOTICE(p_hwfn, true, 2770 "Failed to return IGU CAM to default\n"); 2771 rc2 = ECORE_UNKNOWN_ERROR; 2772 } 2773 2774 /* Need to wait 1ms to guarantee SBs are cleared */ 2775 OSAL_MSLEEP(1); 2776 2777 if (!p_dev->recov_in_prog) { 2778 ecore_verify_reg_val(p_hwfn, p_ptt, 2779 QM_REG_USG_CNT_PF_TX, 0); 2780 ecore_verify_reg_val(p_hwfn, p_ptt, 2781 QM_REG_USG_CNT_PF_OTHER, 0); 2782 /* @@@TBD - assert on incorrect xCFC values (10.b) */ 2783 } 2784 2785 /* Disable PF in HW blocks */ 2786 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); 2787 ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); 2788 2789 if (!p_dev->recov_in_prog) { 2790 ecore_mcp_unload_done(p_hwfn, p_ptt); 2791 if (rc != ECORE_SUCCESS) { 2792 DP_NOTICE(p_hwfn, true, 2793 "Failed sending a UNLOAD_DONE command. rc = %d.\n", 2794 rc); 2795 rc2 = ECORE_UNKNOWN_ERROR; 2796 } 2797 } 2798 2799 /* 100g NPAR mode - remove the LLH filter with the primary MAC 2800 * address. 
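	 * This mirrors the LLH MAC filter that was added for the leading
	 * hwfn in ecore_hw_init_pf on a CMT (100G) device.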
2801 */ 2802 if (p_hwfn->p_dev->num_hwfns > 1 && IS_LEAD_HWFN(p_hwfn)) 2803 ecore_llh_remove_mac_filter(p_hwfn, p_ptt, 2804 p_hwfn->hw_info.hw_mac_addr); 2805 } /* hwfn loop */ 2806 2807 if (IS_PF(p_dev)) { 2808 p_hwfn = ECORE_LEADING_HWFN(p_dev); 2809 p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt; 2810 2811 /* Disable DMAE in PXP - in CMT, this should only be done for 2812 * first hw-function, and only after all transactions have 2813 * stopped for all active hw-functions. 2814 */ 2815 rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false); 2816 if (rc != ECORE_SUCCESS) { 2817 DP_NOTICE(p_hwfn, true, 2818 "ecore_change_pci_hwfn failed. rc = %d.\n", 2819 rc); 2820 rc2 = ECORE_UNKNOWN_ERROR; 2821 } 2822 } 2823 2824 return rc2; 2825 } 2826 2827 enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev) 2828 { 2829 int j; 2830 2831 for_each_hwfn(p_dev, j) { 2832 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 2833 struct ecore_ptt *p_ptt; 2834 2835 if (IS_VF(p_dev)) { 2836 ecore_vf_pf_int_cleanup(p_hwfn); 2837 continue; 2838 } 2839 p_ptt = ecore_ptt_acquire(p_hwfn); 2840 if (!p_ptt) 2841 return ECORE_AGAIN; 2842 2843 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n"); 2844 2845 ecore_wr(p_hwfn, p_ptt, 2846 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 2847 2848 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2849 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 2850 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 2851 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2852 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 2853 2854 /* @@@TBD - clean transmission queues (5.b) */ 2855 /* @@@TBD - clean BTB (5.c) */ 2856 2857 /* @@@TBD - verify DMAE requests are done (8) */ 2858 2859 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 2860 /* Need to wait 1ms to guarantee SBs are cleared */ 2861 OSAL_MSLEEP(1); 2862 ecore_ptt_release(p_hwfn, p_ptt); 2863 } 2864 2865 return ECORE_SUCCESS; 2866 } 2867 2868 enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn) 2869 { 2870 struct ecore_ptt *p_ptt; 2871 2872 if (IS_VF(p_hwfn->p_dev)) 2873 return ECORE_SUCCESS; 2874 2875 p_ptt = ecore_ptt_acquire(p_hwfn); 2876 if (!p_ptt) 2877 return ECORE_AGAIN; 2878 2879 /* If roce info is allocated it means roce is initialized and should 2880 * be enabled in searcher. 2881 */ 2882 if (p_hwfn->p_rdma_info) { 2883 if (p_hwfn->b_rdma_enabled_in_prs) 2884 ecore_wr(p_hwfn, p_ptt, 2885 p_hwfn->rdma_prs_search_reg, 0x1); 2886 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1); 2887 } 2888 2889 /* Re-open incoming traffic */ 2890 ecore_wr(p_hwfn, p_ptt, 2891 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 2892 ecore_ptt_release(p_hwfn, p_ptt); 2893 2894 return ECORE_SUCCESS; 2895 } 2896 /* TEMP macro to be removed when wol code revisted */ 2897 #define ECORE_WOL_WR(_p_hwfn, _p_ptt, _offset, _val) ECORE_IS_BB(_p_hwfn->p_dev) ? \ 2898 ecore_wr(_p_hwfn, _p_ptt, _offset, _val) : \ 2899 ecore_mcp_wol_wr(_p_hwfn, _p_ptt, _offset, _val); 2900 2901 enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev, u32 reg_idx, 2902 u32 pattern_size, u32 crc) 2903 { 2904 struct ecore_hwfn *hwfn = &p_dev->hwfns[0]; 2905 enum _ecore_status_t rc = ECORE_SUCCESS; 2906 struct ecore_ptt *p_ptt; 2907 u32 reg_len = 0; 2908 u32 reg_crc = 0; 2909 2910 p_ptt = ecore_ptt_acquire(hwfn); 2911 if (!p_ptt) 2912 return ECORE_AGAIN; 2913 2914 /* Get length and CRC register offsets */ 2915 switch (reg_idx) 2916 { 2917 case 0: 2918 reg_len = ECORE_IS_BB(p_dev) ? 
NIG_REG_ACPI_PAT_0_LEN_BB : 2919 WOL_REG_ACPI_PAT_0_LEN_K2_E5; 2920 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB : 2921 WOL_REG_ACPI_PAT_0_CRC_K2_E5; 2922 break; 2923 case 1: 2924 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB : 2925 WOL_REG_ACPI_PAT_1_LEN_K2_E5; 2926 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_CRC_BB : 2927 WOL_REG_ACPI_PAT_1_CRC_K2_E5; 2928 break; 2929 case 2: 2930 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB : 2931 WOL_REG_ACPI_PAT_2_LEN_K2_E5; 2932 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_CRC_BB : 2933 WOL_REG_ACPI_PAT_2_CRC_K2_E5; 2934 break; 2935 case 3: 2936 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB : 2937 WOL_REG_ACPI_PAT_3_LEN_K2_E5; 2938 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB : 2939 WOL_REG_ACPI_PAT_3_CRC_K2_E5; 2940 break; 2941 case 4: 2942 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_LEN_BB : 2943 WOL_REG_ACPI_PAT_4_LEN_K2_E5; 2944 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB : 2945 WOL_REG_ACPI_PAT_4_CRC_K2_E5; 2946 break; 2947 case 5: 2948 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB : 2949 WOL_REG_ACPI_PAT_5_LEN_K2_E5; 2950 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB : 2951 WOL_REG_ACPI_PAT_5_CRC_K2_E5; 2952 break; 2953 case 6: 2954 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB : 2955 WOL_REG_ACPI_PAT_6_LEN_K2_E5; 2956 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB : 2957 WOL_REG_ACPI_PAT_6_CRC_K2_E5; 2958 break; 2959 case 7: 2960 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB : 2961 WOL_REG_ACPI_PAT_7_LEN_K2_E5; 2962 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_CRC_BB : 2963 WOL_REG_ACPI_PAT_7_CRC_K2_E5; 2964 break; 2965 default: 2966 rc = ECORE_UNKNOWN_ERROR; 2967 goto out; 2968 } 2969 2970 /* Allign pattern size to 4 */ 2971 while (pattern_size % 4) 2972 { 2973 pattern_size++; 2974 } 2975 /* write pattern length */ 2976 ECORE_WOL_WR(hwfn, p_ptt, reg_len, pattern_size); 2977 2978 /* write crc value*/ 2979 ECORE_WOL_WR(hwfn, p_ptt, reg_crc, crc); 2980 2981 DP_INFO(p_dev, 2982 "ecore_set_nwuf_reg: idx[%d] reg_crc[0x%x=0x%08x] " 2983 "reg_len[0x%x=0x%x]\n", 2984 reg_idx, reg_crc, crc, reg_len, pattern_size); 2985 out: 2986 ecore_ptt_release(hwfn, p_ptt); 2987 2988 return rc; 2989 } 2990 2991 void ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn, 2992 struct ecore_ptt *p_ptt) 2993 { 2994 const u32 wake_buffer_clear_offset = 2995 ECORE_IS_BB(p_hwfn->p_dev) ? 2996 NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5; 2997 2998 DP_INFO(p_hwfn->p_dev, 2999 "ecore_wol_buffer_clear: reset " 3000 "REG_WAKE_BUFFER_CLEAR offset=0x%08x\n", 3001 wake_buffer_clear_offset); 3002 3003 ECORE_WOL_WR(p_hwfn, p_ptt, wake_buffer_clear_offset, 1); 3004 ECORE_WOL_WR(p_hwfn, p_ptt, wake_buffer_clear_offset, 0); 3005 } 3006 3007 enum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn, 3008 struct ecore_ptt *p_ptt, 3009 struct ecore_wake_info *wake_info) 3010 { 3011 struct ecore_dev *p_dev = p_hwfn->p_dev; 3012 u32 *buf = OSAL_NULL; 3013 u32 i = 0; 3014 const u32 reg_wake_buffer_offest = 3015 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB : 3016 WOL_REG_WAKE_BUFFER_K2_E5; 3017 3018 wake_info->wk_info = ecore_rd(p_hwfn, p_ptt, 3019 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB : 3020 WOL_REG_WAKE_INFO_K2_E5); 3021 wake_info->wk_details = ecore_rd(p_hwfn, p_ptt, 3022 ECORE_IS_BB(p_dev) ? 
NIG_REG_WAKE_DETAILS_BB : 3023 WOL_REG_WAKE_DETAILS_K2_E5); 3024 wake_info->wk_pkt_len = ecore_rd(p_hwfn, p_ptt, 3025 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_PKT_LEN_BB : 3026 WOL_REG_WAKE_PKT_LEN_K2_E5); 3027 3028 DP_INFO(p_dev, 3029 "ecore_get_wake_info: REG_WAKE_INFO=0x%08x " 3030 "REG_WAKE_DETAILS=0x%08x " 3031 "REG_WAKE_PKT_LEN=0x%08x\n", 3032 wake_info->wk_info, 3033 wake_info->wk_details, 3034 wake_info->wk_pkt_len); 3035 3036 buf = (u32 *)wake_info->wk_buffer; 3037 3038 for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++) 3039 { 3040 if ((i*sizeof(u32)) >= sizeof(wake_info->wk_buffer)) 3041 { 3042 DP_INFO(p_dev, 3043 "ecore_get_wake_info: i index to 0 high=%d\n", 3044 i); 3045 break; 3046 } 3047 buf[i] = ecore_rd(p_hwfn, p_ptt, 3048 reg_wake_buffer_offest + (i * sizeof(u32))); 3049 DP_INFO(p_dev, "ecore_get_wake_info: wk_buffer[%u]: 0x%08x\n", 3050 i, buf[i]); 3051 } 3052 3053 ecore_wol_buffer_clear(p_hwfn, p_ptt); 3054 3055 return ECORE_SUCCESS; 3056 } 3057 3058 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 3059 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn) 3060 { 3061 ecore_ptt_pool_free(p_hwfn); 3062 OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info); 3063 p_hwfn->hw_info.p_igu_info = OSAL_NULL; 3064 } 3065 3066 /* Setup bar access */ 3067 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn) 3068 { 3069 /* clear indirect access */ 3070 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) { 3071 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3072 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0); 3073 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3074 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0); 3075 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3076 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0); 3077 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3078 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0); 3079 } else { 3080 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3081 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); 3082 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3083 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); 3084 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3085 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); 3086 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3087 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); 3088 } 3089 3090 /* Clean Previous errors if such exist */ 3091 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3092 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 3093 1 << p_hwfn->abs_pf_id); 3094 3095 /* enable internal target-read */ 3096 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 3097 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 3098 } 3099 3100 static void get_function_id(struct ecore_hwfn *p_hwfn) 3101 { 3102 /* ME Register */ 3103 p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, 3104 PXP_PF_ME_OPAQUE_ADDR); 3105 3106 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 3107 3108 /* Bits 16-19 from the ME registers are the pf_num */ 3109 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; 3110 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 3111 PXP_CONCRETE_FID_PFID); 3112 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 3113 PXP_CONCRETE_FID_PORT); 3114 3115 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3116 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", 3117 p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); 3118 } 3119 3120 void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) 3121 { 3122 u32 *feat_num = p_hwfn->hw_info.feat_num; 3123 struct ecore_sb_cnt_info sb_cnt; 3124 u32 non_l2_sbs = 0; 3125 3126 OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt)); 3127 ecore_int_get_num_sbs(p_hwfn, &sb_cnt); 3128 3129 #ifdef CONFIG_ECORE_ROCE 3130 /* Roce CNQ 
require each: 1 status block. 1 CNQ, we divide the 3131 * status blocks equally between L2 / RoCE but with consideration as 3132 * to how many l2 queues / cnqs we have 3133 */ 3134 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 3135 u32 max_cnqs; 3136 3137 feat_num[ECORE_RDMA_CNQ] = 3138 OSAL_MIN_T(u32, 3139 sb_cnt.cnt / 2, 3140 RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM)); 3141 3142 /* Upper layer might require less */ 3143 max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs; 3144 if (max_cnqs) { 3145 if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE) 3146 max_cnqs = 0; 3147 feat_num[ECORE_RDMA_CNQ] = 3148 OSAL_MIN_T(u32, 3149 feat_num[ECORE_RDMA_CNQ], 3150 max_cnqs); 3151 } 3152 3153 non_l2_sbs = feat_num[ECORE_RDMA_CNQ]; 3154 } 3155 #endif 3156 3157 /* L2 Queues require each: 1 status block. 1 L2 queue */ 3158 if (ECORE_IS_L2_PERSONALITY(p_hwfn)) { 3159 /* Start by allocating VF queues, then PF's */ 3160 feat_num[ECORE_VF_L2_QUE] = 3161 OSAL_MIN_T(u32, 3162 RESC_NUM(p_hwfn, ECORE_L2_QUEUE), 3163 sb_cnt.iov_cnt); 3164 feat_num[ECORE_PF_L2_QUE] = 3165 OSAL_MIN_T(u32, 3166 sb_cnt.cnt - non_l2_sbs, 3167 RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - 3168 FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE)); 3169 } 3170 3171 if (ECORE_IS_FCOE_PERSONALITY(p_hwfn)) 3172 feat_num[ECORE_FCOE_CQ] = 3173 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 3174 ECORE_CMDQS_CQS)); 3175 3176 if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) 3177 feat_num[ECORE_ISCSI_CQ] = 3178 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 3179 ECORE_CMDQS_CQS)); 3180 3181 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3182 "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n", 3183 (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), 3184 (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE), 3185 (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ), 3186 (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ), 3187 (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ), 3188 (int)sb_cnt.cnt); 3189 } 3190 3191 const char *ecore_hw_get_resc_name(enum ecore_resources res_id) 3192 { 3193 switch (res_id) { 3194 case ECORE_L2_QUEUE: 3195 return "L2_QUEUE"; 3196 case ECORE_VPORT: 3197 return "VPORT"; 3198 case ECORE_RSS_ENG: 3199 return "RSS_ENG"; 3200 case ECORE_PQ: 3201 return "PQ"; 3202 case ECORE_RL: 3203 return "RL"; 3204 case ECORE_MAC: 3205 return "MAC"; 3206 case ECORE_VLAN: 3207 return "VLAN"; 3208 case ECORE_RDMA_CNQ_RAM: 3209 return "RDMA_CNQ_RAM"; 3210 case ECORE_ILT: 3211 return "ILT"; 3212 case ECORE_LL2_QUEUE: 3213 return "LL2_QUEUE"; 3214 case ECORE_CMDQS_CQS: 3215 return "CMDQS_CQS"; 3216 case ECORE_RDMA_STATS_QUEUE: 3217 return "RDMA_STATS_QUEUE"; 3218 case ECORE_BDQ: 3219 return "BDQ"; 3220 case ECORE_SB: 3221 return "SB"; 3222 default: 3223 return "UNKNOWN_RESOURCE"; 3224 } 3225 } 3226 3227 static enum _ecore_status_t 3228 __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 3229 struct ecore_ptt *p_ptt, 3230 enum ecore_resources res_id, 3231 u32 resc_max_val, 3232 u32 *p_mcp_resp) 3233 { 3234 enum _ecore_status_t rc; 3235 3236 rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, 3237 resc_max_val, p_mcp_resp); 3238 if (rc != ECORE_SUCCESS) { 3239 DP_NOTICE(p_hwfn, true, 3240 "MFW response failure for a max value setting of resource %d [%s]\n", 3241 res_id, ecore_hw_get_resc_name(res_id)); 3242 return rc; 3243 } 3244 3245 if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 3246 DP_INFO(p_hwfn, 3247 "Failed to set the max value of resource %d [%s]. 
mcp_resp = 0x%08x.\n", 3248 res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp); 3249 3250 return ECORE_SUCCESS; 3251 } 3252 3253 static enum _ecore_status_t 3254 ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 3255 struct ecore_ptt *p_ptt) 3256 { 3257 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3258 u32 resc_max_val, mcp_resp; 3259 u8 res_id; 3260 enum _ecore_status_t rc; 3261 3262 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 3263 switch (res_id) { 3264 case ECORE_LL2_QUEUE: 3265 resc_max_val = MAX_NUM_LL2_RX_QUEUES; 3266 break; 3267 case ECORE_RDMA_CNQ_RAM: 3268 /* No need for a case for ECORE_CMDQS_CQS since 3269 * CNQ/CMDQS are the same resource. 3270 */ 3271 resc_max_val = NUM_OF_CMDQS_CQS; 3272 break; 3273 case ECORE_RDMA_STATS_QUEUE: 3274 resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 3275 : RDMA_NUM_STATISTIC_COUNTERS_BB; 3276 break; 3277 case ECORE_BDQ: 3278 resc_max_val = BDQ_NUM_RESOURCES; 3279 break; 3280 default: 3281 continue; 3282 } 3283 3284 rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, 3285 resc_max_val, &mcp_resp); 3286 if (rc != ECORE_SUCCESS) 3287 return rc; 3288 3289 /* There's no point to continue to the next resource if the 3290 * command is not supported by the MFW. 3291 * We do continue if the command is supported but the resource 3292 * is unknown to the MFW. Such a resource will be later 3293 * configured with the default allocation values. 3294 */ 3295 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 3296 return ECORE_NOTIMPL; 3297 } 3298 3299 return ECORE_SUCCESS; 3300 } 3301 3302 static 3303 enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn, 3304 enum ecore_resources res_id, 3305 u32 *p_resc_num, u32 *p_resc_start) 3306 { 3307 u8 num_funcs = p_hwfn->num_funcs_on_engine; 3308 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3309 3310 switch (res_id) { 3311 case ECORE_L2_QUEUE: 3312 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 : 3313 MAX_NUM_L2_QUEUES_BB) / num_funcs; 3314 break; 3315 case ECORE_VPORT: 3316 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 3317 MAX_NUM_VPORTS_BB) / num_funcs; 3318 break; 3319 case ECORE_RSS_ENG: 3320 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : 3321 ETH_RSS_ENGINE_NUM_BB) / num_funcs; 3322 break; 3323 case ECORE_PQ: 3324 *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 : 3325 MAX_QM_TX_QUEUES_BB) / num_funcs; 3326 *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ 3327 break; 3328 case ECORE_RL: 3329 *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; 3330 break; 3331 case ECORE_MAC: 3332 case ECORE_VLAN: 3333 /* Each VFC resource can accommodate both a MAC and a VLAN */ 3334 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 3335 break; 3336 case ECORE_ILT: 3337 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : 3338 PXP_NUM_ILT_RECORDS_BB) / num_funcs; 3339 break; 3340 case ECORE_LL2_QUEUE: 3341 *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 3342 break; 3343 case ECORE_RDMA_CNQ_RAM: 3344 case ECORE_CMDQS_CQS: 3345 /* CNQ/CMDQS are the same resource */ 3346 *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs; 3347 break; 3348 case ECORE_RDMA_STATS_QUEUE: 3349 *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 : 3350 RDMA_NUM_STATISTIC_COUNTERS_BB) / 3351 num_funcs; 3352 break; 3353 case ECORE_BDQ: 3354 if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI && 3355 p_hwfn->hw_info.personality != ECORE_PCI_FCOE) 3356 *p_resc_num = 0; 3357 else 3358 *p_resc_num = 1; 3359 break; 3360 case ECORE_SB: 3361 /* Since we want its value to reflect whether MFW supports 3362 * the new scheme, have a default of 0. 
3363 */ 3364 *p_resc_num = 0; 3365 break; 3366 default: 3367 return ECORE_INVAL; 3368 } 3369 3370 switch (res_id) { 3371 case ECORE_BDQ: 3372 if (!*p_resc_num) 3373 *p_resc_start = 0; 3374 else if (p_hwfn->p_dev->num_ports_in_engine == 4) 3375 *p_resc_start = p_hwfn->port_id; 3376 else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) 3377 *p_resc_start = p_hwfn->port_id; 3378 else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 3379 *p_resc_start = p_hwfn->port_id + 2; 3380 break; 3381 default: 3382 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 3383 break; 3384 } 3385 3386 return ECORE_SUCCESS; 3387 } 3388 3389 static enum _ecore_status_t 3390 __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id, 3391 bool drv_resc_alloc) 3392 { 3393 u32 dflt_resc_num = 0, dflt_resc_start = 0; 3394 u32 mcp_resp, *p_resc_num, *p_resc_start; 3395 enum _ecore_status_t rc; 3396 3397 p_resc_num = &RESC_NUM(p_hwfn, res_id); 3398 p_resc_start = &RESC_START(p_hwfn, res_id); 3399 3400 rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 3401 &dflt_resc_start); 3402 if (rc != ECORE_SUCCESS) { 3403 DP_ERR(p_hwfn, 3404 "Failed to get default amount for resource %d [%s]\n", 3405 res_id, ecore_hw_get_resc_name(res_id)); 3406 return rc; 3407 } 3408 3409 #ifndef ASIC_ONLY 3410 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3411 *p_resc_num = dflt_resc_num; 3412 *p_resc_start = dflt_resc_start; 3413 goto out; 3414 } 3415 #endif 3416 3417 rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 3418 &mcp_resp, p_resc_num, p_resc_start); 3419 if (rc != ECORE_SUCCESS) { 3420 DP_NOTICE(p_hwfn, true, 3421 "MFW response failure for an allocation request for resource %d [%s]\n", 3422 res_id, ecore_hw_get_resc_name(res_id)); 3423 return rc; 3424 } 3425 3426 /* Default driver values are applied in the following cases: 3427 * - The resource allocation MB command is not supported by the MFW 3428 * - There is an internal error in the MFW while processing the request 3429 * - The resource ID is unknown to the MFW 3430 */ 3431 if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3432 DP_INFO(p_hwfn, 3433 "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n", 3434 res_id, ecore_hw_get_resc_name(res_id), mcp_resp, 3435 dflt_resc_num, dflt_resc_start); 3436 *p_resc_num = dflt_resc_num; 3437 *p_resc_start = dflt_resc_start; 3438 goto out; 3439 } 3440 3441 if ((*p_resc_num != dflt_resc_num || 3442 *p_resc_start != dflt_resc_start) && 3443 res_id != ECORE_SB) { 3444 DP_INFO(p_hwfn, 3445 "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n", 3446 res_id, ecore_hw_get_resc_name(res_id), *p_resc_num, 3447 *p_resc_start, dflt_resc_num, dflt_resc_start, 3448 drv_resc_alloc ? " - Applying default values" : ""); 3449 if (drv_resc_alloc) { 3450 *p_resc_num = dflt_resc_num; 3451 *p_resc_start = dflt_resc_start; 3452 } 3453 } 3454 out: 3455 /* PQs have to divide by 8 [that's the HW granularity]. 3456 * Reduce number so it would fit. 
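	 * For instance, a number of 0x1a and a start of 0x0b would be
	 * trimmed below to 0x18 and 0x08 respectively by the '& ~0x7'
	 * masking.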
3457 */ 3458 if ((res_id == ECORE_PQ) && 3459 ((*p_resc_num % 8) || (*p_resc_start % 8))) { 3460 DP_INFO(p_hwfn, 3461 "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", 3462 *p_resc_num, (*p_resc_num) & ~0x7, 3463 *p_resc_start, (*p_resc_start) & ~0x7); 3464 *p_resc_num &= ~0x7; 3465 *p_resc_start &= ~0x7; 3466 } 3467 3468 return ECORE_SUCCESS; 3469 } 3470 3471 static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, 3472 bool drv_resc_alloc) 3473 { 3474 enum _ecore_status_t rc; 3475 u8 res_id; 3476 3477 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 3478 rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc); 3479 if (rc != ECORE_SUCCESS) 3480 return rc; 3481 } 3482 3483 return ECORE_SUCCESS; 3484 } 3485 3486 static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn, 3487 struct ecore_ptt *p_ptt, 3488 bool drv_resc_alloc) 3489 { 3490 struct ecore_resc_unlock_params resc_unlock_params; 3491 struct ecore_resc_lock_params resc_lock_params; 3492 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 3493 u8 res_id; 3494 enum _ecore_status_t rc; 3495 #ifndef ASIC_ONLY 3496 u32 *resc_start = p_hwfn->hw_info.resc_start; 3497 u32 *resc_num = p_hwfn->hw_info.resc_num; 3498 /* For AH, an equal share of the ILT lines between the maximal number of 3499 * PFs is not enough for RoCE. This would be solved by the future 3500 * resource allocation scheme, but isn't currently present for 3501 * FPGA/emulation. For now we keep a number that is sufficient for RoCE 3502 * to work - the BB number of ILT lines divided by its max PFs number. 3503 */ 3504 u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB; 3505 #endif 3506 3507 /* Setting the max values of the soft resources and the following 3508 * resources allocation queries should be atomic. Since several PFs can 3509 * run in parallel - a resource lock is needed. 3510 * If either the resource lock or resource set value commands are not 3511 * supported - skip the max values setting, release the lock if 3512 * needed, and proceed to the queries. Other failures, including a 3513 * failure to acquire the lock, will cause this function to fail. 3514 * Old drivers that don't acquire the lock can run in parallel, and 3515 * their allocation values won't be affected by the updated max values.
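	 * The resulting flow below: try to acquire the MFW resource lock, set
	 * the soft resource max values while holding it, query the per-PF
	 * allocations (ecore_hw_set_resc_info), and then release the lock.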
3516 */ 3517 ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, 3518 ECORE_RESC_LOCK_RESC_ALLOC, false); 3519 3520 rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); 3521 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3522 return rc; 3523 } else if (rc == ECORE_NOTIMPL) { 3524 DP_INFO(p_hwfn, 3525 "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 3526 } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) { 3527 DP_NOTICE(p_hwfn, false, 3528 "Failed to acquire the resource lock for the resource allocation commands\n"); 3529 return ECORE_BUSY; 3530 } else { 3531 rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt); 3532 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 3533 DP_NOTICE(p_hwfn, false, 3534 "Failed to set the max values of the soft resources\n"); 3535 goto unlock_and_exit; 3536 } else if (rc == ECORE_NOTIMPL) { 3537 DP_INFO(p_hwfn, 3538 "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 3539 rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, 3540 &resc_unlock_params); 3541 if (rc != ECORE_SUCCESS) 3542 DP_INFO(p_hwfn, 3543 "Failed to release the resource lock for the resource allocation commands\n"); 3544 } 3545 } 3546 3547 rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc); 3548 if (rc != ECORE_SUCCESS) 3549 goto unlock_and_exit; 3550 3551 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 3552 rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, 3553 &resc_unlock_params); 3554 if (rc != ECORE_SUCCESS) 3555 DP_INFO(p_hwfn, 3556 "Failed to release the resource lock for the resource allocation commands\n"); 3557 } 3558 3559 #ifndef ASIC_ONLY 3560 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 3561 /* Reduced build contains less PQs */ 3562 if (!(p_hwfn->p_dev->b_is_emul_full)) { 3563 resc_num[ECORE_PQ] = 32; 3564 resc_start[ECORE_PQ] = resc_num[ECORE_PQ] * 3565 p_hwfn->enabled_func_idx; 3566 } 3567 3568 /* For AH emulation, since we have a possible maximal number of 3569 * 16 enabled PFs, in case there are not enough ILT lines - 3570 * allocate only first PF as RoCE and have all the other ETH 3571 * only with less ILT lines. 
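 * As a numeric illustration only (assuming, hypothetically, that PXP_NUM_ILT_RECORDS_BB were 7600 and MAX_NUM_PFS_BB were 8), roce_min_ilt_lines above would evaluate to 950, which is the share the code below tries to guarantee for PF0.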
3572 */ 3573 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full) 3574 resc_num[ECORE_ILT] = OSAL_MAX_T(u32, 3575 resc_num[ECORE_ILT], 3576 roce_min_ilt_lines); 3577 } 3578 3579 /* Correct the common ILT calculation if PF0 has more */ 3580 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && 3581 p_hwfn->p_dev->b_is_emul_full && 3582 p_hwfn->rel_pf_id && 3583 resc_num[ECORE_ILT] < roce_min_ilt_lines) 3584 resc_start[ECORE_ILT] += roce_min_ilt_lines - 3585 resc_num[ECORE_ILT]; 3586 #endif 3587 3588 /* Sanity for ILT */ 3589 if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 3590 (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 3591 DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n", 3592 RESC_START(p_hwfn, ECORE_ILT), 3593 RESC_END(p_hwfn, ECORE_ILT) - 1); 3594 return ECORE_INVAL; 3595 } 3596 3597 /* This will also learn the number of SBs from MFW */ 3598 if (ecore_int_igu_reset_cam(p_hwfn, p_ptt)) 3599 return ECORE_INVAL; 3600 3601 ecore_hw_set_feat(p_hwfn); 3602 3603 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3604 "The numbers for each resource are:\n"); 3605 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) 3606 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n", 3607 ecore_hw_get_resc_name(res_id), 3608 RESC_NUM(p_hwfn, res_id), 3609 RESC_START(p_hwfn, res_id)); 3610 3611 return ECORE_SUCCESS; 3612 3613 unlock_and_exit: 3614 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) 3615 ecore_mcp_resc_unlock(p_hwfn, p_ptt, 3616 &resc_unlock_params); 3617 return rc; 3618 } 3619 3620 static enum _ecore_status_t 3621 ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, 3622 struct ecore_ptt *p_ptt, 3623 struct ecore_hw_prepare_params *p_params) 3624 { 3625 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode; 3626 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; 3627 struct ecore_mcp_link_capabilities *p_caps; 3628 struct ecore_mcp_link_params *link; 3629 enum _ecore_status_t rc; 3630 3631 /* Read global nvm_cfg address */ 3632 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 3633 3634 /* Verify MCP has initialized it */ 3635 if (!nvm_cfg_addr) { 3636 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); 3637 if (p_params->b_relaxed_probe) 3638 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM; 3639 return ECORE_INVAL; 3640 } 3641 3642 /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ 3643 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 3644 3645 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3646 OFFSETOF(struct nvm_cfg1, glob) + 3647 OFFSETOF(struct nvm_cfg1_glob, core_cfg); 3648 3649 core_cfg = ecore_rd(p_hwfn, p_ptt, addr); 3650 3651 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 3652 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 3653 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: 3654 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G; 3655 break; 3656 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 3657 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G; 3658 break; 3659 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: 3660 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G; 3661 break; 3662 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: 3663 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F; 3664 break; 3665 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: 3666 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E; 3667 break; 3668 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: 3669 p_hwfn->hw_info.port_mode = 
ECORE_PORT_MODE_DE_4X20G; 3670 break; 3671 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: 3672 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G; 3673 break; 3674 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 3675 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G; 3676 break; 3677 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: 3678 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G; 3679 break; 3680 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 3681 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G; 3682 break; 3683 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: 3684 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G; 3685 break; 3686 default: 3687 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n", 3688 core_cfg); 3689 break; 3690 } 3691 3692 /* Read DCBX configuration */ 3693 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3694 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3695 dcbx_mode = ecore_rd(p_hwfn, p_ptt, 3696 port_cfg_addr + 3697 OFFSETOF(struct nvm_cfg1_port, generic_cont0)); 3698 dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK) 3699 >> NVM_CFG1_PORT_DCBX_MODE_OFFSET; 3700 switch (dcbx_mode) { 3701 case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC: 3702 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC; 3703 break; 3704 case NVM_CFG1_PORT_DCBX_MODE_CEE: 3705 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE; 3706 break; 3707 case NVM_CFG1_PORT_DCBX_MODE_IEEE: 3708 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE; 3709 break; 3710 default: 3711 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED; 3712 } 3713 3714 /* Read default link configuration */ 3715 link = &p_hwfn->mcp_info->link_input; 3716 p_caps = &p_hwfn->mcp_info->link_capabilities; 3717 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3718 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 3719 link_temp = ecore_rd(p_hwfn, p_ptt, 3720 port_cfg_addr + 3721 OFFSETOF(struct nvm_cfg1_port, speed_cap_mask)); 3722 link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; 3723 link->speed.advertised_speeds = link_temp; 3724 p_caps->speed_capabilities = link->speed.advertised_speeds; 3725 3726 link_temp = ecore_rd(p_hwfn, p_ptt, 3727 port_cfg_addr + 3728 OFFSETOF(struct nvm_cfg1_port, link_settings)); 3729 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> 3730 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { 3731 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: 3732 link->speed.autoneg = true; 3733 break; 3734 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: 3735 link->speed.forced_speed = 1000; 3736 break; 3737 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: 3738 link->speed.forced_speed = 10000; 3739 break; 3740 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: 3741 link->speed.forced_speed = 25000; 3742 break; 3743 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: 3744 link->speed.forced_speed = 40000; 3745 break; 3746 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 3747 link->speed.forced_speed = 50000; 3748 break; 3749 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: 3750 link->speed.forced_speed = 100000; 3751 break; 3752 default: 3753 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", 3754 link_temp); 3755 } 3756 3757 p_caps->default_speed = link->speed.forced_speed; 3758 p_caps->default_speed_autoneg = link->speed.autoneg; 3759 3760 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; 3761 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; 3762 link->pause.autoneg = !!(link_temp & 3763 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); 3764 link->pause.forced_rx = !!(link_temp & 3765 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); 3766 link->pause.forced_tx = !!(link_temp & 
3767 NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); 3768 link->loopback_mode = 0; 3769 3770 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { 3771 link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr + 3772 OFFSETOF(struct nvm_cfg1_port, ext_phy)); 3773 link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; 3774 link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; 3775 p_caps->default_eee = ECORE_MCP_EEE_ENABLED; 3776 link->eee.enable = true; 3777 switch (link_temp) { 3778 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: 3779 p_caps->default_eee = ECORE_MCP_EEE_DISABLED; 3780 link->eee.enable = false; 3781 break; 3782 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: 3783 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; 3784 break; 3785 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: 3786 p_caps->eee_lpi_timer = 3787 EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; 3788 break; 3789 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: 3790 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; 3791 break; 3792 } 3793 link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; 3794 link->eee.tx_lpi_enable = link->eee.enable; 3795 if (link->eee.enable) 3796 link->eee.adv_caps = ECORE_EEE_1G_ADV | 3797 ECORE_EEE_10G_ADV; 3798 } else { 3799 p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED; 3800 } 3801 3802 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 3803 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", 3804 link->speed.forced_speed, link->speed.advertised_speeds, 3805 link->speed.autoneg, link->pause.autoneg, 3806 p_caps->default_eee, p_caps->eee_lpi_timer); 3807 3808 /* Read Multi-function information from shmem */ 3809 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3810 OFFSETOF(struct nvm_cfg1, glob) + 3811 OFFSETOF(struct nvm_cfg1_glob, generic_cont0); 3812 3813 generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr); 3814 3815 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 3816 NVM_CFG1_GLOB_MF_MODE_OFFSET; 3817 3818 switch (mf_mode) { 3819 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 3820 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; 3821 break; 3822 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 3823 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR; 3824 break; 3825 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 3826 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT; 3827 break; 3828 } 3829 DP_INFO(p_hwfn, "Multi function mode is %08x\n", 3830 p_hwfn->p_dev->mf_mode); 3831 3832 /* Read Multi-function information from shmem */ 3833 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 3834 OFFSETOF(struct nvm_cfg1, glob) + 3835 OFFSETOF(struct nvm_cfg1_glob, device_capabilities); 3836 3837 device_capabilities = ecore_rd(p_hwfn, p_ptt, addr); 3838 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 3839 OSAL_SET_BIT(ECORE_DEV_CAP_ETH, 3840 &p_hwfn->hw_info.device_capabilities); 3841 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) 3842 OSAL_SET_BIT(ECORE_DEV_CAP_FCOE, 3843 &p_hwfn->hw_info.device_capabilities); 3844 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) 3845 OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI, 3846 &p_hwfn->hw_info.device_capabilities); 3847 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) 3848 OSAL_SET_BIT(ECORE_DEV_CAP_ROCE, 3849 &p_hwfn->hw_info.device_capabilities); 3850 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP) 3851 OSAL_SET_BIT(ECORE_DEV_CAP_IWARP, 3852 &p_hwfn->hw_info.device_capabilities); 3853 3854 rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 3855 if (rc != ECORE_SUCCESS && 
p_params->b_relaxed_probe) { 3856 rc = ECORE_SUCCESS; 3857 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 3858 } 3859 3860 return rc; 3861 } 3862 3863 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn, 3864 struct ecore_ptt *p_ptt) 3865 { 3866 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 3867 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 3868 struct ecore_dev *p_dev = p_hwfn->p_dev; 3869 3870 num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; 3871 3872 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 3873 * in the other bits are selected. 3874 * Bits 1-15 are for functions 1-15, respectively, and their value is 3875 * '0' only for enabled functions (function 0 always exists and 3876 * is enabled). 3877 * In case of CMT in BB, only the "even" functions are enabled, and thus 3878 * the number of functions for both hwfns is learnt from the same bits. 3879 */ 3880 if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) { 3881 reg_function_hide = ecore_rd(p_hwfn, p_ptt, 3882 MISCS_REG_FUNCTION_HIDE_BB_K2); 3883 } else { /* E5 */ 3884 reg_function_hide = 0; 3885 ECORE_E5_MISSING_CODE; 3886 } 3887 3888 if (reg_function_hide & 0x1) { 3889 if (ECORE_IS_BB(p_dev)) { 3890 if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) { 3891 num_funcs = 0; 3892 eng_mask = 0xaaaa; 3893 } else { 3894 num_funcs = 1; 3895 eng_mask = 0x5554; 3896 } 3897 } else { 3898 num_funcs = 1; 3899 eng_mask = 0xfffe; 3900 } 3901 3902 /* Get the number of the enabled functions on the engine */ 3903 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; 3904 while (tmp) { 3905 if (tmp & 0x1) 3906 num_funcs++; 3907 tmp >>= 0x1; 3908 } 3909 3910 /* Get the PF index within the enabled functions */ 3911 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; 3912 tmp = reg_function_hide & eng_mask & low_pfs_mask; 3913 while (tmp) { 3914 if (tmp & 0x1) 3915 enabled_func_idx--; 3916 tmp >>= 0x1; 3917 } 3918 } 3919 3920 p_hwfn->num_funcs_on_engine = num_funcs; 3921 p_hwfn->enabled_func_idx = enabled_func_idx; 3922 3923 #ifndef ASIC_ONLY 3924 if (CHIP_REV_IS_FPGA(p_dev)) { 3925 DP_NOTICE(p_hwfn, false, 3926 "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n"); 3927 p_hwfn->num_funcs_on_engine = 4; 3928 } 3929 #endif 3930 3931 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 3932 "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", 3933 p_hwfn->rel_pf_id, p_hwfn->abs_pf_id, 3934 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 3935 } 3936 3937 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn, 3938 struct ecore_ptt *p_ptt) 3939 { 3940 u32 port_mode; 3941 3942 #ifndef ASIC_ONLY 3943 /* Read the port mode */ 3944 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 3945 port_mode = 4; 3946 else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && 3947 (p_hwfn->p_dev->num_hwfns > 1)) 3948 /* In CMT on emulation, assume 1 port */ 3949 port_mode = 1; 3950 else 3951 #endif 3952 port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); 3953 3954 if (port_mode < 3) { 3955 p_hwfn->p_dev->num_ports_in_engine = 1; 3956 } else if (port_mode <= 5) { 3957 p_hwfn->p_dev->num_ports_in_engine = 2; 3958 } else { 3959 DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n", 3960 port_mode); 3961 3962 /* Default num_ports_in_engine to something */ 3963 p_hwfn->p_dev->num_ports_in_engine = 1; 3964 } 3965 } 3966 3967 static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn, 3968 struct ecore_ptt *p_ptt) 3969 { 3970 u32 port; 3971
int i; 3972 3973 p_hwfn->p_dev->num_ports_in_engine = 0; 3974 3975 #ifndef ASIC_ONLY 3976 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 3977 port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); 3978 switch ((port & 0xf000) >> 12) { 3979 case 1: 3980 p_hwfn->p_dev->num_ports_in_engine = 1; 3981 break; 3982 case 3: 3983 p_hwfn->p_dev->num_ports_in_engine = 2; 3984 break; 3985 case 0xf: 3986 p_hwfn->p_dev->num_ports_in_engine = 4; 3987 break; 3988 default: 3989 DP_NOTICE(p_hwfn, false, 3990 "Unknown port mode in ECO_RESERVED %08x\n", 3991 port); 3992 } 3993 } else 3994 #endif 3995 for (i = 0; i < MAX_NUM_PORTS_K2; i++) { 3996 port = ecore_rd(p_hwfn, p_ptt, 3997 CNIG_REG_NIG_PORT0_CONF_K2_E5 + (i * 4)); 3998 if (port & 1) 3999 p_hwfn->p_dev->num_ports_in_engine++; 4000 } 4001 4002 if (!p_hwfn->p_dev->num_ports_in_engine) { 4003 DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n"); 4004 4005 /* Default num_ports_in_engine to something */ 4006 p_hwfn->p_dev->num_ports_in_engine = 1; 4007 } 4008 } 4009 4010 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn, 4011 struct ecore_ptt *p_ptt) 4012 { 4013 if (ECORE_IS_BB(p_hwfn->p_dev)) 4014 ecore_hw_info_port_num_bb(p_hwfn, p_ptt); 4015 else 4016 ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt); 4017 } 4018 4019 static enum _ecore_status_t 4020 ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 4021 enum ecore_pci_personality personality, 4022 struct ecore_hw_prepare_params *p_params) 4023 { 4024 bool drv_resc_alloc = p_params->drv_resc_alloc; 4025 enum _ecore_status_t rc; 4026 4027 /* Since all information is common, only the first hwfn should do this */ 4028 if (IS_LEAD_HWFN(p_hwfn)) { 4029 rc = ecore_iov_hw_info(p_hwfn); 4030 if (rc != ECORE_SUCCESS) { 4031 if (p_params->b_relaxed_probe) 4032 p_params->p_relaxed_res = 4033 ECORE_HW_PREPARE_BAD_IOV; 4034 else 4035 return rc; 4036 } 4037 } 4038 4039 /* TODO In get_hw_info, amongst others: 4040 * Get MCP FW revision and determine according to it the supported 4041 * features (e.g. DCB) 4042 * Get boot mode 4043 * ecore_get_pcie_width_speed, WOL capability.
4044 * Number of global CQ-s (for storage 4045 */ 4046 ecore_hw_info_port_num(p_hwfn, p_ptt); 4047 4048 ecore_mcp_get_capabilities(p_hwfn, p_ptt); 4049 4050 #ifndef ASIC_ONLY 4051 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) { 4052 #endif 4053 rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params); 4054 if (rc != ECORE_SUCCESS) 4055 return rc; 4056 #ifndef ASIC_ONLY 4057 } 4058 #endif 4059 4060 rc = ecore_int_igu_read_cam(p_hwfn, p_ptt); 4061 if (rc != ECORE_SUCCESS) { 4062 if (p_params->b_relaxed_probe) 4063 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU; 4064 else 4065 return rc; 4066 } 4067 4068 #ifndef ASIC_ONLY 4069 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) { 4070 #endif 4071 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, 4072 p_hwfn->mcp_info->func_info.mac, ETH_ALEN); 4073 #ifndef ASIC_ONLY 4074 } else { 4075 static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6}; 4076 4077 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN); 4078 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id; 4079 } 4080 #endif 4081 4082 if (ecore_mcp_is_init(p_hwfn)) { 4083 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET) 4084 p_hwfn->hw_info.ovlan = 4085 p_hwfn->mcp_info->func_info.ovlan; 4086 4087 ecore_mcp_cmd_port_init(p_hwfn, p_ptt); 4088 } 4089 4090 if (personality != ECORE_PCI_DEFAULT) { 4091 p_hwfn->hw_info.personality = personality; 4092 } else if (ecore_mcp_is_init(p_hwfn)) { 4093 enum ecore_pci_personality protocol; 4094 4095 protocol = p_hwfn->mcp_info->func_info.protocol; 4096 p_hwfn->hw_info.personality = protocol; 4097 } 4098 4099 #ifndef ASIC_ONLY 4100 /* To overcome ILT lack for emulation, at least until we have 4101 * a definite answer from the system about it, allow only PF0 to be RoCE. 4102 */ 4103 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) { 4104 if (!p_hwfn->rel_pf_id) 4105 p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE; 4106 else 4107 p_hwfn->hw_info.personality = ECORE_PCI_ETH; 4108 } 4109 #endif 4110 4111 /* although in BB some constellations may support more than 4 tcs, 4112 * that can result in a performance penalty in some cases. 4 4113 * represents a good tradeoff between performance and flexibility. 4114 */ 4115 p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; 4116 4117 /* start out with a single active tc. This can be increased either 4118 * by dcbx negotiation or by upper layer driver 4119 */ 4120 p_hwfn->hw_info.num_active_tc = 1; 4121 4122 ecore_get_num_funcs(p_hwfn, p_ptt); 4123 4124 if (ecore_mcp_is_init(p_hwfn)) 4125 p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; 4126 4127 /* In case of forcing the driver's default resource allocation, calling 4128 * ecore_hw_get_resc() should come after initializing the personality 4129 * and after getting the number of functions, since the calculation of 4130 * the resources/features depends on them. 4131 * This order is not harmful if not forcing.
4132 */ 4133 rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc); 4134 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 4135 rc = ECORE_SUCCESS; 4136 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 4137 } 4138 4139 return rc; 4140 } 4141 4142 static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn, 4143 struct ecore_ptt *p_ptt) 4144 { 4145 struct ecore_dev *p_dev = p_hwfn->p_dev; 4146 u16 device_id_mask; 4147 u32 tmp; 4148 4149 /* Read Vendor Id / Device Id */ 4150 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET, 4151 &p_dev->vendor_id); 4152 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET, 4153 &p_dev->device_id); 4154 4155 /* Determine type */ 4156 device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK; 4157 switch (device_id_mask) { 4158 case ECORE_DEV_ID_MASK_BB: 4159 p_dev->type = ECORE_DEV_TYPE_BB; 4160 break; 4161 case ECORE_DEV_ID_MASK_AH: 4162 p_dev->type = ECORE_DEV_TYPE_AH; 4163 break; 4164 case ECORE_DEV_ID_MASK_E5: 4165 p_dev->type = ECORE_DEV_TYPE_E5; 4166 break; 4167 default: 4168 DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n", 4169 p_dev->device_id); 4170 return ECORE_ABORTED; 4171 } 4172 4173 p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_ptt, 4174 MISCS_REG_CHIP_NUM); 4175 p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_ptt, 4176 MISCS_REG_CHIP_REV); 4177 4178 MASK_FIELD(CHIP_REV, p_dev->chip_rev); 4179 4180 /* Learn number of HW-functions */ 4181 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); 4182 4183 if (tmp & (1 << p_hwfn->rel_pf_id)) { 4184 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n"); 4185 p_dev->num_hwfns = 2; 4186 } else { 4187 p_dev->num_hwfns = 1; 4188 } 4189 4190 #ifndef ASIC_ONLY 4191 if (CHIP_REV_IS_EMUL(p_dev)) { 4192 /* For some reason we have problems with this register 4193 * in B0 emulation; Simply assume no CMT 4194 */ 4195 DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n"); 4196 p_dev->num_hwfns = 1; 4197 } 4198 #endif 4199 4200 p_dev->chip_bond_id = ecore_rd(p_hwfn, p_ptt, 4201 MISCS_REG_CHIP_TEST_REG) >> 4; 4202 MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id); 4203 p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_ptt, 4204 MISCS_REG_CHIP_METAL); 4205 MASK_FIELD(CHIP_METAL, p_dev->chip_metal); 4206 DP_INFO(p_dev->hwfns, 4207 "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 4208 ECORE_IS_BB(p_dev) ? 
"BB" : "AH", 4209 'A' + p_dev->chip_rev, (int)p_dev->chip_metal, 4210 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, 4211 p_dev->chip_metal); 4212 4213 if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) { 4214 DP_NOTICE(p_dev->hwfns, false, 4215 "The chip type/rev (BB A0) is not supported!\n"); 4216 return ECORE_ABORTED; 4217 } 4218 4219 #ifndef ASIC_ONLY 4220 if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev)) 4221 ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1); 4222 4223 if (CHIP_REV_IS_EMUL(p_dev)) { 4224 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); 4225 if (tmp & (1 << 29)) { 4226 DP_NOTICE(p_hwfn, false, "Emulation: Running on a FULL build\n"); 4227 p_dev->b_is_emul_full = true; 4228 } else { 4229 DP_NOTICE(p_hwfn, false, "Emulation: Running on a REDUCED build\n"); 4230 } 4231 } 4232 #endif 4233 4234 return ECORE_SUCCESS; 4235 } 4236 4237 void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev) 4238 { 4239 int j; 4240 4241 if (IS_VF(p_dev)) 4242 return; 4243 4244 for_each_hwfn(p_dev, j) { 4245 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 4246 4247 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n"); 4248 4249 p_hwfn->hw_init_done = false; 4250 p_hwfn->first_on_engine = false; 4251 4252 ecore_ptt_invalidate(p_hwfn); 4253 } 4254 } 4255 4256 void ecore_hw_hibernate_resume(struct ecore_dev *p_dev) 4257 { 4258 int j = 0; 4259 4260 if (IS_VF(p_dev)) 4261 return; 4262 4263 for_each_hwfn(p_dev, j) { 4264 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 4265 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 4266 4267 ecore_hw_hwfn_prepare(p_hwfn); 4268 4269 if (!p_ptt) 4270 DP_NOTICE(p_hwfn, true, "ptt acquire failed\n"); 4271 else { 4272 ecore_load_mcp_offsets(p_hwfn, p_ptt); 4273 ecore_ptt_release(p_hwfn, p_ptt); 4274 } 4275 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n"); 4276 } 4277 } 4278 4279 static enum _ecore_status_t ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, 4280 void OSAL_IOMEM *p_regview, 4281 void OSAL_IOMEM *p_doorbells, 4282 struct ecore_hw_prepare_params *p_params) 4283 { 4284 struct ecore_mdump_retain_data mdump_retain; 4285 struct ecore_dev *p_dev = p_hwfn->p_dev; 4286 struct ecore_mdump_info mdump_info; 4287 enum _ecore_status_t rc = ECORE_SUCCESS; 4288 4289 /* Split PCI bars evenly between hwfns */ 4290 p_hwfn->regview = p_regview; 4291 p_hwfn->doorbells = p_doorbells; 4292 4293 if (IS_VF(p_dev)) 4294 return ecore_vf_hw_prepare(p_hwfn); 4295 4296 /* Validate that chip access is feasible */ 4297 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 4298 DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n"); 4299 if (p_params->b_relaxed_probe) 4300 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME; 4301 return ECORE_INVAL; 4302 } 4303 4304 get_function_id(p_hwfn); 4305 4306 /* Allocate PTT pool */ 4307 rc = ecore_ptt_pool_alloc(p_hwfn); 4308 if (rc) { 4309 DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n"); 4310 if (p_params->b_relaxed_probe) 4311 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4312 goto err0; 4313 } 4314 4315 /* Allocate the main PTT */ 4316 p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 4317 4318 /* First hwfn learns basic information, e.g., number of hwfns */ 4319 if (!p_hwfn->my_id) { 4320 rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); 4321 if (rc != ECORE_SUCCESS) { 4322 if (p_params->b_relaxed_probe) 4323 p_params->p_relaxed_res = 4324 ECORE_HW_PREPARE_FAILED_DEV; 4325 goto err1; 4326 } 
4327 } 4328 4329 ecore_hw_hwfn_prepare(p_hwfn); 4330 4331 /* Initialize MCP structure */ 4332 rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 4333 if (rc) { 4334 DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n"); 4335 if (p_params->b_relaxed_probe) 4336 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4337 goto err1; 4338 } 4339 4340 /* Read the device configuration information from the HW and SHMEM */ 4341 rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, 4342 p_params->personality, p_params); 4343 if (rc) { 4344 DP_NOTICE(p_hwfn, true, "Failed to get HW information\n"); 4345 goto err2; 4346 } 4347 4348 /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is 4349 * called, since among others it sets the ports number in an engine. 4350 */ 4351 if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) && 4352 !p_dev->recov_in_prog) { 4353 rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 4354 if (rc != ECORE_SUCCESS) 4355 DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n"); 4356 } 4357 4358 /* Check if mdump logs/data are present and update the epoch value */ 4359 if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) { 4360 rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt, 4361 &mdump_info); 4362 if (rc == ECORE_SUCCESS && mdump_info.num_of_logs) 4363 DP_NOTICE(p_hwfn, false, 4364 "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n"); 4365 4366 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt, 4367 &mdump_retain); 4368 if (rc == ECORE_SUCCESS && mdump_retain.valid) 4369 DP_NOTICE(p_hwfn, false, 4370 "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n", 4371 mdump_retain.epoch, mdump_retain.pf, 4372 mdump_retain.status); 4373 4374 ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, 4375 p_params->epoch); 4376 } 4377 4378 /* Allocate the init RT array and initialize the init-ops engine */ 4379 rc = ecore_init_alloc(p_hwfn); 4380 if (rc) { 4381 DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n"); 4382 if (p_params->b_relaxed_probe) 4383 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 4384 goto err2; 4385 } 4386 4387 #ifndef ASIC_ONLY 4388 if (CHIP_REV_IS_FPGA(p_dev)) { 4389 DP_NOTICE(p_hwfn, false, 4390 "FPGA: workaround; Prevent DMAE parities\n"); 4391 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5, 4392 7); 4393 4394 DP_NOTICE(p_hwfn, false, 4395 "FPGA: workaround: Set VF bar0 size\n"); 4396 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4397 PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4); 4398 } 4399 #endif 4400 4401 return rc; 4402 err2: 4403 if (IS_LEAD_HWFN(p_hwfn)) 4404 ecore_iov_free_hw_info(p_dev); 4405 ecore_mcp_free(p_hwfn); 4406 err1: 4407 ecore_hw_hwfn_free(p_hwfn); 4408 err0: 4409 return rc; 4410 } 4411 4412 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, 4413 struct ecore_hw_prepare_params *p_params) 4414 { 4415 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4416 enum _ecore_status_t rc; 4417 4418 p_dev->chk_reg_fifo = p_params->chk_reg_fifo; 4419 p_dev->allow_mdump = p_params->allow_mdump; 4420 4421 if (p_params->b_relaxed_probe) 4422 p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; 4423 4424 /* Store the precompiled init data ptrs */ 4425 if (IS_PF(p_dev)) 4426 ecore_init_iro_array(p_dev); 4427 4428 /* Initialize the first hwfn - will learn number of hwfns */ 4429 rc = ecore_hw_prepare_single(p_hwfn, 4430 p_dev->regview, 4431 p_dev->doorbells, p_params); 4432 if (rc != ECORE_SUCCESS) 4433 return rc; 4434 4435 p_params->personality = 
p_hwfn->hw_info.personality; 4436 4437 /* initialize 2nd hwfn if necessary */ 4438 if (p_dev->num_hwfns > 1) { 4439 void OSAL_IOMEM *p_regview, *p_doorbell; 4440 u8 OSAL_IOMEM *addr; 4441 4442 /* adjust bar offset for second engine */ 4443 addr = (u8 OSAL_IOMEM *)p_dev->regview + 4444 ecore_hw_bar_size(p_hwfn, 4445 p_hwfn->p_main_ptt, 4446 BAR_ID_0) / 2; 4447 p_regview = (void OSAL_IOMEM *)addr; 4448 4449 addr = (u8 OSAL_IOMEM *)p_dev->doorbells + 4450 ecore_hw_bar_size(p_hwfn, 4451 p_hwfn->p_main_ptt, 4452 BAR_ID_1) / 2; 4453 p_doorbell = (void OSAL_IOMEM *)addr; 4454 4455 /* prepare second hw function */ 4456 rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, 4457 p_doorbell, p_params); 4458 4459 /* in case of error, need to free the previously 4460 * initialized hwfn 0. 4461 */ 4462 if (rc != ECORE_SUCCESS) { 4463 if (p_params->b_relaxed_probe) 4464 p_params->p_relaxed_res = 4465 ECORE_HW_PREPARE_FAILED_ENG2; 4466 4467 if (IS_PF(p_dev)) { 4468 ecore_init_free(p_hwfn); 4469 ecore_mcp_free(p_hwfn); 4470 ecore_hw_hwfn_free(p_hwfn); 4471 } else { 4472 DP_NOTICE(p_dev, true, "What do we need to free when VF hwfn1 init fails\n"); 4473 } 4474 return rc; 4475 } 4476 } 4477 4478 return rc; 4479 } 4480 4481 void ecore_hw_remove(struct ecore_dev *p_dev) 4482 { 4483 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4484 int i; 4485 4486 if (IS_PF(p_dev)) 4487 ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 4488 ECORE_OV_DRIVER_STATE_NOT_LOADED); 4489 4490 for_each_hwfn(p_dev, i) { 4491 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 4492 4493 if (IS_VF(p_dev)) { 4494 ecore_vf_pf_release(p_hwfn); 4495 continue; 4496 } 4497 4498 ecore_init_free(p_hwfn); 4499 ecore_hw_hwfn_free(p_hwfn); 4500 ecore_mcp_free(p_hwfn); 4501 4502 #ifdef CONFIG_ECORE_LOCK_ALLOC 4503 OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex); 4504 #endif 4505 } 4506 4507 ecore_iov_free_hw_info(p_dev); 4508 } 4509 4510 static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev, 4511 struct ecore_chain *p_chain) 4512 { 4513 void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL; 4514 dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 4515 struct ecore_chain_next *p_next; 4516 u32 size, i; 4517 4518 if (!p_virt) 4519 return; 4520 4521 size = p_chain->elem_size * p_chain->usable_per_page; 4522 4523 for (i = 0; i < p_chain->page_cnt; i++) { 4524 if (!p_virt) 4525 break; 4526 4527 p_next = (struct ecore_chain_next *)((u8 *)p_virt + size); 4528 p_virt_next = p_next->next_virt; 4529 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 4530 4531 OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys, 4532 ECORE_CHAIN_PAGE_SIZE); 4533 4534 p_virt = p_virt_next; 4535 p_phys = p_phys_next; 4536 } 4537 } 4538 4539 static void ecore_chain_free_single(struct ecore_dev *p_dev, 4540 struct ecore_chain *p_chain) 4541 { 4542 if (!p_chain->p_virt_addr) 4543 return; 4544 4545 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr, 4546 p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE); 4547 } 4548 4549 static void ecore_chain_free_pbl(struct ecore_dev *p_dev, 4550 struct ecore_chain *p_chain) 4551 { 4552 void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; 4553 u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table; 4554 u32 page_cnt = p_chain->page_cnt, i, pbl_size; 4555 4556 if (!pp_virt_addr_tbl) 4557 return; 4558 4559 if (!p_pbl_virt) 4560 goto out; 4561 4562 for (i = 0; i < page_cnt; i++) { 4563 if (!pp_virt_addr_tbl[i]) 4564 break; 4565 4566 OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i], 4567 *(dma_addr_t *)p_pbl_virt, 4568
ECORE_CHAIN_PAGE_SIZE); 4569 4570 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4571 } 4572 4573 pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4574 4575 if (!p_chain->b_external_pbl) { 4576 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table, 4577 p_chain->pbl_sp.p_phys_table, pbl_size); 4578 } 4579 out: 4580 OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl); 4581 p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL; 4582 } 4583 4584 void ecore_chain_free(struct ecore_dev *p_dev, 4585 struct ecore_chain *p_chain) 4586 { 4587 switch (p_chain->mode) { 4588 case ECORE_CHAIN_MODE_NEXT_PTR: 4589 ecore_chain_free_next_ptr(p_dev, p_chain); 4590 break; 4591 case ECORE_CHAIN_MODE_SINGLE: 4592 ecore_chain_free_single(p_dev, p_chain); 4593 break; 4594 case ECORE_CHAIN_MODE_PBL: 4595 ecore_chain_free_pbl(p_dev, p_chain); 4596 break; 4597 } 4598 } 4599 4600 static enum _ecore_status_t 4601 ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev, 4602 enum ecore_chain_cnt_type cnt_type, 4603 osal_size_t elem_size, u32 page_cnt) 4604 { 4605 u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 4606 4607 /* The actual chain size can be larger than the maximal possible value 4608 * after rounding up the requested elements number to pages, and after 4609 * taking into account the unusable elements (next-ptr elements). 4610 * The size of a "u16" chain can be (U16_MAX + 1) since the chain 4611 * size/capacity fields are of a u32 type. 4612 */ 4613 if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 && 4614 chain_size > ((u32)ECORE_U16_MAX + 1)) || 4615 (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 && 4616 chain_size > ECORE_U32_MAX)) { 4617 DP_NOTICE(p_dev, true, 4618 "The actual chain size (0x%llx) is larger than the maximal possible value\n", 4619 (unsigned long long)chain_size); 4620 return ECORE_INVAL; 4621 } 4622 4623 return ECORE_SUCCESS; 4624 } 4625 4626 static enum _ecore_status_t 4627 ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4628 { 4629 void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL; 4630 dma_addr_t p_phys = 0; 4631 u32 i; 4632 4633 for (i = 0; i < p_chain->page_cnt; i++) { 4634 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4635 ECORE_CHAIN_PAGE_SIZE); 4636 if (!p_virt) { 4637 DP_NOTICE(p_dev, true, 4638 "Failed to allocate chain memory\n"); 4639 return ECORE_NOMEM; 4640 } 4641 4642 if (i == 0) { 4643 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4644 ecore_chain_reset(p_chain); 4645 } else { 4646 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4647 p_virt, p_phys); 4648 } 4649 4650 p_virt_prev = p_virt; 4651 } 4652 /* Last page's next element should point to the beginning of the 4653 * chain.
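 * In other words the next-ptr chain forms a ring; with, say, page_cnt = 3 (an arbitrary count used purely for illustration) the pages link A -> B -> C and then back to A.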
4654 */ 4655 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4656 p_chain->p_virt_addr, 4657 p_chain->p_phys_addr); 4658 4659 return ECORE_SUCCESS; 4660 } 4661 4662 static enum _ecore_status_t 4663 ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 4664 { 4665 dma_addr_t p_phys = 0; 4666 void *p_virt = OSAL_NULL; 4667 4668 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); 4669 if (!p_virt) { 4670 DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n"); 4671 return ECORE_NOMEM; 4672 } 4673 4674 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4675 ecore_chain_reset(p_chain); 4676 4677 return ECORE_SUCCESS; 4678 } 4679 4680 static enum _ecore_status_t 4681 ecore_chain_alloc_pbl(struct ecore_dev *p_dev, 4682 struct ecore_chain *p_chain, 4683 struct ecore_chain_ext_pbl *ext_pbl) 4684 { 4685 u32 page_cnt = p_chain->page_cnt, size, i; 4686 dma_addr_t p_phys = 0, p_pbl_phys = 0; 4687 void **pp_virt_addr_tbl = OSAL_NULL; 4688 u8 *p_pbl_virt = OSAL_NULL; 4689 void *p_virt = OSAL_NULL; 4690 4691 size = page_cnt * sizeof(*pp_virt_addr_tbl); 4692 pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); 4693 if (!pp_virt_addr_tbl) { 4694 DP_NOTICE(p_dev, true, 4695 "Failed to allocate memory for the chain virtual addresses table\n"); 4696 return ECORE_NOMEM; 4697 } 4698 4699 /* The allocation of the PBL table is done with its full size, since it 4700 * is expected to be successive. 4701 * ecore_chain_init_pbl_mem() is called even in a case of an allocation 4702 * failure, since pp_virt_addr_tbl was previously allocated, and it 4703 * should be saved to allow its freeing during the error flow. 4704 */ 4705 size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 4706 4707 if (ext_pbl == OSAL_NULL) { 4708 p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); 4709 } else { 4710 p_pbl_virt = ext_pbl->p_pbl_virt; 4711 p_pbl_phys = ext_pbl->p_pbl_phys; 4712 p_chain->b_external_pbl = true; 4713 } 4714 4715 ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, 4716 pp_virt_addr_tbl); 4717 if (!p_pbl_virt) { 4718 DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n"); 4719 return ECORE_NOMEM; 4720 } 4721 4722 for (i = 0; i < page_cnt; i++) { 4723 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 4724 ECORE_CHAIN_PAGE_SIZE); 4725 if (!p_virt) { 4726 DP_NOTICE(p_dev, true, 4727 "Failed to allocate chain memory\n"); 4728 return ECORE_NOMEM; 4729 } 4730 4731 if (i == 0) { 4732 ecore_chain_init_mem(p_chain, p_virt, p_phys); 4733 ecore_chain_reset(p_chain); 4734 } 4735 4736 /* Fill the PBL table with the physical address of the page */ 4737 *(dma_addr_t *)p_pbl_virt = p_phys; 4738 /* Keep the virtual address of the page */ 4739 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; 4740 4741 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 4742 } 4743 4744 return ECORE_SUCCESS; 4745 } 4746 4747 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, 4748 enum ecore_chain_use_mode intended_use, 4749 enum ecore_chain_mode mode, 4750 enum ecore_chain_cnt_type cnt_type, 4751 u32 num_elems, osal_size_t elem_size, 4752 struct ecore_chain *p_chain, 4753 struct ecore_chain_ext_pbl *ext_pbl) 4754 { 4755 u32 page_cnt; 4756 enum _ecore_status_t rc = ECORE_SUCCESS; 4757 4758 if (mode == ECORE_CHAIN_MODE_SINGLE) 4759 page_cnt = 1; 4760 else 4761 page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 4762 4763 rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, 4764 page_cnt); 4765 if (rc) { 4766 DP_NOTICE(p_dev, true, 4767 "Cannot allocate a chain with the 
given arguments:\n" 4768 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 4769 intended_use, mode, cnt_type, num_elems, elem_size); 4770 return rc; 4771 } 4772 4773 ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use, 4774 mode, cnt_type, p_dev->dp_ctx); 4775 4776 switch (mode) { 4777 case ECORE_CHAIN_MODE_NEXT_PTR: 4778 rc = ecore_chain_alloc_next_ptr(p_dev, p_chain); 4779 break; 4780 case ECORE_CHAIN_MODE_SINGLE: 4781 rc = ecore_chain_alloc_single(p_dev, p_chain); 4782 break; 4783 case ECORE_CHAIN_MODE_PBL: 4784 rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl); 4785 break; 4786 } 4787 if (rc) 4788 goto nomem; 4789 4790 return ECORE_SUCCESS; 4791 4792 nomem: 4793 ecore_chain_free(p_dev, p_chain); 4794 return rc; 4795 } 4796 4797 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, 4798 u16 src_id, u16 *dst_id) 4799 { 4800 if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { 4801 u16 min, max; 4802 4803 min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE); 4804 max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 4805 DP_NOTICE(p_hwfn, true, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 4806 src_id, min, max); 4807 4808 return ECORE_INVAL; 4809 } 4810 4811 *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id; 4812 4813 return ECORE_SUCCESS; 4814 } 4815 4816 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, 4817 u8 src_id, u8 *dst_id) 4818 { 4819 if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { 4820 u8 min, max; 4821 4822 min = (u8)RESC_START(p_hwfn, ECORE_VPORT); 4823 max = min + RESC_NUM(p_hwfn, ECORE_VPORT); 4824 DP_NOTICE(p_hwfn, true, "vport id [%d] is not valid, available indices [%d - %d]\n", 4825 src_id, min, max); 4826 4827 return ECORE_INVAL; 4828 } 4829 4830 *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id; 4831 4832 return ECORE_SUCCESS; 4833 } 4834 4835 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, 4836 u8 src_id, u8 *dst_id) 4837 { 4838 if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) { 4839 u8 min, max; 4840 4841 min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG); 4842 max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG); 4843 DP_NOTICE(p_hwfn, true, "rss_eng id [%d] is not valid, available indices [%d - %d]\n", 4844 src_id, min, max); 4845 4846 return ECORE_INVAL; 4847 } 4848 4849 *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id; 4850 4851 return ECORE_SUCCESS; 4852 } 4853 4854 static enum _ecore_status_t 4855 ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4856 struct ecore_ptt *p_ptt, u32 high, u32 low, 4857 u32 *p_entry_num) 4858 { 4859 u32 en; 4860 int i; 4861 4862 /* Find a free entry and utilize it */ 4863 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4864 en = ecore_rd(p_hwfn, p_ptt, 4865 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4866 i * sizeof(u32)); 4867 if (en) 4868 continue; 4869 ecore_wr(p_hwfn, p_ptt, 4870 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4871 2 * i * sizeof(u32), low); 4872 ecore_wr(p_hwfn, p_ptt, 4873 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4874 (2 * i + 1) * sizeof(u32), high); 4875 ecore_wr(p_hwfn, p_ptt, 4876 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 4877 i * sizeof(u32), 0); 4878 ecore_wr(p_hwfn, p_ptt, 4879 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 4880 i * sizeof(u32), 0); 4881 ecore_wr(p_hwfn, p_ptt, 4882 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 4883 i * sizeof(u32), 1); 4884 break; 4885 } 4886 4887 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4888 return ECORE_NORESOURCES; 4889 4890 *p_entry_num = i; 4891 4892 return ECORE_SUCCESS; 4893 } 4894 4895 /* OSAL_UNUSED is 
temporary used to avoid unused-parameter compilation warnings. 4896 * Should be removed when the function is implemented. 4897 */ 4898 static enum _ecore_status_t 4899 ecore_llh_add_mac_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn, 4900 struct ecore_ptt OSAL_UNUSED *p_ptt, 4901 u32 OSAL_UNUSED high, u32 OSAL_UNUSED low, 4902 u32 OSAL_UNUSED *p_entry_num) 4903 { 4904 ECORE_E5_MISSING_CODE; 4905 4906 return ECORE_NOTIMPL; 4907 } 4908 4909 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn, 4910 struct ecore_ptt *p_ptt, u8 *p_filter) 4911 { 4912 u32 high, low, entry_num; 4913 enum _ecore_status_t rc; 4914 4915 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 4916 return ECORE_SUCCESS; 4917 4918 high = p_filter[1] | (p_filter[0] << 8); 4919 low = p_filter[5] | (p_filter[4] << 8) | 4920 (p_filter[3] << 16) | (p_filter[2] << 24); 4921 4922 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 4923 rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low, 4924 &entry_num); 4925 else /* E5 */ 4926 rc = ecore_llh_add_mac_filter_e5(p_hwfn, p_ptt, high, low, 4927 &entry_num); 4928 if (rc != ECORE_SUCCESS) { 4929 DP_NOTICE(p_hwfn, false, 4930 "Failed to find an empty LLH filter to utilize\n"); 4931 return rc; 4932 } 4933 4934 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 4935 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at LLH entry %d\n", 4936 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 4937 p_filter[4], p_filter[5], entry_num); 4938 4939 return ECORE_SUCCESS; 4940 } 4941 4942 static enum _ecore_status_t 4943 ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn, 4944 struct ecore_ptt *p_ptt, u32 high, u32 low, 4945 u32 *p_entry_num) 4946 { 4947 int i; 4948 4949 /* Find the entry and clean it */ 4950 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 4951 if (ecore_rd(p_hwfn, p_ptt, 4952 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4953 2 * i * sizeof(u32)) != low) 4954 continue; 4955 if (ecore_rd(p_hwfn, p_ptt, 4956 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4957 (2 * i + 1) * sizeof(u32)) != high) 4958 continue; 4959 4960 ecore_wr(p_hwfn, p_ptt, 4961 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 4962 ecore_wr(p_hwfn, p_ptt, 4963 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4964 2 * i * sizeof(u32), 0); 4965 ecore_wr(p_hwfn, p_ptt, 4966 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 4967 (2 * i + 1) * sizeof(u32), 0); 4968 break; 4969 } 4970 4971 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 4972 return ECORE_INVAL; 4973 4974 *p_entry_num = i; 4975 4976 return ECORE_SUCCESS; 4977 } 4978 4979 /* OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings. 4980 * Should be removed when the function is implemented. 
4981 */ 4982 static enum _ecore_status_t 4983 ecore_llh_remove_mac_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn, 4984 struct ecore_ptt OSAL_UNUSED *p_ptt, 4985 u32 OSAL_UNUSED high, u32 OSAL_UNUSED low, 4986 u32 OSAL_UNUSED *p_entry_num) 4987 { 4988 ECORE_E5_MISSING_CODE; 4989 4990 return ECORE_NOTIMPL; 4991 } 4992 4993 void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn, 4994 struct ecore_ptt *p_ptt, u8 *p_filter) 4995 { 4996 u32 high, low, entry_num; 4997 enum _ecore_status_t rc; 4998 4999 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 5000 return; 5001 5002 high = p_filter[1] | (p_filter[0] << 8); 5003 low = p_filter[5] | (p_filter[4] << 8) | 5004 (p_filter[3] << 16) | (p_filter[2] << 24); 5005 5006 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 5007 rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high, 5008 low, &entry_num); 5009 else /* E5 */ 5010 rc = ecore_llh_remove_mac_filter_e5(p_hwfn, p_ptt, high, low, 5011 &entry_num); 5012 if (rc != ECORE_SUCCESS) { 5013 DP_NOTICE(p_hwfn, false, 5014 "Tried to remove a non-configured filter [MAC %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx]\n", 5015 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 5016 p_filter[4], p_filter[5]); 5017 return; 5018 } 5019 5020 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5021 "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from LLH entry %d\n", 5022 p_filter[0], p_filter[1], p_filter[2], p_filter[3], 5023 p_filter[4], p_filter[5], entry_num); 5024 } 5025 5026 static enum _ecore_status_t 5027 ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 5028 struct ecore_ptt *p_ptt, 5029 enum ecore_llh_port_filter_type_t type, 5030 u32 high, u32 low, u32 *p_entry_num) 5031 { 5032 u32 en; 5033 int i; 5034 5035 /* Find a free entry and utilize it */ 5036 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 5037 en = ecore_rd(p_hwfn, p_ptt, 5038 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 5039 i * sizeof(u32)); 5040 if (en) 5041 continue; 5042 ecore_wr(p_hwfn, p_ptt, 5043 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5044 2 * i * sizeof(u32), low); 5045 ecore_wr(p_hwfn, p_ptt, 5046 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5047 (2 * i + 1) * sizeof(u32), high); 5048 ecore_wr(p_hwfn, p_ptt, 5049 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 5050 i * sizeof(u32), 1); 5051 ecore_wr(p_hwfn, p_ptt, 5052 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 5053 i * sizeof(u32), 1 << type); 5054 ecore_wr(p_hwfn, p_ptt, 5055 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1); 5056 break; 5057 } 5058 5059 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 5060 return ECORE_NORESOURCES; 5061 5062 *p_entry_num = i; 5063 5064 return ECORE_SUCCESS; 5065 } 5066 5067 /* OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings. 5068 * Should be removed when the function is implemented. 
5069 */ 5070 static enum _ecore_status_t 5071 ecore_llh_add_protocol_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn, 5072 struct ecore_ptt OSAL_UNUSED *p_ptt, 5073 enum ecore_llh_port_filter_type_t OSAL_UNUSED type, 5074 u32 OSAL_UNUSED high, u32 OSAL_UNUSED low, 5075 u32 OSAL_UNUSED *p_entry_num) 5076 { 5077 ECORE_E5_MISSING_CODE; 5078 5079 return ECORE_NOTIMPL; 5080 } 5081 5082 enum _ecore_status_t 5083 ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn, 5084 struct ecore_ptt *p_ptt, 5085 u16 source_port_or_eth_type, 5086 u16 dest_port, 5087 enum ecore_llh_port_filter_type_t type) 5088 { 5089 u32 high, low, entry_num; 5090 enum _ecore_status_t rc; 5091 5092 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 5093 return ECORE_SUCCESS; 5094 5095 high = 0; 5096 low = 0; 5097 switch (type) { 5098 case ECORE_LLH_FILTER_ETHERTYPE: 5099 high = source_port_or_eth_type; 5100 break; 5101 case ECORE_LLH_FILTER_TCP_SRC_PORT: 5102 case ECORE_LLH_FILTER_UDP_SRC_PORT: 5103 low = source_port_or_eth_type << 16; 5104 break; 5105 case ECORE_LLH_FILTER_TCP_DEST_PORT: 5106 case ECORE_LLH_FILTER_UDP_DEST_PORT: 5107 low = dest_port; 5108 break; 5109 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 5110 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 5111 low = (source_port_or_eth_type << 16) | dest_port; 5112 break; 5113 default: 5114 DP_NOTICE(p_hwfn, true, 5115 "Non valid LLH protocol filter type %d\n", type); 5116 return ECORE_INVAL; 5117 } 5118 5119 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 5120 rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 5121 high, low, &entry_num); 5122 else /* E5 */ 5123 rc = ecore_llh_add_protocol_filter_e5(p_hwfn, p_ptt, type, high, 5124 low, &entry_num); 5125 if (rc != ECORE_SUCCESS) { 5126 DP_NOTICE(p_hwfn, false, 5127 "Failed to find an empty LLH filter to utilize\n"); 5128 return rc; 5129 } 5130 5131 switch (type) { 5132 case ECORE_LLH_FILTER_ETHERTYPE: 5133 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5134 "ETH type %x is added at LLH entry %d\n", 5135 source_port_or_eth_type, entry_num); 5136 break; 5137 case ECORE_LLH_FILTER_TCP_SRC_PORT: 5138 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5139 "TCP src port %x is added at LLH entry %d\n", 5140 source_port_or_eth_type, entry_num); 5141 break; 5142 case ECORE_LLH_FILTER_UDP_SRC_PORT: 5143 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5144 "UDP src port %x is added at LLH entry %d\n", 5145 source_port_or_eth_type, entry_num); 5146 break; 5147 case ECORE_LLH_FILTER_TCP_DEST_PORT: 5148 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5149 "TCP dst port %x is added at LLH entry %d\n", 5150 dest_port, entry_num); 5151 break; 5152 case ECORE_LLH_FILTER_UDP_DEST_PORT: 5153 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5154 "UDP dst port %x is added at LLH entry %d\n", 5155 dest_port, entry_num); 5156 break; 5157 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 5158 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5159 "TCP src/dst ports %x/%x are added at LLH entry %d\n", 5160 source_port_or_eth_type, dest_port, entry_num); 5161 break; 5162 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 5163 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5164 "UDP src/dst ports %x/%x are added at LLH entry %d\n", 5165 source_port_or_eth_type, dest_port, entry_num); 5166 break; 5167 } 5168 5169 return ECORE_SUCCESS; 5170 } 5171 5172 static enum _ecore_status_t 5173 ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn, 5174 struct ecore_ptt *p_ptt, 5175 enum ecore_llh_port_filter_type_t type, 5176 u32 high, u32 low, u32 *p_entry_num) 5177 { 5178 int i; 5179 5180 /* Find the entry and clean it */ 5181 for (i = 
0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 5182 if (!ecore_rd(p_hwfn, p_ptt, 5183 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 5184 i * sizeof(u32))) 5185 continue; 5186 if (!ecore_rd(p_hwfn, p_ptt, 5187 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 5188 i * sizeof(u32))) 5189 continue; 5190 if (!(ecore_rd(p_hwfn, p_ptt, 5191 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 5192 i * sizeof(u32)) & (1 << type))) 5193 continue; 5194 if (ecore_rd(p_hwfn, p_ptt, 5195 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5196 2 * i * sizeof(u32)) != low) 5197 continue; 5198 if (ecore_rd(p_hwfn, p_ptt, 5199 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5200 (2 * i + 1) * sizeof(u32)) != high) 5201 continue; 5202 5203 ecore_wr(p_hwfn, p_ptt, 5204 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0); 5205 ecore_wr(p_hwfn, p_ptt, 5206 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + 5207 i * sizeof(u32), 0); 5208 ecore_wr(p_hwfn, p_ptt, 5209 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + 5210 i * sizeof(u32), 0); 5211 ecore_wr(p_hwfn, p_ptt, 5212 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5213 2 * i * sizeof(u32), 0); 5214 ecore_wr(p_hwfn, p_ptt, 5215 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5216 (2 * i + 1) * sizeof(u32), 0); 5217 break; 5218 } 5219 5220 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 5221 return ECORE_INVAL; 5222 5223 *p_entry_num = i; 5224 5225 return ECORE_SUCCESS; 5226 } 5227 5228 /* OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings. 5229 * Should be removed when the function is implemented. 5230 */ 5231 static enum _ecore_status_t 5232 ecore_llh_remove_protocol_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn, 5233 struct ecore_ptt OSAL_UNUSED *p_ptt, 5234 enum ecore_llh_port_filter_type_t OSAL_UNUSED type, 5235 u32 OSAL_UNUSED high, u32 OSAL_UNUSED low, 5236 u32 OSAL_UNUSED *p_entry_num) 5237 { 5238 ECORE_E5_MISSING_CODE; 5239 5240 return ECORE_NOTIMPL; 5241 } 5242 5243 void 5244 ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn, 5245 struct ecore_ptt *p_ptt, 5246 u16 source_port_or_eth_type, 5247 u16 dest_port, 5248 enum ecore_llh_port_filter_type_t type) 5249 { 5250 u32 high, low, entry_num; 5251 enum _ecore_status_t rc; 5252 5253 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 5254 return; 5255 5256 high = 0; 5257 low = 0; 5258 switch (type) { 5259 case ECORE_LLH_FILTER_ETHERTYPE: 5260 high = source_port_or_eth_type; 5261 break; 5262 case ECORE_LLH_FILTER_TCP_SRC_PORT: 5263 case ECORE_LLH_FILTER_UDP_SRC_PORT: 5264 low = source_port_or_eth_type << 16; 5265 break; 5266 case ECORE_LLH_FILTER_TCP_DEST_PORT: 5267 case ECORE_LLH_FILTER_UDP_DEST_PORT: 5268 low = dest_port; 5269 break; 5270 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 5271 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 5272 low = (source_port_or_eth_type << 16) | dest_port; 5273 break; 5274 default: 5275 DP_NOTICE(p_hwfn, true, 5276 "Non valid LLH protocol filter type %d\n", type); 5277 return; 5278 } 5279 5280 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 5281 rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type, 5282 high, low, 5283 &entry_num); 5284 else /* E5 */ 5285 rc = ecore_llh_remove_protocol_filter_e5(p_hwfn, p_ptt, type, 5286 high, low, &entry_num); 5287 if (rc != ECORE_SUCCESS) { 5288 DP_NOTICE(p_hwfn, false, 5289 "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n", 5290 type, source_port_or_eth_type, dest_port); 5291 return; 5292 } 5293 5294 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 5295 "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was 
removed from LLH entry %d\n", 5296 type, source_port_or_eth_type, dest_port, entry_num); 5297 } 5298 5299 static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn, 5300 struct ecore_ptt *p_ptt) 5301 { 5302 int i; 5303 5304 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 5305 ecore_wr(p_hwfn, p_ptt, 5306 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + 5307 i * sizeof(u32), 0); 5308 ecore_wr(p_hwfn, p_ptt, 5309 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5310 2 * i * sizeof(u32), 0); 5311 ecore_wr(p_hwfn, p_ptt, 5312 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 5313 (2 * i + 1) * sizeof(u32), 0); 5314 } 5315 } 5316 5317 /* OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings. 5318 * Should be removed when the function is implemented. 5319 */ 5320 static void ecore_llh_clear_all_filters_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn, 5321 struct ecore_ptt OSAL_UNUSED *p_ptt) 5322 { 5323 ECORE_E5_MISSING_CODE; 5324 } 5325 5326 void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn, 5327 struct ecore_ptt *p_ptt) 5328 { 5329 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 5330 return; 5331 5332 if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) 5333 ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt); 5334 else /* E5 */ 5335 ecore_llh_clear_all_filters_e5(p_hwfn, p_ptt); 5336 } 5337 5338 enum _ecore_status_t 5339 ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn, 5340 struct ecore_ptt *p_ptt) 5341 { 5342 if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) { 5343 ecore_wr(p_hwfn, p_ptt, 5344 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 5345 1 << p_hwfn->abs_pf_id / 2); 5346 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0); 5347 return ECORE_SUCCESS; 5348 } else { 5349 DP_NOTICE(p_hwfn, false, 5350 "This function can't be set as default\n"); 5351 return ECORE_INVAL; 5352 } 5353 } 5354 5355 static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn, 5356 struct ecore_ptt *p_ptt, 5357 u32 hw_addr, void *p_eth_qzone, 5358 osal_size_t eth_qzone_size, 5359 u8 timeset) 5360 { 5361 struct coalescing_timeset *p_coal_timeset; 5362 5363 if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) { 5364 DP_NOTICE(p_hwfn, true, 5365 "Coalescing configuration not enabled\n"); 5366 return ECORE_INVAL; 5367 } 5368 5369 p_coal_timeset = p_eth_qzone; 5370 OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size); 5371 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); 5372 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); 5373 ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); 5374 5375 return ECORE_SUCCESS; 5376 } 5377 5378 enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, 5379 u16 rx_coal, u16 tx_coal, 5380 void *p_handle) 5381 { 5382 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 5383 enum _ecore_status_t rc = ECORE_SUCCESS; 5384 struct ecore_ptt *p_ptt; 5385 5386 /* TODO - Configuring a single queue's coalescing but 5387 * claiming all queues are abiding same configuration 5388 * for PF and VF both. 
enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
					      u16 rx_coal, u16 tx_coal,
					      void *p_handle)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_ptt *p_ptt;

	/* TODO - Only a single queue's coalescing is configured here, yet the
	 * same configuration is reported for all queues of the PF/VF.
	 */

#ifdef CONFIG_ECORE_SRIOV
	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
						tx_coal, p_cid);
#endif /* #ifdef CONFIG_ECORE_SRIOV */

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	if (rx_coal) {
		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
		if (rc)
			goto out;
		p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
	}

	if (tx_coal) {
		rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
		if (rc)
			goto out;
		p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
	}
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u16 coalesce,
					    struct ecore_queue_cid *p_cid)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u32 address;
	enum _ecore_status_t rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F)
		timer_res = 0;
	else if (coalesce <= 0xFF)
		timer_res = 1;
	else if (coalesce <= 0x1FF)
		timer_res = 2;
	else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return ECORE_INVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
				     p_cid->sb_igu_id, false);
	if (rc != ECORE_SUCCESS)
		goto out;

	address = BAR0_MAP_REG_USDM_RAM +
		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);

	rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
				sizeof(struct ustorm_eth_queue_zone), timeset);
	if (rc != ECORE_SUCCESS)
		goto out;

out:
	return rc;
}

enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u16 coalesce,
					    struct ecore_queue_cid *p_cid)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u32 address;
	enum _ecore_status_t rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F)
		timer_res = 0;
	else if (coalesce <= 0xFF)
		timer_res = 1;
	else if (coalesce <= 0x1FF)
		timer_res = 2;
	else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return ECORE_INVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
				     p_cid->sb_igu_id, true);
	if (rc != ECORE_SUCCESS)
		goto out;

	address = BAR0_MAP_REG_XSDM_RAM +
		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);

	rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
				sizeof(struct xstorm_eth_queue_zone), timeset);
out:
	return rc;
}
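
/* Worked example of the coalesce encoding above (illustrative values only):
 * a requested coalesce of 300 usec is above 0xFF but fits within 0x1FF, so
 * timer_res = 2 and timeset = 300 >> 2 = 75; the effective interval is then
 * approximately timeset << timer_res = 75 * 4 = 300 usec. Values above
 * 0x1FF (511) are rejected with ECORE_INVAL. A client would typically reach
 * this path through something like
 *
 *	rc = ecore_set_queue_coalesce(p_hwfn, 300, 150, p_queue_handle);
 *
 * where p_queue_handle stands for the caller's ecore_queue_cid handle (the
 * name is illustrative, not a symbol defined in this file).
 */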
/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
 */
static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt,
					       u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) /
					    min_pf_rate;
		ecore_init_vport_wfq(p_hwfn, p_ptt,
				     vport_params[i].first_tx_pq_id,
				     vport_params[i].vport_wfq);
	}
}

static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}

static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		ecore_init_wfq_default_param(p_hwfn);
		ecore_init_vport_wfq(p_hwfn, p_ptt,
				     vport_params[i].first_tx_pq_id,
				     vport_params[i].vport_wfq);
	}
}
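
/* Illustrative note (assumed values, not taken from the driver): treating
 * ECORE_WFQ_UNIT as 100, as the "one percent" checks below suggest, a vport
 * with wfq_data min_speed = 2000 Mbps under min_pf_rate = 10000 Mbps gets
 * vport_wfq = (2000 * 100) / 10000 = 20 from the formula above, which
 * corresponds to an approximate guaranteed rate of
 * 20 * 10000 / 100 = 2000 Mbps.
 */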
/* This function performs several validations for WFQ
 * configuration and the required min rate for a given vport:
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate must not cause the rates of other vports [those not explicitly
 *    configured for WFQ] to drop below one percent of min_pf_rate.
 * 3. total_req_min_rate [the sum of all vports' min rates] must not exceed
 *    min_pf_rate.
 */
static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
						 u16 vport_id, u32 req_rate,
						 u32 min_pf_rate)
{
	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
		u32 tmp_speed;

		if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
			req_count++;
			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
			total_req_min_rate += tmp_speed;
		}
	}

	/* Include current vport data as well */
	req_count++;
	total_req_min_rate += req_rate;
	non_requested_count = num_vports - req_count;

	/* validate possible error cases */
	if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return ECORE_INVAL;
	}

	/* TBD - for number of vports greater than 100 */
	if (num_vports > ECORE_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Number of vports is greater than %d\n",
			   ECORE_WFQ_UNIT);
		return ECORE_INVAL;
	}

	if (total_req_min_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   total_req_min_rate, min_pf_rate);
		return ECORE_INVAL;
	}

	/* Rate left for the non-requested vports */
	total_left_rate = min_pf_rate - total_req_min_rate;
	left_rate_per_vp = total_left_rate / non_requested_count;

	/* validate that non-requested vports don't fall below 1% of min bw */
	if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   left_rate_per_vp, min_pf_rate);
		return ECORE_INVAL;
	}

	/* req_rate for the given vport passes all the checks;
	 * assign final wfq rates to all vports.
	 */
	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
	p_hwfn->qm_info.wfq_data[vport_id].configured = true;

	for (i = 0; i < num_vports; i++) {
		if (p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
	}

	return ECORE_SUCCESS;
}
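
/* Worked example of the checks above (illustrative numbers only): with
 * min_pf_rate = 10000 Mbps, four vports, and vport 0 requesting
 * req_rate = 4000 Mbps while no other vport is configured,
 * total_req_min_rate = 4000 <= 10000 and
 * left_rate_per_vp = (10000 - 4000) / 3 = 2000 Mbps, which is above one
 * percent of min_pf_rate; the request is therefore accepted and each of the
 * three non-requested vports is assigned min_speed = 2000 Mbps.
 */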
static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u16 vp_id, u32 rate)
{
	struct ecore_mcp_link_state *p_link;
	int rc = ECORE_SUCCESS;

	p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (rc == ECORE_SUCCESS)
		ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						   p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn, false,
			  "Validation failed while configuring min rate\n");

	return rc;
}

static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = ECORE_SUCCESS;
	u16 i;

	/* Validate all pre configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (rc == ECORE_SUCCESS && use_wfq)
		ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);

	return rc;
}
/* Main API for ecore clients to configure a vport's min rate.
 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
 * rate - Speed in Mbps that needs to be assigned to the given vport.
 */
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
{
	int i, rc = ECORE_INVAL;

	/* TBD - for multiple hardware functions - that is 100 gig */
	if (p_dev->num_hwfns > 1) {
		DP_NOTICE(p_dev, false,
			  "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_ptt *p_ptt;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		if (rc != ECORE_SUCCESS) {
			ecore_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		ecore_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

/* API to configure WFQ from mcp link change */
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
					   struct ecore_ptt *p_ptt,
					   u32 min_pf_rate)
{
	int i;

	/* TBD - for multiple hardware functions - that is 100 gig */
	if (p_dev->num_hwfns > 1) {
		DP_VERBOSE(p_dev, ECORE_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		__ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
							min_pf_rate);
	}
}
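
/* Illustrative usage sketch (not part of the driver): an ecore client that
 * wants to guarantee roughly 2500 Mbps to vport 3 would call the API above
 * with the PF-relative vport id and the rate in Mbps, e.g.
 *
 *	rc = ecore_configure_vport_wfq(p_dev, 3, 2500);
 *
 * and check rc for ECORE_INVAL (unsupported device or failed validation).
 * If the MCP link has not yet reported a min_pf_rate, the rate is only
 * cached in wfq_data and is applied later by
 * ecore_configure_vp_wfq_on_link_change().
 */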
int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 max_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			      p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
{
	int i, rc = ECORE_INVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
							p_link, max_bw);

		ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 min_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
{
	int i, rc = ECORE_INVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
							p_link, min_bw);
		if (rc != ECORE_SUCCESS) {
			ecore_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
								     p_ptt,
								     min_rate);
		}

		ecore_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
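
/* Worked example of the min/max bandwidth math above (illustrative numbers
 * only): with line_speed = 25000 Mbps, ecore_configure_pf_max_bandwidth(p_dev,
 * 40) yields p_link->speed = 25000 * 40 / 100 = 10000 Mbps as the PF rate
 * limit, while ecore_configure_pf_min_bandwidth(p_dev, 20) yields
 * p_link->min_pf_rate = 25000 * 20 / 100 = 5000 Mbps, which is then fed back
 * into the per-vport WFQ validation via
 * __ecore_configure_vp_wfq_on_link_change(). max_bw == 100 is special-cased:
 * pf_rl is set to 100000 so Tx-switched traffic is effectively not limited.
 */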
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);

	OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
		    sizeof(*p_hwfn->qm_info.wfq_data) *
		    p_hwfn->qm_info.num_vports);
}

int ecore_device_num_engines(struct ecore_dev *p_dev)
{
	return ECORE_IS_BB(p_dev) ? 2 : 1;
}

int ecore_device_num_ports(struct ecore_dev *p_dev)
{
	/* in CMT always only one port */
	if (p_dev->num_hwfns > 1)
		return 1;

	return p_dev->num_ports_in_engine * ecore_device_num_engines(p_dev);
}

int ecore_device_get_port_id(struct ecore_dev *p_dev)
{
	return (ECORE_LEADING_HWFN(p_dev)->abs_pf_id) %
	       ecore_device_num_ports(p_dev);
}

void ecore_set_fw_mac_addr(__le16 *fw_msb,
			   __le16 *fw_mid,
			   __le16 *fw_lsb,
			   u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
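
/* Illustrative note (example values only): for mac = 00:11:22:33:44:55 the
 * helper above stores the little-endian value 0x0011 in *fw_msb, 0x2233 in
 * *fw_mid and 0x4455 in *fw_lsb, i.e. each __le16 word carries two
 * consecutive MAC bytes with their order swapped so that firmware reads the
 * address in network byte order.
 */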