1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File : ecore_dev.c 30 */ 31 #include <sys/cdefs.h> 32 #include "bcm_osal.h" 33 #include "reg_addr.h" 34 #include "ecore_gtt_reg_addr.h" 35 #include "ecore.h" 36 #include "ecore_chain.h" 37 #include "ecore_status.h" 38 #include "ecore_hw.h" 39 #include "ecore_rt_defs.h" 40 #include "ecore_init_ops.h" 41 #include "ecore_int.h" 42 #include "ecore_cxt.h" 43 #include "ecore_spq.h" 44 #include "ecore_init_fw_funcs.h" 45 #include "ecore_sp_commands.h" 46 #include "ecore_dev_api.h" 47 #include "ecore_sriov.h" 48 #include "ecore_vf.h" 49 #include "ecore_ll2.h" 50 #include "ecore_fcoe.h" 51 #include "ecore_iscsi.h" 52 #include "ecore_ooo.h" 53 #include "ecore_mcp.h" 54 #include "ecore_hw_defs.h" 55 #include "mcp_public.h" 56 #include "ecore_rdma.h" 57 #include "ecore_iro.h" 58 #include "nvm_cfg.h" 59 #include "ecore_dev_api.h" 60 #include "ecore_dcbx.h" 61 #include "pcics_reg_driver.h" 62 #include "ecore_l2.h" 63 #ifndef LINUX_REMOVE 64 #include "ecore_tcp_ip.h" 65 #endif 66 67 #ifdef _NTDDK_ 68 #pragma warning(push) 69 #pragma warning(disable : 28167) 70 #pragma warning(disable : 28123) 71 #endif 72 73 /* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM 74 * registers involved are not split and thus configuration is a race where 75 * some of the PFs configuration might be lost. 76 * Eventually, this needs to move into a MFW-covered HW-lock as arbitration 77 * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where 78 * there's more than a single compiled ecore component in system]. 79 */ 80 static osal_spinlock_t qm_lock; 81 static u32 qm_lock_ref_cnt; 82 83 void ecore_set_ilt_page_size(struct ecore_dev *p_dev, u8 ilt_page_size) 84 { 85 p_dev->ilt_page_size = ilt_page_size; 86 } 87 88 /******************** Doorbell Recovery *******************/ 89 /* The doorbell recovery mechanism consists of a list of entries which represent 90 * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). 
Each 91 * entity needs to register with the mechanism and provide the parameters 92 * describing its doorbell, including a location where last used doorbell data 93 * can be found. The doorbell execute function will traverse the list and 94 * doorbell all of the registered entries. 95 */ 96 struct ecore_db_recovery_entry { 97 osal_list_entry_t list_entry; 98 void OSAL_IOMEM *db_addr; 99 void *db_data; 100 enum ecore_db_rec_width db_width; 101 enum ecore_db_rec_space db_space; 102 u8 hwfn_idx; 103 }; 104 105 /* display a single doorbell recovery entry */ 106 static void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn, 107 struct ecore_db_recovery_entry *db_entry, 108 char *action) 109 { 110 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n", 111 action, db_entry, db_entry->db_addr, db_entry->db_data, 112 db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b", 113 db_entry->db_space == DB_REC_USER ? "user" : "kernel", 114 db_entry->hwfn_idx); 115 } 116 117 /* doorbell address sanity (address within doorbell bar range) */ 118 static bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr, 119 void *db_data) 120 { 121 /* make sure doorbell address is within the doorbell bar */ 122 if (db_addr < p_dev->doorbells || (u8 *)db_addr > 123 (u8 *)p_dev->doorbells + p_dev->db_size) { 124 OSAL_WARN(true, 125 "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", 126 db_addr, p_dev->doorbells, 127 (u8 *)p_dev->doorbells + p_dev->db_size); 128 return false; 129 } 130 131 /* make sure doorbell data pointer is not null */ 132 if (!db_data) { 133 OSAL_WARN(true, "Illegal doorbell data pointer: %p", db_data); 134 return false; 135 } 136 137 return true; 138 } 139 140 /* find hwfn according to the doorbell address */ 141 static struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev, 142 void OSAL_IOMEM *db_addr) 143 { 144 struct ecore_hwfn *p_hwfn; 145 146 /* in CMT doorbell bar is split down the middle between engine 0 and engine 1 */ 147 if (ECORE_IS_CMT(p_dev)) 148 p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
149 &p_dev->hwfns[0] : &p_dev->hwfns[1]; 150 else 151 p_hwfn = ECORE_LEADING_HWFN(p_dev); 152 153 return p_hwfn; 154 } 155 156 /* add a new entry to the doorbell recovery mechanism */ 157 enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev, 158 void OSAL_IOMEM *db_addr, 159 void *db_data, 160 enum ecore_db_rec_width db_width, 161 enum ecore_db_rec_space db_space) 162 { 163 struct ecore_db_recovery_entry *db_entry; 164 struct ecore_hwfn *p_hwfn; 165 166 /* shortcircuit VFs, for now */ 167 if (IS_VF(p_dev)) { 168 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n"); 169 return ECORE_SUCCESS; 170 } 171 172 /* sanitize doorbell address */ 173 if (!ecore_db_rec_sanity(p_dev, db_addr, db_data)) 174 return ECORE_INVAL; 175 176 /* obtain hwfn from doorbell address */ 177 p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr); 178 179 /* create entry */ 180 db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry)); 181 if (!db_entry) { 182 DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n"); 183 return ECORE_NOMEM; 184 } 185 186 /* populate entry */ 187 db_entry->db_addr = db_addr; 188 db_entry->db_data = db_data; 189 db_entry->db_width = db_width; 190 db_entry->db_space = db_space; 191 db_entry->hwfn_idx = p_hwfn->my_id; 192 193 /* display */ 194 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding"); 195 196 /* protect the list */ 197 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 198 OSAL_LIST_PUSH_TAIL(&db_entry->list_entry, 199 &p_hwfn->db_recovery_info.list); 200 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 201 202 return ECORE_SUCCESS; 203 } 204 205 /* remove an entry from the doorbell recovery mechanism */ 206 enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev, 207 void OSAL_IOMEM *db_addr, 208 void *db_data) 209 { 210 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 211 enum _ecore_status_t rc = ECORE_INVAL; 212 struct ecore_hwfn *p_hwfn; 213 214 /* shortcircuit VFs, for now */ 215 if (IS_VF(p_dev)) { 216 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n"); 217 return ECORE_SUCCESS; 218 } 219 220 /* sanitize doorbell address */ 221 if (!ecore_db_rec_sanity(p_dev, db_addr, db_data)) 222 return ECORE_INVAL; 223 224 /* obtain hwfn from doorbell address */ 225 p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr); 226 227 /* protect the list */ 228 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 229 OSAL_LIST_FOR_EACH_ENTRY(db_entry, 230 &p_hwfn->db_recovery_info.list, 231 list_entry, 232 struct ecore_db_recovery_entry) { 233 /* search according to db_data addr since db_addr is not unique (roce) */ 234 if (db_entry->db_data == db_data) { 235 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting"); 236 OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry, 237 &p_hwfn->db_recovery_info.list); 238 rc = ECORE_SUCCESS; 239 break; 240 } 241 } 242 243 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 244 245 if (rc == ECORE_INVAL) { 246 /*OSAL_WARN(true,*/ 247 DP_NOTICE(p_hwfn, false, 248 "Failed to find element in list. Key (db_data addr) was %p. 
db_addr was %p\n", 249 db_data, db_addr); 250 } else 251 OSAL_FREE(p_dev, db_entry); 252 253 return rc; 254 } 255 256 /* initialize the doorbell recovery mechanism */ 257 static enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn) 258 { 259 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n"); 260 261 /* make sure db_size was set in p_dev */ 262 if (!p_hwfn->p_dev->db_size) { 263 DP_ERR(p_hwfn->p_dev, "db_size not set\n"); 264 return ECORE_INVAL; 265 } 266 267 OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list); 268 #ifdef CONFIG_ECORE_LOCK_ALLOC 269 if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock)) 270 return ECORE_NOMEM; 271 #endif 272 OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock); 273 p_hwfn->db_recovery_info.db_recovery_counter = 0; 274 275 return ECORE_SUCCESS; 276 } 277 278 /* destroy the doorbell recovery mechanism */ 279 static void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn) 280 { 281 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 282 283 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n"); 284 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) { 285 DP_VERBOSE(p_hwfn, false, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n"); 286 while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) { 287 db_entry = OSAL_LIST_FIRST_ENTRY(&p_hwfn->db_recovery_info.list, 288 struct ecore_db_recovery_entry, 289 list_entry); 290 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging"); 291 OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry, 292 &p_hwfn->db_recovery_info.list); 293 OSAL_FREE(p_hwfn->p_dev, db_entry); 294 } 295 } 296 #ifdef CONFIG_ECORE_LOCK_ALLOC 297 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock); 298 #endif 299 p_hwfn->db_recovery_info.db_recovery_counter = 0; 300 } 301 302 /* print the content of the doorbell recovery mechanism */ 303 void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn) 304 { 305 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 306 307 DP_NOTICE(p_hwfn, false, 308 "Displaying doorbell recovery database. Counter was %d\n", 309 p_hwfn->db_recovery_info.db_recovery_counter); 310 311 /* protect the list */ 312 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 313 OSAL_LIST_FOR_EACH_ENTRY(db_entry, 314 &p_hwfn->db_recovery_info.list, 315 list_entry, 316 struct ecore_db_recovery_entry) { 317 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing"); 318 } 319 320 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 321 } 322 323 /* ring the doorbell of a single doorbell recovery entry */ 324 static void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn, 325 struct ecore_db_recovery_entry *db_entry, 326 enum ecore_db_rec_exec db_exec) 327 { 328 if (db_exec != DB_REC_ONCE) { 329 /* Print according to width */ 330 if (db_entry->db_width == DB_REC_WIDTH_32B) 331 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, 332 "%s doorbell address %p data %x\n", 333 db_exec == DB_REC_DRY_RUN ? 334 "would have rung" : "ringing", 335 db_entry->db_addr, 336 *(u32 *)db_entry->db_data); 337 else 338 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, 339 "%s doorbell address %p data %llx\n", 340 db_exec == DB_REC_DRY_RUN ?
341 "would have rung" : "ringing", 342 db_entry->db_addr, 343 (unsigned long long)*(u64 *)(db_entry->db_data)); 344 } 345 346 /* Sanity */ 347 if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr, 348 db_entry->db_data)) 349 return; 350 351 /* Flush the write combined buffer. Since there are multiple doorbelling 352 * entities using the same address, if we don't flush, a transaction 353 * could be lost. 354 */ 355 OSAL_WMB(p_hwfn->p_dev); 356 357 /* Ring the doorbell */ 358 if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) { 359 if (db_entry->db_width == DB_REC_WIDTH_32B) 360 DIRECT_REG_WR(p_hwfn, db_entry->db_addr, *(u32 *)(db_entry->db_data)); 361 else 362 DIRECT_REG_WR64(p_hwfn, db_entry->db_addr, *(u64 *)(db_entry->db_data)); 363 } 364 365 /* Flush the write combined buffer. Next doorbell may come from a 366 * different entity to the same address... 367 */ 368 OSAL_WMB(p_hwfn->p_dev); 369 } 370 371 /* traverse the doorbell recovery entry list and ring all the doorbells */ 372 void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn, 373 enum ecore_db_rec_exec db_exec) 374 { 375 struct ecore_db_recovery_entry *db_entry = OSAL_NULL; 376 377 if (db_exec != DB_REC_ONCE) { 378 DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n", 379 p_hwfn->db_recovery_info.db_recovery_counter); 380 381 /* track amount of times recovery was executed */ 382 p_hwfn->db_recovery_info.db_recovery_counter++; 383 } 384 385 /* protect the list */ 386 OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); 387 OSAL_LIST_FOR_EACH_ENTRY(db_entry, 388 &p_hwfn->db_recovery_info.list, 389 list_entry, 390 struct ecore_db_recovery_entry) { 391 ecore_db_recovery_ring(p_hwfn, db_entry, db_exec); 392 if (db_exec == DB_REC_ONCE) 393 break; 394 } 395 396 OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); 397 } 398 /******************** Doorbell Recovery end ****************/ 399 400 /********************************** NIG LLH ***********************************/ 401 402 enum ecore_llh_filter_type { 403 ECORE_LLH_FILTER_TYPE_MAC, 404 ECORE_LLH_FILTER_TYPE_PROTOCOL, 405 }; 406 407 struct ecore_llh_mac_filter { 408 u8 addr[ETH_ALEN]; 409 }; 410 411 struct ecore_llh_protocol_filter { 412 enum ecore_llh_prot_filter_type_t type; 413 u16 source_port_or_eth_type; 414 u16 dest_port; 415 }; 416 417 union ecore_llh_filter { 418 struct ecore_llh_mac_filter mac; 419 struct ecore_llh_protocol_filter protocol; 420 }; 421 422 struct ecore_llh_filter_info { 423 bool b_enabled; 424 u32 ref_cnt; 425 enum ecore_llh_filter_type type; 426 union ecore_llh_filter filter; 427 }; 428 429 struct ecore_llh_info { 430 /* Number of LLH filters banks */ 431 u8 num_ppfid; 432 433 #define MAX_NUM_PPFID 8 434 u8 ppfid_array[MAX_NUM_PPFID]; 435 436 /* Array of filters arrays: 437 * "num_ppfid" elements of filters banks, where each is an array of 438 * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters. 
439 */ 440 struct ecore_llh_filter_info **pp_filters; 441 }; 442 443 static void ecore_llh_free(struct ecore_dev *p_dev) 444 { 445 struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; 446 u32 i; 447 448 if (p_llh_info != OSAL_NULL) { 449 if (p_llh_info->pp_filters != OSAL_NULL) { 450 for (i = 0; i < p_llh_info->num_ppfid; i++) 451 OSAL_FREE(p_dev, p_llh_info->pp_filters[i]); 452 } 453 454 OSAL_FREE(p_dev, p_llh_info->pp_filters); 455 } 456 457 OSAL_FREE(p_dev, p_llh_info); 458 p_dev->p_llh_info = OSAL_NULL; 459 } 460 461 static enum _ecore_status_t ecore_llh_alloc(struct ecore_dev *p_dev) 462 { 463 struct ecore_llh_info *p_llh_info; 464 u32 size; u8 i; 465 466 p_llh_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_llh_info)); 467 if (!p_llh_info) 468 return ECORE_NOMEM; 469 p_dev->p_llh_info = p_llh_info; 470 471 for (i = 0; i < MAX_NUM_PPFID; i++) { 472 if (!(p_dev->ppfid_bitmap & (0x1 << i))) 473 continue; 474 475 p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i; 476 DP_VERBOSE(p_dev, ECORE_MSG_SP, "ppfid_array[%d] = %hhd\n", 477 p_llh_info->num_ppfid, i); 478 p_llh_info->num_ppfid++; 479 } 480 481 size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters); 482 p_llh_info->pp_filters = OSAL_ZALLOC(p_dev, GFP_KERNEL, size); 483 if (!p_llh_info->pp_filters) 484 return ECORE_NOMEM; 485 486 size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE * 487 sizeof(**p_llh_info->pp_filters); 488 for (i = 0; i < p_llh_info->num_ppfid; i++) { 489 p_llh_info->pp_filters[i] = OSAL_ZALLOC(p_dev, GFP_KERNEL, 490 size); 491 if (!p_llh_info->pp_filters[i]) 492 return ECORE_NOMEM; 493 } 494 495 return ECORE_SUCCESS; 496 } 497 498 static enum _ecore_status_t ecore_llh_shadow_sanity(struct ecore_dev *p_dev, 499 u8 ppfid, u8 filter_idx, 500 const char *action) 501 { 502 struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; 503 504 if (ppfid >= p_llh_info->num_ppfid) { 505 DP_NOTICE(p_dev, false, 506 "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n", 507 action, ppfid, p_llh_info->num_ppfid); 508 return ECORE_INVAL; 509 } 510 511 if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { 512 DP_NOTICE(p_dev, false, 513 "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n", 514 action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE); 515 return ECORE_INVAL; 516 } 517 518 return ECORE_SUCCESS; 519 } 520 521 #define ECORE_LLH_INVALID_FILTER_IDX 0xff 522 523 static enum _ecore_status_t 524 ecore_llh_shadow_search_filter(struct ecore_dev *p_dev, u8 ppfid, 525 union ecore_llh_filter *p_filter, 526 u8 *p_filter_idx) 527 { 528 struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; 529 struct ecore_llh_filter_info *p_filters; 530 enum _ecore_status_t rc; 531 u8 i; 532 533 rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "search"); 534 if (rc != ECORE_SUCCESS) 535 return rc; 536 537 *p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX; 538 539 p_filters = p_llh_info->pp_filters[ppfid]; 540 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 541 if (!OSAL_MEMCMP(p_filter, &p_filters[i].filter, 542 sizeof(*p_filter))) { 543 *p_filter_idx = i; 544 break; 545 } 546 } 547 548 return ECORE_SUCCESS; 549 } 550 551 static enum _ecore_status_t 552 ecore_llh_shadow_get_free_idx(struct ecore_dev *p_dev, u8 ppfid, 553 u8 *p_filter_idx) 554 { 555 struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; 556 struct ecore_llh_filter_info *p_filters; 557 enum _ecore_status_t rc; 558 u8 i; 559 560 rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "get_free_idx"); 561 if (rc != ECORE_SUCCESS) 562 return rc; 563 564 
*p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX; 565 566 p_filters = p_llh_info->pp_filters[ppfid]; 567 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 568 if (!p_filters[i].b_enabled) { 569 *p_filter_idx = i; 570 break; 571 } 572 } 573 574 return ECORE_SUCCESS; 575 } 576 577 static enum _ecore_status_t 578 __ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid, u8 filter_idx, 579 enum ecore_llh_filter_type type, 580 union ecore_llh_filter *p_filter, u32 *p_ref_cnt) 581 { 582 struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; 583 struct ecore_llh_filter_info *p_filters; 584 enum _ecore_status_t rc; 585 586 rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "add"); 587 if (rc != ECORE_SUCCESS) 588 return rc; 589 590 p_filters = p_llh_info->pp_filters[ppfid]; 591 if (!p_filters[filter_idx].ref_cnt) { 592 p_filters[filter_idx].b_enabled = true; 593 p_filters[filter_idx].type = type; 594 OSAL_MEMCPY(&p_filters[filter_idx].filter, p_filter, 595 sizeof(p_filters[filter_idx].filter)); 596 } 597 598 *p_ref_cnt = ++p_filters[filter_idx].ref_cnt; 599 600 return ECORE_SUCCESS; 601 } 602 603 static enum _ecore_status_t 604 ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid, 605 enum ecore_llh_filter_type type, 606 union ecore_llh_filter *p_filter, 607 u8 *p_filter_idx, u32 *p_ref_cnt) 608 { 609 enum _ecore_status_t rc; 610 611 /* Check if the same filter already exist */ 612 rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter, 613 p_filter_idx); 614 if (rc != ECORE_SUCCESS) 615 return rc; 616 617 /* Find a new entry in case of a new filter */ 618 if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) { 619 rc = ecore_llh_shadow_get_free_idx(p_dev, ppfid, p_filter_idx); 620 if (rc != ECORE_SUCCESS) 621 return rc; 622 } 623 624 /* No free entry was found */ 625 if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) { 626 DP_NOTICE(p_dev, false, 627 "Failed to find an empty LLH filter to utilize [ppfid %d]\n", 628 ppfid); 629 return ECORE_NORESOURCES; 630 } 631 632 return __ecore_llh_shadow_add_filter(p_dev, ppfid, *p_filter_idx, type, 633 p_filter, p_ref_cnt); 634 } 635 636 static enum _ecore_status_t 637 __ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid, 638 u8 filter_idx, u32 *p_ref_cnt) 639 { 640 struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; 641 struct ecore_llh_filter_info *p_filters; 642 enum _ecore_status_t rc; 643 644 rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "remove"); 645 if (rc != ECORE_SUCCESS) 646 return rc; 647 648 p_filters = p_llh_info->pp_filters[ppfid]; 649 if (!p_filters[filter_idx].ref_cnt) { 650 DP_NOTICE(p_dev, false, 651 "LLH shadow: trying to remove a filter with ref_cnt=0\n"); 652 return ECORE_INVAL; 653 } 654 655 *p_ref_cnt = --p_filters[filter_idx].ref_cnt; 656 if (!p_filters[filter_idx].ref_cnt) 657 OSAL_MEM_ZERO(&p_filters[filter_idx], 658 sizeof(p_filters[filter_idx])); 659 660 return ECORE_SUCCESS; 661 } 662 663 static enum _ecore_status_t 664 ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid, 665 union ecore_llh_filter *p_filter, 666 u8 *p_filter_idx, u32 *p_ref_cnt) 667 { 668 enum _ecore_status_t rc; 669 670 rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter, 671 p_filter_idx); 672 if (rc != ECORE_SUCCESS) 673 return rc; 674 675 /* No matching filter was found */ 676 if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) { 677 DP_NOTICE(p_dev, false, 678 "Failed to find a filter in the LLH shadow\n"); 679 return ECORE_INVAL; 680 } 681 682 return 
__ecore_llh_shadow_remove_filter(p_dev, ppfid, *p_filter_idx, 683 p_ref_cnt); 684 } 685 686 static enum _ecore_status_t 687 ecore_llh_shadow_remove_all_filters(struct ecore_dev *p_dev, u8 ppfid) 688 { 689 struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; 690 struct ecore_llh_filter_info *p_filters; 691 enum _ecore_status_t rc; 692 693 rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "remove_all"); 694 if (rc != ECORE_SUCCESS) 695 return rc; 696 697 p_filters = p_llh_info->pp_filters[ppfid]; 698 OSAL_MEM_ZERO(p_filters, 699 NIG_REG_LLH_FUNC_FILTER_EN_SIZE * sizeof(*p_filters)); 700 701 return ECORE_SUCCESS; 702 } 703 704 static enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev, 705 u8 rel_ppfid, u8 *p_abs_ppfid) 706 { 707 struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; 708 709 if (rel_ppfid >= p_llh_info->num_ppfid) { 710 DP_NOTICE(p_dev, false, 711 "rel_ppfid %d is not valid, available indices are 0..%hhd\n", 712 rel_ppfid, (u8)(p_llh_info->num_ppfid - 1)); 713 return ECORE_INVAL; 714 } 715 716 *p_abs_ppfid = p_llh_info->ppfid_array[rel_ppfid]; 717 718 return ECORE_SUCCESS; 719 } 720 721 static enum _ecore_status_t 722 __ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 723 { 724 struct ecore_dev *p_dev = p_hwfn->p_dev; 725 enum ecore_eng eng; 726 u8 ppfid; 727 enum _ecore_status_t rc; 728 729 rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt); 730 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 731 DP_NOTICE(p_hwfn, false, 732 "Failed to get the engine affinity configuration\n"); 733 return rc; 734 } 735 736 /* RoCE PF is bound to a single engine */ 737 if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) { 738 eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0; 739 rc = ecore_llh_set_roce_affinity(p_dev, eng); 740 if (rc != ECORE_SUCCESS) { 741 DP_NOTICE(p_dev, false, 742 "Failed to set the RoCE engine affinity\n"); 743 return rc; 744 } 745 746 DP_VERBOSE(p_dev, ECORE_MSG_SP, 747 "LLH: Set the engine affinity of RoCE packets as %d\n", 748 eng); 749 } 750 751 /* Storage PF is bound to a single engine while L2 PF uses both */ 752 if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) || 753 ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) 754 eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0; 755 else /* L2_PERSONALITY */ 756 eng = ECORE_BOTH_ENG; 757 758 for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { 759 rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng); 760 if (rc != ECORE_SUCCESS) { 761 DP_NOTICE(p_dev, false, 762 "Failed to set the engine affinity of ppfid %d\n", 763 ppfid); 764 return rc; 765 } 766 } 767 768 DP_VERBOSE(p_dev, ECORE_MSG_SP, 769 "LLH: Set the engine affinity of non-RoCE packets as %d\n", 770 eng); 771 772 return ECORE_SUCCESS; 773 } 774 775 static enum _ecore_status_t 776 ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 777 bool avoid_eng_affin) 778 { 779 struct ecore_dev *p_dev = p_hwfn->p_dev; 780 enum _ecore_status_t rc; 781 782 /* Backwards compatible mode: 783 * - RoCE packets - Use engine 0. 784 * - Non-RoCE packets - Use connection based classification for L2 PFs, 785 * and engine 0 otherwise. 
786 */ 787 if (avoid_eng_affin) { 788 enum ecore_eng eng; 789 u8 ppfid; 790 791 if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) { 792 eng = ECORE_ENG0; 793 rc = ecore_llh_set_roce_affinity(p_dev, eng); 794 if (rc != ECORE_SUCCESS) { 795 DP_NOTICE(p_dev, false, 796 "Failed to set the RoCE engine affinity\n"); 797 return rc; 798 } 799 800 DP_VERBOSE(p_dev, ECORE_MSG_SP, 801 "LLH [backwards compatible mode]: Set the engine affinity of RoCE packets as %d\n", 802 eng); 803 } 804 805 eng = (ECORE_IS_FCOE_PERSONALITY(p_hwfn) || 806 ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) ? ECORE_ENG0 807 : ECORE_BOTH_ENG; 808 for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { 809 rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng); 810 if (rc != ECORE_SUCCESS) { 811 DP_NOTICE(p_dev, false, 812 "Failed to set the engine affinity of ppfid %d\n", 813 ppfid); 814 return rc; 815 } 816 } 817 818 DP_VERBOSE(p_dev, ECORE_MSG_SP, 819 "LLH [backwards compatible mode]: Set the engine affinity of non-RoCE packets as %d\n", 820 eng); 821 822 return ECORE_SUCCESS; 823 } 824 825 return __ecore_llh_set_engine_affin(p_hwfn, p_ptt); 826 } 827 828 static enum _ecore_status_t ecore_llh_hw_init_pf(struct ecore_hwfn *p_hwfn, 829 struct ecore_ptt *p_ptt, 830 bool avoid_eng_affin) 831 { 832 struct ecore_dev *p_dev = p_hwfn->p_dev; 833 u8 ppfid, abs_ppfid; 834 enum _ecore_status_t rc; 835 836 for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { 837 u32 addr; 838 839 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); 840 if (rc != ECORE_SUCCESS) 841 return rc; 842 843 addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4; 844 ecore_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id); 845 } 846 847 if (OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) && 848 !ECORE_IS_FCOE_PERSONALITY(p_hwfn)) { 849 rc = ecore_llh_add_mac_filter(p_dev, 0, 850 p_hwfn->hw_info.hw_mac_addr); 851 if (rc != ECORE_SUCCESS) 852 DP_NOTICE(p_dev, false, 853 "Failed to add an LLH filter with the primary MAC\n"); 854 } 855 856 if (ECORE_IS_CMT(p_dev)) { 857 rc = ecore_llh_set_engine_affin(p_hwfn, p_ptt, avoid_eng_affin); 858 if (rc != ECORE_SUCCESS) 859 return rc; 860 } 861 862 return ECORE_SUCCESS; 863 } 864 865 u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev) 866 { 867 return p_dev->p_llh_info->num_ppfid; 868 } 869 870 enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev) 871 { 872 return p_dev->l2_affin_hint ? 
ECORE_ENG1 : ECORE_ENG0; 873 } 874 875 /* TBD - should be removed when these definitions are available in reg_addr.h */ 876 #define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3 877 #define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0 878 #define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3 879 #define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2 880 881 enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev, 882 u8 ppfid, enum ecore_eng eng) 883 { 884 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 885 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 886 u32 addr, val, eng_sel; 887 enum _ecore_status_t rc = ECORE_SUCCESS; 888 u8 abs_ppfid; 889 890 if (p_ptt == OSAL_NULL) 891 return ECORE_AGAIN; 892 893 if (!ECORE_IS_CMT(p_dev)) 894 goto out; 895 896 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); 897 if (rc != ECORE_SUCCESS) 898 goto out; 899 900 switch (eng) { 901 case ECORE_ENG0: 902 eng_sel = 0; 903 break; 904 case ECORE_ENG1: 905 eng_sel = 1; 906 break; 907 case ECORE_BOTH_ENG: 908 eng_sel = 2; 909 break; 910 default: 911 DP_NOTICE(p_dev, false, 912 "Invalid affinity value for ppfid [%d]\n", eng); 913 rc = ECORE_INVAL; 914 goto out; 915 } 916 917 addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4; 918 val = ecore_rd(p_hwfn, p_ptt, addr); 919 SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel); 920 ecore_wr(p_hwfn, p_ptt, addr, val); 921 922 /* The iWARP affinity is set as the affinity of ppfid 0 */ 923 if (!ppfid && ECORE_IS_IWARP_PERSONALITY(p_hwfn)) 924 p_dev->iwarp_affin = (eng == ECORE_ENG1) ? 1 : 0; 925 out: 926 ecore_ptt_release(p_hwfn, p_ptt); 927 928 return rc; 929 } 930 931 enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev, 932 enum ecore_eng eng) 933 { 934 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 935 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 936 u32 addr, val, eng_sel; 937 enum _ecore_status_t rc = ECORE_SUCCESS; 938 u8 ppfid, abs_ppfid; 939 940 if (p_ptt == OSAL_NULL) 941 return ECORE_AGAIN; 942 943 if (!ECORE_IS_CMT(p_dev)) 944 goto out; 945 946 switch (eng) { 947 case ECORE_ENG0: 948 eng_sel = 0; 949 break; 950 case ECORE_ENG1: 951 eng_sel = 1; 952 break; 953 case ECORE_BOTH_ENG: 954 eng_sel = 2; 955 ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL, 956 0xf /* QP bit 15 */); 957 break; 958 default: 959 DP_NOTICE(p_dev, false, 960 "Invalid affinity value for RoCE [%d]\n", eng); 961 rc = ECORE_INVAL; 962 goto out; 963 } 964 965 for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { 966 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); 967 if (rc != ECORE_SUCCESS) 968 goto out; 969 970 addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4; 971 val = ecore_rd(p_hwfn, p_ptt, addr); 972 SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel); 973 ecore_wr(p_hwfn, p_ptt, addr, val); 974 } 975 out: 976 ecore_ptt_release(p_hwfn, p_ptt); 977 978 return rc; 979 } 980 981 struct ecore_llh_filter_e4_details { 982 u64 value; 983 u32 mode; 984 u32 protocol_type; 985 u32 hdr_sel; 986 u32 enable; 987 }; 988 989 static enum _ecore_status_t 990 ecore_llh_access_filter_e4(struct ecore_hwfn *p_hwfn, 991 struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx, 992 struct ecore_llh_filter_e4_details *p_details, 993 bool b_write_access) 994 { 995 u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid); 996 struct ecore_dmae_params params; 997 enum _ecore_status_t rc; 998 u32 addr; 999 1000 /* The NIG/LLH registers that are accessed in this function have only 16 1001 * rows which are exposed to a PF. I.e. 
only the 16 filters of its 1002 * default ppfid 1003 * Accessing filters of other ppfids requires pretending to other PFs, 1004 * and thus the usage of the ecore_ppfid_rd/wr() functions. 1005 */ 1006 1007 /* Filter enable - should be done first when removing a filter */ 1008 if (b_write_access && !p_details->enable) { 1009 addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4; 1010 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, 1011 p_details->enable); 1012 } 1013 1014 /* Filter value */ 1015 addr = NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 2 * filter_idx * 0x4; 1016 OSAL_MEMSET(&params, 0, sizeof(params)); 1017 1018 if (b_write_access) { 1019 params.flags = ECORE_DMAE_FLAG_PF_DST; 1020 params.dst_pfid = pfid; 1021 rc = ecore_dmae_host2grc(p_hwfn, p_ptt, 1022 (u64)(osal_uintptr_t)&p_details->value, 1023 addr, 2 /* size_in_dwords */, &params); 1024 } else { 1025 params.flags = ECORE_DMAE_FLAG_PF_SRC | 1026 ECORE_DMAE_FLAG_COMPLETION_DST; 1027 params.src_pfid = pfid; 1028 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, addr, 1029 (u64)(osal_uintptr_t)&p_details->value, 1030 2 /* size_in_dwords */, &params); 1031 } 1032 1033 if (rc != ECORE_SUCCESS) 1034 return rc; 1035 1036 /* Filter mode */ 1037 addr = NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + filter_idx * 0x4; 1038 if (b_write_access) 1039 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, p_details->mode); 1040 else 1041 p_details->mode = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid, 1042 addr); 1043 1044 /* Filter protocol type */ 1045 addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + filter_idx * 0x4; 1046 if (b_write_access) 1047 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, 1048 p_details->protocol_type); 1049 else 1050 p_details->protocol_type = ecore_ppfid_rd(p_hwfn, p_ptt, 1051 abs_ppfid, addr); 1052 1053 /* Filter header select */ 1054 addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL_BB_K2 + filter_idx * 0x4; 1055 if (b_write_access) 1056 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, 1057 p_details->hdr_sel); 1058 else 1059 p_details->hdr_sel = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid, 1060 addr); 1061 1062 /* Filter enable - should be done last when adding a filter */ 1063 if (!b_write_access || p_details->enable) { 1064 addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4; 1065 if (b_write_access) 1066 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, 1067 p_details->enable); 1068 else 1069 p_details->enable = ecore_ppfid_rd(p_hwfn, p_ptt, 1070 abs_ppfid, addr); 1071 } 1072 1073 return ECORE_SUCCESS; 1074 } 1075 1076 static enum _ecore_status_t 1077 ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1078 u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, 1079 u32 high, u32 low) 1080 { 1081 struct ecore_llh_filter_e4_details filter_details; 1082 1083 filter_details.enable = 1; 1084 filter_details.value = ((u64)high << 32) | low; 1085 filter_details.hdr_sel = 0; 1086 filter_details.protocol_type = filter_prot_type; 1087 filter_details.mode = filter_prot_type ?
1088 1 : /* protocol-based classification */ 1089 0; /* MAC-address based classification */ 1090 1091 return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx, 1092 &filter_details, 1093 true /* write access */); 1094 } 1095 1096 static enum _ecore_status_t 1097 ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn, 1098 struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx) 1099 { 1100 struct ecore_llh_filter_e4_details filter_details; 1101 1102 OSAL_MEMSET(&filter_details, 0, sizeof(filter_details)); 1103 1104 return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx, 1105 &filter_details, 1106 true /* write access */); 1107 } 1108 1109 /* OSAL_UNUSED is temporarily used to avoid unused-parameter compilation warnings. 1110 * Should be removed when the function is implemented. 1111 */ 1112 static enum _ecore_status_t 1113 ecore_llh_add_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn, 1114 struct ecore_ptt OSAL_UNUSED *p_ptt, 1115 u8 OSAL_UNUSED abs_ppfid, u8 OSAL_UNUSED filter_idx, 1116 u8 OSAL_UNUSED filter_prot_type, u32 OSAL_UNUSED high, 1117 u32 OSAL_UNUSED low) 1118 { 1119 ECORE_E5_MISSING_CODE; 1120 1121 return ECORE_NOTIMPL; 1122 } 1123 1124 /* OSAL_UNUSED is temporarily used to avoid unused-parameter compilation warnings. 1125 * Should be removed when the function is implemented. 1126 */ 1127 static enum _ecore_status_t 1128 ecore_llh_remove_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn, 1129 struct ecore_ptt OSAL_UNUSED *p_ptt, 1130 u8 OSAL_UNUSED abs_ppfid, 1131 u8 OSAL_UNUSED filter_idx) 1132 { 1133 ECORE_E5_MISSING_CODE; 1134 1135 return ECORE_NOTIMPL; 1136 } 1137 1138 static enum _ecore_status_t 1139 ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1140 u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high, 1141 u32 low) 1142 { 1143 if (ECORE_IS_E4(p_hwfn->p_dev)) 1144 return ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid, 1145 filter_idx, filter_prot_type, 1146 high, low); 1147 else /* E5 */ 1148 return ecore_llh_add_filter_e5(p_hwfn, p_ptt, abs_ppfid, 1149 filter_idx, filter_prot_type, 1150 high, low); 1151 } 1152 1153 static enum _ecore_status_t 1154 ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1155 u8 abs_ppfid, u8 filter_idx) 1156 { 1157 if (ECORE_IS_E4(p_hwfn->p_dev)) 1158 return ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid, 1159 filter_idx); 1160 else /* E5 */ 1161 return ecore_llh_remove_filter_e5(p_hwfn, p_ptt, abs_ppfid, 1162 filter_idx); 1163 } 1164 1165 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid, 1166 u8 mac_addr[ETH_ALEN]) 1167 { 1168 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 1169 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 1170 union ecore_llh_filter filter; 1171 u8 filter_idx, abs_ppfid; 1172 u32 high, low, ref_cnt; 1173 enum _ecore_status_t rc = ECORE_SUCCESS; 1174 1175 if (p_ptt == OSAL_NULL) 1176 return ECORE_AGAIN; 1177 1178 if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits)) 1179 goto out; 1180 1181 OSAL_MEM_ZERO(&filter, sizeof(filter)); 1182 OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN); 1183 rc = ecore_llh_shadow_add_filter(p_dev, ppfid, 1184 ECORE_LLH_FILTER_TYPE_MAC, 1185 &filter, &filter_idx, &ref_cnt); 1186 if (rc != ECORE_SUCCESS) 1187 goto err; 1188 1189 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); 1190 if (rc != ECORE_SUCCESS) 1191 goto err; 1192 1193 /* Configure the LLH only in case of a new filter */ 1194 if (ref_cnt == 1) { 1195 high = mac_addr[1] | (mac_addr[0] << 8); 1196
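		/* The 6-byte MAC is split across the two 32-bit halves of the
		 * filter value: bytes 0-1 were packed into 'high' above, and
		 * bytes 2-5 are packed into 'low' below, most significant
		 * byte first in each word.
		 */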
low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) | 1197 (mac_addr[2] << 24); 1198 rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, 1199 0, high, low); 1200 if (rc != ECORE_SUCCESS) 1201 goto err; 1202 } 1203 1204 DP_VERBOSE(p_dev, ECORE_MSG_SP, 1205 "LLH: Added MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", 1206 mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], 1207 mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx, 1208 ref_cnt); 1209 1210 goto out; 1211 1212 err: 1213 DP_NOTICE(p_dev, false, 1214 "LLH: Failed to add MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd\n", 1215 mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], 1216 mac_addr[4], mac_addr[5], ppfid); 1217 out: 1218 ecore_ptt_release(p_hwfn, p_ptt); 1219 1220 return rc; 1221 } 1222 1223 static enum _ecore_status_t 1224 ecore_llh_protocol_filter_stringify(struct ecore_dev *p_dev, 1225 enum ecore_llh_prot_filter_type_t type, 1226 u16 source_port_or_eth_type, u16 dest_port, 1227 u8 *str, osal_size_t str_len) 1228 { 1229 switch (type) { 1230 case ECORE_LLH_FILTER_ETHERTYPE: 1231 OSAL_SNPRINTF(str, str_len, "Ethertype 0x%04x", 1232 source_port_or_eth_type); 1233 break; 1234 case ECORE_LLH_FILTER_TCP_SRC_PORT: 1235 OSAL_SNPRINTF(str, str_len, "TCP src port 0x%04x", 1236 source_port_or_eth_type); 1237 break; 1238 case ECORE_LLH_FILTER_UDP_SRC_PORT: 1239 OSAL_SNPRINTF(str, str_len, "UDP src port 0x%04x", 1240 source_port_or_eth_type); 1241 break; 1242 case ECORE_LLH_FILTER_TCP_DEST_PORT: 1243 OSAL_SNPRINTF(str, str_len, "TCP dst port 0x%04x", dest_port); 1244 break; 1245 case ECORE_LLH_FILTER_UDP_DEST_PORT: 1246 OSAL_SNPRINTF(str, str_len, "UDP dst port 0x%04x", dest_port); 1247 break; 1248 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 1249 OSAL_SNPRINTF(str, str_len, "TCP src/dst ports 0x%04x/0x%04x", 1250 source_port_or_eth_type, dest_port); 1251 break; 1252 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 1253 OSAL_SNPRINTF(str, str_len, "UDP src/dst ports 0x%04x/0x%04x", 1254 source_port_or_eth_type, dest_port); 1255 break; 1256 default: 1257 DP_NOTICE(p_dev, true, 1258 "Non valid LLH protocol filter type %d\n", type); 1259 return ECORE_INVAL; 1260 } 1261 1262 return ECORE_SUCCESS; 1263 } 1264 1265 static enum _ecore_status_t 1266 ecore_llh_protocol_filter_to_hilo(struct ecore_dev *p_dev, 1267 enum ecore_llh_prot_filter_type_t type, 1268 u16 source_port_or_eth_type, u16 dest_port, 1269 u32 *p_high, u32 *p_low) 1270 { 1271 *p_high = 0; 1272 *p_low = 0; 1273 1274 switch (type) { 1275 case ECORE_LLH_FILTER_ETHERTYPE: 1276 *p_high = source_port_or_eth_type; 1277 break; 1278 case ECORE_LLH_FILTER_TCP_SRC_PORT: 1279 case ECORE_LLH_FILTER_UDP_SRC_PORT: 1280 *p_low = source_port_or_eth_type << 16; 1281 break; 1282 case ECORE_LLH_FILTER_TCP_DEST_PORT: 1283 case ECORE_LLH_FILTER_UDP_DEST_PORT: 1284 *p_low = dest_port; 1285 break; 1286 case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 1287 case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 1288 *p_low = (source_port_or_eth_type << 16) | dest_port; 1289 break; 1290 default: 1291 DP_NOTICE(p_dev, true, 1292 "Non valid LLH protocol filter type %d\n", type); 1293 return ECORE_INVAL; 1294 } 1295 1296 return ECORE_SUCCESS; 1297 } 1298 1299 enum _ecore_status_t 1300 ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid, 1301 enum ecore_llh_prot_filter_type_t type, 1302 u16 source_port_or_eth_type, u16 dest_port) 1303 { 1304 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 1305 struct 
ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 1306 u8 filter_idx, abs_ppfid, str[32], type_bitmap; 1307 union ecore_llh_filter filter; 1308 u32 high, low, ref_cnt; 1309 enum _ecore_status_t rc = ECORE_SUCCESS; 1310 1311 if (p_ptt == OSAL_NULL) 1312 return ECORE_AGAIN; 1313 1314 if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits)) 1315 goto out; 1316 1317 rc = ecore_llh_protocol_filter_stringify(p_dev, type, 1318 source_port_or_eth_type, 1319 dest_port, str, sizeof(str)); 1320 if (rc != ECORE_SUCCESS) 1321 goto err; 1322 1323 OSAL_MEM_ZERO(&filter, sizeof(filter)); 1324 filter.protocol.type = type; 1325 filter.protocol.source_port_or_eth_type = source_port_or_eth_type; 1326 filter.protocol.dest_port = dest_port; 1327 rc = ecore_llh_shadow_add_filter(p_dev, ppfid, 1328 ECORE_LLH_FILTER_TYPE_PROTOCOL, 1329 &filter, &filter_idx, &ref_cnt); 1330 if (rc != ECORE_SUCCESS) 1331 goto err; 1332 1333 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); 1334 if (rc != ECORE_SUCCESS) 1335 goto err; 1336 1337 /* Configure the LLH only in case of a new filter */ 1338 if (ref_cnt == 1) { 1339 rc = ecore_llh_protocol_filter_to_hilo(p_dev, type, 1340 source_port_or_eth_type, 1341 dest_port, &high, &low); 1342 if (rc != ECORE_SUCCESS) 1343 goto err; 1344 1345 type_bitmap = 0x1 << type; 1346 rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, 1347 type_bitmap, high, low); 1348 if (rc != ECORE_SUCCESS) 1349 goto err; 1350 } 1351 1352 DP_VERBOSE(p_dev, ECORE_MSG_SP, 1353 "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", 1354 str, ppfid, abs_ppfid, filter_idx, ref_cnt); 1355 1356 goto out; 1357 1358 err: 1359 DP_NOTICE(p_hwfn, false, 1360 "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n", 1361 str, ppfid); 1362 out: 1363 ecore_ptt_release(p_hwfn, p_ptt); 1364 1365 return rc; 1366 } 1367 1368 void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid, 1369 u8 mac_addr[ETH_ALEN]) 1370 { 1371 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 1372 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 1373 union ecore_llh_filter filter; 1374 u8 filter_idx, abs_ppfid; 1375 enum _ecore_status_t rc = ECORE_SUCCESS; 1376 u32 ref_cnt; 1377 1378 if (p_ptt == OSAL_NULL) 1379 return; 1380 1381 if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits)) 1382 goto out; 1383 1384 OSAL_MEM_ZERO(&filter, sizeof(filter)); 1385 OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN); 1386 rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx, 1387 &ref_cnt); 1388 if (rc != ECORE_SUCCESS) 1389 goto err; 1390 1391 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); 1392 if (rc != ECORE_SUCCESS) 1393 goto err; 1394 1395 /* Remove from the LLH in case the filter is not in use */ 1396 if (!ref_cnt) { 1397 rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid, 1398 filter_idx); 1399 if (rc != ECORE_SUCCESS) 1400 goto err; 1401 } 1402 1403 DP_VERBOSE(p_dev, ECORE_MSG_SP, 1404 "LLH: Removed MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", 1405 mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], 1406 mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx, 1407 ref_cnt); 1408 1409 goto out; 1410 1411 err: 1412 DP_NOTICE(p_dev, false, 1413 "LLH: Failed to remove MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd\n", 1414 mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], 1415 mac_addr[4], mac_addr[5], ppfid); 1416 out: 1417 ecore_ptt_release(p_hwfn, p_ptt); 1418 } 1419 1420 void
ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid, 1421 enum ecore_llh_prot_filter_type_t type, 1422 u16 source_port_or_eth_type, 1423 u16 dest_port) 1424 { 1425 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 1426 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 1427 u8 filter_idx, abs_ppfid, str[32]; 1428 union ecore_llh_filter filter; 1429 enum _ecore_status_t rc = ECORE_SUCCESS; 1430 u32 ref_cnt; 1431 1432 if (p_ptt == OSAL_NULL) 1433 return; 1434 1435 if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits)) 1436 goto out; 1437 1438 rc = ecore_llh_protocol_filter_stringify(p_dev, type, 1439 source_port_or_eth_type, 1440 dest_port, str, sizeof(str)); 1441 if (rc != ECORE_SUCCESS) 1442 goto err; 1443 1444 OSAL_MEM_ZERO(&filter, sizeof(filter)); 1445 filter.protocol.type = type; 1446 filter.protocol.source_port_or_eth_type = source_port_or_eth_type; 1447 filter.protocol.dest_port = dest_port; 1448 rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx, 1449 &ref_cnt); 1450 if (rc != ECORE_SUCCESS) 1451 goto err; 1452 1453 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); 1454 if (rc != ECORE_SUCCESS) 1455 goto err; 1456 1457 /* Remove from the LLH in case the filter is not in use */ 1458 if (!ref_cnt) { 1459 rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid, 1460 filter_idx); 1461 if (rc != ECORE_SUCCESS) 1462 goto err; 1463 } 1464 1465 DP_VERBOSE(p_dev, ECORE_MSG_SP, 1466 "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", 1467 str, ppfid, abs_ppfid, filter_idx, ref_cnt); 1468 1469 goto out; 1470 1471 err: 1472 DP_NOTICE(p_dev, false, 1473 "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n", 1474 str, ppfid); 1475 out: 1476 ecore_ptt_release(p_hwfn, p_ptt); 1477 } 1478 1479 void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid) 1480 { 1481 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 1482 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 1483 u8 filter_idx, abs_ppfid; 1484 enum _ecore_status_t rc = ECORE_SUCCESS; 1485 1486 if (p_ptt == OSAL_NULL) 1487 return; 1488 1489 if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) && 1490 !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits)) 1491 goto out; 1492 1493 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); 1494 if (rc != ECORE_SUCCESS) 1495 goto out; 1496 1497 rc = ecore_llh_shadow_remove_all_filters(p_dev, ppfid); 1498 if (rc != ECORE_SUCCESS) 1499 goto out; 1500 1501 for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; 1502 filter_idx++) { 1503 if (ECORE_IS_E4(p_dev)) 1504 rc = ecore_llh_remove_filter_e4(p_hwfn, p_ptt, 1505 abs_ppfid, filter_idx); 1506 else /* E5 */ 1507 rc = ecore_llh_remove_filter_e5(p_hwfn, p_ptt, 1508 abs_ppfid, filter_idx); 1509 if (rc != ECORE_SUCCESS) 1510 goto out; 1511 } 1512 out: 1513 ecore_ptt_release(p_hwfn, p_ptt); 1514 } 1515 1516 void ecore_llh_clear_all_filters(struct ecore_dev *p_dev) 1517 { 1518 u8 ppfid; 1519 1520 if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) && 1521 !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits)) 1522 return; 1523 1524 for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) 1525 ecore_llh_clear_ppfid_filters(p_dev, ppfid); 1526 } 1527 1528 enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn, 1529 struct ecore_ptt *p_ptt, u32 addr, 1530 u32 val) 1531 { 1532 struct ecore_dev *p_dev = p_hwfn->p_dev; 1533 u8 ppfid, abs_ppfid; 1534 enum _ecore_status_t rc; 1535 1536 for (ppfid = 0; ppfid < 
p_dev->p_llh_info->num_ppfid; ppfid++) { 1537 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); 1538 if (rc != ECORE_SUCCESS) 1539 return rc; 1540 1541 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, val); 1542 } 1543 1544 return ECORE_SUCCESS; 1545 } 1546 1547 static enum _ecore_status_t 1548 ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1549 u8 ppfid) 1550 { 1551 struct ecore_llh_filter_e4_details filter_details; 1552 u8 abs_ppfid, filter_idx; 1553 u32 addr; 1554 enum _ecore_status_t rc; 1555 1556 rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid); 1557 if (rc != ECORE_SUCCESS) 1558 return rc; 1559 1560 addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4; 1561 DP_NOTICE(p_hwfn, false, 1562 "[rel_pf_id %hhd, ppfid={rel %hhd, abs %hhd}, engine_sel 0x%x]\n", 1563 p_hwfn->rel_pf_id, ppfid, abs_ppfid, 1564 ecore_rd(p_hwfn, p_ptt, addr)); 1565 1566 for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; 1567 filter_idx++) { 1568 OSAL_MEMSET(&filter_details, 0, sizeof(filter_details)); 1569 rc = ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, 1570 filter_idx, &filter_details, 1571 false /* read access */); 1572 if (rc != ECORE_SUCCESS) 1573 return rc; 1574 1575 DP_NOTICE(p_hwfn, false, 1576 "filter %2hhd: enable %d, value 0x%016llx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n", 1577 filter_idx, filter_details.enable, 1578 (unsigned long long)filter_details.value, filter_details.mode, 1579 filter_details.protocol_type, filter_details.hdr_sel); 1580 } 1581 1582 return ECORE_SUCCESS; 1583 } 1584 1585 static enum _ecore_status_t 1586 ecore_llh_dump_ppfid_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn, 1587 struct ecore_ptt OSAL_UNUSED *p_ptt, 1588 u8 OSAL_UNUSED ppfid) 1589 { 1590 ECORE_E5_MISSING_CODE; 1591 1592 return ECORE_NOTIMPL; 1593 } 1594 1595 enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid) 1596 { 1597 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 1598 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 1599 enum _ecore_status_t rc; 1600 1601 if (p_ptt == OSAL_NULL) 1602 return ECORE_AGAIN; 1603 1604 if (ECORE_IS_E4(p_dev)) 1605 rc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid); 1606 else /* E5 */ 1607 rc = ecore_llh_dump_ppfid_e5(p_hwfn, p_ptt, ppfid); 1608 1609 ecore_ptt_release(p_hwfn, p_ptt); 1610 1611 return rc; 1612 } 1613 1614 enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev) 1615 { 1616 u8 ppfid; 1617 enum _ecore_status_t rc; 1618 1619 for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { 1620 rc = ecore_llh_dump_ppfid(p_dev, ppfid); 1621 if (rc != ECORE_SUCCESS) 1622 return rc; 1623 } 1624 1625 return ECORE_SUCCESS; 1626 } 1627 1628 /******************************* NIG LLH - End ********************************/ 1629 1630 /* Configurable */ 1631 #define ECORE_MIN_DPIS (4) /* The minimal number of DPIs required to 1632 * load the driver. The number was 1633 * arbitrarily set. 1634 */ 1635 1636 /* Derived */ 1637 #define ECORE_MIN_PWM_REGION (ECORE_WID_SIZE * ECORE_MIN_DPIS) 1638 1639 static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, 1640 struct ecore_ptt *p_ptt, 1641 enum BAR_ID bar_id) 1642 { 1643 u32 bar_reg = (bar_id == BAR_ID_0 ? 1644 PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); 1645 u32 val; 1646 1647 if (IS_VF(p_hwfn->p_dev)) 1648 return ecore_vf_hw_bar_size(p_hwfn, bar_id); 1649 1650 val = ecore_rd(p_hwfn, p_ptt, bar_reg); 1651 if (val) 1652 return 1 << (val + 15); 1653 1654 /* The above registers were updated in the past only in CMT mode. 
Since 1655 * they were found to be useful MFW started updating them from 8.7.7.0. 1656 * In older MFW versions they are set to 0 which means disabled. 1657 */ 1658 if (ECORE_IS_CMT(p_hwfn->p_dev)) { 1659 DP_INFO(p_hwfn, 1660 "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); 1661 return BAR_ID_0 ? 256 * 1024 : 512 * 1024; 1662 } else { 1663 DP_INFO(p_hwfn, 1664 "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n"); 1665 return 512 * 1024; 1666 } 1667 } 1668 1669 void ecore_init_dp(struct ecore_dev *p_dev, 1670 u32 dp_module, 1671 u8 dp_level, 1672 void *dp_ctx) 1673 { 1674 u32 i; 1675 1676 p_dev->dp_level = dp_level; 1677 p_dev->dp_module = dp_module; 1678 p_dev->dp_ctx = dp_ctx; 1679 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 1680 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1681 1682 p_hwfn->dp_level = dp_level; 1683 p_hwfn->dp_module = dp_module; 1684 p_hwfn->dp_ctx = dp_ctx; 1685 } 1686 } 1687 1688 enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev) 1689 { 1690 u8 i; 1691 1692 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 1693 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1694 1695 p_hwfn->p_dev = p_dev; 1696 p_hwfn->my_id = i; 1697 p_hwfn->b_active = false; 1698 1699 #ifdef CONFIG_ECORE_LOCK_ALLOC 1700 if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock)) 1701 goto handle_err; 1702 #endif 1703 OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock); 1704 } 1705 1706 /* hwfn 0 is always active */ 1707 p_dev->hwfns[0].b_active = true; 1708 1709 /* set the default cache alignment to 128 (may be overridden later) */ 1710 p_dev->cache_shift = 7; 1711 1712 p_dev->ilt_page_size = ECORE_DEFAULT_ILT_PAGE_SIZE; 1713 1714 return ECORE_SUCCESS; 1715 #ifdef CONFIG_ECORE_LOCK_ALLOC 1716 handle_err: 1717 while (--i) { 1718 struct ecore_hwfn *p_hwfn = OSAL_NULL; 1719 1720 p_hwfn = &p_dev->hwfns[i]; 1721 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock); 1722 } 1723 return ECORE_NOMEM; 1724 #endif 1725 } 1726 1727 static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) 1728 { 1729 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1730 1731 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params); 1732 qm_info->qm_pq_params = OSAL_NULL; 1733 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params); 1734 qm_info->qm_vport_params = OSAL_NULL; 1735 OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params); 1736 qm_info->qm_port_params = OSAL_NULL; 1737 OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data); 1738 qm_info->wfq_data = OSAL_NULL; 1739 } 1740 1741 void ecore_resc_free(struct ecore_dev *p_dev) 1742 { 1743 int i; 1744 1745 if (IS_VF(p_dev)) { 1746 for_each_hwfn(p_dev, i) 1747 ecore_l2_free(&p_dev->hwfns[i]); 1748 return; 1749 } 1750 1751 OSAL_FREE(p_dev, p_dev->fw_data); 1752 p_dev->fw_data = OSAL_NULL; 1753 1754 OSAL_FREE(p_dev, p_dev->reset_stats); 1755 p_dev->reset_stats = OSAL_NULL; 1756 1757 ecore_llh_free(p_dev); 1758 1759 for_each_hwfn(p_dev, i) { 1760 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1761 1762 ecore_cxt_mngr_free(p_hwfn); 1763 ecore_qm_info_free(p_hwfn); 1764 ecore_spq_free(p_hwfn); 1765 ecore_eq_free(p_hwfn); 1766 ecore_consq_free(p_hwfn); 1767 ecore_int_free(p_hwfn); 1768 #ifdef CONFIG_ECORE_LL2 1769 ecore_ll2_free(p_hwfn); 1770 #endif 1771 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 1772 ecore_fcoe_free(p_hwfn); 1773 1774 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 1775 ecore_iscsi_free(p_hwfn); 1776 ecore_ooo_free(p_hwfn); 1777 } 1778 1779 #ifdef CONFIG_ECORE_ROCE 1780 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) 1781 
ecore_rdma_info_free(p_hwfn); 1782 #endif 1783 ecore_iov_free(p_hwfn); 1784 ecore_l2_free(p_hwfn); 1785 ecore_dmae_info_free(p_hwfn); 1786 ecore_dcbx_info_free(p_hwfn); 1787 /* @@@TBD Flush work-queue ?*/ 1788 1789 /* destroy doorbell recovery mechanism */ 1790 ecore_db_recovery_teardown(p_hwfn); 1791 } 1792 } 1793 1794 /******************** QM initialization *******************/ 1795 /* bitmaps for indicating active traffic classes. Special case for Arrowhead 4 port */ 1796 #define ACTIVE_TCS_BMAP 0x9f /* 0..3 actually used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */ 1797 #define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */ 1798 1799 /* determines the physical queue flags for a given PF. */ 1800 static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) 1801 { 1802 u32 flags; 1803 1804 /* common flags */ 1805 flags = PQ_FLAGS_LB; 1806 1807 /* feature flags */ 1808 if (IS_ECORE_SRIOV(p_hwfn->p_dev)) 1809 flags |= PQ_FLAGS_VFS; 1810 if (IS_ECORE_DCQCN(p_hwfn)) 1811 flags |= PQ_FLAGS_RLS; 1812 1813 /* protocol flags */ 1814 switch (p_hwfn->hw_info.personality) { 1815 case ECORE_PCI_ETH: 1816 flags |= PQ_FLAGS_MCOS; 1817 break; 1818 case ECORE_PCI_FCOE: 1819 flags |= PQ_FLAGS_OFLD; 1820 break; 1821 case ECORE_PCI_ISCSI: 1822 flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 1823 break; 1824 case ECORE_PCI_ETH_ROCE: 1825 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; 1826 break; 1827 case ECORE_PCI_ETH_IWARP: 1828 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 1829 break; 1830 default: 1831 DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality); 1832 return 0; 1833 } 1834 1835 return flags; 1836 } 1837 1838 /* Getters for resource amounts necessary for qm initialization */ 1839 u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn) 1840 { 1841 return p_hwfn->hw_info.num_hw_tc; 1842 } 1843 1844 u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn) 1845 { 1846 return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
p_hwfn->p_dev->p_iov_info->total_vfs : 0; 1847 } 1848 1849 #define NUM_DEFAULT_RLS 1 1850 1851 u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn) 1852 { 1853 u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 1854 1855 /* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */ 1856 num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), 1857 (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_VPORT), 1858 ROCE_DCQCN_RP_MAX_QPS)); 1859 1860 /* make sure after we reserve the default and VF rls we'll have something left */ 1861 if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) { 1862 if (IS_ECORE_DCQCN(p_hwfn)) 1863 DP_NOTICE(p_hwfn, false, "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs); 1864 return 0; 1865 } 1866 1867 /* subtract rls necessary for VFs and one default one for the PF */ 1868 num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 1869 1870 return num_pf_rls; 1871 } 1872 1873 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn) 1874 { 1875 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 1876 1877 /* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */ 1878 return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 1879 (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1; 1880 } 1881 1882 /* calc amount of PQs according to the requested flags */ 1883 u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn) 1884 { 1885 u32 pq_flags = ecore_get_pq_flags(p_hwfn); 1886 1887 return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) + 1888 (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) + 1889 (!!(PQ_FLAGS_LB & pq_flags)) + 1890 (!!(PQ_FLAGS_OOO & pq_flags)) + 1891 (!!(PQ_FLAGS_ACK & pq_flags)) + 1892 (!!(PQ_FLAGS_OFLD & pq_flags)) + 1893 (!!(PQ_FLAGS_LLT & pq_flags)) + 1894 (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn); 1895 } 1896 1897 /* initialize the top level QM params */ 1898 static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn) 1899 { 1900 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1901 bool four_port; 1902 1903 /* pq and vport bases for this PF */ 1904 qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ); 1905 qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT); 1906 1907 /* rate limiting and weighted fair queueing are always enabled */ 1908 qm_info->vport_rl_en = 1; 1909 qm_info->vport_wfq_en = 1; 1910 1911 /* TC config is different for AH 4 port */ 1912 four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2; 1913 1914 /* in AH 4 port we have fewer TCs per port */ 1915 qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS; 1916 1917 /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */ 1918 if (!qm_info->ooo_tc) 1919 qm_info->ooo_tc = four_port ? 
DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC; 1920 } 1921 1922 /* initialize qm vport params */ 1923 static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn) 1924 { 1925 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1926 u8 i; 1927 1928 /* all vports participate in weighted fair queueing */ 1929 for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++) 1930 qm_info->qm_vport_params[i].vport_wfq = 1; 1931 } 1932 1933 /* initialize qm port params */ 1934 static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn) 1935 { 1936 /* Initialize qm port parameters */ 1937 u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine; 1938 1939 /* indicate how ooo and high pri traffic is dealt with */ 1940 active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 1941 ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP; 1942 1943 for (i = 0; i < num_ports; i++) { 1944 struct init_qm_port_params *p_qm_port = 1945 &p_hwfn->qm_info.qm_port_params[i]; 1946 1947 p_qm_port->active = 1; 1948 p_qm_port->active_phys_tcs = active_phys_tcs; 1949 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports; 1950 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 1951 } 1952 } 1953 1954 /* Reset the params which must be reset for qm init. QM init may be called as 1955 * a result of flows other than driver load (e.g. dcbx renegotiation). Other 1956 * params may be affected by the init but would simply recalculate to the same 1957 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 1958 * affected as these amounts stay the same. 1959 */ 1960 static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn) 1961 { 1962 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1963 1964 qm_info->num_pqs = 0; 1965 qm_info->num_vports = 0; 1966 qm_info->num_pf_rls = 0; 1967 qm_info->num_vf_pqs = 0; 1968 qm_info->first_vf_pq = 0; 1969 qm_info->first_mcos_pq = 0; 1970 qm_info->first_rl_pq = 0; 1971 } 1972 1973 static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn) 1974 { 1975 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1976 1977 qm_info->num_vports++; 1978 1979 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 1980 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 1981 } 1982 1983 /* initialize a single pq and manage qm_info resources accounting. 1984 * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF) 1985 * and whether a new vport is allocated to the pq or not (i.e. vport will be shared) 1986 */ 1987 1988 /* flags for pq init */ 1989 #define PQ_INIT_SHARE_VPORT (1 << 0) 1990 #define PQ_INIT_PF_RL (1 << 1) 1991 #define PQ_INIT_VF_RL (1 << 2) 1992 1993 /* defines for pq init */ 1994 #define PQ_INIT_DEFAULT_WRR_GROUP 1 1995 #define PQ_INIT_DEFAULT_TC 0 1996 #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 1997 1998 static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, 1999 struct ecore_qm_info *qm_info, 2000 u8 tc, u32 pq_init_flags) 2001 { 2002 u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn); 2003 2004 if (pq_idx > max_pq) 2005 DP_ERR(p_hwfn, "pq overflow! 
pq %d, max pq %d\n", pq_idx, max_pq); 2006 2007 /* init pq params */ 2008 qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; 2009 qm_info->qm_pq_params[pq_idx].tc_id = tc; 2010 qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 2011 qm_info->qm_pq_params[pq_idx].rl_valid = 2012 (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); 2013 2014 /* qm params accounting */ 2015 qm_info->num_pqs++; 2016 if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 2017 qm_info->num_vports++; 2018 2019 if (pq_init_flags & PQ_INIT_PF_RL) 2020 qm_info->num_pf_rls++; 2021 2022 if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) 2023 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn)); 2024 2025 if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) 2026 DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn)); 2027 } 2028 2029 /* get pq index according to PQ_FLAGS */ 2030 static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn, 2031 u32 pq_flags) 2032 { 2033 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2034 2035 /* Can't have multiple flags set here */ 2036 if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 2037 goto err; 2038 2039 switch (pq_flags) { 2040 case PQ_FLAGS_RLS: 2041 return &qm_info->first_rl_pq; 2042 case PQ_FLAGS_MCOS: 2043 return &qm_info->first_mcos_pq; 2044 case PQ_FLAGS_LB: 2045 return &qm_info->pure_lb_pq; 2046 case PQ_FLAGS_OOO: 2047 return &qm_info->ooo_pq; 2048 case PQ_FLAGS_ACK: 2049 return &qm_info->pure_ack_pq; 2050 case PQ_FLAGS_OFLD: 2051 return &qm_info->offload_pq; 2052 case PQ_FLAGS_LLT: 2053 return &qm_info->low_latency_pq; 2054 case PQ_FLAGS_VFS: 2055 return &qm_info->first_vf_pq; 2056 default: 2057 goto err; 2058 } 2059 2060 err: 2061 DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 2062 return OSAL_NULL; 2063 } 2064 2065 /* save pq index in qm info */ 2066 static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn, 2067 u32 pq_flags, u16 pq_val) 2068 { 2069 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 2070 2071 *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 2072 } 2073 2074 /* get tx pq index, with the PQ TX base already set (ready for context init) */ 2075 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags) 2076 { 2077 u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 2078 2079 return *base_pq_idx + CM_TX_PQ_BASE; 2080 } 2081 2082 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc) 2083 { 2084 u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn); 2085 2086 if (tc > max_tc) 2087 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 2088 2089 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 2090 } 2091 2092 u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) 2093 { 2094 u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn); 2095 2096 if (vf > max_vf) 2097 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 2098 2099 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 2100 } 2101 2102 u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) 2103 { 2104 u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); 2105 2106 if (rl > max_rl) 2107 DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); 2108 2109 return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 2110 } 2111 2112 /* 
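 * Example usage (illustrative sketch only, not code from this file; 'tc',
 * 'vf_id' and 'rl' are hypothetical inputs, and QM info is assumed to have
 * been built already by ecore_init_qm_info()):
 *
 *	u16 pq_id;
 *
 *	pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);	 an L2 Tx queue on TC 'tc'
 *	pq_id = ecore_get_cm_pq_idx_vf(p_hwfn, vf_id);	 a VF Tx queue
 *	pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, rl);	 a rate-limited PQ
 *	pq_id = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);  the offload PQ
 */

/*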
Functions for creating specific types of pqs */ 2113 static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) 2114 { 2115 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2116 2117 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 2118 return; 2119 2120 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 2121 ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 2122 } 2123 2124 static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn) 2125 { 2126 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2127 2128 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 2129 return; 2130 2131 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 2132 ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 2133 } 2134 2135 static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn) 2136 { 2137 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2138 2139 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 2140 return; 2141 2142 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 2143 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 2144 } 2145 2146 static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn) 2147 { 2148 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2149 2150 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 2151 return; 2152 2153 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 2154 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 2155 } 2156 2157 static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn) 2158 { 2159 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2160 2161 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) 2162 return; 2163 2164 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); 2165 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 2166 } 2167 2168 static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn) 2169 { 2170 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2171 u8 tc_idx; 2172 2173 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 2174 return; 2175 2176 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 2177 for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++) 2178 ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 2179 } 2180 2181 static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn) 2182 { 2183 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2184 u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); 2185 2186 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 2187 return; 2188 2189 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 2190 qm_info->num_vf_pqs = num_vfs; 2191 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 2192 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); 2193 } 2194 2195 static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn) 2196 { 2197 u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn); 2198 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2199 2200 if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 2201 return; 2202 2203 ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 2204 for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 2205 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); 2206 } 2207 2208 static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn) 2209 { 2210 /* rate limited pqs, must come first (FW assumption) */ 2211 ecore_init_qm_rl_pqs(p_hwfn); 2212 2213 /* pqs for multi cos */ 2214 
ecore_init_qm_mcos_pqs(p_hwfn); 2215 2216 /* pure loopback pq */ 2217 ecore_init_qm_lb_pq(p_hwfn); 2218 2219 /* out of order pq */ 2220 ecore_init_qm_ooo_pq(p_hwfn); 2221 2222 /* pure ack pq */ 2223 ecore_init_qm_pure_ack_pq(p_hwfn); 2224 2225 /* pq for offloaded protocol */ 2226 ecore_init_qm_offload_pq(p_hwfn); 2227 2228 /* low latency pq */ 2229 ecore_init_qm_low_latency_pq(p_hwfn); 2230 2231 /* done sharing vports */ 2232 ecore_init_qm_advance_vport(p_hwfn); 2233 2234 /* pqs for vfs */ 2235 ecore_init_qm_vf_pqs(p_hwfn); 2236 } 2237 2238 /* compare values of getters against resources amounts */ 2239 static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn) 2240 { 2241 if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) { 2242 DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 2243 return ECORE_INVAL; 2244 } 2245 2246 if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) { 2247 DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 2248 return ECORE_INVAL; 2249 } 2250 2251 return ECORE_SUCCESS; 2252 } 2253 2254 /* 2255 * Function for verbose printing of the qm initialization results 2256 */ 2257 static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) 2258 { 2259 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2260 struct init_qm_vport_params *vport; 2261 struct init_qm_port_params *port; 2262 struct init_qm_pq_params *pq; 2263 int i, tc; 2264 2265 /* top level params */ 2266 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 2267 qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->offload_pq, qm_info->pure_ack_pq); 2268 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", 2269 qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port); 2270 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 2271 qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn)); 2272 2273 /* port table */ 2274 for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) { 2275 port = &(qm_info->qm_port_params[i]); 2276 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", 2277 i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved); 2278 } 2279 2280 /* vport table */ 2281 for (i = 0; i < qm_info->num_vports; i++) { 2282 vport = &(qm_info->qm_vport_params[i]); 2283 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", 2284 qm_info->start_vport + i, vport->vport_rl, vport->vport_wfq); 2285 for (tc = 0; tc < NUM_OF_TCS; tc++) 2286 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]); 2287 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n"); 2288 } 2289 2290 /* pq table */ 2291 for (i = 0; i < qm_info->num_pqs; i++) { 2292 pq = &(qm_info->qm_pq_params[i]); 2293 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", 2294 qm_info->start_pq + i, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid); 2295 } 2296 } 2297 2298 static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn) 2299 { 2300 
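	/* The helpers below fill qm_info in a fixed order: the counters are
	 * reset first, then the top level, port and vport params are derived,
	 * and finally the PQ table is laid out (rate limited PQs first, per
	 * the FW assumption noted in ecore_init_qm_pq_params()).
	 */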
/* reset params required for init run */ 2301 ecore_init_qm_reset_params(p_hwfn); 2302 2303 /* init QM top level params */ 2304 ecore_init_qm_params(p_hwfn); 2305 2306 /* init QM port params */ 2307 ecore_init_qm_port_params(p_hwfn); 2308 2309 /* init QM vport params */ 2310 ecore_init_qm_vport_params(p_hwfn); 2311 2312 /* init QM physical queue params */ 2313 ecore_init_qm_pq_params(p_hwfn); 2314 2315 /* display all that init */ 2316 ecore_dp_init_qm_params(p_hwfn); 2317 } 2318 2319 /* This function reconfigures the QM pf on the fly. 2320 * For this purpose we: 2321 * 1. reconfigure the QM database 2322 * 2. set new values to runtime array 2323 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 2324 * 4. activate init tool in QM_PF stage 2325 * 5. send an sdm_qm_cmd through rbc interface to release the QM 2326 */ 2327 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, 2328 struct ecore_ptt *p_ptt) 2329 { 2330 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2331 bool b_rc; 2332 enum _ecore_status_t rc; 2333 2334 /* initialize ecore's qm data structure */ 2335 ecore_init_qm_info(p_hwfn); 2336 2337 /* stop PF's qm queues */ 2338 OSAL_SPIN_LOCK(&qm_lock); 2339 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 2340 qm_info->start_pq, qm_info->num_pqs); 2341 OSAL_SPIN_UNLOCK(&qm_lock); 2342 if (!b_rc) 2343 return ECORE_INVAL; 2344 2345 /* clear the QM_PF runtime phase leftovers from previous init */ 2346 ecore_init_clear_rt_data(p_hwfn); 2347 2348 /* prepare QM portion of runtime array */ 2349 ecore_qm_init_pf(p_hwfn, p_ptt, false); 2350 2351 /* activate init tool on runtime array */ 2352 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 2353 p_hwfn->hw_info.hw_mode); 2354 if (rc != ECORE_SUCCESS) 2355 return rc; 2356 2357 /* start PF's qm queues */ 2358 OSAL_SPIN_LOCK(&qm_lock); 2359 b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 2360 qm_info->start_pq, qm_info->num_pqs); 2361 OSAL_SPIN_UNLOCK(&qm_lock); 2362 if (!b_rc) 2363 return ECORE_INVAL; 2364 2365 return ECORE_SUCCESS; 2366 } 2367 2368 static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn) 2369 { 2370 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2371 enum _ecore_status_t rc; 2372 2373 rc = ecore_init_qm_sanity(p_hwfn); 2374 if (rc != ECORE_SUCCESS) 2375 goto alloc_err; 2376 2377 qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 2378 sizeof(struct init_qm_pq_params) * 2379 ecore_init_qm_get_num_pqs(p_hwfn)); 2380 if (!qm_info->qm_pq_params) 2381 goto alloc_err; 2382 2383 qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 2384 sizeof(struct init_qm_vport_params) * 2385 ecore_init_qm_get_num_vports(p_hwfn)); 2386 if (!qm_info->qm_vport_params) 2387 goto alloc_err; 2388 2389 qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 2390 sizeof(struct init_qm_port_params) * 2391 p_hwfn->p_dev->num_ports_in_engine); 2392 if (!qm_info->qm_port_params) 2393 goto alloc_err; 2394 2395 qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 2396 sizeof(struct ecore_wfq_data) * 2397 ecore_init_qm_get_num_vports(p_hwfn)); 2398 if (!qm_info->wfq_data) 2399 goto alloc_err; 2400 2401 return ECORE_SUCCESS; 2402 2403 alloc_err: 2404 DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n"); 2405 ecore_qm_info_free(p_hwfn); 2406 return ECORE_NOMEM; 2407 } 2408 /******************** End QM initialization ***************/ 2409 2410 enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) 2411 { 2412 u32 rdma_tasks, 
excess_tasks; 2413 u32 line_count; 2414 enum _ecore_status_t rc = ECORE_SUCCESS; 2415 int i; 2416 2417 if (IS_VF(p_dev)) { 2418 for_each_hwfn(p_dev, i) { 2419 rc = ecore_l2_alloc(&p_dev->hwfns[i]); 2420 if (rc != ECORE_SUCCESS) 2421 return rc; 2422 } 2423 return rc; 2424 } 2425 2426 p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL, 2427 sizeof(*p_dev->fw_data)); 2428 if (!p_dev->fw_data) 2429 return ECORE_NOMEM; 2430 2431 for_each_hwfn(p_dev, i) { 2432 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2433 u32 n_eqes, num_cons; 2434 2435 /* initialize the doorbell recovery mechanism */ 2436 rc = ecore_db_recovery_setup(p_hwfn); 2437 if (rc) 2438 goto alloc_err; 2439 2440 /* First allocate the context manager structure */ 2441 rc = ecore_cxt_mngr_alloc(p_hwfn); 2442 if (rc) 2443 goto alloc_err; 2444 2445 /* Set the HW cid/tid numbers (in the context manager) 2446 * Must be done prior to any further computations. 2447 */ 2448 rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); 2449 if (rc) 2450 goto alloc_err; 2451 2452 rc = ecore_alloc_qm_data(p_hwfn); 2453 if (rc) 2454 goto alloc_err; 2455 2456 /* init qm info */ 2457 ecore_init_qm_info(p_hwfn); 2458 2459 /* Compute the ILT client partition */ 2460 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 2461 if (rc) { 2462 DP_NOTICE(p_hwfn, false, "too many ILT lines; re-computing with less lines\n"); 2463 /* In case there are not enough ILT lines we reduce the 2464 * number of RDMA tasks and re-compute. 2465 */ 2466 excess_tasks = ecore_cxt_cfg_ilt_compute_excess( 2467 p_hwfn, line_count); 2468 if (!excess_tasks) 2469 goto alloc_err; 2470 2471 rdma_tasks = RDMA_MAX_TIDS - excess_tasks; 2472 rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks); 2473 if (rc) 2474 goto alloc_err; 2475 2476 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count); 2477 if (rc) { 2478 DP_ERR(p_hwfn, "failed ILT compute. Requested too many lines: %u\n", 2479 line_count); 2480 2481 goto alloc_err; 2482 } 2483 } 2484 2485 /* CID map / ILT shadow table / T2 2486 * The talbes sizes are determined by the computations above 2487 */ 2488 rc = ecore_cxt_tables_alloc(p_hwfn); 2489 if (rc) 2490 goto alloc_err; 2491 2492 /* SPQ, must follow ILT because initializes SPQ context */ 2493 rc = ecore_spq_alloc(p_hwfn); 2494 if (rc) 2495 goto alloc_err; 2496 2497 /* SP status block allocation */ 2498 p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, 2499 RESERVED_PTT_DPC); 2500 2501 rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 2502 if (rc) 2503 goto alloc_err; 2504 2505 rc = ecore_iov_alloc(p_hwfn); 2506 if (rc) 2507 goto alloc_err; 2508 2509 /* EQ */ 2510 n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain); 2511 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 2512 u32 n_srq = ecore_cxt_get_total_srq_count(p_hwfn); 2513 2514 /* Calculate the EQ size 2515 * --------------------- 2516 * Each ICID may generate up to one event at a time i.e. 2517 * the event must be handled/cleared before a new one 2518 * can be generated. We calculate the sum of events per 2519 * protocol and create an EQ deep enough to handle the 2520 * worst case: 2521 * - Core - according to SPQ. 2522 * - RoCE - per QP there are a couple of ICIDs, one 2523 * responder and one requester, each can 2524 * generate max 2 EQE (err+qp_destroyed) => 2525 * n_eqes_qp = 4 * n_qp. 2526 * Each CQ can generate an EQE. There are 2 CQs 2527 * per QP => n_eqes_cq = 2 * n_qp. 2528 * Hence the RoCE total is 6 * n_qp or 2529 * 3 * num_cons. 2530 * On top of that one eqe shoule be added for 2531 * each XRC SRQ and SRQ. 
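 *            (Illustrative numbers only: with 1K RoCE QPs there are
 *            roughly 2K ICIDs, i.e. num_cons = 2K, so the RoCE share
 *            alone is 3 * 2K = 6K EQEs, before the SRQ/XRC SRQ and
 *            VF-related entries are added.)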
2532 * - iWARP - can generate three async per QP (error 2533 * detected and qp in error) and an 2534 additional error per CQ. 4* num_cons. 2535 On top of that one eqe shoule be added for 2536 * each SRQ and XRC SRQ. 2537 * - ENet - There can be up to two events per VF. One 2538 * for VF-PF channel and another for VF FLR 2539 * initial cleanup. The number of VFs is 2540 * bounded by MAX_NUM_VFS_BB, and is much 2541 * smaller than RoCE's so we avoid exact 2542 * calculation. 2543 */ 2544 if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) { 2545 num_cons = ecore_cxt_get_proto_cid_count( 2546 p_hwfn, PROTOCOLID_ROCE, OSAL_NULL); 2547 num_cons *= 3; 2548 } else { 2549 num_cons = ecore_cxt_get_proto_cid_count( 2550 p_hwfn, PROTOCOLID_IWARP, 2551 OSAL_NULL); 2552 num_cons *= 4; 2553 } 2554 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq; 2555 } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 2556 num_cons = ecore_cxt_get_proto_cid_count( 2557 p_hwfn, PROTOCOLID_ISCSI, OSAL_NULL); 2558 n_eqes += 2 * num_cons; 2559 } 2560 2561 if (n_eqes > 0xFF00) { 2562 DP_ERR(p_hwfn, "EQs maxing out at 0xFF00 elements\n"); 2563 n_eqes = 0xFF00; 2564 } 2565 2566 rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes); 2567 if (rc) 2568 goto alloc_err; 2569 2570 rc = ecore_consq_alloc(p_hwfn); 2571 if (rc) 2572 goto alloc_err; 2573 2574 rc = ecore_l2_alloc(p_hwfn); 2575 if (rc != ECORE_SUCCESS) 2576 goto alloc_err; 2577 2578 #ifdef CONFIG_ECORE_LL2 2579 if (p_hwfn->using_ll2) { 2580 rc = ecore_ll2_alloc(p_hwfn); 2581 if (rc) 2582 goto alloc_err; 2583 } 2584 #endif 2585 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { 2586 rc = ecore_fcoe_alloc(p_hwfn); 2587 if (rc) 2588 goto alloc_err; 2589 } 2590 2591 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 2592 rc = ecore_iscsi_alloc(p_hwfn); 2593 if (rc) 2594 goto alloc_err; 2595 2596 rc = ecore_ooo_alloc(p_hwfn); 2597 if (rc) 2598 goto alloc_err; 2599 } 2600 #ifdef CONFIG_ECORE_ROCE 2601 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 2602 rc = ecore_rdma_info_alloc(p_hwfn); 2603 if (rc) 2604 goto alloc_err; 2605 } 2606 #endif 2607 2608 /* DMA info initialization */ 2609 rc = ecore_dmae_info_alloc(p_hwfn); 2610 if (rc) { 2611 DP_NOTICE(p_hwfn, false, 2612 "Failed to allocate memory for dmae_info structure\n"); 2613 goto alloc_err; 2614 } 2615 2616 /* DCBX initialization */ 2617 rc = ecore_dcbx_info_alloc(p_hwfn); 2618 if (rc) { 2619 DP_NOTICE(p_hwfn, false, 2620 "Failed to allocate memory for dcbx structure\n"); 2621 goto alloc_err; 2622 } 2623 } 2624 2625 rc = ecore_llh_alloc(p_dev); 2626 if (rc != ECORE_SUCCESS) { 2627 DP_NOTICE(p_dev, false, 2628 "Failed to allocate memory for the llh_info structure\n"); 2629 goto alloc_err; 2630 } 2631 2632 p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, 2633 sizeof(*p_dev->reset_stats)); 2634 if (!p_dev->reset_stats) { 2635 DP_NOTICE(p_dev, false, 2636 "Failed to allocate reset statistics\n"); 2637 goto alloc_no_mem; 2638 } 2639 2640 return ECORE_SUCCESS; 2641 2642 alloc_no_mem: 2643 rc = ECORE_NOMEM; 2644 alloc_err: 2645 ecore_resc_free(p_dev); 2646 return rc; 2647 } 2648 2649 void ecore_resc_setup(struct ecore_dev *p_dev) 2650 { 2651 int i; 2652 2653 if (IS_VF(p_dev)) { 2654 for_each_hwfn(p_dev, i) 2655 ecore_l2_setup(&p_dev->hwfns[i]); 2656 return; 2657 } 2658 2659 for_each_hwfn(p_dev, i) { 2660 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2661 2662 ecore_cxt_mngr_setup(p_hwfn); 2663 ecore_spq_setup(p_hwfn); 2664 ecore_eq_setup(p_hwfn); 2665 ecore_consq_setup(p_hwfn); 2666 2667 /* Read shadow of current MFW mailbox */ 
2668 ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 2669 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 2670 p_hwfn->mcp_info->mfw_mb_cur, 2671 p_hwfn->mcp_info->mfw_mb_length); 2672 2673 ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt); 2674 2675 ecore_l2_setup(p_hwfn); 2676 ecore_iov_setup(p_hwfn); 2677 #ifdef CONFIG_ECORE_LL2 2678 if (p_hwfn->using_ll2) 2679 ecore_ll2_setup(p_hwfn); 2680 #endif 2681 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 2682 ecore_fcoe_setup(p_hwfn); 2683 2684 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 2685 ecore_iscsi_setup(p_hwfn); 2686 ecore_ooo_setup(p_hwfn); 2687 } 2688 } 2689 } 2690 2691 #define FINAL_CLEANUP_POLL_CNT (100) 2692 #define FINAL_CLEANUP_POLL_TIME (10) 2693 enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, 2694 struct ecore_ptt *p_ptt, 2695 u16 id, bool is_vf) 2696 { 2697 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 2698 enum _ecore_status_t rc = ECORE_TIMEOUT; 2699 2700 #ifndef ASIC_ONLY 2701 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) || 2702 CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 2703 DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n"); 2704 return ECORE_SUCCESS; 2705 } 2706 #endif 2707 2708 addr = GTT_BAR0_MAP_REG_USDM_RAM + 2709 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 2710 2711 if (is_vf) 2712 id += 0x10; 2713 2714 command |= X_FINAL_CLEANUP_AGG_INT << 2715 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 2716 command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 2717 command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 2718 command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; 2719 2720 /* Make sure notification is not set before initiating final cleanup */ 2721 if (REG_RD(p_hwfn, addr)) { 2722 DP_NOTICE(p_hwfn, false, 2723 "Unexpected; Found final cleanup notification before initiating final cleanup\n"); 2724 REG_WR(p_hwfn, addr, 0); 2725 } 2726 2727 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2728 "Sending final cleanup for PFVF[%d] [Command %08x]\n", 2729 id, command); 2730 2731 ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 2732 2733 /* Poll until completion */ 2734 while (!REG_RD(p_hwfn, addr) && count--) 2735 OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME); 2736 2737 if (REG_RD(p_hwfn, addr)) 2738 rc = ECORE_SUCCESS; 2739 else 2740 DP_NOTICE(p_hwfn, true, "Failed to receive FW final cleanup notification\n"); 2741 2742 /* Cleanup afterwards */ 2743 REG_WR(p_hwfn, addr, 0); 2744 2745 return rc; 2746 } 2747 2748 static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) 2749 { 2750 int hw_mode = 0; 2751 2752 if (ECORE_IS_BB_B0(p_hwfn->p_dev)) { 2753 hw_mode |= 1 << MODE_BB; 2754 } else if (ECORE_IS_AH(p_hwfn->p_dev)) { 2755 hw_mode |= 1 << MODE_K2; 2756 } else if (ECORE_IS_E5(p_hwfn->p_dev)) { 2757 hw_mode |= 1 << MODE_E5; 2758 } else { 2759 DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n", 2760 p_hwfn->p_dev->type); 2761 return ECORE_INVAL; 2762 } 2763 2764 /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE*/ 2765 switch (p_hwfn->p_dev->num_ports_in_engine) { 2766 case 1: 2767 hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 2768 break; 2769 case 2: 2770 hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 2771 break; 2772 case 4: 2773 hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 2774 break; 2775 default: 2776 DP_NOTICE(p_hwfn, true, "num_ports_in_engine = %d not supported\n", 2777 p_hwfn->p_dev->num_ports_in_engine); 2778 return ECORE_INVAL; 2779 } 2780 2781 if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, 2782 &p_hwfn->p_dev->mf_bits)) 2783 hw_mode |= 1 << MODE_MF_SD; 2784 else 2785 
hw_mode |= 1 << MODE_MF_SI; 2786 2787 #ifndef ASIC_ONLY 2788 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 2789 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 2790 hw_mode |= 1 << MODE_FPGA; 2791 } else { 2792 if (p_hwfn->p_dev->b_is_emul_full) 2793 hw_mode |= 1 << MODE_EMUL_FULL; 2794 else 2795 hw_mode |= 1 << MODE_EMUL_REDUCED; 2796 } 2797 } else 2798 #endif 2799 hw_mode |= 1 << MODE_ASIC; 2800 2801 if (ECORE_IS_CMT(p_hwfn->p_dev)) 2802 hw_mode |= 1 << MODE_100G; 2803 2804 p_hwfn->hw_info.hw_mode = hw_mode; 2805 2806 DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP), 2807 "Configuring function for hw_mode: 0x%08x\n", 2808 p_hwfn->hw_info.hw_mode); 2809 2810 return ECORE_SUCCESS; 2811 } 2812 2813 #ifndef ASIC_ONLY 2814 /* MFW-replacement initializations for non-ASIC */ 2815 static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn, 2816 struct ecore_ptt *p_ptt) 2817 { 2818 struct ecore_dev *p_dev = p_hwfn->p_dev; 2819 u32 pl_hv = 1; 2820 int i; 2821 2822 if (CHIP_REV_IS_EMUL(p_dev)) { 2823 if (ECORE_IS_AH(p_dev)) 2824 pl_hv |= 0x600; 2825 else if (ECORE_IS_E5(p_dev)) 2826 ECORE_E5_MISSING_CODE; 2827 } 2828 2829 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv); 2830 2831 if (CHIP_REV_IS_EMUL(p_dev) && 2832 (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev))) 2833 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5, 2834 0x3ffffff); 2835 2836 /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */ 2837 /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */ 2838 if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev)) 2839 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4); 2840 2841 if (CHIP_REV_IS_EMUL(p_dev)) { 2842 if (ECORE_IS_AH(p_dev)) { 2843 /* 2 for 4-port, 1 for 2-port, 0 for 1-port */ 2844 ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE, 2845 (p_dev->num_ports_in_engine >> 1)); 2846 2847 ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN, 2848 p_dev->num_ports_in_engine == 4 ? 0 : 3); 2849 } else if (ECORE_IS_E5(p_dev)) { 2850 ECORE_E5_MISSING_CODE; 2851 } 2852 2853 /* Poll on RBC */ 2854 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1); 2855 for (i = 0; i < 100; i++) { 2856 OSAL_UDELAY(50); 2857 if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1) 2858 break; 2859 } 2860 if (i == 100) 2861 DP_NOTICE(p_hwfn, true, 2862 "RBC done failed to complete in PSWRQ2\n"); 2863 } 2864 2865 return ECORE_SUCCESS; 2866 } 2867 #endif 2868 2869 /* Init run time data for all PFs and their VFs on an engine. 2870 * TBD - for VFs - Once we have parent PF info for each VF in 2871 * shmem available as CAU requires knowledge of parent PF for each VF. 
2872 */ 2873 static void ecore_init_cau_rt_data(struct ecore_dev *p_dev) 2874 { 2875 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 2876 int i, igu_sb_id; 2877 2878 for_each_hwfn(p_dev, i) { 2879 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2880 struct ecore_igu_info *p_igu_info; 2881 struct ecore_igu_block *p_block; 2882 struct cau_sb_entry sb_entry; 2883 2884 p_igu_info = p_hwfn->hw_info.p_igu_info; 2885 2886 for (igu_sb_id = 0; 2887 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev); 2888 igu_sb_id++) { 2889 p_block = &p_igu_info->entry[igu_sb_id]; 2890 2891 if (!p_block->is_pf) 2892 continue; 2893 2894 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, 2895 p_block->function_id, 2896 0, 0); 2897 STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, 2898 sb_entry); 2899 } 2900 } 2901 } 2902 2903 static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn, 2904 struct ecore_ptt *p_ptt) 2905 { 2906 u32 val, wr_mbs, cache_line_size; 2907 2908 val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 2909 switch (val) { 2910 case 0: 2911 wr_mbs = 128; 2912 break; 2913 case 1: 2914 wr_mbs = 256; 2915 break; 2916 case 2: 2917 wr_mbs = 512; 2918 break; 2919 default: 2920 DP_INFO(p_hwfn, 2921 "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 2922 val); 2923 return; 2924 } 2925 2926 cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs); 2927 switch (cache_line_size) { 2928 case 32: 2929 val = 0; 2930 break; 2931 case 64: 2932 val = 1; 2933 break; 2934 case 128: 2935 val = 2; 2936 break; 2937 case 256: 2938 val = 3; 2939 break; 2940 default: 2941 DP_INFO(p_hwfn, 2942 "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 2943 cache_line_size); 2944 } 2945 2946 if (OSAL_CACHE_LINE_SIZE > wr_mbs) 2947 DP_INFO(p_hwfn, 2948 "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 2949 OSAL_CACHE_LINE_SIZE, wr_mbs); 2950 2951 STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 2952 if (val > 0) { 2953 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val); 2954 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val); 2955 } 2956 } 2957 2958 static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, 2959 struct ecore_ptt *p_ptt, 2960 int hw_mode) 2961 { 2962 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 2963 struct ecore_dev *p_dev = p_hwfn->p_dev; 2964 u8 vf_id, max_num_vfs; 2965 u16 num_pfs, pf_id; 2966 u32 concrete_fid; 2967 enum _ecore_status_t rc = ECORE_SUCCESS; 2968 2969 ecore_init_cau_rt_data(p_dev); 2970 2971 /* Program GTT windows */ 2972 ecore_gtt_init(p_hwfn, p_ptt); 2973 2974 #ifndef ASIC_ONLY 2975 if (CHIP_REV_IS_EMUL(p_dev)) { 2976 rc = ecore_hw_init_chip(p_hwfn, p_ptt); 2977 if (rc != ECORE_SUCCESS) 2978 return rc; 2979 } 2980 #endif 2981 2982 if (p_hwfn->mcp_info) { 2983 if (p_hwfn->mcp_info->func_info.bandwidth_max) 2984 qm_info->pf_rl_en = 1; 2985 if (p_hwfn->mcp_info->func_info.bandwidth_min) 2986 qm_info->pf_wfq_en = 1; 2987 } 2988 2989 ecore_qm_common_rt_init(p_hwfn, 2990 p_dev->num_ports_in_engine, 2991 qm_info->max_phys_tcs_per_port, 2992 qm_info->pf_rl_en, qm_info->pf_wfq_en, 2993 qm_info->vport_rl_en, qm_info->vport_wfq_en, 2994 qm_info->qm_port_params); 2995 2996 ecore_cxt_hw_init_common(p_hwfn); 2997 2998 ecore_init_cache_line_size(p_hwfn, p_ptt); 2999 3000 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn), 3001 hw_mode); 3002 if (rc != ECORE_SUCCESS) 3003 return rc; 3004 3005 /* 
@@TBD MichalK - should add VALIDATE_VFID to init tool... 3006 * need to decide with which value, maybe runtime 3007 */ 3008 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 3009 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 3010 3011 if (ECORE_IS_BB(p_dev)) { 3012 /* Workaround clears ROCE search for all functions to prevent 3013 * involving non initialized function in processing ROCE packet. 3014 */ 3015 num_pfs = NUM_OF_ENG_PFS(p_dev); 3016 for (pf_id = 0; pf_id < num_pfs; pf_id++) { 3017 ecore_fid_pretend(p_hwfn, p_ptt, pf_id); 3018 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 3019 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 3020 } 3021 /* pretend to original PF */ 3022 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 3023 } 3024 3025 /* Workaround for avoiding CCFC execution error when getting packets 3026 * with CRC errors, and allowing instead the invoking of the FW error 3027 * handler. 3028 * This is not done inside the init tool since it currently can't 3029 * perform a pretending to VFs. 3030 */ 3031 max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; 3032 for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { 3033 concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id); 3034 ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); 3035 ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 3036 ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); 3037 ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); 3038 ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); 3039 } 3040 /* pretend to original PF */ 3041 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 3042 3043 return rc; 3044 } 3045 3046 #ifndef ASIC_ONLY 3047 #define MISC_REG_RESET_REG_2_XMAC_BIT (1<<4) 3048 #define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1<<5) 3049 3050 #define PMEG_IF_BYTE_COUNT 8 3051 3052 static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn, 3053 struct ecore_ptt *p_ptt, 3054 u32 addr, 3055 u64 data, 3056 u8 reg_type, 3057 u8 port) 3058 { 3059 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 3060 "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n", 3061 ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) | 3062 (8 << PMEG_IF_BYTE_COUNT), 3063 (reg_type << 25) | (addr << 8) | port, 3064 (u32)((data >> 32) & 0xffffffff), 3065 (u32)(data & 0xffffffff)); 3066 3067 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB, 3068 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) & 3069 0xffff00fe) | 3070 (8 << PMEG_IF_BYTE_COUNT)); 3071 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB, 3072 (reg_type << 25) | (addr << 8) | port); 3073 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff); 3074 ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, 3075 (data >> 32) & 0xffffffff); 3076 } 3077 3078 #define XLPORT_MODE_REG (0x20a) 3079 #define XLPORT_MAC_CONTROL (0x210) 3080 #define XLPORT_FLOW_CONTROL_CONFIG (0x207) 3081 #define XLPORT_ENABLE_REG (0x20b) 3082 3083 #define XLMAC_CTRL (0x600) 3084 #define XLMAC_MODE (0x601) 3085 #define XLMAC_RX_MAX_SIZE (0x608) 3086 #define XLMAC_TX_CTRL (0x604) 3087 #define XLMAC_PAUSE_CTRL (0x60d) 3088 #define XLMAC_PFC_CTRL (0x60e) 3089 3090 static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn, 3091 struct ecore_ptt *p_ptt) 3092 { 3093 u8 loopback = 0, port = p_hwfn->port_id * 2; 3094 3095 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 3096 3097 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, 3098 (0x4 << 4) | 0x4, 1, port); /* XLPORT MAC MODE */ /* 0 Quad, 4 Single... 
*/ 3099 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port); 3100 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 3101 0x40, 0, port); /*XLMAC: SOFT RESET */ 3102 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 3103 0x40, 0, port); /*XLMAC: Port Speed >= 10Gbps */ 3104 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 3105 0x3fff, 0, port); /* XLMAC: Max Size */ 3106 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL, 3107 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38), 3108 0, port); 3109 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 3110 0x7c000, 0, port); 3111 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL, 3112 0x30ffffc000ULL, 0, port); 3113 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 3114 0, port); /* XLMAC: TX_EN, RX_EN */ 3115 ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2), 3116 0, port); /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */ 3117 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 3118 1, 0, port); /* Enabled Parallel PFC interface */ 3119 ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 3120 0xf, 1, port); /* XLPORT port enable */ 3121 } 3122 3123 static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn, 3124 struct ecore_ptt *p_ptt) 3125 { 3126 u8 port = p_hwfn->port_id; 3127 u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE; 3128 3129 DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); 3130 3131 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2), 3132 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) | 3133 (port << 3134 CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) | 3135 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT)); 3136 3137 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5, 3138 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT); 3139 3140 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5, 3141 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT); 3142 3143 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5, 3144 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT); 3145 3146 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5, 3147 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT); 3148 3149 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5, 3150 (0xA << 3151 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) | 3152 (8 << 3153 ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT)); 3154 3155 ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5, 3156 0xa853); 3157 } 3158 3159 static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn, 3160 struct ecore_ptt *p_ptt) 3161 { 3162 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) 3163 ecore_emul_link_init_ah_e5(p_hwfn, p_ptt); 3164 else /* BB */ 3165 ecore_emul_link_init_bb(p_hwfn, p_ptt); 3166 3167 return; 3168 } 3169 3170 static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn, 3171 struct ecore_ptt *p_ptt, u8 port) 3172 { 3173 int port_offset = port ? 
0x800 : 0; 3174 u32 xmac_rxctrl = 0; 3175 3176 /* Reset of XMAC */ 3177 /* FIXME: move to common start */ 3178 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2*sizeof(u32), 3179 MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */ 3180 OSAL_MSLEEP(1); 3181 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 3182 MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */ 3183 3184 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1); 3185 3186 /* Set the number of ports on the Warp Core to 10G */ 3187 ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3); 3188 3189 /* Soft reset of XMAC */ 3190 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), 3191 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 3192 OSAL_MSLEEP(1); 3193 ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), 3194 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); 3195 3196 /* FIXME: move to common end */ 3197 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 3198 ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20); 3199 3200 /* Set Max packet size: initialize XMAC block register for port 0 */ 3201 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710); 3202 3203 /* CRC append for Tx packets: init XMAC block register for port 1 */ 3204 ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800); 3205 3206 /* Enable TX and RX: initialize XMAC block register for port 1 */ 3207 ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset, 3208 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB); 3209 xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, 3210 XMAC_REG_RX_CTRL_BB + port_offset); 3211 xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB; 3212 ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl); 3213 } 3214 #endif 3215 3216 static enum _ecore_status_t 3217 ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn, 3218 struct ecore_ptt *p_ptt, 3219 u32 pwm_region_size, 3220 u32 n_cpus) 3221 { 3222 u32 dpi_bit_shift, dpi_count, dpi_page_size; 3223 u32 min_dpis; 3224 u32 n_wids; 3225 3226 /* Calculate DPI size 3227 * ------------------ 3228 * The PWM region contains Doorbell Pages. The first is reserverd for 3229 * the kernel for, e.g, L2. The others are free to be used by non- 3230 * trusted applications, typically from user space. Each page, called a 3231 * doorbell page is sectioned into windows that allow doorbells to be 3232 * issued in parallel by the kernel/application. The size of such a 3233 * window (a.k.a. WID) is 1kB. 3234 * Summary: 3235 * 1kB WID x N WIDS = DPI page size 3236 * DPI page size x N DPIs = PWM region size 3237 * Notes: 3238 * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE 3239 * in order to ensure that two applications won't share the same page. 3240 * It also must contain at least one WID per CPU to allow parallelism. 3241 * It also must be a power of 2, since it is stored as a bit shift. 3242 * 3243 * The DPI page size is stored in a register as 'dpi_bit_shift' so that 3244 * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096 3245 * containing 4 WIDs. 
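 *
 * Illustrative example (assumed values, and assuming ECORE_MIN_WIDS does
 * not exceed the CPU count): with 16 CPUs and a 4kB OSAL_PAGE_SIZE,
 * n_wids = 16, so the DPI page size rounds up to 16kB (dpi_bit_shift = 2)
 * and dpi_count = pwm_region_size / 16kB.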
3246 */ 3247 n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus); 3248 dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids); 3249 dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) & ~(OSAL_PAGE_SIZE - 1); 3250 dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096); 3251 dpi_count = pwm_region_size / dpi_page_size; 3252 3253 min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; 3254 min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis); 3255 3256 /* Update hwfn */ 3257 p_hwfn->dpi_size = dpi_page_size; 3258 p_hwfn->dpi_count = dpi_count; 3259 3260 /* Update registers */ 3261 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); 3262 3263 if (dpi_count < min_dpis) 3264 return ECORE_NORESOURCES; 3265 3266 return ECORE_SUCCESS; 3267 } 3268 3269 enum ECORE_ROCE_EDPM_MODE { 3270 ECORE_ROCE_EDPM_MODE_ENABLE = 0, 3271 ECORE_ROCE_EDPM_MODE_FORCE_ON = 1, 3272 ECORE_ROCE_EDPM_MODE_DISABLE = 2, 3273 }; 3274 3275 static enum _ecore_status_t 3276 ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn, 3277 struct ecore_ptt *p_ptt) 3278 { 3279 struct ecore_rdma_pf_params *p_rdma_pf_params; 3280 u32 pwm_regsize, norm_regsize; 3281 u32 non_pwm_conn, min_addr_reg1; 3282 u32 db_bar_size, n_cpus = 1; 3283 u32 roce_edpm_mode; 3284 u32 pf_dems_shift; 3285 enum _ecore_status_t rc = ECORE_SUCCESS; 3286 u8 cond; 3287 3288 db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); 3289 if (ECORE_IS_CMT(p_hwfn->p_dev)) 3290 db_bar_size /= 2; 3291 3292 /* Calculate doorbell regions 3293 * ----------------------------------- 3294 * The doorbell BAR is made of two regions. The first is called normal 3295 * region and the second is called PWM region. In the normal region 3296 * each ICID has its own set of addresses so that writing to that 3297 * specific address identifies the ICID. In the Process Window Mode 3298 * region the ICID is given in the data written to the doorbell. The 3299 * above per PF register denotes the offset in the doorbell BAR in which 3300 * the PWM region begins. 3301 * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per 3302 * non-PWM connection. The calculation below computes the total non-PWM 3303 * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is 3304 * in units of 4,096 bytes. 3305 */ 3306 non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 3307 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 3308 OSAL_NULL) + 3309 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 3310 OSAL_NULL); 3311 norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, OSAL_PAGE_SIZE); 3312 min_addr_reg1 = norm_regsize / 4096; 3313 pwm_regsize = db_bar_size - norm_regsize; 3314 3315 /* Check that the normal and PWM sizes are valid */ 3316 if (db_bar_size < norm_regsize) { 3317 DP_ERR(p_hwfn->p_dev, 3318 "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", 3319 db_bar_size, norm_regsize); 3320 return ECORE_NORESOURCES; 3321 } 3322 if (pwm_regsize < ECORE_MIN_PWM_REGION) { 3323 DP_ERR(p_hwfn->p_dev, 3324 "PWM region size 0x%0x is too small. 
Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", 3325 pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size, 3326 norm_regsize); 3327 return ECORE_NORESOURCES; 3328 } 3329 3330 p_rdma_pf_params = &p_hwfn->pf_params.rdma_pf_params; 3331 3332 /* Calculate number of DPIs */ 3333 if (ECORE_IS_IWARP_PERSONALITY(p_hwfn)) 3334 p_rdma_pf_params->roce_edpm_mode = ECORE_ROCE_EDPM_MODE_DISABLE; 3335 3336 if (p_rdma_pf_params->roce_edpm_mode <= ECORE_ROCE_EDPM_MODE_DISABLE) { 3337 roce_edpm_mode = p_rdma_pf_params->roce_edpm_mode; 3338 } else { 3339 DP_ERR(p_hwfn->p_dev, 3340 "roce edpm mode was configured to an illegal value of %u. Resetting it to 0-Enable EDPM if BAR size is adequate\n", 3341 p_rdma_pf_params->roce_edpm_mode); 3342 roce_edpm_mode = 0; 3343 } 3344 3345 if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) || 3346 ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) { 3347 /* Either EDPM is mandatory, or we are attempting to allocate a 3348 * WID per CPU. 3349 */ 3350 n_cpus = OSAL_NUM_CPUS(); 3351 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 3352 } 3353 3354 cond = ((rc != ECORE_SUCCESS) && 3355 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) || 3356 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE); 3357 if (cond || p_hwfn->dcbx_no_edpm) { 3358 /* Either EDPM is disabled from user configuration, or it is 3359 * disabled via DCBx, or it is not mandatory and we failed to 3360 * allocated a WID per CPU. 3361 */ 3362 n_cpus = 1; 3363 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 3364 3365 #ifdef CONFIG_ECORE_ROCE 3366 /* If we entered this flow due to DCBX then the DPM register is 3367 * already configured. 3368 */ 3369 if (cond) 3370 ecore_rdma_dpm_bar(p_hwfn, p_ptt); 3371 #endif 3372 } 3373 3374 p_hwfn->wid_count = (u16)n_cpus; 3375 3376 /* Check return codes from above calls */ 3377 if (rc != ECORE_SUCCESS) { 3378 #ifndef LINUX_REMOVE 3379 DP_ERR(p_hwfn, 3380 "Failed to allocate enough DPIs. Allocated %d but the current minimum is set to %d. You can reduce this minimum down to %d via user configuration min_dpis or by disabling EDPM via user configuration roce_edpm_mode\n", 3381 p_hwfn->dpi_count, p_rdma_pf_params->min_dpis, 3382 ECORE_MIN_DPIS); 3383 #else 3384 DP_ERR(p_hwfn, 3385 "Failed to allocate enough DPIs. Allocated %d but the current minimum is set to %d. You can reduce this minimum down to %d via the module parameter min_rdma_dpis or by disabling EDPM by setting the module parameter roce_edpm to 2\n", 3386 p_hwfn->dpi_count, p_rdma_pf_params->min_dpis, 3387 ECORE_MIN_DPIS); 3388 #endif 3389 DP_ERR(p_hwfn, 3390 "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", 3391 norm_regsize, pwm_regsize, p_hwfn->dpi_size, 3392 p_hwfn->dpi_count, 3393 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 3394 "disabled" : "enabled", (unsigned long)OSAL_PAGE_SIZE); 3395 3396 return ECORE_NORESOURCES; 3397 } 3398 3399 DP_INFO(p_hwfn, 3400 "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", 3401 norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count, 3402 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 
3403 "disabled" : "enabled", (unsigned long)OSAL_PAGE_SIZE); 3404 3405 /* Update hwfn */ 3406 p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to 3407 * calculate the doorbell 3408 * address 3409 */ 3410 3411 /* Update registers */ 3412 /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ 3413 pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4); 3414 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 3415 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 3416 3417 return ECORE_SUCCESS; 3418 } 3419 3420 static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn, 3421 struct ecore_ptt *p_ptt, 3422 int hw_mode) 3423 { 3424 enum _ecore_status_t rc = ECORE_SUCCESS; 3425 3426 /* In CMT the gate should be cleared by the 2nd hwfn */ 3427 if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn)) 3428 STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0); 3429 3430 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, 3431 hw_mode); 3432 if (rc != ECORE_SUCCESS) 3433 return rc; 3434 3435 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0); 3436 3437 #ifndef ASIC_ONLY 3438 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) 3439 return ECORE_SUCCESS; 3440 3441 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 3442 if (ECORE_IS_AH(p_hwfn->p_dev)) 3443 return ECORE_SUCCESS; 3444 else if (ECORE_IS_BB(p_hwfn->p_dev)) 3445 ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id); 3446 else /* E5 */ 3447 ECORE_E5_MISSING_CODE; 3448 } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 3449 if (ECORE_IS_CMT(p_hwfn->p_dev)) { 3450 /* Activate OPTE in CMT */ 3451 u32 val; 3452 3453 val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV); 3454 val |= 0x10; 3455 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val); 3456 ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1); 3457 ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1); 3458 ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1); 3459 ecore_wr(p_hwfn, p_ptt, 3460 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1); 3461 ecore_wr(p_hwfn, p_ptt, 3462 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555); 3463 ecore_wr(p_hwfn, p_ptt, 3464 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4, 3465 0x55555555); 3466 } 3467 3468 ecore_emul_link_init(p_hwfn, p_ptt); 3469 } else { 3470 DP_INFO(p_hwfn->p_dev, "link is not being configured\n"); 3471 } 3472 #endif 3473 3474 return rc; 3475 } 3476 3477 static enum _ecore_status_t 3478 ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3479 int hw_mode, struct ecore_hw_init_params *p_params) 3480 { 3481 u8 rel_pf_id = p_hwfn->rel_pf_id; 3482 u32 prs_reg; 3483 enum _ecore_status_t rc = ECORE_SUCCESS; 3484 u16 ctrl; 3485 int pos; 3486 3487 if (p_hwfn->mcp_info) { 3488 struct ecore_mcp_function_info *p_info; 3489 3490 p_info = &p_hwfn->mcp_info->func_info; 3491 if (p_info->bandwidth_min) 3492 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 3493 3494 /* Update rate limit once we'll actually have a link */ 3495 p_hwfn->qm_info.pf_rl = 100000; 3496 } 3497 ecore_cxt_hw_init_pf(p_hwfn, p_ptt); 3498 3499 ecore_int_igu_init_rt(p_hwfn); 3500 3501 /* Set VLAN in NIG if needed */ 3502 if (hw_mode & (1 << MODE_MF_SD)) { 3503 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n"); 3504 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 3505 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 3506 p_hwfn->hw_info.ovlan); 3507 3508 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, 3509 "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); 3510 STORE_RT_REG(p_hwfn, 
NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, 3511 1); 3512 } 3513 3514 /* Enable classification by MAC if needed */ 3515 if (hw_mode & (1 << MODE_MF_SI)) { 3516 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n"); 3517 STORE_RT_REG(p_hwfn, 3518 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); 3519 } 3520 3521 /* Protocl Configuration - @@@TBD - should we set 0 otherwise?*/ 3522 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 3523 (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0); 3524 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 3525 (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0); 3526 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 3527 3528 /* perform debug configuration when chip is out of reset */ 3529 OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); 3530 3531 /* Sanity check before the PF init sequence that uses DMAE */ 3532 rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); 3533 if (rc) 3534 return rc; 3535 3536 /* PF Init sequence */ 3537 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); 3538 if (rc) 3539 return rc; 3540 3541 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ 3542 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); 3543 if (rc) 3544 return rc; 3545 3546 /* Pure runtime initializations - directly to the HW */ 3547 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 3548 3549 /* PCI relaxed ordering is generally beneficial for performance, 3550 * but can hurt performance or lead to instability on some setups. 3551 * If management FW is taking care of it go with that, otherwise 3552 * disable to be on the safe side. 3553 */ 3554 pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP); 3555 if (!pos) { 3556 DP_NOTICE(p_hwfn, true, 3557 "Failed to find the PCI Express Capability structure in the PCI config space\n"); 3558 return ECORE_IO; 3559 } 3560 3561 OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl); 3562 3563 if (p_params->pci_rlx_odr_mode == ECORE_ENABLE_RLX_ODR) { 3564 ctrl |= PCI_EXP_DEVCTL_RELAX_EN; 3565 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, 3566 pos + PCI_EXP_DEVCTL, ctrl); 3567 } else if (p_params->pci_rlx_odr_mode == ECORE_DISABLE_RLX_ODR) { 3568 ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN; 3569 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, 3570 pos + PCI_EXP_DEVCTL, ctrl); 3571 } else if (ecore_mcp_rlx_odr_supported(p_hwfn)) { 3572 DP_INFO(p_hwfn, "PCI relax ordering configured by MFW\n"); 3573 } else { 3574 ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN; 3575 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, 3576 pos + PCI_EXP_DEVCTL, ctrl); 3577 } 3578 3579 rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); 3580 if (rc != ECORE_SUCCESS) 3581 return rc; 3582 3583 /* Use the leading hwfn since in CMT only NIG #0 is operational */ 3584 if (IS_LEAD_HWFN(p_hwfn)) { 3585 rc = ecore_llh_hw_init_pf(p_hwfn, p_ptt, 3586 p_params->avoid_eng_affin); 3587 if (rc != ECORE_SUCCESS) 3588 return rc; 3589 } 3590 3591 if (p_params->b_hw_start) { 3592 /* enable interrupts */ 3593 rc = ecore_int_igu_enable(p_hwfn, p_ptt, p_params->int_mode); 3594 if (rc != ECORE_SUCCESS) 3595 return rc; 3596 3597 /* send function start command */ 3598 rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_params->p_tunn, 3599 p_params->allow_npar_tx_switch); 3600 if (rc) { 3601 DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n"); 3602 return rc; 3603 } 3604 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 3605 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 3606 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 
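		/* For an FCoE personality, additionally write (1 << 2) to
		 * PRS_REG_SEARCH_TAG1 and program the first-fragment
		 * packet-length statistics register below.
		 */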
3607 3608 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 3609 { 3610 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, 3611 (1 << 2)); 3612 ecore_wr(p_hwfn, p_ptt, 3613 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 3614 0x100); 3615 } 3616 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 3617 "PRS_REG_SEARCH registers after start PFn\n"); 3618 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); 3619 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 3620 "PRS_REG_SEARCH_TCP: %x\n", prs_reg); 3621 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); 3622 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 3623 "PRS_REG_SEARCH_UDP: %x\n", prs_reg); 3624 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); 3625 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 3626 "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); 3627 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); 3628 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 3629 "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); 3630 prs_reg = ecore_rd(p_hwfn, p_ptt, 3631 PRS_REG_SEARCH_TCP_FIRST_FRAG); 3632 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 3633 "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", 3634 prs_reg); 3635 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); 3636 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, 3637 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); 3638 } 3639 return ECORE_SUCCESS; 3640 } 3641 3642 enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn, 3643 struct ecore_ptt *p_ptt, 3644 bool b_enable) 3645 { 3646 u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0; 3647 3648 /* Configure the PF's internal FID_enable for master transactions */ 3649 ecore_wr(p_hwfn, p_ptt, 3650 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 3651 3652 /* Wait until value is set - try for 1 second every 50us */ 3653 for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 3654 val = ecore_rd(p_hwfn, p_ptt, 3655 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 3656 if (val == set_val) 3657 break; 3658 3659 OSAL_UDELAY(50); 3660 } 3661 3662 if (val != set_val) { 3663 DP_NOTICE(p_hwfn, true, 3664 "PFID_ENABLE_MASTER wasn't changed after a second\n"); 3665 return ECORE_UNKNOWN_ERROR; 3666 } 3667 3668 return ECORE_SUCCESS; 3669 } 3670 3671 static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, 3672 struct ecore_ptt *p_main_ptt) 3673 { 3674 /* Read shadow of current MFW mailbox */ 3675 ecore_mcp_read_mb(p_hwfn, p_main_ptt); 3676 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, 3677 p_hwfn->mcp_info->mfw_mb_cur, 3678 p_hwfn->mcp_info->mfw_mb_length); 3679 } 3680 3681 static enum _ecore_status_t 3682 ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn, 3683 struct ecore_load_req_params *p_load_req, 3684 struct ecore_drv_load_params *p_drv_load) 3685 { 3686 /* Make sure that if ecore-client didn't provide inputs, all the 3687 * expected defaults are indeed zero. 3688 */ 3689 OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0); 3690 OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0); 3691 OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0); 3692 3693 OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); 3694 3695 if (p_drv_load == OSAL_NULL) 3696 goto out; 3697 3698 p_load_req->drv_role = p_drv_load->is_crash_kernel ? 3699 ECORE_DRV_ROLE_KDUMP : 3700 ECORE_DRV_ROLE_OS; 3701 p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 3702 p_load_req->override_force_load = p_drv_load->override_force_load; 3703 3704 /* Old MFW versions don't support timeout values other than default and 3705 * none, so these values are replaced according to the fall-back action. 
3706 */ 3707 3708 if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT || 3709 p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE || 3710 (p_hwfn->mcp_info->capabilities & 3711 FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) { 3712 p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 3713 goto out; 3714 } 3715 3716 switch (p_drv_load->mfw_timeout_fallback) { 3717 case ECORE_TO_FALLBACK_TO_NONE: 3718 p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE; 3719 break; 3720 case ECORE_TO_FALLBACK_TO_DEFAULT: 3721 p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; 3722 break; 3723 case ECORE_TO_FALLBACK_FAIL_LOAD: 3724 DP_NOTICE(p_hwfn, false, 3725 "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n", 3726 p_drv_load->mfw_timeout_val, 3727 ECORE_LOAD_REQ_LOCK_TO_DEFAULT, 3728 ECORE_LOAD_REQ_LOCK_TO_NONE); 3729 return ECORE_ABORTED; 3730 } 3731 3732 DP_INFO(p_hwfn, 3733 "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n", 3734 p_drv_load->mfw_timeout_val, 3735 (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ? 3736 "default" : "none", 3737 p_load_req->timeout_val); 3738 out: 3739 return ECORE_SUCCESS; 3740 } 3741 3742 static enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, 3743 struct ecore_hw_init_params *p_params) 3744 { 3745 if (p_params->p_tunn) { 3746 ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 3747 ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 3748 } 3749 3750 p_hwfn->b_int_enabled = 1; 3751 3752 return ECORE_SUCCESS; 3753 } 3754 3755 static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn, 3756 struct ecore_ptt *p_ptt) 3757 { 3758 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 3759 1 << p_hwfn->abs_pf_id); 3760 } 3761 3762 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 3763 struct ecore_hw_init_params *p_params) 3764 { 3765 struct ecore_load_req_params load_req_params; 3766 u32 load_code, resp, param, drv_mb_param; 3767 bool b_default_mtu = true; 3768 struct ecore_hwfn *p_hwfn; 3769 enum _ecore_status_t rc = ECORE_SUCCESS, cancel_load; 3770 u16 ether_type; 3771 int i; 3772 3773 if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) { 3774 DP_NOTICE(p_dev, false, 3775 "MSI mode is not supported for CMT devices\n"); 3776 return ECORE_INVAL; 3777 } 3778 3779 if (IS_PF(p_dev)) { 3780 rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data); 3781 if (rc != ECORE_SUCCESS) 3782 return rc; 3783 } 3784 3785 for_each_hwfn(p_dev, i) { 3786 p_hwfn = &p_dev->hwfns[i]; 3787 3788 /* If management didn't provide a default, set one of our own */ 3789 if (!p_hwfn->hw_info.mtu) { 3790 p_hwfn->hw_info.mtu = 1500; 3791 b_default_mtu = false; 3792 } 3793 3794 if (IS_VF(p_dev)) { 3795 ecore_vf_start(p_hwfn, p_params); 3796 continue; 3797 } 3798 3799 rc = ecore_calc_hw_mode(p_hwfn); 3800 if (rc != ECORE_SUCCESS) 3801 return rc; 3802 3803 if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, 3804 &p_dev->mf_bits) || 3805 OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING, 3806 &p_dev->mf_bits))) { 3807 if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, 3808 &p_dev->mf_bits)) 3809 ether_type = ETH_P_8021Q; 3810 else 3811 ether_type = ETH_P_8021AD; 3812 STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, 3813 ether_type); 3814 STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, 3815 ether_type); 3816 STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, 3817 ether_type); 3818 STORE_RT_REG(p_hwfn, 
DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, 3819 ether_type); 3820 } 3821 3822 rc = ecore_fill_load_req_params(p_hwfn, &load_req_params, 3823 p_params->p_drv_load_params); 3824 if (rc != ECORE_SUCCESS) 3825 return rc; 3826 3827 rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 3828 &load_req_params); 3829 if (rc != ECORE_SUCCESS) { 3830 DP_NOTICE(p_hwfn, false, 3831 "Failed sending a LOAD_REQ command\n"); 3832 return rc; 3833 } 3834 3835 load_code = load_req_params.load_code; 3836 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3837 "Load request was sent. Load code: 0x%x\n", 3838 load_code); 3839 3840 ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); 3841 3842 /* CQ75580: 3843 * When coming back from hibernate state, the registers from 3844 * which shadow is read initially are not initialized. It turns 3845 * out that these registers get initialized during the call to 3846 * ecore_mcp_load_req request. So we need to reread them here 3847 * to get the proper shadow register value. 3848 * Note: This is a workaround for the missing MFW 3849 * initialization. It may be removed once the implementation 3850 * is done. 3851 */ 3852 ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 3853 3854 /* Only relevant for recovery: 3855 * Clear the indication after the LOAD_REQ command is responded 3856 * by the MFW. 3857 */ 3858 p_dev->recov_in_prog = false; 3859 3860 if (!qm_lock_ref_cnt) { 3861 #ifdef CONFIG_ECORE_LOCK_ALLOC 3862 rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock); 3863 if (rc) { 3864 DP_ERR(p_hwfn, "qm_lock allocation failed\n"); 3865 goto qm_lock_fail; 3866 } 3867 #endif 3868 OSAL_SPIN_LOCK_INIT(&qm_lock); 3869 } 3870 ++qm_lock_ref_cnt; 3871 3872 /* Clean up chip from previous driver if such remains exist. 3873 * This is not needed when the PF is the first one on the 3874 * engine, since afterwards we are going to init the FW. 3875 */ 3876 if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) { 3877 rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, 3878 p_hwfn->rel_pf_id, false); 3879 if (rc != ECORE_SUCCESS) { 3880 ecore_hw_err_notify(p_hwfn, 3881 ECORE_HW_ERR_RAMROD_FAIL); 3882 goto load_err; 3883 } 3884 } 3885 3886 /* Log and clear previous pglue_b errors if such exist */ 3887 ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); 3888 3889 /* Enable the PF's internal FID_enable in the PXP */ 3890 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, 3891 true); 3892 if (rc != ECORE_SUCCESS) 3893 goto load_err; 3894 3895 /* Clear the pglue_b was_error indication. 3896 * In E4 it must be done after the BME and the internal 3897 * FID_enable for the PF are set, since VDMs may cause the 3898 * indication to be set again. 
3899 */ 3900 ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); 3901 3902 switch (load_code) { 3903 case FW_MSG_CODE_DRV_LOAD_ENGINE: 3904 rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, 3905 p_hwfn->hw_info.hw_mode); 3906 if (rc != ECORE_SUCCESS) 3907 break; 3908 /* Fall into */ 3909 case FW_MSG_CODE_DRV_LOAD_PORT: 3910 rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, 3911 p_hwfn->hw_info.hw_mode); 3912 if (rc != ECORE_SUCCESS) 3913 break; 3914 /* Fall into */ 3915 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 3916 rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, 3917 p_hwfn->hw_info.hw_mode, 3918 p_params); 3919 break; 3920 default: 3921 DP_NOTICE(p_hwfn, false, 3922 "Unexpected load code [0x%08x]", load_code); 3923 rc = ECORE_NOTIMPL; 3924 break; 3925 } 3926 3927 if (rc != ECORE_SUCCESS) { 3928 DP_NOTICE(p_hwfn, false, 3929 "init phase failed for loadcode 0x%x (rc %d)\n", 3930 load_code, rc); 3931 goto load_err; 3932 } 3933 3934 rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); 3935 if (rc != ECORE_SUCCESS) { 3936 DP_NOTICE(p_hwfn, false, "Sending load done failed, rc = %d\n", rc); 3937 if (rc == ECORE_NOMEM) { 3938 DP_NOTICE(p_hwfn, false, 3939 "Sending load done was failed due to memory allocation failure\n"); 3940 goto load_err; 3941 } 3942 return rc; 3943 } 3944 3945 /* send DCBX attention request command */ 3946 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 3947 "sending phony dcbx set command to trigger DCBx attention handling\n"); 3948 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 3949 DRV_MSG_CODE_SET_DCBX, 3950 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp, 3951 ¶m); 3952 if (rc != ECORE_SUCCESS) { 3953 DP_NOTICE(p_hwfn, false, 3954 "Failed to send DCBX attention request\n"); 3955 return rc; 3956 } 3957 3958 p_hwfn->hw_init_done = true; 3959 } 3960 3961 if (IS_PF(p_dev)) { 3962 /* Get pre-negotiated values for stag, bandwidth etc. */ 3963 p_hwfn = ECORE_LEADING_HWFN(p_dev); 3964 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, 3965 "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); 3966 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 3967 DRV_MSG_CODE_GET_OEM_UPDATES, 3968 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET, 3969 &resp, ¶m); 3970 if (rc != ECORE_SUCCESS) 3971 DP_NOTICE(p_hwfn, false, 3972 "Failed to send GET_OEM_UPDATES attention request\n"); 3973 } 3974 3975 if (IS_PF(p_dev)) { 3976 p_hwfn = ECORE_LEADING_HWFN(p_dev); 3977 drv_mb_param = STORM_FW_VERSION; 3978 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 3979 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 3980 drv_mb_param, &resp, ¶m); 3981 if (rc != ECORE_SUCCESS) 3982 DP_INFO(p_hwfn, "Failed to update firmware version\n"); 3983 3984 if (!b_default_mtu) { 3985 rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, 3986 p_hwfn->hw_info.mtu); 3987 if (rc != ECORE_SUCCESS) 3988 DP_INFO(p_hwfn, "Failed to update default mtu\n"); 3989 } 3990 3991 rc = ecore_mcp_ov_update_driver_state(p_hwfn, 3992 p_hwfn->p_main_ptt, 3993 ECORE_OV_DRIVER_STATE_DISABLED); 3994 if (rc != ECORE_SUCCESS) 3995 DP_INFO(p_hwfn, "Failed to update driver state\n"); 3996 3997 rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, 3998 ECORE_OV_ESWITCH_VEB); 3999 if (rc != ECORE_SUCCESS) 4000 DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); 4001 } 4002 4003 return rc; 4004 4005 load_err: 4006 --qm_lock_ref_cnt; 4007 #ifdef CONFIG_ECORE_LOCK_ALLOC 4008 if (!qm_lock_ref_cnt) 4009 OSAL_SPIN_LOCK_DEALLOC(&qm_lock); 4010 qm_lock_fail: 4011 #endif 4012 /* The MFW load lock should be released also when initialization fails. 
4013 * If supported, use a cancel_load request to update the MFW with the 4014 * load failure. 4015 */ 4016 cancel_load = ecore_mcp_cancel_load_req(p_hwfn, p_hwfn->p_main_ptt); 4017 if (cancel_load == ECORE_NOTIMPL) { 4018 DP_INFO(p_hwfn, 4019 "Send a load done request instead of cancel load\n"); 4020 ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); 4021 } 4022 return rc; 4023 } 4024 4025 #define ECORE_HW_STOP_RETRY_LIMIT (10) 4026 static void ecore_hw_timers_stop(struct ecore_dev *p_dev, 4027 struct ecore_hwfn *p_hwfn, 4028 struct ecore_ptt *p_ptt) 4029 { 4030 int i; 4031 4032 /* close timers */ 4033 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 4034 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 4035 for (i = 0; 4036 i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog; 4037 i++) { 4038 if ((!ecore_rd(p_hwfn, p_ptt, 4039 TM_REG_PF_SCAN_ACTIVE_CONN)) && 4040 (!ecore_rd(p_hwfn, p_ptt, 4041 TM_REG_PF_SCAN_ACTIVE_TASK))) 4042 break; 4043 4044 /* Dependent on number of connection/tasks, possibly 4045 * 1ms sleep is required between polls 4046 */ 4047 OSAL_MSLEEP(1); 4048 } 4049 4050 if (i < ECORE_HW_STOP_RETRY_LIMIT) 4051 return; 4052 4053 DP_NOTICE(p_hwfn, false, 4054 "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 4055 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 4056 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 4057 } 4058 4059 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev) 4060 { 4061 int j; 4062 4063 for_each_hwfn(p_dev, j) { 4064 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 4065 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 4066 4067 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 4068 } 4069 } 4070 4071 static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn, 4072 struct ecore_ptt *p_ptt, 4073 u32 addr, u32 expected_val) 4074 { 4075 u32 val = ecore_rd(p_hwfn, p_ptt, addr); 4076 4077 if (val != expected_val) { 4078 DP_NOTICE(p_hwfn, true, 4079 "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n", 4080 addr, val, expected_val); 4081 return ECORE_UNKNOWN_ERROR; 4082 } 4083 4084 return ECORE_SUCCESS; 4085 } 4086 4087 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) 4088 { 4089 struct ecore_hwfn *p_hwfn; 4090 struct ecore_ptt *p_ptt; 4091 enum _ecore_status_t rc, rc2 = ECORE_SUCCESS; 4092 int j; 4093 4094 for_each_hwfn(p_dev, j) { 4095 p_hwfn = &p_dev->hwfns[j]; 4096 p_ptt = p_hwfn->p_main_ptt; 4097 4098 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n"); 4099 4100 if (IS_VF(p_dev)) { 4101 ecore_vf_pf_int_cleanup(p_hwfn); 4102 rc = ecore_vf_pf_reset(p_hwfn); 4103 if (rc != ECORE_SUCCESS) { 4104 DP_NOTICE(p_hwfn, true, 4105 "ecore_vf_pf_reset failed. rc = %d.\n", 4106 rc); 4107 rc2 = ECORE_UNKNOWN_ERROR; 4108 } 4109 continue; 4110 } 4111 4112 /* mark the hw as uninitialized... */ 4113 p_hwfn->hw_init_done = false; 4114 4115 /* Send unload command to MCP */ 4116 if (!p_dev->recov_in_prog) { 4117 rc = ecore_mcp_unload_req(p_hwfn, p_ptt); 4118 if (rc != ECORE_SUCCESS) { 4119 DP_NOTICE(p_hwfn, false, 4120 "Failed sending a UNLOAD_REQ command. rc = %d.\n", 4121 rc); 4122 rc2 = ECORE_UNKNOWN_ERROR; 4123 } 4124 } 4125 4126 OSAL_DPC_SYNC(p_hwfn); 4127 4128 /* After this point no MFW attentions are expected, e.g. prevent 4129 * race between pf stop and dcbx pf update. 4130 */ 4131 4132 rc = ecore_sp_pf_stop(p_hwfn); 4133 if (rc != ECORE_SUCCESS) { 4134 DP_NOTICE(p_hwfn, false, 4135 "Failed to close PF against FW [rc = %d]. 
Continue to stop HW to prevent illegal host access by the device.\n", 4136 rc); 4137 rc2 = ECORE_UNKNOWN_ERROR; 4138 } 4139 4140 /* perform debug action after PF stop was sent */ 4141 OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id); 4142 4143 /* close NIG to BRB gate */ 4144 ecore_wr(p_hwfn, p_ptt, 4145 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 4146 4147 /* close parser */ 4148 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 4149 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 4150 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 4151 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 4152 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 4153 4154 /* @@@TBD - clean transmission queues (5.b) */ 4155 /* @@@TBD - clean BTB (5.c) */ 4156 4157 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); 4158 4159 /* @@@TBD - verify DMAE requests are done (8) */ 4160 4161 /* Disable Attention Generation */ 4162 ecore_int_igu_disable_int(p_hwfn, p_ptt); 4163 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 4164 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 4165 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); 4166 rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt); 4167 if (rc != ECORE_SUCCESS) { 4168 DP_NOTICE(p_hwfn, true, 4169 "Failed to return IGU CAM to default\n"); 4170 rc2 = ECORE_UNKNOWN_ERROR; 4171 } 4172 4173 /* Need to wait 1ms to guarantee SBs are cleared */ 4174 OSAL_MSLEEP(1); 4175 4176 if (!p_dev->recov_in_prog) { 4177 ecore_verify_reg_val(p_hwfn, p_ptt, 4178 QM_REG_USG_CNT_PF_TX, 0); 4179 ecore_verify_reg_val(p_hwfn, p_ptt, 4180 QM_REG_USG_CNT_PF_OTHER, 0); 4181 /* @@@TBD - assert on incorrect xCFC values (10.b) */ 4182 } 4183 4184 /* Disable PF in HW blocks */ 4185 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); 4186 ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); 4187 4188 if (IS_LEAD_HWFN(p_hwfn) && 4189 OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) && 4190 !ECORE_IS_FCOE_PERSONALITY(p_hwfn)) 4191 ecore_llh_remove_mac_filter(p_dev, 0, 4192 p_hwfn->hw_info.hw_mac_addr); 4193 4194 --qm_lock_ref_cnt; 4195 #ifdef CONFIG_ECORE_LOCK_ALLOC 4196 if (!qm_lock_ref_cnt) 4197 OSAL_SPIN_LOCK_DEALLOC(&qm_lock); 4198 #endif 4199 4200 if (!p_dev->recov_in_prog) { 4201 rc = ecore_mcp_unload_done(p_hwfn, p_ptt); 4202 if (rc == ECORE_NOMEM) { 4203 DP_NOTICE(p_hwfn, false, 4204 "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n"); 4205 rc = ecore_mcp_unload_done(p_hwfn, p_ptt); 4206 } 4207 if (rc != ECORE_SUCCESS) { 4208 DP_NOTICE(p_hwfn, false, 4209 "Failed sending a UNLOAD_DONE command. rc = %d.\n", 4210 rc); 4211 rc2 = ECORE_UNKNOWN_ERROR; 4212 } 4213 } 4214 } /* hwfn loop */ 4215 4216 if (IS_PF(p_dev) && !p_dev->recov_in_prog) { 4217 p_hwfn = ECORE_LEADING_HWFN(p_dev); 4218 p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt; 4219 4220 /* Clear the PF's internal FID_enable in the PXP. 4221 * In CMT this should only be done for first hw-function, and 4222 * only after all transactions have stopped for all active 4223 * hw-functions. 4224 */ 4225 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, 4226 false); 4227 if (rc != ECORE_SUCCESS) { 4228 DP_NOTICE(p_hwfn, true, 4229 "ecore_pglueb_set_pfid_enable() failed. 
rc = %d.\n", 4230 rc); 4231 rc2 = ECORE_UNKNOWN_ERROR; 4232 } 4233 } 4234 4235 return rc2; 4236 } 4237 4238 enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev) 4239 { 4240 int j; 4241 4242 for_each_hwfn(p_dev, j) { 4243 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 4244 struct ecore_ptt *p_ptt; 4245 4246 if (IS_VF(p_dev)) { 4247 ecore_vf_pf_int_cleanup(p_hwfn); 4248 continue; 4249 } 4250 p_ptt = ecore_ptt_acquire(p_hwfn); 4251 if (!p_ptt) 4252 return ECORE_AGAIN; 4253 4254 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n"); 4255 4256 ecore_wr(p_hwfn, p_ptt, 4257 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 4258 4259 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 4260 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 4261 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 4262 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 4263 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 4264 4265 /* @@@TBD - clean transmission queues (5.b) */ 4266 /* @@@TBD - clean BTB (5.c) */ 4267 4268 /* @@@TBD - verify DMAE requests are done (8) */ 4269 4270 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 4271 /* Need to wait 1ms to guarantee SBs are cleared */ 4272 OSAL_MSLEEP(1); 4273 ecore_ptt_release(p_hwfn, p_ptt); 4274 } 4275 4276 return ECORE_SUCCESS; 4277 } 4278 4279 enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn) 4280 { 4281 struct ecore_ptt *p_ptt; 4282 4283 if (IS_VF(p_hwfn->p_dev)) 4284 return ECORE_SUCCESS; 4285 4286 p_ptt = ecore_ptt_acquire(p_hwfn); 4287 if (!p_ptt) 4288 return ECORE_AGAIN; 4289 4290 /* If roce info is allocated it means roce is initialized and should 4291 * be enabled in searcher. 4292 */ 4293 if (p_hwfn->p_rdma_info && 4294 p_hwfn->p_rdma_info->active && 4295 p_hwfn->b_rdma_enabled_in_prs) 4296 ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); 4297 4298 /* Re-open incoming traffic */ 4299 ecore_wr(p_hwfn, p_ptt, 4300 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 4301 ecore_ptt_release(p_hwfn, p_ptt); 4302 4303 return ECORE_SUCCESS; 4304 } 4305 4306 enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev, u32 reg_idx, 4307 u32 pattern_size, u32 crc) 4308 { 4309 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 4310 enum _ecore_status_t rc = ECORE_SUCCESS; 4311 struct ecore_ptt *p_ptt; 4312 u32 reg_len = 0; 4313 u32 reg_crc = 0; 4314 4315 p_ptt = ecore_ptt_acquire(p_hwfn); 4316 if (!p_ptt) 4317 return ECORE_AGAIN; 4318 4319 /* Get length and CRC register offsets */ 4320 switch (reg_idx) 4321 { 4322 case 0: 4323 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_LEN_BB : 4324 WOL_REG_ACPI_PAT_0_LEN_K2_E5; 4325 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB : 4326 WOL_REG_ACPI_PAT_0_CRC_K2_E5; 4327 break; 4328 case 1: 4329 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB : 4330 WOL_REG_ACPI_PAT_1_LEN_K2_E5; 4331 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_CRC_BB : 4332 WOL_REG_ACPI_PAT_1_CRC_K2_E5; 4333 break; 4334 case 2: 4335 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB : 4336 WOL_REG_ACPI_PAT_2_LEN_K2_E5; 4337 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_CRC_BB : 4338 WOL_REG_ACPI_PAT_2_CRC_K2_E5; 4339 break; 4340 case 3: 4341 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB : 4342 WOL_REG_ACPI_PAT_3_LEN_K2_E5; 4343 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB : 4344 WOL_REG_ACPI_PAT_3_CRC_K2_E5; 4345 break; 4346 case 4: 4347 reg_len = ECORE_IS_BB(p_dev) ? 
NIG_REG_ACPI_PAT_4_LEN_BB : 4348 WOL_REG_ACPI_PAT_4_LEN_K2_E5; 4349 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB : 4350 WOL_REG_ACPI_PAT_4_CRC_K2_E5; 4351 break; 4352 case 5: 4353 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB : 4354 WOL_REG_ACPI_PAT_5_LEN_K2_E5; 4355 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB : 4356 WOL_REG_ACPI_PAT_5_CRC_K2_E5; 4357 break; 4358 case 6: 4359 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB : 4360 WOL_REG_ACPI_PAT_6_LEN_K2_E5; 4361 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB : 4362 WOL_REG_ACPI_PAT_6_CRC_K2_E5; 4363 break; 4364 case 7: 4365 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB : 4366 WOL_REG_ACPI_PAT_7_LEN_K2_E5; 4367 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_CRC_BB : 4368 WOL_REG_ACPI_PAT_7_CRC_K2_E5; 4369 break; 4370 default: 4371 rc = ECORE_UNKNOWN_ERROR; 4372 goto out; 4373 } 4374 4375 /* Align pattern size to 4 */ 4376 while (pattern_size % 4) 4377 pattern_size++; 4378 4379 /* Write pattern length and crc value */ 4380 if (ECORE_IS_BB(p_dev)) { 4381 rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, reg_len, pattern_size); 4382 if (rc != ECORE_SUCCESS) { 4383 DP_NOTICE(p_hwfn, false, 4384 "Failed to update the ACPI pattern length\n"); 4385 return rc; 4386 } 4387 4388 rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, reg_crc, crc); 4389 if (rc != ECORE_SUCCESS) { 4390 DP_NOTICE(p_hwfn, false, 4391 "Failed to update the ACPI pattern crc value\n"); 4392 return rc; 4393 } 4394 } else { 4395 ecore_mcp_wol_wr(p_hwfn, p_ptt, reg_len, pattern_size); 4396 ecore_mcp_wol_wr(p_hwfn, p_ptt, reg_crc, crc); 4397 } 4398 4399 DP_INFO(p_dev, 4400 "ecore_set_nwuf_reg: idx[%d] reg_crc[0x%x=0x%08x] " 4401 "reg_len[0x%x=0x%x]\n", 4402 reg_idx, reg_crc, crc, reg_len, pattern_size); 4403 out: 4404 ecore_ptt_release(p_hwfn, p_ptt); 4405 4406 return rc; 4407 } 4408 4409 void ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn, 4410 struct ecore_ptt *p_ptt) 4411 { 4412 const u32 wake_buffer_clear_offset = 4413 ECORE_IS_BB(p_hwfn->p_dev) ? 4414 NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5; 4415 4416 DP_INFO(p_hwfn->p_dev, 4417 "ecore_wol_buffer_clear: reset " 4418 "REG_WAKE_BUFFER_CLEAR offset=0x%08x\n", 4419 wake_buffer_clear_offset); 4420 4421 if (ECORE_IS_BB(p_hwfn->p_dev)) { 4422 ecore_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 1); 4423 ecore_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 0); 4424 } else { 4425 ecore_mcp_wol_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 1); 4426 ecore_mcp_wol_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 0); 4427 } 4428 } 4429 4430 enum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn, 4431 struct ecore_ptt *p_ptt, 4432 struct ecore_wake_info *wake_info) 4433 { 4434 struct ecore_dev *p_dev = p_hwfn->p_dev; 4435 u32 *buf = OSAL_NULL; 4436 u32 i = 0; 4437 const u32 reg_wake_buffer_offest = 4438 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB : 4439 WOL_REG_WAKE_BUFFER_K2_E5; 4440 4441 wake_info->wk_info = ecore_rd(p_hwfn, p_ptt, 4442 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB : 4443 WOL_REG_WAKE_INFO_K2_E5); 4444 wake_info->wk_details = ecore_rd(p_hwfn, p_ptt, 4445 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_DETAILS_BB : 4446 WOL_REG_WAKE_DETAILS_K2_E5); 4447 wake_info->wk_pkt_len = ecore_rd(p_hwfn, p_ptt, 4448 ECORE_IS_BB(p_dev) ?
NIG_REG_WAKE_PKT_LEN_BB : 4449 WOL_REG_WAKE_PKT_LEN_K2_E5); 4450 4451 DP_INFO(p_dev, 4452 "ecore_get_wake_info: REG_WAKE_INFO=0x%08x " 4453 "REG_WAKE_DETAILS=0x%08x " 4454 "REG_WAKE_PKT_LEN=0x%08x\n", 4455 wake_info->wk_info, 4456 wake_info->wk_details, 4457 wake_info->wk_pkt_len); 4458 4459 buf = (u32 *)wake_info->wk_buffer; 4460 4461 for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++) 4462 { 4463 if ((i*sizeof(u32)) >= sizeof(wake_info->wk_buffer)) 4464 { 4465 DP_INFO(p_dev, 4466 "ecore_get_wake_info: i index to 0 high=%d\n", 4467 i); 4468 break; 4469 } 4470 buf[i] = ecore_rd(p_hwfn, p_ptt, 4471 reg_wake_buffer_offest + (i * sizeof(u32))); 4472 DP_INFO(p_dev, "ecore_get_wake_info: wk_buffer[%u]: 0x%08x\n", 4473 i, buf[i]); 4474 } 4475 4476 ecore_wol_buffer_clear(p_hwfn, p_ptt); 4477 4478 return ECORE_SUCCESS; 4479 } 4480 4481 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 4482 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn) 4483 { 4484 ecore_ptt_pool_free(p_hwfn); 4485 OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info); 4486 p_hwfn->hw_info.p_igu_info = OSAL_NULL; 4487 } 4488 4489 /* Setup bar access */ 4490 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn) 4491 { 4492 /* clear indirect access */ 4493 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) { 4494 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4495 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0); 4496 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4497 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0); 4498 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4499 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0); 4500 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4501 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0); 4502 } else { 4503 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4504 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); 4505 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4506 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); 4507 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4508 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); 4509 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4510 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); 4511 } 4512 4513 /* Clean previous pglue_b errors if such exist */ 4514 ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); 4515 4516 /* enable internal target-read */ 4517 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 4518 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 4519 } 4520 4521 static void get_function_id(struct ecore_hwfn *p_hwfn) 4522 { 4523 /* ME Register */ 4524 p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, 4525 PXP_PF_ME_OPAQUE_ADDR); 4526 4527 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 4528 4529 /* Bits 16-19 from the ME registers are the pf_num */ 4530 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; 4531 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 4532 PXP_CONCRETE_FID_PFID); 4533 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 4534 PXP_CONCRETE_FID_PORT); 4535 4536 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 4537 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", 4538 p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); 4539 } 4540 4541 void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) 4542 { 4543 u32 *feat_num = p_hwfn->hw_info.feat_num; 4544 struct ecore_sb_cnt_info sb_cnt; 4545 u32 non_l2_sbs = 0; 4546 4547 OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt)); 4548 ecore_int_get_num_sbs(p_hwfn, &sb_cnt); 4549 4550 #ifdef CONFIG_ECORE_ROCE 4551 /* Roce CNQ require each: 1 status block. 
1 CNQ, we divide the 4552 * status blocks equally between L2 / RoCE but with consideration as 4553 * to how many l2 queues / cnqs we have 4554 */ 4555 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { 4556 #ifndef __EXTRACT__LINUX__THROW__ 4557 u32 max_cnqs; 4558 #endif 4559 4560 feat_num[ECORE_RDMA_CNQ] = 4561 OSAL_MIN_T(u32, 4562 sb_cnt.cnt / 2, 4563 RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM)); 4564 4565 #ifndef __EXTRACT__LINUX__THROW__ 4566 /* Upper layer might require less */ 4567 max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs; 4568 if (max_cnqs) { 4569 if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE) 4570 max_cnqs = 0; 4571 feat_num[ECORE_RDMA_CNQ] = 4572 OSAL_MIN_T(u32, 4573 feat_num[ECORE_RDMA_CNQ], 4574 max_cnqs); 4575 } 4576 #endif 4577 4578 non_l2_sbs = feat_num[ECORE_RDMA_CNQ]; 4579 } 4580 #endif 4581 4582 /* L2 Queues require each: 1 status block. 1 L2 queue */ 4583 if (ECORE_IS_L2_PERSONALITY(p_hwfn)) { 4584 /* Start by allocating VF queues, then PF's */ 4585 feat_num[ECORE_VF_L2_QUE] = 4586 OSAL_MIN_T(u32, 4587 RESC_NUM(p_hwfn, ECORE_L2_QUEUE), 4588 sb_cnt.iov_cnt); 4589 feat_num[ECORE_PF_L2_QUE] = 4590 OSAL_MIN_T(u32, 4591 sb_cnt.cnt - non_l2_sbs, 4592 RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - 4593 FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE)); 4594 } 4595 4596 if (ECORE_IS_FCOE_PERSONALITY(p_hwfn)) 4597 feat_num[ECORE_FCOE_CQ] = 4598 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 4599 ECORE_CMDQS_CQS)); 4600 4601 if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) 4602 feat_num[ECORE_ISCSI_CQ] = 4603 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, 4604 ECORE_CMDQS_CQS)); 4605 4606 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 4607 "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n", 4608 (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), 4609 (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE), 4610 (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ), 4611 (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ), 4612 (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ), 4613 (int)sb_cnt.cnt); 4614 } 4615 4616 const char *ecore_hw_get_resc_name(enum ecore_resources res_id) 4617 { 4618 switch (res_id) { 4619 case ECORE_L2_QUEUE: 4620 return "L2_QUEUE"; 4621 case ECORE_VPORT: 4622 return "VPORT"; 4623 case ECORE_RSS_ENG: 4624 return "RSS_ENG"; 4625 case ECORE_PQ: 4626 return "PQ"; 4627 case ECORE_RL: 4628 return "RL"; 4629 case ECORE_MAC: 4630 return "MAC"; 4631 case ECORE_VLAN: 4632 return "VLAN"; 4633 case ECORE_RDMA_CNQ_RAM: 4634 return "RDMA_CNQ_RAM"; 4635 case ECORE_ILT: 4636 return "ILT"; 4637 case ECORE_LL2_QUEUE: 4638 return "LL2_QUEUE"; 4639 case ECORE_CMDQS_CQS: 4640 return "CMDQS_CQS"; 4641 case ECORE_RDMA_STATS_QUEUE: 4642 return "RDMA_STATS_QUEUE"; 4643 case ECORE_BDQ: 4644 return "BDQ"; 4645 case ECORE_SB: 4646 return "SB"; 4647 default: 4648 return "UNKNOWN_RESOURCE"; 4649 } 4650 } 4651 4652 static enum _ecore_status_t 4653 __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 4654 struct ecore_ptt *p_ptt, 4655 enum ecore_resources res_id, 4656 u32 resc_max_val, 4657 u32 *p_mcp_resp) 4658 { 4659 enum _ecore_status_t rc; 4660 4661 rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, 4662 resc_max_val, p_mcp_resp); 4663 if (rc != ECORE_SUCCESS) { 4664 DP_NOTICE(p_hwfn, false, 4665 "MFW response failure for a max value setting of resource %d [%s]\n", 4666 res_id, ecore_hw_get_resc_name(res_id)); 4667 return rc; 4668 } 4669 4670 if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 4671 DP_INFO(p_hwfn, 4672 "Failed to set the max value of resource %d [%s]. 
mcp_resp = 0x%08x.\n", 4673 res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp); 4674 4675 return ECORE_SUCCESS; 4676 } 4677 4678 static enum _ecore_status_t 4679 ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, 4680 struct ecore_ptt *p_ptt) 4681 { 4682 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 4683 u32 resc_max_val, mcp_resp; 4684 u8 res_id; 4685 enum _ecore_status_t rc; 4686 4687 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 4688 switch (res_id) { 4689 case ECORE_LL2_QUEUE: 4690 resc_max_val = MAX_NUM_LL2_RX_QUEUES; 4691 break; 4692 case ECORE_RDMA_CNQ_RAM: 4693 /* No need for a case for ECORE_CMDQS_CQS since 4694 * CNQ/CMDQS are the same resource. 4695 */ 4696 resc_max_val = NUM_OF_GLOBAL_QUEUES; 4697 break; 4698 case ECORE_RDMA_STATS_QUEUE: 4699 resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 4700 : RDMA_NUM_STATISTIC_COUNTERS_BB; 4701 break; 4702 case ECORE_BDQ: 4703 resc_max_val = BDQ_NUM_RESOURCES; 4704 break; 4705 default: 4706 continue; 4707 } 4708 4709 rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, 4710 resc_max_val, &mcp_resp); 4711 if (rc != ECORE_SUCCESS) 4712 return rc; 4713 4714 /* There's no point to continue to the next resource if the 4715 * command is not supported by the MFW. 4716 * We do continue if the command is supported but the resource 4717 * is unknown to the MFW. Such a resource will be later 4718 * configured with the default allocation values. 4719 */ 4720 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 4721 return ECORE_NOTIMPL; 4722 } 4723 4724 return ECORE_SUCCESS; 4725 } 4726 4727 static 4728 enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn, 4729 enum ecore_resources res_id, 4730 u32 *p_resc_num, u32 *p_resc_start) 4731 { 4732 u8 num_funcs = p_hwfn->num_funcs_on_engine; 4733 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 4734 4735 switch (res_id) { 4736 case ECORE_L2_QUEUE: 4737 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 : 4738 MAX_NUM_L2_QUEUES_BB) / num_funcs; 4739 break; 4740 case ECORE_VPORT: 4741 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 4742 MAX_NUM_VPORTS_BB) / num_funcs; 4743 break; 4744 case ECORE_RSS_ENG: 4745 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : 4746 ETH_RSS_ENGINE_NUM_BB) / num_funcs; 4747 break; 4748 case ECORE_PQ: 4749 *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 : 4750 MAX_QM_TX_QUEUES_BB) / num_funcs; 4751 *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ 4752 break; 4753 case ECORE_RL: 4754 *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; 4755 break; 4756 case ECORE_MAC: 4757 case ECORE_VLAN: 4758 /* Each VFC resource can accommodate both a MAC and a VLAN */ 4759 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 4760 break; 4761 case ECORE_ILT: 4762 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : 4763 PXP_NUM_ILT_RECORDS_BB) / num_funcs; 4764 break; 4765 case ECORE_LL2_QUEUE: 4766 *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 4767 break; 4768 case ECORE_RDMA_CNQ_RAM: 4769 case ECORE_CMDQS_CQS: 4770 /* CNQ/CMDQS are the same resource */ 4771 *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; 4772 break; 4773 case ECORE_RDMA_STATS_QUEUE: 4774 *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 : 4775 RDMA_NUM_STATISTIC_COUNTERS_BB) / 4776 num_funcs; 4777 break; 4778 case ECORE_BDQ: 4779 if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI && 4780 p_hwfn->hw_info.personality != ECORE_PCI_FCOE) 4781 *p_resc_num = 0; 4782 else 4783 *p_resc_num = 1; 4784 break; 4785 case ECORE_SB: 4786 /* Since we want its value to reflect whether MFW supports 4787 * the new scheme, have a default of 0. 
4788 */ 4789 *p_resc_num = 0; 4790 break; 4791 default: 4792 return ECORE_INVAL; 4793 } 4794 4795 switch (res_id) { 4796 case ECORE_BDQ: 4797 if (!*p_resc_num) 4798 *p_resc_start = 0; 4799 else if (p_hwfn->p_dev->num_ports_in_engine == 4) 4800 *p_resc_start = p_hwfn->port_id; 4801 else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) 4802 *p_resc_start = p_hwfn->port_id; 4803 else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) 4804 *p_resc_start = p_hwfn->port_id + 2; 4805 break; 4806 default: 4807 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 4808 break; 4809 } 4810 4811 return ECORE_SUCCESS; 4812 } 4813 4814 static enum _ecore_status_t 4815 __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id, 4816 bool drv_resc_alloc) 4817 { 4818 u32 dflt_resc_num = 0, dflt_resc_start = 0; 4819 u32 mcp_resp, *p_resc_num, *p_resc_start; 4820 enum _ecore_status_t rc; 4821 4822 p_resc_num = &RESC_NUM(p_hwfn, res_id); 4823 p_resc_start = &RESC_START(p_hwfn, res_id); 4824 4825 rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 4826 &dflt_resc_start); 4827 if (rc != ECORE_SUCCESS) { 4828 DP_ERR(p_hwfn, 4829 "Failed to get default amount for resource %d [%s]\n", 4830 res_id, ecore_hw_get_resc_name(res_id)); 4831 return rc; 4832 } 4833 4834 #ifndef ASIC_ONLY 4835 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 4836 *p_resc_num = dflt_resc_num; 4837 *p_resc_start = dflt_resc_start; 4838 goto out; 4839 } 4840 #endif 4841 4842 rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 4843 &mcp_resp, p_resc_num, p_resc_start); 4844 if (rc != ECORE_SUCCESS) { 4845 DP_NOTICE(p_hwfn, false, 4846 "MFW response failure for an allocation request for resource %d [%s]\n", 4847 res_id, ecore_hw_get_resc_name(res_id)); 4848 return rc; 4849 } 4850 4851 /* Default driver values are applied in the following cases: 4852 * - The resource allocation MB command is not supported by the MFW 4853 * - There is an internal error in the MFW while processing the request 4854 * - The resource ID is unknown to the MFW 4855 */ 4856 if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 4857 DP_INFO(p_hwfn, 4858 "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n", 4859 res_id, ecore_hw_get_resc_name(res_id), mcp_resp, 4860 dflt_resc_num, dflt_resc_start); 4861 *p_resc_num = dflt_resc_num; 4862 *p_resc_start = dflt_resc_start; 4863 goto out; 4864 } 4865 4866 if ((*p_resc_num != dflt_resc_num || 4867 *p_resc_start != dflt_resc_start) && 4868 res_id != ECORE_SB) { 4869 DP_INFO(p_hwfn, 4870 "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n", 4871 res_id, ecore_hw_get_resc_name(res_id), *p_resc_num, 4872 *p_resc_start, dflt_resc_num, dflt_resc_start, 4873 drv_resc_alloc ? " - Applying default values" : ""); 4874 if (drv_resc_alloc) { 4875 *p_resc_num = dflt_resc_num; 4876 *p_resc_start = dflt_resc_start; 4877 } 4878 } 4879 out: 4880 /* PQs have to divide by 8 [that's the HW granularity]. 4881 * Reduce number so it would fit. 
4882 */ 4883 if ((res_id == ECORE_PQ) && 4884 ((*p_resc_num % 8) || (*p_resc_start % 8))) { 4885 DP_INFO(p_hwfn, 4886 "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", 4887 *p_resc_num, (*p_resc_num) & ~0x7, 4888 *p_resc_start, (*p_resc_start) & ~0x7); 4889 *p_resc_num &= ~0x7; 4890 *p_resc_start &= ~0x7; 4891 } 4892 4893 return ECORE_SUCCESS; 4894 } 4895 4896 static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, 4897 bool drv_resc_alloc) 4898 { 4899 enum _ecore_status_t rc; 4900 u8 res_id; 4901 4902 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { 4903 rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc); 4904 if (rc != ECORE_SUCCESS) 4905 return rc; 4906 } 4907 4908 return ECORE_SUCCESS; 4909 } 4910 4911 static enum _ecore_status_t ecore_hw_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn, 4912 struct ecore_ptt *p_ptt) 4913 { 4914 u8 native_ppfid_idx = ECORE_PPFID_BY_PFID(p_hwfn); 4915 struct ecore_dev *p_dev = p_hwfn->p_dev; 4916 enum _ecore_status_t rc; 4917 4918 rc = ecore_mcp_get_ppfid_bitmap(p_hwfn, p_ptt); 4919 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) 4920 return rc; 4921 else if (rc == ECORE_NOTIMPL) 4922 p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx; 4923 4924 if (!(p_dev->ppfid_bitmap & (0x1 << native_ppfid_idx))) { 4925 DP_INFO(p_hwfn, 4926 "Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n", 4927 native_ppfid_idx, p_dev->ppfid_bitmap); 4928 p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx; 4929 } 4930 4931 return ECORE_SUCCESS; 4932 } 4933 4934 static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn, 4935 struct ecore_ptt *p_ptt, 4936 bool drv_resc_alloc) 4937 { 4938 struct ecore_resc_unlock_params resc_unlock_params; 4939 struct ecore_resc_lock_params resc_lock_params; 4940 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); 4941 u8 res_id; 4942 enum _ecore_status_t rc; 4943 #ifndef ASIC_ONLY 4944 u32 *resc_start = p_hwfn->hw_info.resc_start; 4945 u32 *resc_num = p_hwfn->hw_info.resc_num; 4946 /* For AH, an equal share of the ILT lines between the maximal number of 4947 * PFs is not enough for RoCE. This would be solved by the future 4948 * resource allocation scheme, but isn't currently present for 4949 * FPGA/emulation. For now we keep a number that is sufficient for RoCE 4950 * to work - the BB number of ILT lines divided by its max PFs number. 4951 */ 4952 u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB; 4953 #endif 4954 4955 /* Setting the max values of the soft resources and the following 4956 * resources allocation queries should be atomic. Since several PFs can 4957 * run in parallel - a resource lock is needed. 4958 * If either the resource lock or resource set value commands are not 4959 * supported - skip the max values setting, release the lock if 4960 * needed, and proceed to the queries. Other failures, including a 4961 * failure to acquire the lock, will cause this function to fail. 4962 * Old drivers that don't acquire the lock can run in parallel, and 4963 * their allocation values won't be affected by the updated max values. 4964 */ 4965 4966 ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, 4967 ECORE_RESC_LOCK_RESC_ALLOC, false); 4968 4969 /* Changes on top of the default values to accommodate parallel attempts 4970 * of several PFs.
4971 * [10 x 10 msec by default ==> 20 x 50 msec] 4972 */ 4973 resc_lock_params.retry_num *= 2; 4974 resc_lock_params.retry_interval *= 5; 4975 4976 rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); 4977 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 4978 return rc; 4979 } else if (rc == ECORE_NOTIMPL) { 4980 DP_INFO(p_hwfn, 4981 "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 4982 } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) { 4983 DP_NOTICE(p_hwfn, false, 4984 "Failed to acquire the resource lock for the resource allocation commands\n"); 4985 return ECORE_BUSY; 4986 } else { 4987 rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt); 4988 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { 4989 DP_NOTICE(p_hwfn, false, 4990 "Failed to set the max values of the soft resources\n"); 4991 goto unlock_and_exit; 4992 } else if (rc == ECORE_NOTIMPL) { 4993 DP_INFO(p_hwfn, 4994 "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 4995 rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, 4996 &resc_unlock_params); 4997 if (rc != ECORE_SUCCESS) 4998 DP_INFO(p_hwfn, 4999 "Failed to release the resource lock for the resource allocation commands\n"); 5000 } 5001 } 5002 5003 rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc); 5004 if (rc != ECORE_SUCCESS) 5005 goto unlock_and_exit; 5006 5007 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 5008 rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, 5009 &resc_unlock_params); 5010 if (rc != ECORE_SUCCESS) 5011 DP_INFO(p_hwfn, 5012 "Failed to release the resource lock for the resource allocation commands\n"); 5013 } 5014 5015 /* PPFID bitmap */ 5016 if (IS_LEAD_HWFN(p_hwfn)) { 5017 rc = ecore_hw_get_ppfid_bitmap(p_hwfn, p_ptt); 5018 if (rc != ECORE_SUCCESS) 5019 return rc; 5020 } 5021 5022 #ifndef ASIC_ONLY 5023 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 5024 /* Reduced build contains less PQs */ 5025 if (!(p_hwfn->p_dev->b_is_emul_full)) { 5026 resc_num[ECORE_PQ] = 32; 5027 resc_start[ECORE_PQ] = resc_num[ECORE_PQ] * 5028 p_hwfn->enabled_func_idx; 5029 } 5030 5031 /* For AH emulation, since we have a possible maximal number of 5032 * 16 enabled PFs, in case there are not enough ILT lines - 5033 * allocate only first PF as RoCE and have all the other ETH 5034 * only with less ILT lines. 
5035 */ 5036 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full) 5037 resc_num[ECORE_ILT] = OSAL_MAX_T(u32, 5038 resc_num[ECORE_ILT], 5039 roce_min_ilt_lines); 5040 } 5041 5042 /* Correct the common ILT calculation if PF0 has more */ 5043 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && 5044 p_hwfn->p_dev->b_is_emul_full && 5045 p_hwfn->rel_pf_id && 5046 resc_num[ECORE_ILT] < roce_min_ilt_lines) 5047 resc_start[ECORE_ILT] += roce_min_ilt_lines - 5048 resc_num[ECORE_ILT]; 5049 #endif 5050 5051 /* Sanity for ILT */ 5052 if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 5053 (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 5054 DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n", 5055 RESC_START(p_hwfn, ECORE_ILT), 5056 RESC_END(p_hwfn, ECORE_ILT) - 1); 5057 return ECORE_INVAL; 5058 } 5059 5060 /* This will also learn the number of SBs from MFW */ 5061 if (ecore_int_igu_reset_cam(p_hwfn, p_ptt)) 5062 return ECORE_INVAL; 5063 5064 ecore_hw_set_feat(p_hwfn); 5065 5066 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, 5067 "The numbers for each resource are:\n"); 5068 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) 5069 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n", 5070 ecore_hw_get_resc_name(res_id), 5071 RESC_NUM(p_hwfn, res_id), 5072 RESC_START(p_hwfn, res_id)); 5073 5074 return ECORE_SUCCESS; 5075 5076 unlock_and_exit: 5077 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) 5078 ecore_mcp_resc_unlock(p_hwfn, p_ptt, 5079 &resc_unlock_params); 5080 return rc; 5081 } 5082 5083 static enum _ecore_status_t 5084 ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, 5085 struct ecore_ptt *p_ptt, 5086 struct ecore_hw_prepare_params *p_params) 5087 { 5088 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; 5089 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; 5090 struct ecore_mcp_link_capabilities *p_caps; 5091 struct ecore_mcp_link_params *link; 5092 enum _ecore_status_t rc; 5093 u32 dcbx_mode; /* __LINUX__THROW__ */ 5094 5095 /* Read global nvm_cfg address */ 5096 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 5097 5098 /* Verify MCP has initialized it */ 5099 if (!nvm_cfg_addr) { 5100 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); 5101 if (p_params->b_relaxed_probe) 5102 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM; 5103 return ECORE_INVAL; 5104 } 5105 5106 /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ 5107 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 5108 5109 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 5110 OFFSETOF(struct nvm_cfg1, glob) + 5111 OFFSETOF(struct nvm_cfg1_glob, core_cfg); 5112 5113 core_cfg = ecore_rd(p_hwfn, p_ptt, addr); 5114 5115 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 5116 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 5117 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: 5118 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G; 5119 break; 5120 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 5121 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G; 5122 break; 5123 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: 5124 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G; 5125 break; 5126 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: 5127 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F; 5128 break; 5129 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: 5130 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E; 5131 break; 5132 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: 5133 
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G; 5134 break; 5135 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: 5136 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G; 5137 break; 5138 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 5139 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G; 5140 break; 5141 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: 5142 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G; 5143 break; 5144 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 5145 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G; 5146 break; 5147 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: 5148 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G; 5149 break; 5150 default: 5151 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n", 5152 core_cfg); 5153 break; 5154 } 5155 5156 #ifndef __EXTRACT__LINUX__THROW__ 5157 /* Read DCBX configuration */ 5158 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 5159 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 5160 dcbx_mode = ecore_rd(p_hwfn, p_ptt, 5161 port_cfg_addr + 5162 OFFSETOF(struct nvm_cfg1_port, generic_cont0)); 5163 dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK) 5164 >> NVM_CFG1_PORT_DCBX_MODE_OFFSET; 5165 switch (dcbx_mode) { 5166 case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC: 5167 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC; 5168 break; 5169 case NVM_CFG1_PORT_DCBX_MODE_CEE: 5170 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE; 5171 break; 5172 case NVM_CFG1_PORT_DCBX_MODE_IEEE: 5173 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE; 5174 break; 5175 default: 5176 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED; 5177 } 5178 #endif 5179 5180 /* Read default link configuration */ 5181 link = &p_hwfn->mcp_info->link_input; 5182 p_caps = &p_hwfn->mcp_info->link_capabilities; 5183 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 5184 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 5185 link_temp = ecore_rd(p_hwfn, p_ptt, 5186 port_cfg_addr + 5187 OFFSETOF(struct nvm_cfg1_port, speed_cap_mask)); 5188 link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; 5189 link->speed.advertised_speeds = link_temp; 5190 p_caps->speed_capabilities = link->speed.advertised_speeds; 5191 5192 link_temp = ecore_rd(p_hwfn, p_ptt, 5193 port_cfg_addr + 5194 OFFSETOF(struct nvm_cfg1_port, link_settings)); 5195 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> 5196 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { 5197 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: 5198 link->speed.autoneg = true; 5199 break; 5200 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: 5201 link->speed.forced_speed = 1000; 5202 break; 5203 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: 5204 link->speed.forced_speed = 10000; 5205 break; 5206 case NVM_CFG1_PORT_DRV_LINK_SPEED_20G: 5207 link->speed.forced_speed = 20000; 5208 break; 5209 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: 5210 link->speed.forced_speed = 25000; 5211 break; 5212 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: 5213 link->speed.forced_speed = 40000; 5214 break; 5215 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 5216 link->speed.forced_speed = 50000; 5217 break; 5218 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: 5219 link->speed.forced_speed = 100000; 5220 break; 5221 default: 5222 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", 5223 link_temp); 5224 } 5225 5226 p_caps->default_speed = link->speed.forced_speed; /* __LINUX__THROW__ */ 5227 p_caps->default_speed_autoneg = link->speed.autoneg; 5228 5229 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; 5230 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; 5231 
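/* Translate the NVM driver flow-control bits (masked and shifted above)
 * into the pause autoneg / forced-Rx / forced-Tx link settings below.
 */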
link->pause.autoneg = !!(link_temp & 5232 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); 5233 link->pause.forced_rx = !!(link_temp & 5234 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); 5235 link->pause.forced_tx = !!(link_temp & 5236 NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); 5237 link->loopback_mode = 0; 5238 5239 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { 5240 link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr + 5241 OFFSETOF(struct nvm_cfg1_port, ext_phy)); 5242 link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; 5243 link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; 5244 p_caps->default_eee = ECORE_MCP_EEE_ENABLED; 5245 link->eee.enable = true; 5246 switch (link_temp) { 5247 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: 5248 p_caps->default_eee = ECORE_MCP_EEE_DISABLED; 5249 link->eee.enable = false; 5250 break; 5251 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: 5252 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; 5253 break; 5254 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: 5255 p_caps->eee_lpi_timer = 5256 EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; 5257 break; 5258 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: 5259 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; 5260 break; 5261 } 5262 5263 link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; 5264 link->eee.tx_lpi_enable = link->eee.enable; 5265 link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV; 5266 } else { 5267 p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED; 5268 } 5269 5270 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 5271 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", 5272 link->speed.forced_speed, link->speed.advertised_speeds, 5273 link->speed.autoneg, link->pause.autoneg, 5274 p_caps->default_eee, p_caps->eee_lpi_timer); 5275 5276 /* Read Multi-function information from shmem */ 5277 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 5278 OFFSETOF(struct nvm_cfg1, glob) + 5279 OFFSETOF(struct nvm_cfg1_glob, generic_cont0); 5280 5281 generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr); 5282 5283 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 5284 NVM_CFG1_GLOB_MF_MODE_OFFSET; 5285 5286 switch (mf_mode) { 5287 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 5288 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS; 5289 break; 5290 case NVM_CFG1_GLOB_MF_MODE_UFP: 5291 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | 5292 1 << ECORE_MF_LLH_PROTO_CLSS | 5293 1 << ECORE_MF_UFP_SPECIFIC | 5294 1 << ECORE_MF_8021Q_TAGGING; 5295 break; 5296 case NVM_CFG1_GLOB_MF_MODE_BD: 5297 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | 5298 1 << ECORE_MF_LLH_PROTO_CLSS | 5299 1 << ECORE_MF_8021AD_TAGGING; 5300 break; 5301 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 5302 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | 5303 1 << ECORE_MF_LLH_PROTO_CLSS | 5304 1 << ECORE_MF_LL2_NON_UNICAST | 5305 1 << ECORE_MF_INTER_PF_SWITCH | 5306 1 << ECORE_MF_DISABLE_ARFS; 5307 break; 5308 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 5309 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | 5310 1 << ECORE_MF_LLH_PROTO_CLSS | 5311 1 << ECORE_MF_LL2_NON_UNICAST; 5312 if (ECORE_IS_BB(p_hwfn->p_dev)) 5313 p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF; 5314 break; 5315 } 5316 DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", 5317 p_hwfn->p_dev->mf_bits); 5318 5319 if (ECORE_IS_CMT(p_hwfn->p_dev)) 5320 p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS); 5321 5322 #ifndef __EXTRACT__LINUX__THROW__ 5323 /* It's funny since we have another switch, but it's easier 5324 * to 
throw this away in linux this way. Long term, it might be 5325 * better to have getters for needed ECORE_MF_* fields, 5326 * convert client code and eliminate this. 5327 */ 5328 switch (mf_mode) { 5329 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 5330 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; 5331 break; 5332 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 5333 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR; 5334 break; 5335 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 5336 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT; 5337 break; 5338 case NVM_CFG1_GLOB_MF_MODE_UFP: 5339 p_hwfn->p_dev->mf_mode = ECORE_MF_UFP; 5340 break; 5341 } 5342 #endif 5343 5344 /* Read device capabilities information from shmem */ 5345 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 5346 OFFSETOF(struct nvm_cfg1, glob) + 5347 OFFSETOF(struct nvm_cfg1_glob, device_capabilities); 5348 5349 device_capabilities = ecore_rd(p_hwfn, p_ptt, addr); 5350 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 5351 OSAL_SET_BIT(ECORE_DEV_CAP_ETH, 5352 &p_hwfn->hw_info.device_capabilities); 5353 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) 5354 OSAL_SET_BIT(ECORE_DEV_CAP_FCOE, 5355 &p_hwfn->hw_info.device_capabilities); 5356 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) 5357 OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI, 5358 &p_hwfn->hw_info.device_capabilities); 5359 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) 5360 OSAL_SET_BIT(ECORE_DEV_CAP_ROCE, 5361 &p_hwfn->hw_info.device_capabilities); 5362 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP) 5363 OSAL_SET_BIT(ECORE_DEV_CAP_IWARP, 5364 &p_hwfn->hw_info.device_capabilities); 5365 5366 rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 5367 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { 5368 rc = ECORE_SUCCESS; 5369 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; 5370 } 5371 5372 return rc; 5373 } 5374 5375 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn, 5376 struct ecore_ptt *p_ptt) 5377 { 5378 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 5379 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 5380 struct ecore_dev *p_dev = p_hwfn->p_dev; 5381 5382 num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; 5383 5384 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 5385 * in the other bits are selected. 5386 * Bits 1-15 are for functions 1-15, respectively, and their value is 5387 * '0' only for enabled functions (function 0 always exists and is 5388 * enabled). 5389 * In case of CMT in BB, only the "even" functions are enabled, and thus 5390 * the number of functions for both hwfns is learnt from the same bits.
 */
	reg_function_hide = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		if (ECORE_IS_BB(p_dev)) {
			if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
				num_funcs = 0;
				eng_mask = 0xaaaa;
			} else {
				num_funcs = 1;
				eng_mask = 0x5554;
			}
		} else {
			num_funcs = 1;
			eng_mask = 0xfffe;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}

		/* Get the PF index within the enabled functions */
		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
		tmp = reg_function_hide & eng_mask & low_pfs_mask;
		while (tmp) {
			if (tmp & 0x1)
				enabled_func_idx--;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;
	p_hwfn->enabled_func_idx = enabled_func_idx;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n");
		p_hwfn->num_funcs_on_engine = 4;
	}
#endif

	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}

static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 port_mode;

#ifndef ASIC_ONLY
	/* Read the port mode */
	if (CHIP_REV_IS_FPGA(p_dev))
		port_mode = 4;
	else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
		/* In CMT on emulation, assume 1 port */
		port_mode = 1;
	else
#endif
	port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);

	if (port_mode < 3) {
		p_dev->num_ports_in_engine = 1;
	} else if (port_mode <= 5) {
		p_dev->num_ports_in_engine = 2;
	} else {
		DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
			  port_mode);

		/* Default num_ports_in_engine to something */
		p_dev->num_ports_in_engine = 1;
	}
}

static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 port;
	int i;

	p_dev->num_ports_in_engine = 0;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_dev)) {
		port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
		switch ((port & 0xf000) >> 12) {
		case 1:
			p_dev->num_ports_in_engine = 1;
			break;
		case 3:
			p_dev->num_ports_in_engine = 2;
			break;
		case 0xf:
			p_dev->num_ports_in_engine = 4;
			break;
		default:
			DP_NOTICE(p_hwfn, false,
				  "Unknown port mode in ECO_RESERVED %08x\n",
				  port);
		}
	} else
#endif
	for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
		port = ecore_rd(p_hwfn, p_ptt,
				CNIG_REG_NIG_PORT0_CONF_K2_E5 + (i * 4));
		if (port & 1)
			p_dev->num_ports_in_engine++;
	}

	if (!p_dev->num_ports_in_engine) {
		DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");

		/* Default num_ports_in_engine to something */
		p_dev->num_ports_in_engine = 1;
	}
}

static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
5519 { 5520 struct ecore_dev *p_dev = p_hwfn->p_dev; 5521 5522 /* Determine the number of ports per engine */ 5523 if (ECORE_IS_BB(p_dev)) 5524 ecore_hw_info_port_num_bb(p_hwfn, p_ptt); 5525 else 5526 ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt); 5527 5528 /* Get the total number of ports of the device */ 5529 if (ECORE_IS_CMT(p_dev)) { 5530 /* In CMT there is always only one port */ 5531 p_dev->num_ports = 1; 5532 #ifndef ASIC_ONLY 5533 } else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) { 5534 p_dev->num_ports = p_dev->num_ports_in_engine * 5535 ecore_device_num_engines(p_dev); 5536 #endif 5537 } else { 5538 u32 addr, global_offsize, global_addr; 5539 5540 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 5541 PUBLIC_GLOBAL); 5542 global_offsize = ecore_rd(p_hwfn, p_ptt, addr); 5543 global_addr = SECTION_ADDR(global_offsize, 0); 5544 addr = global_addr + OFFSETOF(struct public_global, max_ports); 5545 p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr); 5546 } 5547 } 5548 5549 static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn, 5550 struct ecore_ptt *p_ptt) 5551 { 5552 struct ecore_mcp_link_capabilities *p_caps; 5553 u32 eee_status; 5554 5555 p_caps = &p_hwfn->mcp_info->link_capabilities; 5556 if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED) 5557 return; 5558 5559 p_caps->eee_speed_caps = 0; 5560 eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 5561 OFFSETOF(struct public_port, eee_status)); 5562 eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> 5563 EEE_SUPPORTED_SPEED_OFFSET; 5564 if (eee_status & EEE_1G_SUPPORTED) 5565 p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV; 5566 if (eee_status & EEE_10G_ADV) 5567 p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV; 5568 } 5569 5570 static enum _ecore_status_t 5571 ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 5572 enum ecore_pci_personality personality, 5573 struct ecore_hw_prepare_params *p_params) 5574 { 5575 bool drv_resc_alloc = p_params->drv_resc_alloc; 5576 enum _ecore_status_t rc; 5577 5578 /* Since all information is common, only first hwfns should do this */ 5579 if (IS_LEAD_HWFN(p_hwfn)) { 5580 rc = ecore_iov_hw_info(p_hwfn); 5581 if (rc != ECORE_SUCCESS) { 5582 if (p_params->b_relaxed_probe) 5583 p_params->p_relaxed_res = 5584 ECORE_HW_PREPARE_BAD_IOV; 5585 else 5586 return rc; 5587 } 5588 } 5589 5590 if (IS_LEAD_HWFN(p_hwfn)) 5591 ecore_hw_info_port_num(p_hwfn, p_ptt); 5592 5593 ecore_mcp_get_capabilities(p_hwfn, p_ptt); 5594 5595 #ifndef ASIC_ONLY 5596 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) { 5597 #endif 5598 rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params); 5599 if (rc != ECORE_SUCCESS) 5600 return rc; 5601 #ifndef ASIC_ONLY 5602 } 5603 #endif 5604 5605 rc = ecore_int_igu_read_cam(p_hwfn, p_ptt); 5606 if (rc != ECORE_SUCCESS) { 5607 if (p_params->b_relaxed_probe) 5608 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU; 5609 else 5610 return rc; 5611 } 5612 5613 #ifndef ASIC_ONLY 5614 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) { 5615 #endif 5616 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, 5617 p_hwfn->mcp_info->func_info.mac, ETH_ALEN); 5618 #ifndef ASIC_ONLY 5619 } else { 5620 static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6}; 5621 5622 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN); 5623 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id; 5624 } 5625 #endif 5626 5627 if (ecore_mcp_is_init(p_hwfn)) { 5628 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET) 5629 p_hwfn->hw_info.ovlan = 5630 
p_hwfn->mcp_info->func_info.ovlan;

		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);

		ecore_mcp_get_eee_caps(p_hwfn, p_ptt);

		ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
	}

	if (personality != ECORE_PCI_DEFAULT) {
		p_hwfn->hw_info.personality = personality;
	} else if (ecore_mcp_is_init(p_hwfn)) {
		enum ecore_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

#ifndef ASIC_ONLY
	/* To overcome ILT lack for emulation, at least until we'll have
	 * a definite answer from the system about it, allow only PF0 to be RoCE.
	 */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
		if (!p_hwfn->rel_pf_id)
			p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
		else
			p_hwfn->hw_info.personality = ECORE_PCI_ETH;
	}
#endif

	/* Although in BB some constellations may support more than 4 TCs,
	 * using them can result in a performance penalty in some cases. 4
	 * represents a good tradeoff between performance and flexibility.
	 */
	p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;

	/* Start out with a single active TC. This can be increased either
	 * by DCBx negotiation or by the upper layer driver.
	 */
	p_hwfn->hw_info.num_active_tc = 1;

	ecore_get_num_funcs(p_hwfn, p_ptt);

	if (ecore_mcp_is_init(p_hwfn))
		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;

	/* In case of forcing the driver's default resource allocation, calling
	 * ecore_hw_get_resc() should come after initializing the personality
	 * and after getting the number of functions, since the calculation of
	 * the resources/features depends on them.
	 * This order is not harmful if not forcing.
	 */
	rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
	if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
		rc = ECORE_SUCCESS;
		p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
	}

	return rc;
}

#define ECORE_MAX_DEVICE_NAME_LEN (8)

void ecore_get_dev_name(struct ecore_dev *p_dev, u8 *name, u8 max_chars)
{
	u8 n;

	n = OSAL_MIN_T(u8, max_chars, ECORE_MAX_DEVICE_NAME_LEN);
	OSAL_SNPRINTF(name, n, "%s %c%d", ECORE_IS_BB(p_dev) ?
"BB" : "AH", 5699 'A' + p_dev->chip_rev, (int)p_dev->chip_metal); 5700 } 5701 5702 static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn, 5703 struct ecore_ptt *p_ptt) 5704 { 5705 struct ecore_dev *p_dev = p_hwfn->p_dev; 5706 u16 device_id_mask; 5707 u32 tmp; 5708 5709 /* Read Vendor Id / Device Id */ 5710 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET, 5711 &p_dev->vendor_id); 5712 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET, 5713 &p_dev->device_id); 5714 5715 /* Determine type */ 5716 device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK; 5717 switch (device_id_mask) { 5718 case ECORE_DEV_ID_MASK_BB: 5719 p_dev->type = ECORE_DEV_TYPE_BB; 5720 break; 5721 case ECORE_DEV_ID_MASK_AH: 5722 p_dev->type = ECORE_DEV_TYPE_AH; 5723 break; 5724 case ECORE_DEV_ID_MASK_E5: 5725 p_dev->type = ECORE_DEV_TYPE_E5; 5726 break; 5727 default: 5728 DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n", 5729 p_dev->device_id); 5730 return ECORE_ABORTED; 5731 } 5732 5733 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); 5734 p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM); 5735 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); 5736 p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV); 5737 5738 /* Learn number of HW-functions */ 5739 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); 5740 5741 if (tmp & (1 << p_hwfn->rel_pf_id)) { 5742 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n"); 5743 p_dev->num_hwfns = 2; 5744 } else { 5745 p_dev->num_hwfns = 1; 5746 } 5747 5748 #ifndef ASIC_ONLY 5749 if (CHIP_REV_IS_EMUL(p_dev)) { 5750 /* For some reason we have problems with this register 5751 * in B0 emulation; Simply assume no CMT 5752 */ 5753 DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n"); 5754 p_dev->num_hwfns = 1; 5755 } 5756 #endif 5757 5758 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG); 5759 p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID); 5760 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); 5761 p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL); 5762 5763 DP_INFO(p_dev->hwfns, 5764 "Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n", 5765 ECORE_IS_BB(p_dev) ? 
"BB" : "AH", 5766 'A' + p_dev->chip_rev, (int)p_dev->chip_metal, 5767 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, 5768 p_dev->chip_metal); 5769 5770 if (ECORE_IS_BB_A0(p_dev)) { 5771 DP_NOTICE(p_dev->hwfns, false, 5772 "The chip type/rev (BB A0) is not supported!\n"); 5773 return ECORE_ABORTED; 5774 } 5775 5776 #ifndef ASIC_ONLY 5777 if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev)) 5778 ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1); 5779 5780 if (CHIP_REV_IS_EMUL(p_dev)) { 5781 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); 5782 if (tmp & (1 << 29)) { 5783 DP_NOTICE(p_hwfn, false, "Emulation: Running on a FULL build\n"); 5784 p_dev->b_is_emul_full = true; 5785 } else { 5786 DP_NOTICE(p_hwfn, false, "Emulation: Running on a REDUCED build\n"); 5787 } 5788 } 5789 #endif 5790 5791 return ECORE_SUCCESS; 5792 } 5793 5794 #ifndef LINUX_REMOVE 5795 void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev) 5796 { 5797 int j; 5798 5799 if (IS_VF(p_dev)) 5800 return; 5801 5802 for_each_hwfn(p_dev, j) { 5803 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 5804 5805 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n"); 5806 5807 p_hwfn->hw_init_done = false; 5808 5809 ecore_ptt_invalidate(p_hwfn); 5810 } 5811 } 5812 5813 void ecore_hw_hibernate_resume(struct ecore_dev *p_dev) 5814 { 5815 int j = 0; 5816 5817 if (IS_VF(p_dev)) 5818 return; 5819 5820 for_each_hwfn(p_dev, j) { 5821 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; 5822 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); 5823 5824 ecore_hw_hwfn_prepare(p_hwfn); 5825 5826 if (!p_ptt) 5827 DP_NOTICE(p_hwfn, false, "ptt acquire failed\n"); 5828 else { 5829 ecore_load_mcp_offsets(p_hwfn, p_ptt); 5830 ecore_ptt_release(p_hwfn, p_ptt); 5831 } 5832 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n"); 5833 } 5834 } 5835 5836 #endif 5837 5838 static enum _ecore_status_t 5839 ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview, 5840 void OSAL_IOMEM *p_doorbells, u64 db_phys_addr, 5841 struct ecore_hw_prepare_params *p_params) 5842 { 5843 struct ecore_mdump_retain_data mdump_retain; 5844 struct ecore_dev *p_dev = p_hwfn->p_dev; 5845 struct ecore_mdump_info mdump_info; 5846 enum _ecore_status_t rc = ECORE_SUCCESS; 5847 5848 /* Split PCI bars evenly between hwfns */ 5849 p_hwfn->regview = p_regview; 5850 p_hwfn->doorbells = p_doorbells; 5851 p_hwfn->db_phys_addr = db_phys_addr; 5852 5853 #ifndef LINUX_REMOVE 5854 p_hwfn->reg_offset = (u8 *)p_hwfn->regview - (u8 *)p_hwfn->p_dev->regview; 5855 p_hwfn->db_offset = (u8 *)p_hwfn->doorbells - (u8 *)p_hwfn->p_dev->doorbells; 5856 #endif 5857 5858 if (IS_VF(p_dev)) 5859 return ecore_vf_hw_prepare(p_hwfn); 5860 5861 /* Validate that chip access is feasible */ 5862 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 5863 DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n"); 5864 if (p_params->b_relaxed_probe) 5865 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME; 5866 return ECORE_INVAL; 5867 } 5868 5869 get_function_id(p_hwfn); 5870 5871 /* Allocate PTT pool */ 5872 rc = ecore_ptt_pool_alloc(p_hwfn); 5873 if (rc) { 5874 DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n"); 5875 if (p_params->b_relaxed_probe) 5876 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 5877 goto err0; 5878 } 5879 5880 /* Allocate the main PTT */ 5881 p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 5882 5883 /* First hwfn learns basic information, e.g., number of 
hwfns */ 5884 if (!p_hwfn->my_id) { 5885 rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); 5886 if (rc != ECORE_SUCCESS) { 5887 if (p_params->b_relaxed_probe) 5888 p_params->p_relaxed_res = 5889 ECORE_HW_PREPARE_FAILED_DEV; 5890 goto err1; 5891 } 5892 } 5893 5894 ecore_hw_hwfn_prepare(p_hwfn); 5895 5896 /* Initialize MCP structure */ 5897 rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 5898 if (rc) { 5899 DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n"); 5900 if (p_params->b_relaxed_probe) 5901 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 5902 goto err1; 5903 } 5904 5905 /* Read the device configuration information from the HW and SHMEM */ 5906 rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, 5907 p_params->personality, p_params); 5908 if (rc) { 5909 DP_NOTICE(p_hwfn, false, "Failed to get HW information\n"); 5910 goto err2; 5911 } 5912 5913 /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is 5914 * called, since among others it sets the ports number in an engine. 5915 */ 5916 if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) && 5917 !p_dev->recov_in_prog) { 5918 rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 5919 if (rc != ECORE_SUCCESS) 5920 DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n"); 5921 } 5922 5923 /* Check if mdump logs/data are present and update the epoch value */ 5924 if (IS_LEAD_HWFN(p_hwfn)) { 5925 #ifndef ASIC_ONLY 5926 if (!CHIP_REV_IS_EMUL(p_dev)) { 5927 #endif 5928 rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt, 5929 &mdump_info); 5930 if (rc == ECORE_SUCCESS && mdump_info.num_of_logs) 5931 DP_NOTICE(p_hwfn, false, 5932 "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n"); 5933 5934 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt, 5935 &mdump_retain); 5936 if (rc == ECORE_SUCCESS && mdump_retain.valid) 5937 DP_NOTICE(p_hwfn, false, 5938 "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n", 5939 mdump_retain.epoch, mdump_retain.pf, 5940 mdump_retain.status); 5941 5942 ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, 5943 p_params->epoch); 5944 #ifndef ASIC_ONLY 5945 } 5946 #endif 5947 } 5948 5949 /* Allocate the init RT array and initialize the init-ops engine */ 5950 rc = ecore_init_alloc(p_hwfn); 5951 if (rc) { 5952 DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n"); 5953 if (p_params->b_relaxed_probe) 5954 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; 5955 goto err2; 5956 } 5957 5958 #ifndef ASIC_ONLY 5959 if (CHIP_REV_IS_FPGA(p_dev)) { 5960 DP_NOTICE(p_hwfn, false, 5961 "FPGA: workaround; Prevent DMAE parities\n"); 5962 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5, 5963 7); 5964 5965 DP_NOTICE(p_hwfn, false, 5966 "FPGA: workaround: Set VF bar0 size\n"); 5967 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, 5968 PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4); 5969 } 5970 #endif 5971 5972 return rc; 5973 err2: 5974 if (IS_LEAD_HWFN(p_hwfn)) 5975 ecore_iov_free_hw_info(p_dev); 5976 ecore_mcp_free(p_hwfn); 5977 err1: 5978 ecore_hw_hwfn_free(p_hwfn); 5979 err0: 5980 return rc; 5981 } 5982 5983 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, 5984 struct ecore_hw_prepare_params *p_params) 5985 { 5986 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 5987 enum _ecore_status_t rc; 5988 5989 p_dev->chk_reg_fifo = p_params->chk_reg_fifo; 5990 p_dev->allow_mdump = p_params->allow_mdump; 5991 5992 if (p_params->b_relaxed_probe) 5993 p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; 5994 5995 /* Store the 
precompiled init data ptrs */
	if (IS_PF(p_dev))
		ecore_init_iro_array(p_dev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,
				     p_dev->doorbells, p_dev->db_phys_addr,
				     p_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_params->personality = p_hwfn->hw_info.personality;

	/* Initialize 2nd hwfn if necessary */
	if (ECORE_IS_CMT(p_dev)) {
		void OSAL_IOMEM *p_regview, *p_doorbell;
		u8 OSAL_IOMEM *addr;
		u64 db_phys_addr;
		u32 offset;

		/* adjust bar offset for second engine */
		offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
					   BAR_ID_0) / 2;
		addr = (u8 OSAL_IOMEM *)p_dev->regview + offset;
		p_regview = (void OSAL_IOMEM *)addr;

		offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
					   BAR_ID_1) / 2;
		addr = (u8 OSAL_IOMEM *)p_dev->doorbells + offset;
		p_doorbell = (void OSAL_IOMEM *)addr;
		db_phys_addr = p_dev->db_phys_addr + offset;

		/* prepare second hw function */
		rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
					     p_doorbell, db_phys_addr,
					     p_params);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc != ECORE_SUCCESS) {
			if (p_params->b_relaxed_probe)
				p_params->p_relaxed_res =
					ECORE_HW_PREPARE_FAILED_ENG2;

			if (IS_PF(p_dev)) {
				ecore_init_free(p_hwfn);
				ecore_mcp_free(p_hwfn);
				ecore_hw_hwfn_free(p_hwfn);
			} else {
				DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails?\n");
			}
			return rc;
		}
	}

	return rc;
}

void ecore_hw_remove(struct ecore_dev *p_dev)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	int i;

	if (IS_PF(p_dev))
		ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
						 ECORE_OV_DRIVER_STATE_NOT_LOADED);

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (IS_VF(p_dev)) {
			ecore_vf_pf_release(p_hwfn);
			continue;
		}

		ecore_init_free(p_hwfn);
		ecore_hw_hwfn_free(p_hwfn);
		ecore_mcp_free(p_hwfn);

#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
#endif
	}

	ecore_iov_free_hw_info(p_dev);
}

static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
				      struct ecore_chain *p_chain)
{
	void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
	struct ecore_chain_next *p_next;
	u32 size, i;

	if (!p_virt)
		return;

	size = p_chain->elem_size * p_chain->usable_per_page;

	for (i = 0; i < p_chain->page_cnt; i++) {
		if (!p_virt)
			break;

		p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
		p_virt_next = p_next->next_virt;
		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);

		OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
				       ECORE_CHAIN_PAGE_SIZE);

		p_virt = p_virt_next;
		p_phys = p_phys_next;
	}
}

static void ecore_chain_free_single(struct ecore_dev *p_dev,
				    struct ecore_chain *p_chain)
{
	if (!p_chain->p_virt_addr)
		return;

	OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
			       p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
}

static void ecore_chain_free_pbl(struct ecore_dev
*p_dev,
				 struct ecore_chain *p_chain)
{
	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
	u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
	u32 page_cnt = p_chain->page_cnt, i, pbl_size;

	if (!pp_virt_addr_tbl)
		return;

	if (!p_pbl_virt)
		goto out;

	for (i = 0; i < page_cnt; i++) {
		if (!pp_virt_addr_tbl[i])
			break;

		OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
				       *(dma_addr_t *)p_pbl_virt,
				       ECORE_CHAIN_PAGE_SIZE);

		p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
	}

	pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;

	if (!p_chain->b_external_pbl) {
		OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
				       p_chain->pbl_sp.p_phys_table, pbl_size);
	}
out:
	OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
	p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
}

void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain)
{
	switch (p_chain->mode) {
	case ECORE_CHAIN_MODE_NEXT_PTR:
		ecore_chain_free_next_ptr(p_dev, p_chain);
		break;
	case ECORE_CHAIN_MODE_SINGLE:
		ecore_chain_free_single(p_dev, p_chain);
		break;
	case ECORE_CHAIN_MODE_PBL:
		ecore_chain_free_pbl(p_dev, p_chain);
		break;
	}
}

static enum _ecore_status_t
ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
			       enum ecore_chain_cnt_type cnt_type,
			       osal_size_t elem_size, u32 page_cnt)
{
	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested elements number to pages, and after
	 * taking into account the unusable elements (next-ptr elements).
	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
	 * size/capacity fields are of a u32 type.
	 */
	if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
	     chain_size > ((u32)ECORE_U16_MAX + 1)) ||
	    (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
	     chain_size > ECORE_U32_MAX)) {
		DP_NOTICE(p_dev, true,
			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
			  (unsigned long long)chain_size);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
{
	void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 i;

	for (i = 0; i < p_chain->page_cnt; i++) {
		p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
						 ECORE_CHAIN_PAGE_SIZE);
		if (!p_virt) {
			DP_NOTICE(p_dev, false,
				  "Failed to allocate chain memory\n");
			return ECORE_NOMEM;
		}

		if (i == 0) {
			ecore_chain_init_mem(p_chain, p_virt, p_phys);
			ecore_chain_reset(p_chain);
		} else {
			ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
						       p_virt, p_phys);
		}

		p_virt_prev = p_virt;
	}
	/* Last page's next element should point to the beginning of the
	 * chain.
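	 * (For reference: this makes the next-ptr chain circular, so after the
	 * last usable element of the last page, producers and consumers wrap
	 * back to page 0 rather than following an uninitialized next pointer.)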
6227 */ 6228 ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, 6229 p_chain->p_virt_addr, 6230 p_chain->p_phys_addr); 6231 6232 return ECORE_SUCCESS; 6233 } 6234 6235 static enum _ecore_status_t 6236 ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) 6237 { 6238 dma_addr_t p_phys = 0; 6239 void *p_virt = OSAL_NULL; 6240 6241 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); 6242 if (!p_virt) { 6243 DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); 6244 return ECORE_NOMEM; 6245 } 6246 6247 ecore_chain_init_mem(p_chain, p_virt, p_phys); 6248 ecore_chain_reset(p_chain); 6249 6250 return ECORE_SUCCESS; 6251 } 6252 6253 static enum _ecore_status_t 6254 ecore_chain_alloc_pbl(struct ecore_dev *p_dev, 6255 struct ecore_chain *p_chain, 6256 struct ecore_chain_ext_pbl *ext_pbl) 6257 { 6258 u32 page_cnt = p_chain->page_cnt, size, i; 6259 dma_addr_t p_phys = 0, p_pbl_phys = 0; 6260 void **pp_virt_addr_tbl = OSAL_NULL; 6261 u8 *p_pbl_virt = OSAL_NULL; 6262 void *p_virt = OSAL_NULL; 6263 6264 size = page_cnt * sizeof(*pp_virt_addr_tbl); 6265 pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); 6266 if (!pp_virt_addr_tbl) { 6267 DP_NOTICE(p_dev, false, 6268 "Failed to allocate memory for the chain virtual addresses table\n"); 6269 return ECORE_NOMEM; 6270 } 6271 6272 /* The allocation of the PBL table is done with its full size, since it 6273 * is expected to be successive. 6274 * ecore_chain_init_pbl_mem() is called even in a case of an allocation 6275 * failure, since pp_virt_addr_tbl was previously allocated, and it 6276 * should be saved to allow its freeing during the error flow. 6277 */ 6278 size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; 6279 6280 if (ext_pbl == OSAL_NULL) { 6281 p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); 6282 } else { 6283 p_pbl_virt = ext_pbl->p_pbl_virt; 6284 p_pbl_phys = ext_pbl->p_pbl_phys; 6285 p_chain->b_external_pbl = true; 6286 } 6287 6288 ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, 6289 pp_virt_addr_tbl); 6290 if (!p_pbl_virt) { 6291 DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n"); 6292 return ECORE_NOMEM; 6293 } 6294 6295 for (i = 0; i < page_cnt; i++) { 6296 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 6297 ECORE_CHAIN_PAGE_SIZE); 6298 if (!p_virt) { 6299 DP_NOTICE(p_dev, false, 6300 "Failed to allocate chain memory\n"); 6301 return ECORE_NOMEM; 6302 } 6303 6304 if (i == 0) { 6305 ecore_chain_init_mem(p_chain, p_virt, p_phys); 6306 ecore_chain_reset(p_chain); 6307 } 6308 6309 /* Fill the PBL table with the physical address of the page */ 6310 *(dma_addr_t *)p_pbl_virt = p_phys; 6311 /* Keep the virtual address of the page */ 6312 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; 6313 6314 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; 6315 } 6316 6317 return ECORE_SUCCESS; 6318 } 6319 6320 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, 6321 enum ecore_chain_use_mode intended_use, 6322 enum ecore_chain_mode mode, 6323 enum ecore_chain_cnt_type cnt_type, 6324 u32 num_elems, osal_size_t elem_size, 6325 struct ecore_chain *p_chain, 6326 struct ecore_chain_ext_pbl *ext_pbl) 6327 { 6328 u32 page_cnt; 6329 enum _ecore_status_t rc = ECORE_SUCCESS; 6330 6331 if (mode == ECORE_CHAIN_MODE_SINGLE) 6332 page_cnt = 1; 6333 else 6334 page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 6335 6336 rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, 6337 page_cnt); 6338 if (rc) { 6339 DP_NOTICE(p_dev, false, 6340 "Cannot allocate a chain with 
the given arguments:\n" 6341 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 6342 intended_use, mode, cnt_type, num_elems, elem_size); 6343 return rc; 6344 } 6345 6346 ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use, 6347 mode, cnt_type, p_dev->dp_ctx); 6348 6349 switch (mode) { 6350 case ECORE_CHAIN_MODE_NEXT_PTR: 6351 rc = ecore_chain_alloc_next_ptr(p_dev, p_chain); 6352 break; 6353 case ECORE_CHAIN_MODE_SINGLE: 6354 rc = ecore_chain_alloc_single(p_dev, p_chain); 6355 break; 6356 case ECORE_CHAIN_MODE_PBL: 6357 rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl); 6358 break; 6359 } 6360 if (rc) 6361 goto nomem; 6362 6363 return ECORE_SUCCESS; 6364 6365 nomem: 6366 ecore_chain_free(p_dev, p_chain); 6367 return rc; 6368 } 6369 6370 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, 6371 u16 src_id, u16 *dst_id) 6372 { 6373 if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { 6374 u16 min, max; 6375 6376 min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE); 6377 max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 6378 DP_NOTICE(p_hwfn, true, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 6379 src_id, min, max); 6380 6381 return ECORE_INVAL; 6382 } 6383 6384 *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id; 6385 6386 return ECORE_SUCCESS; 6387 } 6388 6389 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, 6390 u8 src_id, u8 *dst_id) 6391 { 6392 if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { 6393 u8 min, max; 6394 6395 min = (u8)RESC_START(p_hwfn, ECORE_VPORT); 6396 max = min + RESC_NUM(p_hwfn, ECORE_VPORT); 6397 DP_NOTICE(p_hwfn, true, "vport id [%d] is not valid, available indices [%d - %d]\n", 6398 src_id, min, max); 6399 6400 return ECORE_INVAL; 6401 } 6402 6403 *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id; 6404 6405 return ECORE_SUCCESS; 6406 } 6407 6408 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, 6409 u8 src_id, u8 *dst_id) 6410 { 6411 if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) { 6412 u8 min, max; 6413 6414 min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG); 6415 max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG); 6416 DP_NOTICE(p_hwfn, true, "rss_eng id [%d] is not valid, available indices [%d - %d]\n", 6417 src_id, min, max); 6418 6419 return ECORE_INVAL; 6420 } 6421 6422 *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id; 6423 6424 return ECORE_SUCCESS; 6425 } 6426 6427 enum _ecore_status_t 6428 ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn, 6429 struct ecore_ptt *p_ptt) 6430 { 6431 if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) { 6432 ecore_wr(p_hwfn, p_ptt, 6433 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 6434 1 << p_hwfn->abs_pf_id / 2); 6435 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0); 6436 return ECORE_SUCCESS; 6437 } else { 6438 DP_NOTICE(p_hwfn, false, 6439 "This function can't be set as default\n"); 6440 return ECORE_INVAL; 6441 } 6442 } 6443 6444 static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn, 6445 struct ecore_ptt *p_ptt, 6446 u32 hw_addr, void *p_eth_qzone, 6447 osal_size_t eth_qzone_size, 6448 u8 timeset) 6449 { 6450 struct coalescing_timeset *p_coal_timeset; 6451 6452 if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) { 6453 DP_NOTICE(p_hwfn, true, 6454 "Coalescing configuration not enabled\n"); 6455 return ECORE_INVAL; 6456 } 6457 6458 p_coal_timeset = p_eth_qzone; 6459 OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size); 6460 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); 6461 
SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); 6462 ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); 6463 6464 return ECORE_SUCCESS; 6465 } 6466 6467 enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, 6468 u16 rx_coal, u16 tx_coal, 6469 void *p_handle) 6470 { 6471 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 6472 enum _ecore_status_t rc = ECORE_SUCCESS; 6473 struct ecore_ptt *p_ptt; 6474 6475 /* TODO - Configuring a single queue's coalescing but 6476 * claiming all queues are abiding same configuration 6477 * for PF and VF both. 6478 */ 6479 6480 #ifdef CONFIG_ECORE_SRIOV 6481 if (IS_VF(p_hwfn->p_dev)) 6482 return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal, 6483 tx_coal, p_cid); 6484 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 6485 6486 p_ptt = ecore_ptt_acquire(p_hwfn); 6487 if (!p_ptt) 6488 return ECORE_AGAIN; 6489 6490 if (rx_coal) { 6491 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 6492 if (rc) 6493 goto out; 6494 p_hwfn->p_dev->rx_coalesce_usecs = rx_coal; 6495 } 6496 6497 if (tx_coal) { 6498 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); 6499 if (rc) 6500 goto out; 6501 p_hwfn->p_dev->tx_coalesce_usecs = tx_coal; 6502 } 6503 out: 6504 ecore_ptt_release(p_hwfn, p_ptt); 6505 6506 return rc; 6507 } 6508 6509 enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, 6510 struct ecore_ptt *p_ptt, 6511 u16 coalesce, 6512 struct ecore_queue_cid *p_cid) 6513 { 6514 struct ustorm_eth_queue_zone eth_qzone; 6515 u8 timeset, timer_res; 6516 u32 address; 6517 enum _ecore_status_t rc; 6518 6519 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 6520 if (coalesce <= 0x7F) 6521 timer_res = 0; 6522 else if (coalesce <= 0xFF) 6523 timer_res = 1; 6524 else if (coalesce <= 0x1FF) 6525 timer_res = 2; 6526 else { 6527 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 6528 return ECORE_INVAL; 6529 } 6530 timeset = (u8)(coalesce >> timer_res); 6531 6532 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 6533 p_cid->sb_igu_id, false); 6534 if (rc != ECORE_SUCCESS) 6535 goto out; 6536 6537 address = BAR0_MAP_REG_USDM_RAM + 6538 USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 6539 6540 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 6541 sizeof(struct ustorm_eth_queue_zone), timeset); 6542 if (rc != ECORE_SUCCESS) 6543 goto out; 6544 6545 out: 6546 return rc; 6547 } 6548 6549 enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, 6550 struct ecore_ptt *p_ptt, 6551 u16 coalesce, 6552 struct ecore_queue_cid *p_cid) 6553 { 6554 struct xstorm_eth_queue_zone eth_qzone; 6555 u8 timeset, timer_res; 6556 u32 address; 6557 enum _ecore_status_t rc; 6558 6559 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 6560 if (coalesce <= 0x7F) 6561 timer_res = 0; 6562 else if (coalesce <= 0xFF) 6563 timer_res = 1; 6564 else if (coalesce <= 0x1FF) 6565 timer_res = 2; 6566 else { 6567 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 6568 return ECORE_INVAL; 6569 } 6570 timeset = (u8)(coalesce >> timer_res); 6571 6572 rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, 6573 p_cid->sb_igu_id, true); 6574 if (rc != ECORE_SUCCESS) 6575 goto out; 6576 6577 address = BAR0_MAP_REG_XSDM_RAM + 6578 XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 6579 6580 rc = ecore_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 6581 sizeof(struct xstorm_eth_queue_zone), timeset); 6582 out: 6583 return rc; 6584 } 6585 6586 /* Calculate final WFQ values 
for all vports and configure it. 6587 * After this configuration each vport must have 6588 * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT 6589 */ 6590 static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 6591 struct ecore_ptt *p_ptt, 6592 u32 min_pf_rate) 6593 { 6594 struct init_qm_vport_params *vport_params; 6595 int i; 6596 6597 vport_params = p_hwfn->qm_info.qm_vport_params; 6598 6599 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 6600 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 6601 6602 vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) / 6603 min_pf_rate; 6604 ecore_init_vport_wfq(p_hwfn, p_ptt, 6605 vport_params[i].first_tx_pq_id, 6606 vport_params[i].vport_wfq); 6607 } 6608 } 6609 6610 static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn) 6611 6612 { 6613 int i; 6614 6615 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) 6616 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; 6617 } 6618 6619 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, 6620 struct ecore_ptt *p_ptt) 6621 { 6622 struct init_qm_vport_params *vport_params; 6623 int i; 6624 6625 vport_params = p_hwfn->qm_info.qm_vport_params; 6626 6627 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 6628 ecore_init_wfq_default_param(p_hwfn); 6629 ecore_init_vport_wfq(p_hwfn, p_ptt, 6630 vport_params[i].first_tx_pq_id, 6631 vport_params[i].vport_wfq); 6632 } 6633 } 6634 6635 /* This function performs several validations for WFQ 6636 * configuration and required min rate for a given vport 6637 * 1. req_rate must be greater than one percent of min_pf_rate. 6638 * 2. req_rate should not cause other vports [not configured for WFQ explicitly] 6639 * rates to get less than one percent of min_pf_rate. 6640 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 
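 * A hypothetical example (assuming ECORE_WFQ_UNIT == 100, i.e. the checks
 * below effectively work in units of one percent of min_pf_rate): with
 * min_pf_rate = 10000 Mbps and four vports, a request of 4000 Mbps for
 * vport 0 passes all three checks - 4000 >= 100, 4000 <= 10000, and the
 * remaining 6000 Mbps split across the three non-requested vports leaves
 * 2000 Mbps each, which is also >= 100.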
6641 */ 6642 static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn, 6643 u16 vport_id, u32 req_rate, 6644 u32 min_pf_rate) 6645 { 6646 u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 6647 int non_requested_count = 0, req_count = 0, i, num_vports; 6648 6649 num_vports = p_hwfn->qm_info.num_vports; 6650 6651 /* Accounting for the vports which are configured for WFQ explicitly */ 6652 for (i = 0; i < num_vports; i++) { 6653 u32 tmp_speed; 6654 6655 if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) { 6656 req_count++; 6657 tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 6658 total_req_min_rate += tmp_speed; 6659 } 6660 } 6661 6662 /* Include current vport data as well */ 6663 req_count++; 6664 total_req_min_rate += req_rate; 6665 non_requested_count = num_vports - req_count; 6666 6667 /* validate possible error cases */ 6668 if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) { 6669 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 6670 "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 6671 vport_id, req_rate, min_pf_rate); 6672 return ECORE_INVAL; 6673 } 6674 6675 /* TBD - for number of vports greater than 100 */ 6676 if (num_vports > ECORE_WFQ_UNIT) { 6677 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 6678 "Number of vports is greater than %d\n", 6679 ECORE_WFQ_UNIT); 6680 return ECORE_INVAL; 6681 } 6682 6683 if (total_req_min_rate > min_pf_rate) { 6684 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 6685 "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 6686 total_req_min_rate, min_pf_rate); 6687 return ECORE_INVAL; 6688 } 6689 6690 /* Data left for non requested vports */ 6691 total_left_rate = min_pf_rate - total_req_min_rate; 6692 left_rate_per_vp = total_left_rate / non_requested_count; 6693 6694 /* validate if non requested get < 1% of min bw */ 6695 if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) { 6696 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 6697 "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 6698 left_rate_per_vp, min_pf_rate); 6699 return ECORE_INVAL; 6700 } 6701 6702 /* now req_rate for given vport passes all scenarios. 6703 * assign final wfq rates to all vports. 
6704 */ 6705 p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; 6706 p_hwfn->qm_info.wfq_data[vport_id].configured = true; 6707 6708 for (i = 0; i < num_vports; i++) { 6709 if (p_hwfn->qm_info.wfq_data[i].configured) 6710 continue; 6711 6712 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; 6713 } 6714 6715 return ECORE_SUCCESS; 6716 } 6717 6718 static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn, 6719 struct ecore_ptt *p_ptt, 6720 u16 vp_id, u32 rate) 6721 { 6722 struct ecore_mcp_link_state *p_link; 6723 int rc = ECORE_SUCCESS; 6724 6725 p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output; 6726 6727 if (!p_link->min_pf_rate) { 6728 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; 6729 p_hwfn->qm_info.wfq_data[vp_id].configured = true; 6730 return rc; 6731 } 6732 6733 rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); 6734 6735 if (rc == ECORE_SUCCESS) 6736 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, 6737 p_link->min_pf_rate); 6738 else 6739 DP_NOTICE(p_hwfn, false, 6740 "Validation failed while configuring min rate\n"); 6741 6742 return rc; 6743 } 6744 6745 static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn, 6746 struct ecore_ptt *p_ptt, 6747 u32 min_pf_rate) 6748 { 6749 bool use_wfq = false; 6750 int rc = ECORE_SUCCESS; 6751 u16 i; 6752 6753 /* Validate all pre configured vports for wfq */ 6754 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 6755 u32 rate; 6756 6757 if (!p_hwfn->qm_info.wfq_data[i].configured) 6758 continue; 6759 6760 rate = p_hwfn->qm_info.wfq_data[i].min_speed; 6761 use_wfq = true; 6762 6763 rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate); 6764 if (rc != ECORE_SUCCESS) { 6765 DP_NOTICE(p_hwfn, false, 6766 "WFQ validation failed while configuring min rate\n"); 6767 break; 6768 } 6769 } 6770 6771 if (rc == ECORE_SUCCESS && use_wfq) 6772 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 6773 else 6774 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt); 6775 6776 return rc; 6777 } 6778 6779 /* Main API for ecore clients to configure vport min rate. 6780 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] 6781 * rate - Speed in Mbps needs to be assigned to a given vport. 
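 * For illustration, ecore_configure_vport_wfq(p_dev, 0, 4000) asks for a
 * 4000 Mbps minimum for vport 0 on every hwfn; note that the call bails out
 * with ECORE_INVAL on CMT (100G) devices, where per-vport WFQ is not
 * supported.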
6782 */ 6783 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate) 6784 { 6785 int i, rc = ECORE_INVAL; 6786 6787 /* TBD - for multiple hardware functions - that is 100 gig */ 6788 if (ECORE_IS_CMT(p_dev)) { 6789 DP_NOTICE(p_dev, false, 6790 "WFQ configuration is not supported for this device\n"); 6791 return rc; 6792 } 6793 6794 for_each_hwfn(p_dev, i) { 6795 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 6796 struct ecore_ptt *p_ptt; 6797 6798 p_ptt = ecore_ptt_acquire(p_hwfn); 6799 if (!p_ptt) 6800 return ECORE_TIMEOUT; 6801 6802 rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); 6803 6804 if (rc != ECORE_SUCCESS) { 6805 ecore_ptt_release(p_hwfn, p_ptt); 6806 return rc; 6807 } 6808 6809 ecore_ptt_release(p_hwfn, p_ptt); 6810 } 6811 6812 return rc; 6813 } 6814 6815 /* API to configure WFQ from mcp link change */ 6816 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, 6817 struct ecore_ptt *p_ptt, 6818 u32 min_pf_rate) 6819 { 6820 int i; 6821 6822 /* TBD - for multiple hardware functions - that is 100 gig */ 6823 if (ECORE_IS_CMT(p_dev)) { 6824 DP_VERBOSE(p_dev, ECORE_MSG_LINK, 6825 "WFQ configuration is not supported for this device\n"); 6826 return; 6827 } 6828 6829 for_each_hwfn(p_dev, i) { 6830 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 6831 6832 __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, 6833 min_pf_rate); 6834 } 6835 } 6836 6837 int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, 6838 struct ecore_ptt *p_ptt, 6839 struct ecore_mcp_link_state *p_link, 6840 u8 max_bw) 6841 { 6842 int rc = ECORE_SUCCESS; 6843 6844 p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; 6845 6846 if (!p_link->line_speed && (max_bw != 100)) 6847 return rc; 6848 6849 p_link->speed = (p_link->line_speed * max_bw) / 100; 6850 p_hwfn->qm_info.pf_rl = p_link->speed; 6851 6852 /* Since the limiter also affects Tx-switched traffic, we don't want it 6853 * to limit such traffic in case there's no actual limit. 6854 * In that case, set limit to imaginary high boundary. 
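 * For example, with line_speed = 25000 Mbps and max_bw = 40 the resulting
 * pf_rl is 10000 Mbps, while max_bw = 100 forces pf_rl to 100000 Mbps so
 * that Tx-switched traffic is effectively left unlimited.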
6855 */ 6856 if (max_bw == 100) 6857 p_hwfn->qm_info.pf_rl = 100000; 6858 6859 rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, 6860 p_hwfn->qm_info.pf_rl); 6861 6862 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 6863 "Configured MAX bandwidth to be %08x Mb/sec\n", 6864 p_link->speed); 6865 6866 return rc; 6867 } 6868 6869 /* Main API to configure PF max bandwidth where bw range is [1 - 100] */ 6870 int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw) 6871 { 6872 int i, rc = ECORE_INVAL; 6873 6874 if (max_bw < 1 || max_bw > 100) { 6875 DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n"); 6876 return rc; 6877 } 6878 6879 for_each_hwfn(p_dev, i) { 6880 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 6881 struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev); 6882 struct ecore_mcp_link_state *p_link; 6883 struct ecore_ptt *p_ptt; 6884 6885 p_link = &p_lead->mcp_info->link_output; 6886 6887 p_ptt = ecore_ptt_acquire(p_hwfn); 6888 if (!p_ptt) 6889 return ECORE_TIMEOUT; 6890 6891 rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, 6892 p_link, max_bw); 6893 6894 ecore_ptt_release(p_hwfn, p_ptt); 6895 6896 if (rc != ECORE_SUCCESS) 6897 break; 6898 } 6899 6900 return rc; 6901 } 6902 6903 int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn, 6904 struct ecore_ptt *p_ptt, 6905 struct ecore_mcp_link_state *p_link, 6906 u8 min_bw) 6907 { 6908 int rc = ECORE_SUCCESS; 6909 6910 p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; 6911 p_hwfn->qm_info.pf_wfq = min_bw; 6912 6913 if (!p_link->line_speed) 6914 return rc; 6915 6916 p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; 6917 6918 rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); 6919 6920 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 6921 "Configured MIN bandwidth to be %d Mb/sec\n", 6922 p_link->min_pf_rate); 6923 6924 return rc; 6925 } 6926 6927 /* Main API to configure PF min bandwidth where bw range is [1-100] */ 6928 int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw) 6929 { 6930 int i, rc = ECORE_INVAL; 6931 6932 if (min_bw < 1 || min_bw > 100) { 6933 DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n"); 6934 return rc; 6935 } 6936 6937 for_each_hwfn(p_dev, i) { 6938 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 6939 struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev); 6940 struct ecore_mcp_link_state *p_link; 6941 struct ecore_ptt *p_ptt; 6942 6943 p_link = &p_lead->mcp_info->link_output; 6944 6945 p_ptt = ecore_ptt_acquire(p_hwfn); 6946 if (!p_ptt) 6947 return ECORE_TIMEOUT; 6948 6949 rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, 6950 p_link, min_bw); 6951 if (rc != ECORE_SUCCESS) { 6952 ecore_ptt_release(p_hwfn, p_ptt); 6953 return rc; 6954 } 6955 6956 if (p_link->min_pf_rate) { 6957 u32 min_rate = p_link->min_pf_rate; 6958 6959 rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn, 6960 p_ptt, 6961 min_rate); 6962 } 6963 6964 ecore_ptt_release(p_hwfn, p_ptt); 6965 } 6966 6967 return rc; 6968 } 6969 6970 void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 6971 { 6972 struct ecore_mcp_link_state *p_link; 6973 6974 p_link = &p_hwfn->mcp_info->link_output; 6975 6976 if (p_link->min_pf_rate) 6977 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt); 6978 6979 OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0, 6980 sizeof(*p_hwfn->qm_info.wfq_data) * 6981 p_hwfn->qm_info.num_vports); 6982 } 6983 6984 int ecore_device_num_engines(struct ecore_dev *p_dev) 6985 { 6986 return ECORE_IS_BB(p_dev) ? 
2 : 1; 6987 } 6988 6989 int ecore_device_num_ports(struct ecore_dev *p_dev) 6990 { 6991 return p_dev->num_ports; 6992 } 6993 6994 void ecore_set_fw_mac_addr(__le16 *fw_msb, 6995 __le16 *fw_mid, 6996 __le16 *fw_lsb, 6997 u8 *mac) 6998 { 6999 ((u8 *)fw_msb)[0] = mac[1]; 7000 ((u8 *)fw_msb)[1] = mac[0]; 7001 ((u8 *)fw_mid)[0] = mac[3]; 7002 ((u8 *)fw_mid)[1] = mac[2]; 7003 ((u8 *)fw_lsb)[0] = mac[5]; 7004 ((u8 *)fw_lsb)[1] = mac[4]; 7005 } 7006 7007 void ecore_set_dev_access_enable(struct ecore_dev *p_dev, bool b_enable) 7008 { 7009 if (p_dev->recov_in_prog != !b_enable) { 7010 DP_INFO(p_dev, "%s access to the device\n", 7011 b_enable ? "Enable" : "Disable"); 7012 p_dev->recov_in_prog = !b_enable; 7013 } 7014 } 7015 7016 #ifdef _NTDDK_ 7017 #pragma warning(pop) 7018 #endif 7019
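/* Note on ecore_set_fw_mac_addr() above, for illustration only: the MAC is
 * stored byte-swapped within each 16-bit word, so mac[] = {0xaa, 0xbb, 0xcc,
 * 0xdd, 0xee, 0xff} ends up as fw_msb = {0xbb, 0xaa}, fw_mid = {0xdd, 0xcc}
 * and fw_lsb = {0xff, 0xee} when viewed as byte arrays.
 */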