/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
#include "qed_rdma.h"

static DEFINE_SPINLOCK(qm_lock);

/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which represent
 * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
 * entity needs to register with the mechanism and provide the parameters
 * describing its doorbell, including a location where last used doorbell data
 * can be found. The doorbell execute function will traverse the list and
 * doorbell all of the registered entries.
 */
struct qed_db_recovery_entry {
	struct list_head list_entry;
	void __iomem *db_addr;
	void *db_data;
	enum qed_db_rec_width db_width;
	enum qed_db_rec_space db_space;
	u8 hwfn_idx;
};

/* Display a single doorbell recovery entry */
static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
				     struct qed_db_recovery_entry *db_entry,
				     char *action)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_SPQ,
		   "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
		   action,
		   db_entry,
		   db_entry->db_addr,
		   db_entry->db_data,
		   db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
		   db_entry->db_space == DB_REC_USER ? "user" : "kernel",
		   db_entry->hwfn_idx);
}
"user" : "kernel", 100 db_entry->hwfn_idx); 101 } 102 103 /* Doorbell address sanity (address within doorbell bar range) */ 104 static bool qed_db_rec_sanity(struct qed_dev *cdev, 105 void __iomem *db_addr, 106 enum qed_db_rec_width db_width, 107 void *db_data) 108 { 109 u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64; 110 111 /* Make sure doorbell address is within the doorbell bar */ 112 if (db_addr < cdev->doorbells || 113 (u8 __iomem *)db_addr + width > 114 (u8 __iomem *)cdev->doorbells + cdev->db_size) { 115 WARN(true, 116 "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", 117 db_addr, 118 cdev->doorbells, 119 (u8 __iomem *)cdev->doorbells + cdev->db_size); 120 return false; 121 } 122 123 /* ake sure doorbell data pointer is not null */ 124 if (!db_data) { 125 WARN(true, "Illegal doorbell data pointer: %p", db_data); 126 return false; 127 } 128 129 return true; 130 } 131 132 /* Find hwfn according to the doorbell address */ 133 static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev, 134 void __iomem *db_addr) 135 { 136 struct qed_hwfn *p_hwfn; 137 138 /* In CMT doorbell bar is split down the middle between engine 0 and enigne 1 */ 139 if (cdev->num_hwfns > 1) 140 p_hwfn = db_addr < cdev->hwfns[1].doorbells ? 141 &cdev->hwfns[0] : &cdev->hwfns[1]; 142 else 143 p_hwfn = QED_LEADING_HWFN(cdev); 144 145 return p_hwfn; 146 } 147 148 /* Add a new entry to the doorbell recovery mechanism */ 149 int qed_db_recovery_add(struct qed_dev *cdev, 150 void __iomem *db_addr, 151 void *db_data, 152 enum qed_db_rec_width db_width, 153 enum qed_db_rec_space db_space) 154 { 155 struct qed_db_recovery_entry *db_entry; 156 struct qed_hwfn *p_hwfn; 157 158 /* Shortcircuit VFs, for now */ 159 if (IS_VF(cdev)) { 160 DP_VERBOSE(cdev, 161 QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); 162 return 0; 163 } 164 165 /* Sanitize doorbell address */ 166 if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data)) 167 return -EINVAL; 168 169 /* Obtain hwfn from doorbell address */ 170 p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); 171 172 /* Create entry */ 173 db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL); 174 if (!db_entry) { 175 DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n"); 176 return -ENOMEM; 177 } 178 179 /* Populate entry */ 180 db_entry->db_addr = db_addr; 181 db_entry->db_data = db_data; 182 db_entry->db_width = db_width; 183 db_entry->db_space = db_space; 184 db_entry->hwfn_idx = p_hwfn->my_id; 185 186 /* Display */ 187 qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding"); 188 189 /* Protect the list */ 190 spin_lock_bh(&p_hwfn->db_recovery_info.lock); 191 list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list); 192 spin_unlock_bh(&p_hwfn->db_recovery_info.lock); 193 194 return 0; 195 } 196 197 /* Remove an entry from the doorbell recovery mechanism */ 198 int qed_db_recovery_del(struct qed_dev *cdev, 199 void __iomem *db_addr, void *db_data) 200 { 201 struct qed_db_recovery_entry *db_entry = NULL; 202 struct qed_hwfn *p_hwfn; 203 int rc = -EINVAL; 204 205 /* Shortcircuit VFs, for now */ 206 if (IS_VF(cdev)) { 207 DP_VERBOSE(cdev, 208 QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); 209 return 0; 210 } 211 212 /* Obtain hwfn from doorbell address */ 213 p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); 214 215 /* Protect the list */ 216 spin_lock_bh(&p_hwfn->db_recovery_info.lock); 217 list_for_each_entry(db_entry, 218 &p_hwfn->db_recovery_info.list, list_entry) { 219 /* search according to db_data addr since db_addr is not 
/* Initialize the doorbell recovery mechanism */
static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n");

	/* Make sure db_size was set in cdev */
	if (!p_hwfn->cdev->db_size) {
		DP_ERR(p_hwfn->cdev, "db_size not set\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list);
	spin_lock_init(&p_hwfn->db_recovery_info.lock);
	p_hwfn->db_recovery_info.db_recovery_counter = 0;

	return 0;
}

/* Destroy the doorbell recovery mechanism */
static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
{
	struct qed_db_recovery_entry *db_entry = NULL;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n");
	if (!list_empty(&p_hwfn->db_recovery_info.list)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
		while (!list_empty(&p_hwfn->db_recovery_info.list)) {
			db_entry =
			    list_first_entry(&p_hwfn->db_recovery_info.list,
					     struct qed_db_recovery_entry,
					     list_entry);
			qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
			list_del(&db_entry->list_entry);
			kfree(db_entry);
		}
	}
	p_hwfn->db_recovery_info.db_recovery_counter = 0;
}

/* Print the content of the doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
{
	struct qed_db_recovery_entry *db_entry = NULL;

	DP_NOTICE(p_hwfn,
		  "Displaying doorbell recovery database. Counter was %d\n",
		  p_hwfn->db_recovery_info.db_recovery_counter);

	/* Protect the list */
	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
	list_for_each_entry(db_entry,
			    &p_hwfn->db_recovery_info.list, list_entry) {
		qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
	}

	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
}

/* Ring the doorbell of a single doorbell recovery entry */
static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
				 struct qed_db_recovery_entry *db_entry)
{
	/* Print according to width */
	if (db_entry->db_width == DB_REC_WIDTH_32B) {
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "ringing doorbell address %p data %x\n",
			   db_entry->db_addr,
			   *(u32 *)db_entry->db_data);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "ringing doorbell address %p data %llx\n",
			   db_entry->db_addr,
			   *(u64 *)(db_entry->db_data));
	}

	/* Sanity */
	if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
			       db_entry->db_width, db_entry->db_data))
		return;

	/* Flush the write combined buffer. Since there are multiple doorbelling
	 * entities using the same address, if we don't flush, a transaction
	 * could be lost.
	 */
	wmb();

	/* Ring the doorbell */
	if (db_entry->db_width == DB_REC_WIDTH_32B)
		DIRECT_REG_WR(db_entry->db_addr,
			      *(u32 *)(db_entry->db_data));
	else
		DIRECT_REG_WR64(db_entry->db_addr,
				*(u64 *)(db_entry->db_data));

	/* Flush the write combined buffer. Next doorbell may come from a
	 * different entity to the same address...
	 */
	wmb();
}
/* Traverse the doorbell recovery entry list and ring all the doorbells */
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
{
	struct qed_db_recovery_entry *db_entry = NULL;

	DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
		  p_hwfn->db_recovery_info.db_recovery_counter);

	/* Track amount of times recovery was executed */
	p_hwfn->db_recovery_info.db_recovery_counter++;

	/* Protect the list */
	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
	list_for_each_entry(db_entry,
			    &p_hwfn->db_recovery_info.list, list_entry)
		qed_db_recovery_ring(p_hwfn, db_entry);
	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
}

/******************** Doorbell Recovery end ****************/

/********************************** NIG LLH ***********************************/

enum qed_llh_filter_type {
	QED_LLH_FILTER_TYPE_MAC,
	QED_LLH_FILTER_TYPE_PROTOCOL,
};

struct qed_llh_mac_filter {
	u8 addr[ETH_ALEN];
};

struct qed_llh_protocol_filter {
	enum qed_llh_prot_filter_type_t type;
	u16 source_port_or_eth_type;
	u16 dest_port;
};

union qed_llh_filter {
	struct qed_llh_mac_filter mac;
	struct qed_llh_protocol_filter protocol;
};

struct qed_llh_filter_info {
	bool b_enabled;
	u32 ref_cnt;
	enum qed_llh_filter_type type;
	union qed_llh_filter filter;
};

struct qed_llh_info {
	/* Number of LLH filters banks */
	u8 num_ppfid;

#define MAX_NUM_PPFID	8
	u8 ppfid_array[MAX_NUM_PPFID];

	/* Array of filters arrays:
	 * "num_ppfid" elements of filters banks, where each is an array of
	 * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
	 */
	struct qed_llh_filter_info **pp_filters;
};
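/* For example (illustrative values): with cdev->ppfid_bitmap = 0x5,
 * qed_llh_alloc() below fills ppfid_array = {0, 2} and num_ppfid = 2, so
 * relative ppfid 1 maps to absolute ppfid 2.
 */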
static void qed_llh_free(struct qed_dev *cdev)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	u32 i;

	if (p_llh_info) {
		if (p_llh_info->pp_filters)
			for (i = 0; i < p_llh_info->num_ppfid; i++)
				kfree(p_llh_info->pp_filters[i]);

		kfree(p_llh_info->pp_filters);
	}

	kfree(p_llh_info);
	cdev->p_llh_info = NULL;
}

static int qed_llh_alloc(struct qed_dev *cdev)
{
	struct qed_llh_info *p_llh_info;
	u32 size, i;

	p_llh_info = kzalloc(sizeof(*p_llh_info), GFP_KERNEL);
	if (!p_llh_info)
		return -ENOMEM;
	cdev->p_llh_info = p_llh_info;

	for (i = 0; i < MAX_NUM_PPFID; i++) {
		if (!(cdev->ppfid_bitmap & (0x1 << i)))
			continue;

		p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
		DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %hhd\n",
			   p_llh_info->num_ppfid, i);
		p_llh_info->num_ppfid++;
	}

	size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
	p_llh_info->pp_filters = kzalloc(size, GFP_KERNEL);
	if (!p_llh_info->pp_filters)
		return -ENOMEM;

	size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
	    sizeof(**p_llh_info->pp_filters);
	for (i = 0; i < p_llh_info->num_ppfid; i++) {
		p_llh_info->pp_filters[i] = kzalloc(size, GFP_KERNEL);
		if (!p_llh_info->pp_filters[i])
			return -ENOMEM;
	}

	return 0;
}

static int qed_llh_shadow_sanity(struct qed_dev *cdev,
				 u8 ppfid, u8 filter_idx, const char *action)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;

	if (ppfid >= p_llh_info->num_ppfid) {
		DP_NOTICE(cdev,
			  "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
			  action, ppfid, p_llh_info->num_ppfid);
		return -EINVAL;
	}

	if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(cdev,
			  "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
			  action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
		return -EINVAL;
	}

	return 0;
}

#define QED_LLH_INVALID_FILTER_IDX	0xff

static int
qed_llh_shadow_search_filter(struct qed_dev *cdev,
			     u8 ppfid,
			     union qed_llh_filter *p_filter, u8 *p_filter_idx)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	struct qed_llh_filter_info *p_filters;
	int rc;
	u8 i;

	rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "search");
	if (rc)
		return rc;

	*p_filter_idx = QED_LLH_INVALID_FILTER_IDX;

	p_filters = p_llh_info->pp_filters[ppfid];
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!memcmp(p_filter, &p_filters[i].filter,
			    sizeof(*p_filter))) {
			*p_filter_idx = i;
			break;
		}
	}

	return 0;
}

static int
qed_llh_shadow_get_free_idx(struct qed_dev *cdev, u8 ppfid, u8 *p_filter_idx)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	struct qed_llh_filter_info *p_filters;
	int rc;
	u8 i;

	rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "get_free_idx");
	if (rc)
		return rc;

	*p_filter_idx = QED_LLH_INVALID_FILTER_IDX;

	p_filters = p_llh_info->pp_filters[ppfid];
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!p_filters[i].b_enabled) {
			*p_filter_idx = i;
			break;
		}
	}

	return 0;
}
static int
__qed_llh_shadow_add_filter(struct qed_dev *cdev,
			    u8 ppfid,
			    u8 filter_idx,
			    enum qed_llh_filter_type type,
			    union qed_llh_filter *p_filter, u32 *p_ref_cnt)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	struct qed_llh_filter_info *p_filters;
	int rc;

	rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "add");
	if (rc)
		return rc;

	p_filters = p_llh_info->pp_filters[ppfid];
	if (!p_filters[filter_idx].ref_cnt) {
		p_filters[filter_idx].b_enabled = true;
		p_filters[filter_idx].type = type;
		memcpy(&p_filters[filter_idx].filter, p_filter,
		       sizeof(p_filters[filter_idx].filter));
	}

	*p_ref_cnt = ++p_filters[filter_idx].ref_cnt;

	return 0;
}

static int
qed_llh_shadow_add_filter(struct qed_dev *cdev,
			  u8 ppfid,
			  enum qed_llh_filter_type type,
			  union qed_llh_filter *p_filter,
			  u8 *p_filter_idx, u32 *p_ref_cnt)
{
	int rc;

	/* Check if the same filter already exists */
	rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
	if (rc)
		return rc;

	/* Find a new entry in case of a new filter */
	if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
		rc = qed_llh_shadow_get_free_idx(cdev, ppfid, p_filter_idx);
		if (rc)
			return rc;
	}

	/* No free entry was found */
	if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
		DP_NOTICE(cdev,
			  "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
			  ppfid);
		return -EINVAL;
	}

	return __qed_llh_shadow_add_filter(cdev, ppfid, *p_filter_idx, type,
					   p_filter, p_ref_cnt);
}

static int
__qed_llh_shadow_remove_filter(struct qed_dev *cdev,
			       u8 ppfid, u8 filter_idx, u32 *p_ref_cnt)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	struct qed_llh_filter_info *p_filters;
	int rc;

	rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "remove");
	if (rc)
		return rc;

	p_filters = p_llh_info->pp_filters[ppfid];
	if (!p_filters[filter_idx].ref_cnt) {
		DP_NOTICE(cdev,
			  "LLH shadow: trying to remove a filter with ref_cnt=0\n");
		return -EINVAL;
	}

	*p_ref_cnt = --p_filters[filter_idx].ref_cnt;
	if (!p_filters[filter_idx].ref_cnt)
		memset(&p_filters[filter_idx],
		       0, sizeof(p_filters[filter_idx]));

	return 0;
}

static int
qed_llh_shadow_remove_filter(struct qed_dev *cdev,
			     u8 ppfid,
			     union qed_llh_filter *p_filter,
			     u8 *p_filter_idx, u32 *p_ref_cnt)
{
	int rc;

	rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
	if (rc)
		return rc;

	/* No matching filter was found */
	if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
		DP_NOTICE(cdev, "Failed to find a filter in the LLH shadow\n");
		return -EINVAL;
	}

	return __qed_llh_shadow_remove_filter(cdev, ppfid, *p_filter_idx,
					      p_ref_cnt);
}

static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;

	if (ppfid >= p_llh_info->num_ppfid) {
		DP_NOTICE(cdev,
			  "ppfid %d is not valid, available indices are 0..%hhd\n",
			  ppfid, p_llh_info->num_ppfid - 1);
		*p_abs_ppfid = 0;
		return -EINVAL;
	}

	*p_abs_ppfid = p_llh_info->ppfid_array[ppfid];

	return 0;
}
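/* Note on the shadow helpers above: adding an already-existing filter only
 * increments its ref_cnt, and removing one only clears the shadow entry once
 * ref_cnt drops back to zero. The callers below therefore touch the NIG
 * hardware only on the 0->1 and 1->0 transitions.
 */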
static int
qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	enum qed_eng eng;
	u8 ppfid;
	int rc;

	rc = qed_mcp_get_engine_config(p_hwfn, p_ptt);
	if (rc != 0 && rc != -EOPNOTSUPP) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the engine affinity configuration\n");
		return rc;
	}

	/* RoCE PF is bound to a single engine */
	if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
		eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
		rc = qed_llh_set_roce_affinity(cdev, eng);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to set the RoCE engine affinity\n");
			return rc;
		}

		DP_VERBOSE(cdev,
			   QED_MSG_SP,
			   "LLH: Set the engine affinity of RoCE packets as %d\n",
			   eng);
	}

	/* Storage PF is bound to a single engine while L2 PF uses both */
	if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn))
		eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
	else			/* L2_PERSONALITY */
		eng = QED_BOTH_ENG;

	for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
		rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to set the engine affinity of ppfid %d\n",
				  ppfid);
			return rc;
		}
	}

	DP_VERBOSE(cdev, QED_MSG_SP,
		   "LLH: Set the engine affinity of non-RoCE packets as %d\n",
		   eng);

	return 0;
}

static int qed_llh_hw_init_pf(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 ppfid, abs_ppfid;
	int rc;

	for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
		u32 addr;

		rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
		if (rc)
			return rc;

		addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
		qed_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
	}

	if (test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
	    !QED_IS_FCOE_PERSONALITY(p_hwfn)) {
		rc = qed_llh_add_mac_filter(cdev, 0,
					    p_hwfn->hw_info.hw_mac_addr);
		if (rc)
			DP_NOTICE(cdev,
				  "Failed to add an LLH filter with the primary MAC\n");
	}

	if (QED_IS_CMT(cdev)) {
		rc = qed_llh_set_engine_affin(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	return 0;
}

u8 qed_llh_get_num_ppfid(struct qed_dev *cdev)
{
	return cdev->p_llh_info->num_ppfid;
}
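/* The ROCE/NON_ROCE fields defined below select the destination engine per
 * absolute ppfid; the encoding used by the setters that follow is 0 - engine 0,
 * 1 - engine 1, 2 - both engines.
 */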
#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK		0x3
#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT		0
#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK		0x3
#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT	2

int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u32 addr, val, eng_sel;
	u8 abs_ppfid;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!QED_IS_CMT(cdev))
		goto out;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto out;

	switch (eng) {
	case QED_ENG0:
		eng_sel = 0;
		break;
	case QED_ENG1:
		eng_sel = 1;
		break;
	case QED_BOTH_ENG:
		eng_sel = 2;
		break;
	default:
		DP_NOTICE(cdev, "Invalid affinity value for ppfid [%d]\n", eng);
		rc = -EINVAL;
		goto out;
	}

	addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
	val = qed_rd(p_hwfn, p_ptt, addr);
	SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
	qed_wr(p_hwfn, p_ptt, addr, val);

	/* The iWARP affinity is set as the affinity of ppfid 0 */
	if (!ppfid && QED_IS_IWARP_PERSONALITY(p_hwfn))
		cdev->iwarp_affin = (eng == QED_ENG1) ? 1 : 0;
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u32 addr, val, eng_sel;
	u8 ppfid, abs_ppfid;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!QED_IS_CMT(cdev))
		goto out;

	switch (eng) {
	case QED_ENG0:
		eng_sel = 0;
		break;
	case QED_ENG1:
		eng_sel = 1;
		break;
	case QED_BOTH_ENG:
		eng_sel = 2;
		qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
		       0xf);	/* QP bit 15 */
		break;
	default:
		DP_NOTICE(cdev, "Invalid affinity value for RoCE [%d]\n", eng);
		rc = -EINVAL;
		goto out;
	}

	for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
		rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
		if (rc)
			goto out;

		addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
		qed_wr(p_hwfn, p_ptt, addr, val);
	}
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

struct qed_llh_filter_details {
	u64 value;
	u32 mode;
	u32 protocol_type;
	u32 hdr_sel;
	u32 enable;
};
static int
qed_llh_access_filter(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u8 abs_ppfid,
		      u8 filter_idx,
		      struct qed_llh_filter_details *p_details)
{
	struct qed_dmae_params params = {0};
	u32 addr;
	u8 pfid;
	int rc;

	/* The NIG/LLH registers that are accessed in this function have only 16
	 * rows which are exposed to a PF. I.e. only the 16 filters of its
	 * default ppfid. Accessing filters of other ppfids requires pretending
	 * to be other PFs.
	 * The calculation of PPFID->PFID in AH is based on the relative index
	 * of a PF on its port.
	 * For BB the pfid is actually the abs_ppfid.
	 */
	if (QED_IS_BB(p_hwfn->cdev))
		pfid = abs_ppfid;
	else
		pfid = abs_ppfid * p_hwfn->cdev->num_ports_in_engine +
		    MFW_PORT(p_hwfn);

	/* Filter enable - should be done first when removing a filter */
	if (!p_details->enable) {
		qed_fid_pretend(p_hwfn, p_ptt,
				pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

		addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
		qed_wr(p_hwfn, p_ptt, addr, p_details->enable);

		qed_fid_pretend(p_hwfn, p_ptt,
				p_hwfn->rel_pf_id <<
				PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
	}

	/* Filter value */
	addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;

	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
	params.dst_pfid = pfid;
	rc = qed_dmae_host2grc(p_hwfn,
			       p_ptt,
			       (u64)(uintptr_t)&p_details->value,
			       addr, 2 /* size_in_dwords */,
			       &params);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt,
			pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

	/* Filter mode */
	addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4;
	qed_wr(p_hwfn, p_ptt, addr, p_details->mode);

	/* Filter protocol type */
	addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4;
	qed_wr(p_hwfn, p_ptt, addr, p_details->protocol_type);

	/* Filter header select */
	addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4;
	qed_wr(p_hwfn, p_ptt, addr, p_details->hdr_sel);

	/* Filter enable - should be done last when adding a filter */
	if (p_details->enable) {
		addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
		qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
	}

	qed_fid_pretend(p_hwfn, p_ptt,
			p_hwfn->rel_pf_id <<
			PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

	return 0;
}
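/* The 64-bit filter value programmed via qed_llh_access_filter() is built from
 * two dwords, value = ((u64)high << 32) | low. For a MAC filter, for example,
 * qed_llh_add_mac_filter() below packs (illustrative address) aa:bb:cc:dd:ee:ff
 * as high = 0x0000aabb and low = 0xccddeeff.
 */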
static int
qed_llh_add_filter(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u8 abs_ppfid,
		   u8 filter_idx, u8 filter_prot_type, u32 high, u32 low)
{
	struct qed_llh_filter_details filter_details;

	filter_details.enable = 1;
	filter_details.value = ((u64)high << 32) | low;
	filter_details.hdr_sel = 0;
	filter_details.protocol_type = filter_prot_type;
	/* Mode: 0: MAC-address classification 1: protocol classification */
	filter_details.mode = filter_prot_type ? 1 : 0;

	return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
				     &filter_details);
}

static int
qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
{
	struct qed_llh_filter_details filter_details = {0};

	return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
				     &filter_details);
}

int qed_llh_add_mac_filter(struct qed_dev *cdev,
			   u8 ppfid, u8 mac_addr[ETH_ALEN])
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	union qed_llh_filter filter = {};
	u8 filter_idx, abs_ppfid;
	u32 high, low, ref_cnt;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
		goto out;

	memcpy(filter.mac.addr, mac_addr, ETH_ALEN);
	rc = qed_llh_shadow_add_filter(cdev, ppfid,
				       QED_LLH_FILTER_TYPE_MAC,
				       &filter, &filter_idx, &ref_cnt);
	if (rc)
		goto err;

	/* Configure the LLH only in case of a new filter */
	if (ref_cnt == 1) {
		rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
		if (rc)
			goto err;

		high = mac_addr[1] | (mac_addr[0] << 8);
		low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
		      (mac_addr[2] << 24);
		rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
					0, high, low);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Added MAC filter [%pM] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(cdev,
		  "LLH: Failed to add MAC filter [%pM] to ppfid %hhd\n",
		  mac_addr, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int
qed_llh_protocol_filter_stringify(struct qed_dev *cdev,
				  enum qed_llh_prot_filter_type_t type,
				  u16 source_port_or_eth_type,
				  u16 dest_port, u8 *str, size_t str_len)
{
	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		snprintf(str, str_len, "Ethertype 0x%04x",
			 source_port_or_eth_type);
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
		snprintf(str, str_len, "TCP src port 0x%04x",
			 source_port_or_eth_type);
		break;
	case QED_LLH_FILTER_UDP_SRC_PORT:
		snprintf(str, str_len, "UDP src port 0x%04x",
			 source_port_or_eth_type);
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
		snprintf(str, str_len, "TCP dst port 0x%04x", dest_port);
		break;
	case QED_LLH_FILTER_UDP_DEST_PORT:
		snprintf(str, str_len, "UDP dst port 0x%04x", dest_port);
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
		snprintf(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
			 source_port_or_eth_type, dest_port);
		break;
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		snprintf(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
			 source_port_or_eth_type, dest_port);
		break;
	default:
		DP_NOTICE(cdev,
			  "Invalid LLH protocol filter type %d\n", type);
		return -EINVAL;
	}

	return 0;
}
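/* Worked example for the hilo encoding below (illustrative values): a
 * QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT filter with source port 0x1234 and
 * destination port 0x5678 yields high = 0 and low = 0x12345678.
 */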
static int
qed_llh_protocol_filter_to_hilo(struct qed_dev *cdev,
				enum qed_llh_prot_filter_type_t type,
				u16 source_port_or_eth_type,
				u16 dest_port, u32 *p_high, u32 *p_low)
{
	*p_high = 0;
	*p_low = 0;

	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		*p_high = source_port_or_eth_type;
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
	case QED_LLH_FILTER_UDP_SRC_PORT:
		*p_low = source_port_or_eth_type << 16;
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
	case QED_LLH_FILTER_UDP_DEST_PORT:
		*p_low = dest_port;
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		*p_low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(cdev,
			  "Invalid LLH protocol filter type %d\n", type);
		return -EINVAL;
	}

	return 0;
}

int
qed_llh_add_protocol_filter(struct qed_dev *cdev,
			    u8 ppfid,
			    enum qed_llh_prot_filter_type_t type,
			    u16 source_port_or_eth_type, u16 dest_port)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u8 filter_idx, abs_ppfid, str[32], type_bitmap;
	union qed_llh_filter filter = {};
	u32 high, low, ref_cnt;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
		goto out;

	rc = qed_llh_protocol_filter_stringify(cdev, type,
					       source_port_or_eth_type,
					       dest_port, str, sizeof(str));
	if (rc)
		goto err;

	filter.protocol.type = type;
	filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
	filter.protocol.dest_port = dest_port;
	rc = qed_llh_shadow_add_filter(cdev,
				       ppfid,
				       QED_LLH_FILTER_TYPE_PROTOCOL,
				       &filter, &filter_idx, &ref_cnt);
	if (rc)
		goto err;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto err;

	/* Configure the LLH only in case of a new filter */
	if (ref_cnt == 1) {
		rc = qed_llh_protocol_filter_to_hilo(cdev, type,
						     source_port_or_eth_type,
						     dest_port, &high, &low);
		if (rc)
			goto err;

		type_bitmap = 0x1 << type;
		rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid,
					filter_idx, type_bitmap, high, low);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   str, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(p_hwfn,
		  "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
		  str, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
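/* Usage sketch (illustrative only, return codes elided): steering e.g. iSCSI
 * traffic (TCP destination port 3260) on relative ppfid 0 and undoing it:
 *
 *	qed_llh_add_protocol_filter(cdev, 0, QED_LLH_FILTER_TCP_DEST_PORT,
 *				    0, 3260);
 *	...
 *	qed_llh_remove_protocol_filter(cdev, 0, QED_LLH_FILTER_TCP_DEST_PORT,
 *				       0, 3260);
 */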
void qed_llh_remove_mac_filter(struct qed_dev *cdev,
			       u8 ppfid, u8 mac_addr[ETH_ALEN])
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	union qed_llh_filter filter = {};
	u8 filter_idx, abs_ppfid;
	int rc = 0;
	u32 ref_cnt;

	if (!p_ptt)
		return;

	if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
		goto out;

	ether_addr_copy(filter.mac.addr, mac_addr);
	rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
					  &ref_cnt);
	if (rc)
		goto err;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto err;

	/* Remove from the LLH in case the filter is not in use */
	if (!ref_cnt) {
		rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
					   filter_idx);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Removed MAC filter [%pM] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(cdev,
		  "LLH: Failed to remove MAC filter [%pM] from ppfid %hhd\n",
		  mac_addr, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);
}

void qed_llh_remove_protocol_filter(struct qed_dev *cdev,
				    u8 ppfid,
				    enum qed_llh_prot_filter_type_t type,
				    u16 source_port_or_eth_type, u16 dest_port)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u8 filter_idx, abs_ppfid, str[32];
	union qed_llh_filter filter = {};
	int rc = 0;
	u32 ref_cnt;

	if (!p_ptt)
		return;

	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
		goto out;

	rc = qed_llh_protocol_filter_stringify(cdev, type,
					       source_port_or_eth_type,
					       dest_port, str, sizeof(str));
	if (rc)
		goto err;

	filter.protocol.type = type;
	filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
	filter.protocol.dest_port = dest_port;
	rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
					  &ref_cnt);
	if (rc)
		goto err;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto err;

	/* Remove from the LLH in case the filter is not in use */
	if (!ref_cnt) {
		rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
					   filter_idx);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   str, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(cdev,
		  "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
		  str, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);
}

/******************************* NIG LLH - End ********************************/

#define QED_MIN_DPIS		(4)
#define QED_MIN_PWM_REGION	(QED_WID_SIZE * QED_MIN_DPIS)
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_bar_size(p_hwfn, bar_id);

	val = qed_rd(p_hwfn, p_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* Old MFW initialized the above register only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
}

void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
{
	u32 i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}

void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->cdev = cdev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
	kfree(qm_info->wfq_data);
	qm_info->wfq_data = NULL;
}

static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->dbg_user_info);
	p_hwfn->dbg_user_info = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev)) {
		for_each_hwfn(cdev, i)
			qed_l2_free(&cdev->hwfns[i]);
		return;
	}

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);
	cdev->reset_stats = NULL;

	qed_llh_free(cdev);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn);
		qed_consq_free(p_hwfn);
		qed_int_free(p_hwfn);
#ifdef CONFIG_QED_LL2
		qed_ll2_free(p_hwfn);
#endif
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			qed_fcoe_free(p_hwfn);

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			qed_iscsi_free(p_hwfn);
			qed_ooo_free(p_hwfn);
		}

		if (QED_IS_RDMA_PERSONALITY(p_hwfn))
			qed_rdma_info_free(p_hwfn);

		qed_iov_free(p_hwfn);
		qed_l2_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
		qed_dcbx_info_free(p_hwfn);
		qed_dbg_user_data_free(p_hwfn);
		qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);

		/* Destroy doorbell recovery mechanism */
		qed_db_recovery_teardown(p_hwfn);
	}
}
/******************** QM initialization *******************/
#define ACTIVE_TCS_BMAP 0x9f
#define ACTIVE_TCS_BMAP_4PORT_K2 0xf

/* determines the physical queue flags for a given PF. */
static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
{
	u32 flags;

	/* common flags */
	flags = PQ_FLAGS_LB;

	/* feature flags */
	if (IS_QED_SRIOV(p_hwfn->cdev))
		flags |= PQ_FLAGS_VFS;

	/* protocol flags */
	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		flags |= PQ_FLAGS_MCOS;
		break;
	case QED_PCI_FCOE:
		flags |= PQ_FLAGS_OFLD;
		break;
	case QED_PCI_ISCSI:
		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
		break;
	case QED_PCI_ETH_ROCE:
		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
		if (IS_QED_MULTI_TC_ROCE(p_hwfn))
			flags |= PQ_FLAGS_MTC;
		break;
	case QED_PCI_ETH_IWARP:
		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
		    PQ_FLAGS_OFLD;
		break;
	default:
		DP_ERR(p_hwfn,
		       "unknown personality %d\n", p_hwfn->hw_info.personality);
		return 0;
	}

	return flags;
}

/* Getters for resource amounts necessary for qm initialization */
static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->hw_info.num_hw_tc;
}

static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
{
	return IS_QED_SRIOV(p_hwfn->cdev) ?
	       p_hwfn->cdev->p_iov_info->total_vfs : 0;
}

static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn)
{
	u32 pq_flags = qed_get_pq_flags(p_hwfn);

	if (!(PQ_FLAGS_MTC & pq_flags))
		return 1;

	return qed_init_qm_get_num_tcs(p_hwfn);
}

#define NUM_DEFAULT_RLS 1

static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
{
	u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);

	/* num RLs can't exceed resource amount of rls or vports */
	num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
				 RESC_NUM(p_hwfn, QED_VPORT));

	/* Make sure after we reserve there's something left */
	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
		return 0;

	/* subtract rls necessary for VFs and one default one for the PF */
	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;

	return num_pf_rls;
}

static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
{
	u32 pq_flags = qed_get_pq_flags(p_hwfn);

	/* all pqs share the same vport, except for vfs and pf_rl pqs */
	return (!!(PQ_FLAGS_RLS & pq_flags)) *
	       qed_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) *
	       qed_init_qm_get_num_vfs(p_hwfn) + 1;
}

/* calc amount of PQs according to the requested flags */
static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
{
	u32 pq_flags = qed_get_pq_flags(p_hwfn);

	return (!!(PQ_FLAGS_RLS & pq_flags)) *
	       qed_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_MCOS & pq_flags)) *
	       qed_init_qm_get_num_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
	       (!!(PQ_FLAGS_ACK & pq_flags)) +
	       (!!(PQ_FLAGS_OFLD & pq_flags)) *
	       qed_init_qm_get_num_mtc_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_LLT & pq_flags)) *
	       qed_init_qm_get_num_mtc_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
}
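/* Worked example (illustrative): a QED_PCI_ETH PF with 4 TCs, no SRIOV and no
 * rate limiters gets pq_flags = PQ_FLAGS_LB | PQ_FLAGS_MCOS, so the getter
 * above returns 4 (MCOS) + 1 (LB) = 5 PQs and a single shared vport.
 */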
/* initialize the top level QM params */
static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	bool four_port;

	/* pq and vport bases for this PF */
	qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);

	/* rate limiting and weighted fair queueing are always enabled */
	qm_info->vport_rl_en = true;
	qm_info->vport_wfq_en = true;

	/* TC config is different for AH 4 port */
	four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;

	/* in AH 4 port we have fewer TCs per port */
	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
						     NUM_OF_PHYS_TCS;

	/* unless MFW indicated otherwise, ooo_tc == 3 for
	 * AH 4-port and 4 otherwise.
	 */
	if (!qm_info->ooo_tc)
		qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
					      DCBX_TCP_OOO_TC;
}

/* initialize qm vport params */
static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u8 i;

	/* all vports participate in weighted fair queueing */
	for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
		qm_info->qm_vport_params[i].wfq = 1;
}

/* initialize qm port params */
static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
{
	/* Initialize qm port parameters */
	u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
	struct qed_dev *cdev = p_hwfn->cdev;

	/* indicate how ooo and high pri traffic is dealt with */
	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
	    ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;

	for (i = 0; i < num_ports; i++) {
		struct init_qm_port_params *p_qm_port =
		    &p_hwfn->qm_info.qm_port_params[i];
		u16 pbf_max_cmd_lines;

		p_qm_port->active = 1;
		p_qm_port->active_phys_tcs = active_phys_tcs;
		pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev);
		p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
		p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports;
	}
}

/* Reset the params which must be reset for qm init. QM init may be called as
 * a result of flows other than driver load (e.g. dcbx renegotiation). Other
 * params may be affected by the init but would simply recalculate to the same
 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
 * affected as these amounts stay the same.
 */
static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	qm_info->num_pqs = 0;
	qm_info->num_vports = 0;
	qm_info->num_pf_rls = 0;
	qm_info->num_vf_pqs = 0;
	qm_info->first_vf_pq = 0;
	qm_info->first_mcos_pq = 0;
	qm_info->first_rl_pq = 0;
}

static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	qm_info->num_vports++;

	if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn,
		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
		       qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
}

/* initialize a single pq and manage qm_info resources accounting.
 * The pq_init_flags param determines whether the PQ is rate limited
 * (for VF or PF) and whether a new vport is allocated to the pq or not
 * (i.e. vport will be shared).
 */
/* flags for pq init */
#define PQ_INIT_SHARE_VPORT	(1 << 0)
#define PQ_INIT_PF_RL		(1 << 1)
#define PQ_INIT_VF_RL		(1 << 2)

/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP	1
#define PQ_INIT_DEFAULT_TC		0

void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc)
{
	p_info->offload_tc = tc;
	p_info->offload_tc_set = true;
}

static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->hw_info.offload_tc_set;
}

static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn)
{
	if (qed_is_offload_tc_set(p_hwfn))
		return p_hwfn->hw_info.offload_tc;

	return PQ_INIT_DEFAULT_TC;
}

static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
			   struct qed_qm_info *qm_info,
			   u8 tc, u32 pq_init_flags)
{
	u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);

	if (pq_idx > max_pq)
		DP_ERR(p_hwfn,
		       "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);

	/* init pq params */
	qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
	    qm_info->num_vports;
	qm_info->qm_pq_params[pq_idx].tc_id = tc;
	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
	qm_info->qm_pq_params[pq_idx].rl_valid =
	    (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);

	/* qm params accounting */
	qm_info->num_pqs++;
	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
		qm_info->num_vports++;

	if (pq_init_flags & PQ_INIT_PF_RL)
		qm_info->num_pf_rls++;

	if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn,
		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
		       qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));

	if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
		DP_ERR(p_hwfn,
		       "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
		       qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
}
/* get pq index according to PQ_FLAGS */
static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
					   unsigned long pq_flags)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	/* Can't have multiple flags set here */
	if (bitmap_weight(&pq_flags,
			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
		DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
		goto err;
	}

	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
		DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
		goto err;
	}

	switch (pq_flags) {
	case PQ_FLAGS_RLS:
		return &qm_info->first_rl_pq;
	case PQ_FLAGS_MCOS:
		return &qm_info->first_mcos_pq;
	case PQ_FLAGS_LB:
		return &qm_info->pure_lb_pq;
	case PQ_FLAGS_OOO:
		return &qm_info->ooo_pq;
	case PQ_FLAGS_ACK:
		return &qm_info->pure_ack_pq;
	case PQ_FLAGS_OFLD:
		return &qm_info->first_ofld_pq;
	case PQ_FLAGS_LLT:
		return &qm_info->first_llt_pq;
	case PQ_FLAGS_VFS:
		return &qm_info->first_vf_pq;
	default:
		goto err;
	}

err:
	return &qm_info->start_pq;
}

/* save pq index in qm info */
static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
				u32 pq_flags, u16 pq_val)
{
	u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);

	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
}

/* get tx pq index, with the PQ TX base already set (ready for context init) */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
{
	u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);

	return *base_pq_idx + CM_TX_PQ_BASE;
}

u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
{
	u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);

	if (max_tc == 0) {
		DP_ERR(p_hwfn, "pq with flag 0x%lx does not exist\n",
		       PQ_FLAGS_MCOS);
		return p_hwfn->qm_info.start_pq;
	}

	if (tc > max_tc)
		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);

	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
}

u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
{
	u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);

	if (max_vf == 0) {
		DP_ERR(p_hwfn, "pq with flag 0x%lx does not exist\n",
		       PQ_FLAGS_VFS);
		return p_hwfn->qm_info.start_pq;
	}

	if (vf > max_vf)
		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);

	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
}

u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
{
	u16 first_ofld_pq, pq_offset;

	first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
		    tc : PQ_INIT_DEFAULT_TC;

	return first_ofld_pq + pq_offset;
}
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc)
{
	u16 first_llt_pq, pq_offset;

	first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
	pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
		    tc : PQ_INIT_DEFAULT_TC;

	return first_llt_pq + pq_offset;
}

/* Functions for creating specific types of pqs */
static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
	qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
	qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
	qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
		       PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn)
{
	u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn);
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u8 tc;

	/* override pq's TC if offload TC is set */
	for (tc = 0; tc < num_tcs; tc++)
		qed_init_qm_pq(p_hwfn, qm_info,
			       qed_is_offload_tc_set(p_hwfn) ?
			       p_hwfn->hw_info.offload_tc : tc,
			       PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
	qed_init_qm_mtc_pqs(p_hwfn);
}

static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
	qed_init_qm_mtc_pqs(p_hwfn);
}

static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u8 tc_idx;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
	for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
		qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
	qm_info->num_vf_pqs = num_vfs;
	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
		qed_init_qm_pq(p_hwfn,
			       qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
}
static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
{
	u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
		qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
			       PQ_INIT_PF_RL);
}

static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
{
	/* rate limited pqs, must come first (FW assumption) */
	qed_init_qm_rl_pqs(p_hwfn);

	/* pqs for multi cos */
	qed_init_qm_mcos_pqs(p_hwfn);

	/* pure loopback pq */
	qed_init_qm_lb_pq(p_hwfn);

	/* out of order pq */
	qed_init_qm_ooo_pq(p_hwfn);

	/* pure ack pq */
	qed_init_qm_pure_ack_pq(p_hwfn);

	/* pq for offloaded protocol */
	qed_init_qm_offload_pq(p_hwfn);

	/* low latency pq */
	qed_init_qm_low_latency_pq(p_hwfn);

	/* done sharing vports */
	qed_init_qm_advance_vport(p_hwfn);

	/* pqs for vfs */
	qed_init_qm_vf_pqs(p_hwfn);
}

/* compare values of getters against resources amounts */
static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
{
	if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
		return -EINVAL;
	}

	if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
		return 0;

	if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
		p_hwfn->hw_info.multi_tc_roce_en = false;
		DP_NOTICE(p_hwfn,
			  "multi-tc roce was disabled to reduce requested amount of pqs\n");
		if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
			return 0;
	}

	DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
	return -EINVAL;
}
%d, first_tx_pq_id [ ", 2042 qm_info->start_vport + i, vport->wfq); 2043 for (tc = 0; tc < NUM_OF_TCS; tc++) 2044 DP_VERBOSE(p_hwfn, 2045 NETIF_MSG_HW, 2046 "%d ", vport->first_tx_pq_id[tc]); 2047 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n"); 2048 } 2049 2050 /* pq table */ 2051 for (i = 0; i < qm_info->num_pqs; i++) { 2052 pq = &(qm_info->qm_pq_params[i]); 2053 DP_VERBOSE(p_hwfn, 2054 NETIF_MSG_HW, 2055 "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n", 2056 qm_info->start_pq + i, 2057 pq->port_id, 2058 pq->vport_id, 2059 pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id); 2060 } 2061 } 2062 2063 static void qed_init_qm_info(struct qed_hwfn *p_hwfn) 2064 { 2065 /* reset params required for init run */ 2066 qed_init_qm_reset_params(p_hwfn); 2067 2068 /* init QM top level params */ 2069 qed_init_qm_params(p_hwfn); 2070 2071 /* init QM port params */ 2072 qed_init_qm_port_params(p_hwfn); 2073 2074 /* init QM vport params */ 2075 qed_init_qm_vport_params(p_hwfn); 2076 2077 /* init QM physical queue params */ 2078 qed_init_qm_pq_params(p_hwfn); 2079 2080 /* display all that init */ 2081 qed_dp_init_qm_params(p_hwfn); 2082 } 2083 2084 /* This function reconfigures the QM pf on the fly. 2085 * For this purpose we: 2086 * 1. reconfigure the QM database 2087 * 2. set new values to runtime array 2088 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 2089 * 4. activate init tool in QM_PF stage 2090 * 5. send an sdm_qm_cmd through rbc interface to release the QM 2091 */ 2092 int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2093 { 2094 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 2095 bool b_rc; 2096 int rc; 2097 2098 /* initialize qed's qm data structure */ 2099 qed_init_qm_info(p_hwfn); 2100 2101 /* stop PF's qm queues */ 2102 spin_lock_bh(&qm_lock); 2103 b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 2104 qm_info->start_pq, qm_info->num_pqs); 2105 spin_unlock_bh(&qm_lock); 2106 if (!b_rc) 2107 return -EINVAL; 2108 2109 /* prepare QM portion of runtime array */ 2110 qed_qm_init_pf(p_hwfn, p_ptt, false); 2111 2112 /* activate init tool on runtime array */ 2113 rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 2114 p_hwfn->hw_info.hw_mode); 2115 if (rc) 2116 return rc; 2117 2118 /* start PF's qm queues */ 2119 spin_lock_bh(&qm_lock); 2120 b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 2121 qm_info->start_pq, qm_info->num_pqs); 2122 spin_unlock_bh(&qm_lock); 2123 if (!b_rc) 2124 return -EINVAL; 2125 2126 return 0; 2127 } 2128 2129 static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn) 2130 { 2131 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 2132 int rc; 2133 2134 rc = qed_init_qm_sanity(p_hwfn); 2135 if (rc) 2136 goto alloc_err; 2137 2138 qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn), 2139 sizeof(*qm_info->qm_pq_params), 2140 GFP_KERNEL); 2141 if (!qm_info->qm_pq_params) 2142 goto alloc_err; 2143 2144 qm_info->qm_vport_params = kcalloc(qed_init_qm_get_num_vports(p_hwfn), 2145 sizeof(*qm_info->qm_vport_params), 2146 GFP_KERNEL); 2147 if (!qm_info->qm_vport_params) 2148 goto alloc_err; 2149 2150 qm_info->qm_port_params = kcalloc(p_hwfn->cdev->num_ports_in_engine, 2151 sizeof(*qm_info->qm_port_params), 2152 GFP_KERNEL); 2153 if (!qm_info->qm_port_params) 2154 goto alloc_err; 2155 2156 qm_info->wfq_data = kcalloc(qed_init_qm_get_num_vports(p_hwfn), 2157 sizeof(*qm_info->wfq_data), 2158 GFP_KERNEL); 2159 if (!qm_info->wfq_data) 2160 goto alloc_err; 2161 2162 return 0; 2163 2164 alloc_err: 2165 
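/* Error unwind note: qed_qm_info_free() is called below on any of the
 * allocation failures above; kfree(NULL) is a no-op, so it is safe to
 * free whichever of the four kcalloc()'d arrays (qm_pq_params,
 * qm_vport_params, qm_port_params, wfq_data) were not yet allocated.
 */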
DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); 2166 qed_qm_info_free(p_hwfn); 2167 return -ENOMEM; 2168 } 2169 2170 int qed_resc_alloc(struct qed_dev *cdev) 2171 { 2172 u32 rdma_tasks, excess_tasks; 2173 u32 line_count; 2174 int i, rc = 0; 2175 2176 if (IS_VF(cdev)) { 2177 for_each_hwfn(cdev, i) { 2178 rc = qed_l2_alloc(&cdev->hwfns[i]); 2179 if (rc) 2180 return rc; 2181 } 2182 return rc; 2183 } 2184 2185 cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); 2186 if (!cdev->fw_data) 2187 return -ENOMEM; 2188 2189 for_each_hwfn(cdev, i) { 2190 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2191 u32 n_eqes, num_cons; 2192 2193 /* Initialize the doorbell recovery mechanism */ 2194 rc = qed_db_recovery_setup(p_hwfn); 2195 if (rc) 2196 goto alloc_err; 2197 2198 /* First allocate the context manager structure */ 2199 rc = qed_cxt_mngr_alloc(p_hwfn); 2200 if (rc) 2201 goto alloc_err; 2202 2203 /* Set the HW cid/tid numbers (in the context manager) 2204 * Must be done prior to any further computations. 2205 */ 2206 rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); 2207 if (rc) 2208 goto alloc_err; 2209 2210 rc = qed_alloc_qm_data(p_hwfn); 2211 if (rc) 2212 goto alloc_err; 2213 2214 /* init qm info */ 2215 qed_init_qm_info(p_hwfn); 2216 2217 /* Compute the ILT client partition */ 2218 rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); 2219 if (rc) { 2220 DP_NOTICE(p_hwfn, 2221 "too many ILT lines; re-computing with fewer lines\n"); 2222 /* In case there are not enough ILT lines we reduce the 2223 * number of RDMA tasks and re-compute. 2224 */ 2225 excess_tasks = 2226 qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count); 2227 if (!excess_tasks) 2228 goto alloc_err; 2229 2230 rdma_tasks = RDMA_MAX_TIDS - excess_tasks; 2231 rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks); 2232 if (rc) 2233 goto alloc_err; 2234 2235 rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); 2236 if (rc) { 2237 DP_ERR(p_hwfn, 2238 "failed ILT compute. Requested too many lines: %u\n", 2239 line_count); 2240 2241 goto alloc_err; 2242 } 2243 } 2244 2245 /* CID map / ILT shadow table / T2 2246 * The table sizes are determined by the computations above 2247 */ 2248 rc = qed_cxt_tables_alloc(p_hwfn); 2249 if (rc) 2250 goto alloc_err; 2251 2252 /* SPQ, must follow ILT because it initializes the SPQ context */ 2253 rc = qed_spq_alloc(p_hwfn); 2254 if (rc) 2255 goto alloc_err; 2256 2257 /* SP status block allocation */ 2258 p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn, 2259 RESERVED_PTT_DPC); 2260 2261 rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 2262 if (rc) 2263 goto alloc_err; 2264 2265 rc = qed_iov_alloc(p_hwfn); 2266 if (rc) 2267 goto alloc_err; 2268 2269 /* EQ */ 2270 n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); 2271 if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { 2272 enum protocol_type rdma_proto; 2273 2274 if (QED_IS_ROCE_PERSONALITY(p_hwfn)) 2275 rdma_proto = PROTOCOLID_ROCE; 2276 else 2277 rdma_proto = PROTOCOLID_IWARP; 2278 2279 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, 2280 rdma_proto, 2281 NULL) * 2; 2282 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 2283 } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 2284 num_cons = 2285 qed_cxt_get_proto_cid_count(p_hwfn, 2286 PROTOCOLID_ISCSI, 2287 NULL); 2288 n_eqes += 2 * num_cons; 2289 } 2290 2291 if (n_eqes > 0xFFFF) { 2292 DP_ERR(p_hwfn, 2293 "Cannot allocate 0x%x EQ elements. 
The maximum of a u16 chain is 0x%x\n", 2294 n_eqes, 0xFFFF); 2295 goto alloc_no_mem; 2296 } 2297 2298 rc = qed_eq_alloc(p_hwfn, (u16) n_eqes); 2299 if (rc) 2300 goto alloc_err; 2301 2302 rc = qed_consq_alloc(p_hwfn); 2303 if (rc) 2304 goto alloc_err; 2305 2306 rc = qed_l2_alloc(p_hwfn); 2307 if (rc) 2308 goto alloc_err; 2309 2310 #ifdef CONFIG_QED_LL2 2311 if (p_hwfn->using_ll2) { 2312 rc = qed_ll2_alloc(p_hwfn); 2313 if (rc) 2314 goto alloc_err; 2315 } 2316 #endif 2317 2318 if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { 2319 rc = qed_fcoe_alloc(p_hwfn); 2320 if (rc) 2321 goto alloc_err; 2322 } 2323 2324 if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 2325 rc = qed_iscsi_alloc(p_hwfn); 2326 if (rc) 2327 goto alloc_err; 2328 rc = qed_ooo_alloc(p_hwfn); 2329 if (rc) 2330 goto alloc_err; 2331 } 2332 2333 if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { 2334 rc = qed_rdma_info_alloc(p_hwfn); 2335 if (rc) 2336 goto alloc_err; 2337 } 2338 2339 /* DMA info initialization */ 2340 rc = qed_dmae_info_alloc(p_hwfn); 2341 if (rc) 2342 goto alloc_err; 2343 2344 /* DCBX initialization */ 2345 rc = qed_dcbx_info_alloc(p_hwfn); 2346 if (rc) 2347 goto alloc_err; 2348 2349 rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info); 2350 if (rc) 2351 goto alloc_err; 2352 } 2353 2354 rc = qed_llh_alloc(cdev); 2355 if (rc) { 2356 DP_NOTICE(cdev, 2357 "Failed to allocate memory for the llh_info structure\n"); 2358 goto alloc_err; 2359 } 2360 2361 cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); 2362 if (!cdev->reset_stats) 2363 goto alloc_no_mem; 2364 2365 return 0; 2366 2367 alloc_no_mem: 2368 rc = -ENOMEM; 2369 alloc_err: 2370 qed_resc_free(cdev); 2371 return rc; 2372 } 2373 2374 void qed_resc_setup(struct qed_dev *cdev) 2375 { 2376 int i; 2377 2378 if (IS_VF(cdev)) { 2379 for_each_hwfn(cdev, i) 2380 qed_l2_setup(&cdev->hwfns[i]); 2381 return; 2382 } 2383 2384 for_each_hwfn(cdev, i) { 2385 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2386 2387 qed_cxt_mngr_setup(p_hwfn); 2388 qed_spq_setup(p_hwfn); 2389 qed_eq_setup(p_hwfn); 2390 qed_consq_setup(p_hwfn); 2391 2392 /* Read shadow of current MFW mailbox */ 2393 qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 2394 memcpy(p_hwfn->mcp_info->mfw_mb_shadow, 2395 p_hwfn->mcp_info->mfw_mb_cur, 2396 p_hwfn->mcp_info->mfw_mb_length); 2397 2398 qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); 2399 2400 qed_l2_setup(p_hwfn); 2401 qed_iov_setup(p_hwfn); 2402 #ifdef CONFIG_QED_LL2 2403 if (p_hwfn->using_ll2) 2404 qed_ll2_setup(p_hwfn); 2405 #endif 2406 if (p_hwfn->hw_info.personality == QED_PCI_FCOE) 2407 qed_fcoe_setup(p_hwfn); 2408 2409 if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 2410 qed_iscsi_setup(p_hwfn); 2411 qed_ooo_setup(p_hwfn); 2412 } 2413 } 2414 } 2415 2416 #define FINAL_CLEANUP_POLL_CNT (100) 2417 #define FINAL_CLEANUP_POLL_TIME (10) 2418 int qed_final_cleanup(struct qed_hwfn *p_hwfn, 2419 struct qed_ptt *p_ptt, u16 id, bool is_vf) 2420 { 2421 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 2422 int rc = -EBUSY; 2423 2424 addr = GTT_BAR0_MAP_REG_USDM_RAM + 2425 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 2426 2427 if (is_vf) 2428 id += 0x10; 2429 2430 command |= X_FINAL_CLEANUP_AGG_INT << 2431 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 2432 command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 2433 command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 2434 command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; 2435 2436 /* Make sure notification is not set before initiating final cleanup */ 2437 if 
(REG_RD(p_hwfn, addr)) { 2438 DP_NOTICE(p_hwfn, 2439 "Unexpected; Found final cleanup notification before initiating final cleanup\n"); 2440 REG_WR(p_hwfn, addr, 0); 2441 } 2442 2443 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2444 "Sending final cleanup for PFVF[%d] [Command %08x]\n", 2445 id, command); 2446 2447 qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 2448 2449 /* Poll until completion */ 2450 while (!REG_RD(p_hwfn, addr) && count--) 2451 msleep(FINAL_CLEANUP_POLL_TIME); 2452 2453 if (REG_RD(p_hwfn, addr)) 2454 rc = 0; 2455 else 2456 DP_NOTICE(p_hwfn, 2457 "Failed to receive FW final cleanup notification\n"); 2458 2459 /* Cleanup afterwards */ 2460 REG_WR(p_hwfn, addr, 0); 2461 2462 return rc; 2463 } 2464 2465 static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) 2466 { 2467 int hw_mode = 0; 2468 2469 if (QED_IS_BB_B0(p_hwfn->cdev)) { 2470 hw_mode |= 1 << MODE_BB; 2471 } else if (QED_IS_AH(p_hwfn->cdev)) { 2472 hw_mode |= 1 << MODE_K2; 2473 } else { 2474 DP_NOTICE(p_hwfn, "Unknown chip type %#x\n", 2475 p_hwfn->cdev->type); 2476 return -EINVAL; 2477 } 2478 2479 switch (p_hwfn->cdev->num_ports_in_engine) { 2480 case 1: 2481 hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 2482 break; 2483 case 2: 2484 hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 2485 break; 2486 case 4: 2487 hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 2488 break; 2489 default: 2490 DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", 2491 p_hwfn->cdev->num_ports_in_engine); 2492 return -EINVAL; 2493 } 2494 2495 if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) 2496 hw_mode |= 1 << MODE_MF_SD; 2497 else 2498 hw_mode |= 1 << MODE_MF_SI; 2499 2500 hw_mode |= 1 << MODE_ASIC; 2501 2502 if (p_hwfn->cdev->num_hwfns > 1) 2503 hw_mode |= 1 << MODE_100G; 2504 2505 p_hwfn->hw_info.hw_mode = hw_mode; 2506 2507 DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), 2508 "Configuring function for hw_mode: 0x%08x\n", 2509 p_hwfn->hw_info.hw_mode); 2510 2511 return 0; 2512 } 2513 2514 /* Init run time data for all PFs on an engine. */ 2515 static void qed_init_cau_rt_data(struct qed_dev *cdev) 2516 { 2517 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 2518 int i, igu_sb_id; 2519 2520 for_each_hwfn(cdev, i) { 2521 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2522 struct qed_igu_info *p_igu_info; 2523 struct qed_igu_block *p_block; 2524 struct cau_sb_entry sb_entry; 2525 2526 p_igu_info = p_hwfn->hw_info.p_igu_info; 2527 2528 for (igu_sb_id = 0; 2529 igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) { 2530 p_block = &p_igu_info->entry[igu_sb_id]; 2531 2532 if (!p_block->is_pf) 2533 continue; 2534 2535 qed_init_cau_sb_entry(p_hwfn, &sb_entry, 2536 p_block->function_id, 0, 0); 2537 STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, 2538 sb_entry); 2539 } 2540 } 2541 } 2542 2543 static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, 2544 struct qed_ptt *p_ptt) 2545 { 2546 u32 val, wr_mbs, cache_line_size; 2547 2548 val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 2549 switch (val) { 2550 case 0: 2551 wr_mbs = 128; 2552 break; 2553 case 1: 2554 wr_mbs = 256; 2555 break; 2556 case 2: 2557 wr_mbs = 512; 2558 break; 2559 default: 2560 DP_INFO(p_hwfn, 2561 "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. 
Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 2562 val); 2563 return; 2564 } 2565 2566 cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs); 2567 switch (cache_line_size) { 2568 case 32: 2569 val = 0; 2570 break; 2571 case 64: 2572 val = 1; 2573 break; 2574 case 128: 2575 val = 2; 2576 break; 2577 case 256: 2578 val = 3; 2579 break; 2580 default: 2581 DP_INFO(p_hwfn, 2582 "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 2583 cache_line_size); 2584 } 2585 2586 if (L1_CACHE_BYTES > wr_mbs) 2587 DP_INFO(p_hwfn, 2588 "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 2589 L1_CACHE_BYTES, wr_mbs); 2590 2591 STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 2592 if (val > 0) { 2593 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val); 2594 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val); 2595 } 2596 } 2597 2598 static int qed_hw_init_common(struct qed_hwfn *p_hwfn, 2599 struct qed_ptt *p_ptt, int hw_mode) 2600 { 2601 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 2602 struct qed_qm_common_rt_init_params params; 2603 struct qed_dev *cdev = p_hwfn->cdev; 2604 u8 vf_id, max_num_vfs; 2605 u16 num_pfs, pf_id; 2606 u32 concrete_fid; 2607 int rc = 0; 2608 2609 qed_init_cau_rt_data(cdev); 2610 2611 /* Program GTT windows */ 2612 qed_gtt_init(p_hwfn); 2613 2614 if (p_hwfn->mcp_info) { 2615 if (p_hwfn->mcp_info->func_info.bandwidth_max) 2616 qm_info->pf_rl_en = true; 2617 if (p_hwfn->mcp_info->func_info.bandwidth_min) 2618 qm_info->pf_wfq_en = true; 2619 } 2620 2621 memset(&params, 0, sizeof(params)); 2622 params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; 2623 params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; 2624 params.pf_rl_en = qm_info->pf_rl_en; 2625 params.pf_wfq_en = qm_info->pf_wfq_en; 2626 params.global_rl_en = qm_info->vport_rl_en; 2627 params.vport_wfq_en = qm_info->vport_wfq_en; 2628 params.port_params = qm_info->qm_port_params; 2629 2630 qed_qm_common_rt_init(p_hwfn, &params); 2631 2632 qed_cxt_hw_init_common(p_hwfn); 2633 2634 qed_init_cache_line_size(p_hwfn, p_ptt); 2635 2636 rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); 2637 if (rc) 2638 return rc; 2639 2640 qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 2641 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 2642 2643 if (QED_IS_BB(p_hwfn->cdev)) { 2644 num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev); 2645 for (pf_id = 0; pf_id < num_pfs; pf_id++) { 2646 qed_fid_pretend(p_hwfn, p_ptt, pf_id); 2647 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 2648 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 2649 } 2650 /* pretend to original PF */ 2651 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 2652 } 2653 2654 max_num_vfs = QED_IS_AH(cdev) ? 
MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; 2655 for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { 2656 concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); 2657 qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); 2658 qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 2659 qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); 2660 qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); 2661 qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); 2662 } 2663 /* pretend to original PF */ 2664 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 2665 2666 return rc; 2667 } 2668 2669 static int 2670 qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn, 2671 struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus) 2672 { 2673 u32 dpi_bit_shift, dpi_count, dpi_page_size; 2674 u32 min_dpis; 2675 u32 n_wids; 2676 2677 /* Calculate DPI size */ 2678 n_wids = max_t(u32, QED_MIN_WIDS, n_cpus); 2679 dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids); 2680 dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); 2681 dpi_bit_shift = ilog2(dpi_page_size / 4096); 2682 dpi_count = pwm_region_size / dpi_page_size; 2683 2684 min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; 2685 min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis); 2686 2687 p_hwfn->dpi_size = dpi_page_size; 2688 p_hwfn->dpi_count = dpi_count; 2689 2690 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); 2691 2692 if (dpi_count < min_dpis) 2693 return -EINVAL; 2694 2695 return 0; 2696 } 2697 2698 enum QED_ROCE_EDPM_MODE { 2699 QED_ROCE_EDPM_MODE_ENABLE = 0, 2700 QED_ROCE_EDPM_MODE_FORCE_ON = 1, 2701 QED_ROCE_EDPM_MODE_DISABLE = 2, 2702 }; 2703 2704 bool qed_edpm_enabled(struct qed_hwfn *p_hwfn) 2705 { 2706 if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) 2707 return false; 2708 2709 return true; 2710 } 2711 2712 static int 2713 qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2714 { 2715 u32 pwm_regsize, norm_regsize; 2716 u32 non_pwm_conn, min_addr_reg1; 2717 u32 db_bar_size, n_cpus = 1; 2718 u32 roce_edpm_mode; 2719 u32 pf_dems_shift; 2720 int rc = 0; 2721 u8 cond; 2722 2723 db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); 2724 if (p_hwfn->cdev->num_hwfns > 1) 2725 db_bar_size /= 2; 2726 2727 /* Calculate doorbell regions */ 2728 non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 2729 qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 2730 NULL) + 2731 qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 2732 NULL); 2733 norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE); 2734 min_addr_reg1 = norm_regsize / 4096; 2735 pwm_regsize = db_bar_size - norm_regsize; 2736 2737 /* Check that the normal and PWM sizes are valid */ 2738 if (db_bar_size < norm_regsize) { 2739 DP_ERR(p_hwfn->cdev, 2740 "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", 2741 db_bar_size, norm_regsize); 2742 return -EINVAL; 2743 } 2744 2745 if (pwm_regsize < QED_MIN_PWM_REGION) { 2746 DP_ERR(p_hwfn->cdev, 2747 "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", 2748 pwm_regsize, 2749 QED_MIN_PWM_REGION, db_bar_size, norm_regsize); 2750 return -EINVAL; 2751 } 2752 2753 /* Calculate number of DPIs */ 2754 roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; 2755 if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) || 2756 ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) { 2757 /* Either EDPM is mandatory, or we are attempting to allocate a 2758 * WID per CPU. 
2759 */ 2760 n_cpus = num_present_cpus(); 2761 rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 2762 } 2763 2764 cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) || 2765 (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE); 2766 if (cond || p_hwfn->dcbx_no_edpm) { 2767 /* Either EDPM is disabled from user configuration, or it is 2768 * disabled via DCBx, or it is not mandatory and we failed to 2769 * allocate a WID per CPU. 2770 */ 2771 n_cpus = 1; 2772 rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 2773 2774 if (cond) 2775 qed_rdma_dpm_bar(p_hwfn, p_ptt); 2776 } 2777 2778 p_hwfn->wid_count = (u16) n_cpus; 2779 2780 DP_INFO(p_hwfn, 2781 "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", 2782 norm_regsize, 2783 pwm_regsize, 2784 p_hwfn->dpi_size, 2785 p_hwfn->dpi_count, 2786 (!qed_edpm_enabled(p_hwfn)) ? 2787 "disabled" : "enabled", PAGE_SIZE); 2788 2789 if (rc) { 2790 DP_ERR(p_hwfn, 2791 "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n", 2792 p_hwfn->dpi_count, 2793 p_hwfn->pf_params.rdma_pf_params.min_dpis); 2794 return -EINVAL; 2795 } 2796 2797 p_hwfn->dpi_start_offset = norm_regsize; 2798 2799 /* DEMS size is configured as log2 of DWORDs, hence the division by 4 */ 2800 pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4); 2801 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 2802 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 2803 2804 return 0; 2805 } 2806 2807 static int qed_hw_init_port(struct qed_hwfn *p_hwfn, 2808 struct qed_ptt *p_ptt, int hw_mode) 2809 { 2810 int rc = 0; 2811 2812 /* In CMT the gate should be cleared by the 2nd hwfn */ 2813 if (!QED_IS_CMT(p_hwfn->cdev) || !IS_LEAD_HWFN(p_hwfn)) 2814 STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0); 2815 2816 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode); 2817 if (rc) 2818 return rc; 2819 2820 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0); 2821 2822 return 0; 2823 } 2824 2825 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, 2826 struct qed_ptt *p_ptt, 2827 struct qed_tunnel_info *p_tunn, 2828 int hw_mode, 2829 bool b_hw_start, 2830 enum qed_int_mode int_mode, 2831 bool allow_npar_tx_switch) 2832 { 2833 u8 rel_pf_id = p_hwfn->rel_pf_id; 2834 int rc = 0; 2835 2836 if (p_hwfn->mcp_info) { 2837 struct qed_mcp_function_info *p_info; 2838 2839 p_info = &p_hwfn->mcp_info->func_info; 2840 if (p_info->bandwidth_min) 2841 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 2842 2843 /* Update rate limit once we'll actually have a link */ 2844 p_hwfn->qm_info.pf_rl = 100000; 2845 } 2846 2847 qed_cxt_hw_init_pf(p_hwfn, p_ptt); 2848 2849 qed_int_igu_init_rt(p_hwfn); 2850 2851 /* Set VLAN in NIG if needed */ 2852 if (hw_mode & BIT(MODE_MF_SD)) { 2853 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n"); 2854 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 2855 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 2856 p_hwfn->hw_info.ovlan); 2857 2858 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 2859 "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); 2860 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, 2861 1); 2862 } 2863 2864 /* Enable classification by MAC if needed */ 2865 if (hw_mode & BIT(MODE_MF_SI)) { 2866 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 2867 "Configuring TAGMAC_CLS_TYPE\n"); 2868 STORE_RT_REG(p_hwfn, 2869 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); 2870 } 2871 2872 /* Protocol Configuration */ 
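/* Note on the runtime settings below: the parser searches are gated per
 * personality - TCP search is enabled only for iSCSI PFs and FCoE
 * search only for FCoE PFs, while the RoCE search is left disabled at
 * this stage. For RDMA personalities it is opened later, once the
 * fastpath starts (see qed_hw_start_fastpath(), which writes 0x1 to
 * rdma_prs_search_reg).
 */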
2873 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 2874 (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0); 2875 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 2876 (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0); 2877 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 2878 2879 /* Sanity check before the PF init sequence that uses DMAE */ 2880 rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); 2881 if (rc) 2882 return rc; 2883 2884 /* PF Init sequence */ 2885 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); 2886 if (rc) 2887 return rc; 2888 2889 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ 2890 rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); 2891 if (rc) 2892 return rc; 2893 2894 qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem); 2895 2896 /* Pure runtime initializations - directly to the HW */ 2897 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 2898 2899 rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); 2900 if (rc) 2901 return rc; 2902 2903 /* Use the leading hwfn since in CMT only NIG #0 is operational */ 2904 if (IS_LEAD_HWFN(p_hwfn)) { 2905 rc = qed_llh_hw_init_pf(p_hwfn, p_ptt); 2906 if (rc) 2907 return rc; 2908 } 2909 2910 if (b_hw_start) { 2911 /* enable interrupts */ 2912 qed_int_igu_enable(p_hwfn, p_ptt, int_mode); 2913 2914 /* send function start command */ 2915 rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, 2916 allow_npar_tx_switch); 2917 if (rc) { 2918 DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); 2919 return rc; 2920 } 2921 if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { 2922 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2)); 2923 qed_wr(p_hwfn, p_ptt, 2924 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 2925 0x100); 2926 } 2927 } 2928 return rc; 2929 } 2930 2931 int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, 2932 struct qed_ptt *p_ptt, bool b_enable) 2933 { 2934 u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0; 2935 2936 /* Configure the PF's internal FID_enable for master transactions */ 2937 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 2938 2939 /* Wait until value is set - try for 1 second every 50us */ 2940 for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 2941 val = qed_rd(p_hwfn, p_ptt, 2942 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 2943 if (val == set_val) 2944 break; 2945 2946 usleep_range(50, 60); 2947 } 2948 2949 if (val != set_val) { 2950 DP_NOTICE(p_hwfn, 2951 "PFID_ENABLE_MASTER wasn't changed after a second\n"); 2952 return -EAGAIN; 2953 } 2954 2955 return 0; 2956 } 2957 2958 static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, 2959 struct qed_ptt *p_main_ptt) 2960 { 2961 /* Read shadow of current MFW mailbox */ 2962 qed_mcp_read_mb(p_hwfn, p_main_ptt); 2963 memcpy(p_hwfn->mcp_info->mfw_mb_shadow, 2964 p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); 2965 } 2966 2967 static void 2968 qed_fill_load_req_params(struct qed_load_req_params *p_load_req, 2969 struct qed_drv_load_params *p_drv_load) 2970 { 2971 memset(p_load_req, 0, sizeof(*p_load_req)); 2972 2973 p_load_req->drv_role = p_drv_load->is_crash_kernel ? 
2974 QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS; 2975 p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 2976 p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 2977 p_load_req->override_force_load = p_drv_load->override_force_load; 2978 } 2979 2980 static int qed_vf_start(struct qed_hwfn *p_hwfn, 2981 struct qed_hw_init_params *p_params) 2982 { 2983 if (p_params->p_tunn) { 2984 qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 2985 qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 2986 } 2987 2988 p_hwfn->b_int_enabled = true; 2989 2990 return 0; 2991 } 2992 2993 static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2994 { 2995 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 2996 BIT(p_hwfn->abs_pf_id)); 2997 } 2998 2999 int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) 3000 { 3001 struct qed_load_req_params load_req_params; 3002 u32 load_code, resp, param, drv_mb_param; 3003 bool b_default_mtu = true; 3004 struct qed_hwfn *p_hwfn; 3005 const u32 *fw_overlays; 3006 u32 fw_overlays_len; 3007 u16 ether_type; 3008 int rc = 0, i; 3009 3010 if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 3011 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 3012 return -EINVAL; 3013 } 3014 3015 if (IS_PF(cdev)) { 3016 rc = qed_init_fw_data(cdev, p_params->bin_fw_data); 3017 if (rc) 3018 return rc; 3019 } 3020 3021 for_each_hwfn(cdev, i) { 3022 p_hwfn = &cdev->hwfns[i]; 3023 3024 /* If management didn't provide a default, set one of our own */ 3025 if (!p_hwfn->hw_info.mtu) { 3026 p_hwfn->hw_info.mtu = 1500; 3027 b_default_mtu = false; 3028 } 3029 3030 if (IS_VF(cdev)) { 3031 qed_vf_start(p_hwfn, p_params); 3032 continue; 3033 } 3034 3035 rc = qed_calc_hw_mode(p_hwfn); 3036 if (rc) 3037 return rc; 3038 3039 if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING, 3040 &cdev->mf_bits) || 3041 test_bit(QED_MF_8021AD_TAGGING, 3042 &cdev->mf_bits))) { 3043 if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits)) 3044 ether_type = ETH_P_8021Q; 3045 else 3046 ether_type = ETH_P_8021AD; 3047 STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, 3048 ether_type); 3049 STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, 3050 ether_type); 3051 STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, 3052 ether_type); 3053 STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, 3054 ether_type); 3055 } 3056 3057 qed_fill_load_req_params(&load_req_params, 3058 p_params->p_drv_load_params); 3059 rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 3060 &load_req_params); 3061 if (rc) { 3062 DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n"); 3063 return rc; 3064 } 3065 3066 load_code = load_req_params.load_code; 3067 DP_VERBOSE(p_hwfn, QED_MSG_SP, 3068 "Load request was sent. Load code: 0x%x\n", 3069 load_code); 3070 3071 /* Only relevant for recovery: 3072 * Clear the indication after LOAD_REQ is responded by the MFW. 3073 */ 3074 cdev->recov_in_prog = false; 3075 3076 qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); 3077 3078 qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 3079 3080 /* Clean up chip from previous driver if such remains exist. 3081 * This is not needed when the PF is the first one on the 3082 * engine, since afterwards we are going to init the FW. 
3083 */ 3084 if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) { 3085 rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, 3086 p_hwfn->rel_pf_id, false); 3087 if (rc) { 3088 qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt, 3089 QED_HW_ERR_RAMROD_FAIL, 3090 "Final cleanup failed\n"); 3091 goto load_err; 3092 } 3093 } 3094 3095 /* Log and clear previous pglue_b errors if such exist */ 3096 qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); 3097 3098 /* Enable the PF's internal FID_enable in the PXP */ 3099 rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, 3100 true); 3101 if (rc) 3102 goto load_err; 3103 3104 /* Clear the pglue_b was_error indication. 3105 * In E4 it must be done after the BME and the internal 3106 * FID_enable for the PF are set, since VDMs may cause the 3107 * indication to be set again. 3108 */ 3109 qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); 3110 3111 fw_overlays = cdev->fw_data->fw_overlays; 3112 fw_overlays_len = cdev->fw_data->fw_overlays_len; 3113 p_hwfn->fw_overlay_mem = 3114 qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays, 3115 fw_overlays_len); 3116 if (!p_hwfn->fw_overlay_mem) { 3117 DP_NOTICE(p_hwfn, 3118 "Failed to allocate fw overlay memory\n"); 3119 rc = -ENOMEM; 3120 goto load_err; 3121 } 3122 3123 switch (load_code) { 3124 case FW_MSG_CODE_DRV_LOAD_ENGINE: 3125 rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, 3126 p_hwfn->hw_info.hw_mode); 3127 if (rc) 3128 break; 3129 /* Fall through */ 3130 case FW_MSG_CODE_DRV_LOAD_PORT: 3131 rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, 3132 p_hwfn->hw_info.hw_mode); 3133 if (rc) 3134 break; 3135 3136 /* Fall through */ 3137 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 3138 rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, 3139 p_params->p_tunn, 3140 p_hwfn->hw_info.hw_mode, 3141 p_params->b_hw_start, 3142 p_params->int_mode, 3143 p_params->allow_npar_tx_switch); 3144 break; 3145 default: 3146 DP_NOTICE(p_hwfn, 3147 "Unexpected load code [0x%08x]", load_code); 3148 rc = -EINVAL; 3149 break; 3150 } 3151 3152 if (rc) { 3153 DP_NOTICE(p_hwfn, 3154 "init phase failed for loadcode 0x%x (rc %d)\n", 3155 load_code, rc); 3156 goto load_err; 3157 } 3158 3159 rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); 3160 if (rc) 3161 return rc; 3162 3163 /* send DCBX attention request command */ 3164 DP_VERBOSE(p_hwfn, 3165 QED_MSG_DCB, 3166 "sending phony dcbx set command to trigger DCBx attention handling\n"); 3167 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 3168 DRV_MSG_CODE_SET_DCBX, 3169 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, 3170 &resp, ¶m); 3171 if (rc) { 3172 DP_NOTICE(p_hwfn, 3173 "Failed to send DCBX attention request\n"); 3174 return rc; 3175 } 3176 3177 p_hwfn->hw_init_done = true; 3178 } 3179 3180 if (IS_PF(cdev)) { 3181 p_hwfn = QED_LEADING_HWFN(cdev); 3182 3183 /* Get pre-negotiated values for stag, bandwidth etc. 
*/ 3184 DP_VERBOSE(p_hwfn, 3185 QED_MSG_SPQ, 3186 "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); 3187 drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET; 3188 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 3189 DRV_MSG_CODE_GET_OEM_UPDATES, 3190 drv_mb_param, &resp, ¶m); 3191 if (rc) 3192 DP_NOTICE(p_hwfn, 3193 "Failed to send GET_OEM_UPDATES attention request\n"); 3194 3195 drv_mb_param = STORM_FW_VERSION; 3196 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 3197 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 3198 drv_mb_param, &load_code, ¶m); 3199 if (rc) 3200 DP_INFO(p_hwfn, "Failed to update firmware version\n"); 3201 3202 if (!b_default_mtu) { 3203 rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, 3204 p_hwfn->hw_info.mtu); 3205 if (rc) 3206 DP_INFO(p_hwfn, 3207 "Failed to update default mtu\n"); 3208 } 3209 3210 rc = qed_mcp_ov_update_driver_state(p_hwfn, 3211 p_hwfn->p_main_ptt, 3212 QED_OV_DRIVER_STATE_DISABLED); 3213 if (rc) 3214 DP_INFO(p_hwfn, "Failed to update driver state\n"); 3215 3216 rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, 3217 QED_OV_ESWITCH_NONE); 3218 if (rc) 3219 DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); 3220 } 3221 3222 return 0; 3223 3224 load_err: 3225 /* The MFW load lock should be released also when initialization fails. 3226 */ 3227 qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); 3228 return rc; 3229 } 3230 3231 #define QED_HW_STOP_RETRY_LIMIT (10) 3232 static void qed_hw_timers_stop(struct qed_dev *cdev, 3233 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3234 { 3235 int i; 3236 3237 /* close timers */ 3238 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 3239 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 3240 3241 if (cdev->recov_in_prog) 3242 return; 3243 3244 for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { 3245 if ((!qed_rd(p_hwfn, p_ptt, 3246 TM_REG_PF_SCAN_ACTIVE_CONN)) && 3247 (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) 3248 break; 3249 3250 /* Dependent on number of connection/tasks, possibly 3251 * 1ms sleep is required between polls 3252 */ 3253 usleep_range(1000, 2000); 3254 } 3255 3256 if (i < QED_HW_STOP_RETRY_LIMIT) 3257 return; 3258 3259 DP_NOTICE(p_hwfn, 3260 "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 3261 (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 3262 (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 3263 } 3264 3265 void qed_hw_timers_stop_all(struct qed_dev *cdev) 3266 { 3267 int j; 3268 3269 for_each_hwfn(cdev, j) { 3270 struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; 3271 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; 3272 3273 qed_hw_timers_stop(cdev, p_hwfn, p_ptt); 3274 } 3275 } 3276 3277 int qed_hw_stop(struct qed_dev *cdev) 3278 { 3279 struct qed_hwfn *p_hwfn; 3280 struct qed_ptt *p_ptt; 3281 int rc, rc2 = 0; 3282 int j; 3283 3284 for_each_hwfn(cdev, j) { 3285 p_hwfn = &cdev->hwfns[j]; 3286 p_ptt = p_hwfn->p_main_ptt; 3287 3288 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); 3289 3290 if (IS_VF(cdev)) { 3291 qed_vf_pf_int_cleanup(p_hwfn); 3292 rc = qed_vf_pf_reset(p_hwfn); 3293 if (rc) { 3294 DP_NOTICE(p_hwfn, 3295 "qed_vf_pf_reset failed. rc = %d.\n", 3296 rc); 3297 rc2 = -EINVAL; 3298 } 3299 continue; 3300 } 3301 3302 /* mark the hw as uninitialized... */ 3303 p_hwfn->hw_init_done = false; 3304 3305 /* Send unload command to MCP */ 3306 if (!cdev->recov_in_prog) { 3307 rc = qed_mcp_unload_req(p_hwfn, p_ptt); 3308 if (rc) { 3309 DP_NOTICE(p_hwfn, 3310 "Failed sending a UNLOAD_REQ command. 
rc = %d.\n", 3311 rc); 3312 rc2 = -EINVAL; 3313 } 3314 } 3315 3316 qed_slowpath_irq_sync(p_hwfn); 3317 3318 /* After this point no MFW attentions are expected, e.g. prevent 3319 * race between pf stop and dcbx pf update. 3320 */ 3321 rc = qed_sp_pf_stop(p_hwfn); 3322 if (rc) { 3323 DP_NOTICE(p_hwfn, 3324 "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n", 3325 rc); 3326 rc2 = -EINVAL; 3327 } 3328 3329 qed_wr(p_hwfn, p_ptt, 3330 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 3331 3332 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 3333 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 3334 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 3335 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 3336 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 3337 3338 qed_hw_timers_stop(cdev, p_hwfn, p_ptt); 3339 3340 /* Disable Attention Generation */ 3341 qed_int_igu_disable_int(p_hwfn, p_ptt); 3342 3343 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 3344 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 3345 3346 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); 3347 3348 /* Need to wait 1ms to guarantee SBs are cleared */ 3349 usleep_range(1000, 2000); 3350 3351 /* Disable PF in HW blocks */ 3352 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); 3353 qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); 3354 3355 if (IS_LEAD_HWFN(p_hwfn) && 3356 test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) && 3357 !QED_IS_FCOE_PERSONALITY(p_hwfn)) 3358 qed_llh_remove_mac_filter(cdev, 0, 3359 p_hwfn->hw_info.hw_mac_addr); 3360 3361 if (!cdev->recov_in_prog) { 3362 rc = qed_mcp_unload_done(p_hwfn, p_ptt); 3363 if (rc) { 3364 DP_NOTICE(p_hwfn, 3365 "Failed sending a UNLOAD_DONE command. rc = %d.\n", 3366 rc); 3367 rc2 = -EINVAL; 3368 } 3369 } 3370 } 3371 3372 if (IS_PF(cdev) && !cdev->recov_in_prog) { 3373 p_hwfn = QED_LEADING_HWFN(cdev); 3374 p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; 3375 3376 /* Clear the PF's internal FID_enable in the PXP. 3377 * In CMT this should only be done for first hw-function, and 3378 * only after all transactions have stopped for all active 3379 * hw-functions. 3380 */ 3381 rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); 3382 if (rc) { 3383 DP_NOTICE(p_hwfn, 3384 "qed_pglueb_set_pfid_enable() failed. 
rc = %d.\n", 3385 rc); 3386 rc2 = -EINVAL; 3387 } 3388 } 3389 3390 return rc2; 3391 } 3392 3393 int qed_hw_stop_fastpath(struct qed_dev *cdev) 3394 { 3395 int j; 3396 3397 for_each_hwfn(cdev, j) { 3398 struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; 3399 struct qed_ptt *p_ptt; 3400 3401 if (IS_VF(cdev)) { 3402 qed_vf_pf_int_cleanup(p_hwfn); 3403 continue; 3404 } 3405 p_ptt = qed_ptt_acquire(p_hwfn); 3406 if (!p_ptt) 3407 return -EAGAIN; 3408 3409 DP_VERBOSE(p_hwfn, 3410 NETIF_MSG_IFDOWN, "Shutting down the fastpath\n"); 3411 3412 qed_wr(p_hwfn, p_ptt, 3413 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 3414 3415 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 3416 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 3417 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 3418 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 3419 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 3420 3421 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 3422 3423 /* Need to wait 1ms to guarantee SBs are cleared */ 3424 usleep_range(1000, 2000); 3425 qed_ptt_release(p_hwfn, p_ptt); 3426 } 3427 3428 return 0; 3429 } 3430 3431 int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) 3432 { 3433 struct qed_ptt *p_ptt; 3434 3435 if (IS_VF(p_hwfn->cdev)) 3436 return 0; 3437 3438 p_ptt = qed_ptt_acquire(p_hwfn); 3439 if (!p_ptt) 3440 return -EAGAIN; 3441 3442 if (p_hwfn->p_rdma_info && 3443 p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs) 3444 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); 3445 3446 /* Re-open incoming traffic */ 3447 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 3448 qed_ptt_release(p_hwfn, p_ptt); 3449 3450 return 0; 3451 } 3452 3453 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 3454 static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) 3455 { 3456 qed_ptt_pool_free(p_hwfn); 3457 kfree(p_hwfn->hw_info.p_igu_info); 3458 p_hwfn->hw_info.p_igu_info = NULL; 3459 } 3460 3461 /* Setup bar access */ 3462 static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) 3463 { 3464 /* clear indirect access */ 3465 if (QED_IS_AH(p_hwfn->cdev)) { 3466 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 3467 PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0); 3468 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 3469 PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0); 3470 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 3471 PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0); 3472 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 3473 PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0); 3474 } else { 3475 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 3476 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); 3477 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 3478 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); 3479 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 3480 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); 3481 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 3482 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); 3483 } 3484 3485 /* Clean previous pglue_b errors if such exist */ 3486 qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); 3487 3488 /* enable internal target-read */ 3489 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 3490 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 3491 } 3492 3493 static void get_function_id(struct qed_hwfn *p_hwfn) 3494 { 3495 /* ME Register */ 3496 p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, 3497 PXP_PF_ME_OPAQUE_ADDR); 3498 3499 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 3500 3501 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; 3502 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 3503 PXP_CONCRETE_FID_PFID); 3504 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 3505 PXP_CONCRETE_FID_PORT); 3506 
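/* Worked example with an illustrative (made-up) ME register value: a
 * concrete FID of 0x00050013 yields abs_pf_id = (0x00050013 >> 16) &
 * 0xf = 5, while rel_pf_id and port_id are extracted via the
 * PXP_CONCRETE_FID_PFID / PXP_CONCRETE_FID_PORT field definitions; the
 * exact bit positions of those fields come from the HSI headers.
 */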
3507 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, 3508 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", 3509 p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); 3510 } 3511 3512 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) 3513 { 3514 u32 *feat_num = p_hwfn->hw_info.feat_num; 3515 struct qed_sb_cnt_info sb_cnt; 3516 u32 non_l2_sbs = 0; 3517 3518 memset(&sb_cnt, 0, sizeof(sb_cnt)); 3519 qed_int_get_num_sbs(p_hwfn, &sb_cnt); 3520 3521 if (IS_ENABLED(CONFIG_QED_RDMA) && 3522 QED_IS_RDMA_PERSONALITY(p_hwfn)) { 3523 /* Each RoCE CNQ requires 1 status block + 1 CNQ. We divide 3524 * the status blocks equally between L2 / RoCE, taking into 3525 * account how many L2 queues / CNQs we have. 3526 */ 3527 feat_num[QED_RDMA_CNQ] = 3528 min_t(u32, sb_cnt.cnt / 2, 3529 RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); 3530 3531 non_l2_sbs = feat_num[QED_RDMA_CNQ]; 3532 } 3533 if (QED_IS_L2_PERSONALITY(p_hwfn)) { 3534 /* Start by allocating VF queues, then PF's */ 3535 feat_num[QED_VF_L2_QUE] = min_t(u32, 3536 RESC_NUM(p_hwfn, QED_L2_QUEUE), 3537 sb_cnt.iov_cnt); 3538 feat_num[QED_PF_L2_QUE] = min_t(u32, 3539 sb_cnt.cnt - non_l2_sbs, 3540 RESC_NUM(p_hwfn, 3541 QED_L2_QUEUE) - 3542 FEAT_NUM(p_hwfn, 3543 QED_VF_L2_QUE)); 3544 } 3545 3546 if (QED_IS_FCOE_PERSONALITY(p_hwfn)) 3547 feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt, 3548 RESC_NUM(p_hwfn, 3549 QED_CMDQS_CQS)); 3550 3551 if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) 3552 feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, 3553 RESC_NUM(p_hwfn, 3554 QED_CMDQS_CQS)); 3555 DP_VERBOSE(p_hwfn, 3556 NETIF_MSG_PROBE, 3557 "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n", 3558 (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), 3559 (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), 3560 (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), 3561 (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ), 3562 (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), 3563 (int)sb_cnt.cnt); 3564 } 3565 3566 const char *qed_hw_get_resc_name(enum qed_resources res_id) 3567 { 3568 switch (res_id) { 3569 case QED_L2_QUEUE: 3570 return "L2_QUEUE"; 3571 case QED_VPORT: 3572 return "VPORT"; 3573 case QED_RSS_ENG: 3574 return "RSS_ENG"; 3575 case QED_PQ: 3576 return "PQ"; 3577 case QED_RL: 3578 return "RL"; 3579 case QED_MAC: 3580 return "MAC"; 3581 case QED_VLAN: 3582 return "VLAN"; 3583 case QED_RDMA_CNQ_RAM: 3584 return "RDMA_CNQ_RAM"; 3585 case QED_ILT: 3586 return "ILT"; 3587 case QED_LL2_RAM_QUEUE: 3588 return "LL2_RAM_QUEUE"; 3589 case QED_LL2_CTX_QUEUE: 3590 return "LL2_CTX_QUEUE"; 3591 case QED_CMDQS_CQS: 3592 return "CMDQS_CQS"; 3593 case QED_RDMA_STATS_QUEUE: 3594 return "RDMA_STATS_QUEUE"; 3595 case QED_BDQ: 3596 return "BDQ"; 3597 case QED_SB: 3598 return "SB"; 3599 default: 3600 return "UNKNOWN_RESOURCE"; 3601 } 3602 } 3603 3604 static int 3605 __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, 3606 struct qed_ptt *p_ptt, 3607 enum qed_resources res_id, 3608 u32 resc_max_val, u32 *p_mcp_resp) 3609 { 3610 int rc; 3611 3612 rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, 3613 resc_max_val, p_mcp_resp); 3614 if (rc) { 3615 DP_NOTICE(p_hwfn, 3616 "MFW response failure for a max value setting of resource %d [%s]\n", 3617 res_id, qed_hw_get_resc_name(res_id)); 3618 return rc; 3619 } 3620 3621 if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 3622 DP_INFO(p_hwfn, 3623 "Failed to set the max value of resource %d [%s]. 
mcp_resp = 0x%08x.\n", 3624 res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp); 3625 3626 return 0; 3627 } 3628 3629 static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = { 3630 {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2}, 3631 {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2}, 3632 {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2}, 3633 {MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2,}, 3634 {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2}, 3635 {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2}, 3636 {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2}, 3637 {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2}, 3638 {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2}, 3639 {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2}, 3640 {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS}, 3641 {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES}, 3642 {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2}, 3643 }; 3644 3645 u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type) 3646 { 3647 enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2; 3648 3649 if (type >= QED_NUM_HSI_DEFS) { 3650 DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type); 3651 return 0; 3652 } 3653 3654 return qed_hsi_def_val[type][chip_id]; 3655 } 3656 static int 3657 qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3658 { 3659 u32 resc_max_val, mcp_resp; 3660 u8 res_id; 3661 int rc; 3662 for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { 3663 switch (res_id) { 3664 case QED_LL2_RAM_QUEUE: 3665 resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES; 3666 break; 3667 case QED_LL2_CTX_QUEUE: 3668 resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES; 3669 break; 3670 case QED_RDMA_CNQ_RAM: 3671 /* No need for a case for QED_CMDQS_CQS since 3672 * CNQ/CMDQS are the same resource. 3673 */ 3674 resc_max_val = NUM_OF_GLOBAL_QUEUES; 3675 break; 3676 case QED_RDMA_STATS_QUEUE: 3677 resc_max_val = 3678 NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev); 3679 break; 3680 case QED_BDQ: 3681 resc_max_val = BDQ_NUM_RESOURCES; 3682 break; 3683 default: 3684 continue; 3685 } 3686 3687 rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, 3688 resc_max_val, &mcp_resp); 3689 if (rc) 3690 return rc; 3691 3692 /* There's no point to continue to the next resource if the 3693 * command is not supported by the MFW. 3694 * We do continue if the command is supported but the resource 3695 * is unknown to the MFW. Such a resource will be later 3696 * configured with the default allocation values. 
3697 */ 3698 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 3699 return -EINVAL; 3700 } 3701 3702 return 0; 3703 } 3704 3705 static 3706 int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, 3707 enum qed_resources res_id, 3708 u32 *p_resc_num, u32 *p_resc_start) 3709 { 3710 u8 num_funcs = p_hwfn->num_funcs_on_engine; 3711 struct qed_dev *cdev = p_hwfn->cdev; 3712 3713 switch (res_id) { 3714 case QED_L2_QUEUE: 3715 *p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs; 3716 break; 3717 case QED_VPORT: 3718 *p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs; 3719 break; 3720 case QED_RSS_ENG: 3721 *p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs; 3722 break; 3723 case QED_PQ: 3724 *p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs; 3725 *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ 3726 break; 3727 case QED_RL: 3728 *p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs; 3729 break; 3730 case QED_MAC: 3731 case QED_VLAN: 3732 /* Each VFC resource can accommodate both a MAC and a VLAN */ 3733 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 3734 break; 3735 case QED_ILT: 3736 *p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs; 3737 break; 3738 case QED_LL2_RAM_QUEUE: 3739 *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs; 3740 break; 3741 case QED_LL2_CTX_QUEUE: 3742 *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs; 3743 break; 3744 case QED_RDMA_CNQ_RAM: 3745 case QED_CMDQS_CQS: 3746 /* CNQ/CMDQS are the same resource */ 3747 *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; 3748 break; 3749 case QED_RDMA_STATS_QUEUE: 3750 *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs; 3751 break; 3752 case QED_BDQ: 3753 if (p_hwfn->hw_info.personality != QED_PCI_ISCSI && 3754 p_hwfn->hw_info.personality != QED_PCI_FCOE) 3755 *p_resc_num = 0; 3756 else 3757 *p_resc_num = 1; 3758 break; 3759 case QED_SB: 3760 /* Since we want its value to reflect whether MFW supports 3761 * the new scheme, have a default of 0. 
3762 */ 3763 *p_resc_num = 0; 3764 break; 3765 default: 3766 return -EINVAL; 3767 } 3768 3769 switch (res_id) { 3770 case QED_BDQ: 3771 if (!*p_resc_num) 3772 *p_resc_start = 0; 3773 else if (p_hwfn->cdev->num_ports_in_engine == 4) 3774 *p_resc_start = p_hwfn->port_id; 3775 else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) 3776 *p_resc_start = p_hwfn->port_id; 3777 else if (p_hwfn->hw_info.personality == QED_PCI_FCOE) 3778 *p_resc_start = p_hwfn->port_id + 2; 3779 break; 3780 default: 3781 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 3782 break; 3783 } 3784 3785 return 0; 3786 } 3787 3788 static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, 3789 enum qed_resources res_id) 3790 { 3791 u32 dflt_resc_num = 0, dflt_resc_start = 0; 3792 u32 mcp_resp, *p_resc_num, *p_resc_start; 3793 int rc; 3794 3795 p_resc_num = &RESC_NUM(p_hwfn, res_id); 3796 p_resc_start = &RESC_START(p_hwfn, res_id); 3797 3798 rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 3799 &dflt_resc_start); 3800 if (rc) { 3801 DP_ERR(p_hwfn, 3802 "Failed to get default amount for resource %d [%s]\n", 3803 res_id, qed_hw_get_resc_name(res_id)); 3804 return rc; 3805 } 3806 3807 rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 3808 &mcp_resp, p_resc_num, p_resc_start); 3809 if (rc) { 3810 DP_NOTICE(p_hwfn, 3811 "MFW response failure for an allocation request for resource %d [%s]\n", 3812 res_id, qed_hw_get_resc_name(res_id)); 3813 return rc; 3814 } 3815 3816 /* Default driver values are applied in the following cases: 3817 * - The resource allocation MB command is not supported by the MFW 3818 * - There is an internal error in the MFW while processing the request 3819 * - The resource ID is unknown to the MFW 3820 */ 3821 if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3822 DP_INFO(p_hwfn, 3823 "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n", 3824 res_id, 3825 qed_hw_get_resc_name(res_id), 3826 mcp_resp, dflt_resc_num, dflt_resc_start); 3827 *p_resc_num = dflt_resc_num; 3828 *p_resc_start = dflt_resc_start; 3829 goto out; 3830 } 3831 3832 out: 3833 /* PQs have to divide by 8 [that's the HW granularity]. 3834 * Reduce number so it would fit. 
3835 */ 3836 if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) { 3837 DP_INFO(p_hwfn, 3838 "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", 3839 *p_resc_num, 3840 (*p_resc_num) & ~0x7, 3841 *p_resc_start, (*p_resc_start) & ~0x7); 3842 *p_resc_num &= ~0x7; 3843 *p_resc_start &= ~0x7; 3844 } 3845 3846 return 0; 3847 } 3848 3849 static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn) 3850 { 3851 int rc; 3852 u8 res_id; 3853 3854 for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { 3855 rc = __qed_hw_set_resc_info(p_hwfn, res_id); 3856 if (rc) 3857 return rc; 3858 } 3859 3860 return 0; 3861 } 3862 3863 static int qed_hw_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, 3864 struct qed_ptt *p_ptt) 3865 { 3866 struct qed_dev *cdev = p_hwfn->cdev; 3867 u8 native_ppfid_idx; 3868 int rc; 3869 3870 /* Calculation of BB/AH is different for native_ppfid_idx */ 3871 if (QED_IS_BB(cdev)) 3872 native_ppfid_idx = p_hwfn->rel_pf_id; 3873 else 3874 native_ppfid_idx = p_hwfn->rel_pf_id / 3875 cdev->num_ports_in_engine; 3876 3877 rc = qed_mcp_get_ppfid_bitmap(p_hwfn, p_ptt); 3878 if (rc != 0 && rc != -EOPNOTSUPP) 3879 return rc; 3880 else if (rc == -EOPNOTSUPP) 3881 cdev->ppfid_bitmap = 0x1 << native_ppfid_idx; 3882 3883 if (!(cdev->ppfid_bitmap & (0x1 << native_ppfid_idx))) { 3884 DP_INFO(p_hwfn, 3885 "Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n", 3886 native_ppfid_idx, cdev->ppfid_bitmap); 3887 cdev->ppfid_bitmap = 0x1 << native_ppfid_idx; 3888 } 3889 3890 return 0; 3891 } 3892 3893 static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3894 { 3895 struct qed_resc_unlock_params resc_unlock_params; 3896 struct qed_resc_lock_params resc_lock_params; 3897 bool b_ah = QED_IS_AH(p_hwfn->cdev); 3898 u8 res_id; 3899 int rc; 3900 3901 /* Setting the max values of the soft resources and the following 3902 * resource allocation queries should be atomic. Since several PFs can 3903 * run in parallel - a resource lock is needed. 3904 * If either the resource lock or resource set value commands are not 3905 * supported - skip the max values setting, release the lock if 3906 * needed, and proceed to the queries. Other failures, including a 3907 * failure to acquire the lock, will cause this function to fail. 
*/ 3909 qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, 3910 QED_RESC_LOCK_RESC_ALLOC, false); 3911 3912 rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); 3913 if (rc && rc != -EINVAL) { 3914 return rc; 3915 } else if (rc == -EINVAL) { 3916 DP_INFO(p_hwfn, 3917 "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 3918 } else if (!rc && !resc_lock_params.b_granted) { 3919 DP_NOTICE(p_hwfn, 3920 "Failed to acquire the resource lock for the resource allocation commands\n"); 3921 return -EBUSY; 3922 } else { 3923 rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt); 3924 if (rc && rc != -EINVAL) { 3925 DP_NOTICE(p_hwfn, 3926 "Failed to set the max values of the soft resources\n"); 3927 goto unlock_and_exit; 3928 } else if (rc == -EINVAL) { 3929 DP_INFO(p_hwfn, 3930 "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 3931 rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, 3932 &resc_unlock_params); 3933 if (rc) 3934 DP_INFO(p_hwfn, 3935 "Failed to release the resource lock for the resource allocation commands\n"); 3936 } 3937 } 3938 3939 rc = qed_hw_set_resc_info(p_hwfn); 3940 if (rc) 3941 goto unlock_and_exit; 3942 3943 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 3944 rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); 3945 if (rc) 3946 DP_INFO(p_hwfn, 3947 "Failed to release the resource lock for the resource allocation commands\n"); 3948 } 3949 3950 /* PPFID bitmap */ 3951 if (IS_LEAD_HWFN(p_hwfn)) { 3952 rc = qed_hw_get_ppfid_bitmap(p_hwfn, p_ptt); 3953 if (rc) 3954 return rc; 3955 } 3956 3957 /* Sanity for ILT */ 3958 if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 3959 (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 3960 DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n", 3961 RESC_START(p_hwfn, QED_ILT), 3962 RESC_END(p_hwfn, QED_ILT) - 1); 3963 return -EINVAL; 3964 } 3965 3966 /* This will also learn the number of SBs from MFW */ 3967 if (qed_int_igu_reset_cam(p_hwfn, p_ptt)) 3968 return -EINVAL; 3969 3970 qed_hw_set_feat(p_hwfn); 3971 3972 for (res_id = 0; res_id < QED_MAX_RESC; res_id++) 3973 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n", 3974 qed_hw_get_resc_name(res_id), 3975 RESC_NUM(p_hwfn, res_id), 3976 RESC_START(p_hwfn, res_id)); 3977 3978 return 0; 3979 3980 unlock_and_exit: 3981 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) 3982 qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); 3983 return rc; 3984 } 3985 3986 static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3987 { 3988 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; 3989 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; 3990 struct qed_mcp_link_capabilities *p_caps; 3991 struct qed_mcp_link_params *link; 3992 3993 /* Read global nvm_cfg address */ 3994 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 3995 3996 /* Verify MCP has initialized it */ 3997 if (!nvm_cfg_addr) { 3998 DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); 3999 return -EINVAL; 4000 } 4001 4002 /* Read nvm_cfg1 (note this is just the offset, and not the offsize (TBD)) */ 4003 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 4004 4005 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 4006 offsetof(struct nvm_cfg1, glob) + 4007 offsetof(struct nvm_cfg1_glob, core_cfg); 4008 4009 core_cfg = qed_rd(p_hwfn, p_ptt, addr); 4010 4011 switch ((core_cfg & 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 4012 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 4013 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: 4014 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G; 4015 break; 4016 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 4017 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; 4018 break; 4019 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: 4020 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; 4021 break; 4022 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: 4023 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; 4024 break; 4025 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: 4026 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; 4027 break; 4028 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: 4029 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; 4030 break; 4031 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: 4032 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; 4033 break; 4034 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 4035 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; 4036 break; 4037 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: 4038 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G; 4039 break; 4040 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 4041 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; 4042 break; 4043 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: 4044 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G; 4045 break; 4046 default: 4047 DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); 4048 break; 4049 } 4050 4051 /* Read default link configuration */ 4052 link = &p_hwfn->mcp_info->link_input; 4053 p_caps = &p_hwfn->mcp_info->link_capabilities; 4054 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 4055 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 4056 link_temp = qed_rd(p_hwfn, p_ptt, 4057 port_cfg_addr + 4058 offsetof(struct nvm_cfg1_port, speed_cap_mask)); 4059 link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; 4060 link->speed.advertised_speeds = link_temp; 4061 4062 link_temp = link->speed.advertised_speeds; 4063 p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp; 4064 4065 link_temp = qed_rd(p_hwfn, p_ptt, 4066 port_cfg_addr + 4067 offsetof(struct nvm_cfg1_port, link_settings)); 4068 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> 4069 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { 4070 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: 4071 link->speed.autoneg = true; 4072 break; 4073 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: 4074 link->speed.forced_speed = 1000; 4075 break; 4076 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: 4077 link->speed.forced_speed = 10000; 4078 break; 4079 case NVM_CFG1_PORT_DRV_LINK_SPEED_20G: 4080 link->speed.forced_speed = 20000; 4081 break; 4082 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: 4083 link->speed.forced_speed = 25000; 4084 break; 4085 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: 4086 link->speed.forced_speed = 40000; 4087 break; 4088 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 4089 link->speed.forced_speed = 50000; 4090 break; 4091 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: 4092 link->speed.forced_speed = 100000; 4093 break; 4094 default: 4095 DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); 4096 } 4097 4098 p_hwfn->mcp_info->link_capabilities.default_speed_autoneg = 4099 link->speed.autoneg; 4100 4101 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; 4102 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; 4103 link->pause.autoneg = !!(link_temp & 4104 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); 4105 link->pause.forced_rx = !!(link_temp & 4106 
NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); 4107 link->pause.forced_tx = !!(link_temp & 4108 NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); 4109 link->loopback_mode = 0; 4110 4111 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { 4112 link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + 4113 offsetof(struct nvm_cfg1_port, ext_phy)); 4114 link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; 4115 link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; 4116 p_caps->default_eee = QED_MCP_EEE_ENABLED; 4117 link->eee.enable = true; 4118 switch (link_temp) { 4119 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: 4120 p_caps->default_eee = QED_MCP_EEE_DISABLED; 4121 link->eee.enable = false; 4122 break; 4123 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: 4124 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; 4125 break; 4126 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: 4127 p_caps->eee_lpi_timer = 4128 EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; 4129 break; 4130 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: 4131 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; 4132 break; 4133 } 4134 4135 link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; 4136 link->eee.tx_lpi_enable = link->eee.enable; 4137 link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV; 4138 } else { 4139 p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; 4140 } 4141 4142 DP_VERBOSE(p_hwfn, 4143 NETIF_MSG_LINK, 4144 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", 4145 link->speed.forced_speed, 4146 link->speed.advertised_speeds, 4147 link->speed.autoneg, 4148 link->pause.autoneg, 4149 p_caps->default_eee, p_caps->eee_lpi_timer); 4150 4151 if (IS_LEAD_HWFN(p_hwfn)) { 4152 struct qed_dev *cdev = p_hwfn->cdev; 4153 4154 /* Read Multi-function information from shmem */ 4155 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 4156 offsetof(struct nvm_cfg1, glob) + 4157 offsetof(struct nvm_cfg1_glob, generic_cont0); 4158 4159 generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); 4160 4161 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 4162 NVM_CFG1_GLOB_MF_MODE_OFFSET; 4163 4164 switch (mf_mode) { 4165 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 4166 cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); 4167 break; 4168 case NVM_CFG1_GLOB_MF_MODE_UFP: 4169 cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | 4170 BIT(QED_MF_LLH_PROTO_CLSS) | 4171 BIT(QED_MF_UFP_SPECIFIC) | 4172 BIT(QED_MF_8021Q_TAGGING) | 4173 BIT(QED_MF_DONT_ADD_VLAN0_TAG); 4174 break; 4175 case NVM_CFG1_GLOB_MF_MODE_BD: 4176 cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | 4177 BIT(QED_MF_LLH_PROTO_CLSS) | 4178 BIT(QED_MF_8021AD_TAGGING) | 4179 BIT(QED_MF_DONT_ADD_VLAN0_TAG); 4180 break; 4181 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 4182 cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | 4183 BIT(QED_MF_LLH_PROTO_CLSS) | 4184 BIT(QED_MF_LL2_NON_UNICAST) | 4185 BIT(QED_MF_INTER_PF_SWITCH); 4186 break; 4187 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 4188 cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | 4189 BIT(QED_MF_LLH_PROTO_CLSS) | 4190 BIT(QED_MF_LL2_NON_UNICAST); 4191 if (QED_IS_BB(p_hwfn->cdev)) 4192 cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); 4193 break; 4194 } 4195 4198 } 4199 4200 DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", 4201 p_hwfn->cdev->mf_bits); 4202 4203 /* Read device capabilities information from shmem */ 4204 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 4205 offsetof(struct nvm_cfg1, glob) + 4206 offsetof(struct nvm_cfg1_glob, device_capabilities); 4207 4208
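/* Each NVM device-capability bit below maps onto a QED_DEV_CAP_* flag
 * in p_hwfn->hw_info.device_capabilities. For example (illustrative):
 * a device exposing Ethernet and RoCE has both
 * NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET and
 * NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE set in shmem, which results
 * in QED_DEV_CAP_ETH and QED_DEV_CAP_ROCE being set below.
 */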
device_capabilities = qed_rd(p_hwfn, p_ptt, addr); 4209 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 4210 __set_bit(QED_DEV_CAP_ETH, 4211 &p_hwfn->hw_info.device_capabilities); 4212 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) 4213 __set_bit(QED_DEV_CAP_FCOE, 4214 &p_hwfn->hw_info.device_capabilities); 4215 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) 4216 __set_bit(QED_DEV_CAP_ISCSI, 4217 &p_hwfn->hw_info.device_capabilities); 4218 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) 4219 __set_bit(QED_DEV_CAP_ROCE, 4220 &p_hwfn->hw_info.device_capabilities); 4221 4222 return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 4223 } 4224 4225 static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 4226 { 4227 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 4228 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 4229 struct qed_dev *cdev = p_hwfn->cdev; 4230 4231 num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; 4232 4233 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 4234 * in the other bits are selected. 4235 * Bits 1-15 are for functions 1-15, respectively, and their value is 4236 * '0' only for enabled functions (function 0 always exists and is 4237 * enabled). 4238 * In case of CMT, only the "even" functions are enabled, and thus the 4239 * number of functions for both hwfns is learnt from the same bits. 4240 */ 4241 reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); 4242 4243 if (reg_function_hide & 0x1) { 4244 if (QED_IS_BB(cdev)) { 4245 if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) { 4246 num_funcs = 0; 4247 eng_mask = 0xaaaa; 4248 } else { 4249 num_funcs = 1; 4250 eng_mask = 0x5554; 4251 } 4252 } else { 4253 num_funcs = 1; 4254 eng_mask = 0xfffe; 4255 } 4256 4257 /* Get the number of the enabled functions on the engine */ 4258 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; 4259 while (tmp) { 4260 if (tmp & 0x1) 4261 num_funcs++; 4262 tmp >>= 0x1; 4263 } 4264 4265 /* Get the PF index within the enabled functions */ 4266 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; 4267 tmp = reg_function_hide & eng_mask & low_pfs_mask; 4268 while (tmp) { 4269 if (tmp & 0x1) 4270 enabled_func_idx--; 4271 tmp >>= 0x1; 4272 } 4273 } 4274 4275 p_hwfn->num_funcs_on_engine = num_funcs; 4276 p_hwfn->enabled_func_idx = enabled_func_idx; 4277 4278 DP_VERBOSE(p_hwfn, 4279 NETIF_MSG_PROBE, 4280 "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", 4281 p_hwfn->rel_pf_id, 4282 p_hwfn->abs_pf_id, 4283 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 4284 } 4285 4286 static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 4287 { 4288 u32 addr, global_offsize, global_addr, port_mode; 4289 struct qed_dev *cdev = p_hwfn->cdev; 4290 4291 /* In CMT there is always only one port */ 4292 if (cdev->num_hwfns > 1) { 4293 cdev->num_ports_in_engine = 1; 4294 cdev->num_ports = 1; 4295 return; 4296 } 4297 4298 /* Determine the number of ports per engine */ 4299 port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE); 4300 switch (port_mode) { 4301 case 0x0: 4302 cdev->num_ports_in_engine = 1; 4303 break; 4304 case 0x1: 4305 cdev->num_ports_in_engine = 2; 4306 break; 4307 case 0x2: 4308 cdev->num_ports_in_engine = 4; 4309 break; 4310 default: 4311 DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode); 4312 cdev->num_ports_in_engine = 1; /* Default to something */ 4313 break; 4314 } 4315 4316
/* Get the total number of ports of the device */ 4317 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 4318 PUBLIC_GLOBAL); 4319 global_offsize = qed_rd(p_hwfn, p_ptt, addr); 4320 global_addr = SECTION_ADDR(global_offsize, 0); 4321 addr = global_addr + offsetof(struct public_global, max_ports); 4322 cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr); 4323 } 4324 4325 static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 4326 { 4327 struct qed_mcp_link_capabilities *p_caps; 4328 u32 eee_status; 4329 4330 p_caps = &p_hwfn->mcp_info->link_capabilities; 4331 if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED) 4332 return; 4333 4334 p_caps->eee_speed_caps = 0; 4335 eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 4336 offsetof(struct public_port, eee_status)); 4337 eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> 4338 EEE_SUPPORTED_SPEED_OFFSET; 4339 4340 if (eee_status & EEE_1G_SUPPORTED) 4341 p_caps->eee_speed_caps |= QED_EEE_1G_ADV; 4342 if (eee_status & EEE_10G_SUPPORTED) 4343 p_caps->eee_speed_caps |= QED_EEE_10G_ADV; 4344 } 4345 4346 static int 4347 qed_get_hw_info(struct qed_hwfn *p_hwfn, 4348 struct qed_ptt *p_ptt, 4349 enum qed_pci_personality personality) 4350 { 4351 int rc; 4352 4353 /* Since all information is common, only the first hwfn should do this */ 4354 if (IS_LEAD_HWFN(p_hwfn)) { 4355 rc = qed_iov_hw_info(p_hwfn); 4356 if (rc) 4357 return rc; 4358 } 4359 4360 if (IS_LEAD_HWFN(p_hwfn)) 4361 qed_hw_info_port_num(p_hwfn, p_ptt); 4362 4363 qed_mcp_get_capabilities(p_hwfn, p_ptt); 4364 4365 qed_hw_get_nvm_info(p_hwfn, p_ptt); 4366 4367 rc = qed_int_igu_read_cam(p_hwfn, p_ptt); 4368 if (rc) 4369 return rc; 4370 4371 if (qed_mcp_is_init(p_hwfn)) 4372 ether_addr_copy(p_hwfn->hw_info.hw_mac_addr, 4373 p_hwfn->mcp_info->func_info.mac); 4374 else 4375 eth_random_addr(p_hwfn->hw_info.hw_mac_addr); 4376 4377 if (qed_mcp_is_init(p_hwfn)) { 4378 if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET) 4379 p_hwfn->hw_info.ovlan = 4380 p_hwfn->mcp_info->func_info.ovlan; 4381 4382 qed_mcp_cmd_port_init(p_hwfn, p_ptt); 4383 4384 qed_get_eee_caps(p_hwfn, p_ptt); 4385 4386 qed_mcp_read_ufp_config(p_hwfn, p_ptt); 4387 } 4388 4389 if (qed_mcp_is_init(p_hwfn)) { 4390 enum qed_pci_personality protocol; 4391 4392 protocol = p_hwfn->mcp_info->func_info.protocol; 4393 p_hwfn->hw_info.personality = protocol; 4394 } 4395 4396 if (QED_IS_ROCE_PERSONALITY(p_hwfn)) 4397 p_hwfn->hw_info.multi_tc_roce_en = true; 4398 4399 p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; 4400 p_hwfn->hw_info.num_active_tc = 1; 4401 4402 qed_get_num_funcs(p_hwfn, p_ptt); 4403 4404 if (qed_mcp_is_init(p_hwfn)) 4405 p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; 4406 4407 return qed_hw_get_resc(p_hwfn, p_ptt); 4408 } 4409 4410 static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 4411 { 4412 struct qed_dev *cdev = p_hwfn->cdev; 4413 u16 device_id_mask; 4414 u32 tmp; 4415 4416 /* Read Vendor Id / Device Id */ 4417 pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); 4418 pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); 4419 4420 /* Determine type */ 4421 device_id_mask = cdev->device_id & QED_DEV_ID_MASK; 4422 switch (device_id_mask) { 4423 case QED_DEV_ID_MASK_BB: 4424 cdev->type = QED_DEV_TYPE_BB; 4425 break; 4426 case QED_DEV_ID_MASK_AH: 4427 cdev->type = QED_DEV_TYPE_AH; 4428 break; 4429 default: 4430 DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id); 4431 return -EBUSY; 4432 } 4433 4434
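/* For example (illustrative): device ids of BB adapters share a
 * common prefix, so masking with QED_DEV_ID_MASK reduces any BB id to
 * QED_DEV_ID_MASK_BB and any AH id to QED_DEV_ID_MASK_AH; ids outside
 * both families are rejected with -EBUSY above.
 */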
cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); 4435 cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); 4436 4437 MASK_FIELD(CHIP_REV, cdev->chip_rev); 4438 4439 /* Learn number of HW-functions */ 4440 tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); 4441 4442 if (tmp & (1 << p_hwfn->rel_pf_id)) { 4443 DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); 4444 cdev->num_hwfns = 2; 4445 } else { 4446 cdev->num_hwfns = 1; 4447 } 4448 4449 cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt, 4450 MISCS_REG_CHIP_TEST_REG) >> 4; 4451 MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); 4452 cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); 4453 MASK_FIELD(CHIP_METAL, cdev->chip_metal); 4454 4455 DP_INFO(cdev->hwfns, 4456 "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 4457 QED_IS_BB(cdev) ? "BB" : "AH", 4458 'A' + cdev->chip_rev, 4459 (int)cdev->chip_metal, 4460 cdev->chip_num, cdev->chip_rev, 4461 cdev->chip_bond_id, cdev->chip_metal); 4462 4463 return 0; 4464 } 4465 4466 static void qed_nvm_info_free(struct qed_hwfn *p_hwfn) 4467 { 4468 kfree(p_hwfn->nvm_info.image_att); 4469 p_hwfn->nvm_info.image_att = NULL; 4470 } 4471 4472 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, 4473 void __iomem *p_regview, 4474 void __iomem *p_doorbells, 4475 u64 db_phys_addr, 4476 enum qed_pci_personality personality) 4477 { 4478 struct qed_dev *cdev = p_hwfn->cdev; 4479 int rc = 0; 4480 4481 /* Split PCI bars evenly between hwfns */ 4482 p_hwfn->regview = p_regview; 4483 p_hwfn->doorbells = p_doorbells; 4484 p_hwfn->db_phys_addr = db_phys_addr; 4485 4486 if (IS_VF(p_hwfn->cdev)) 4487 return qed_vf_hw_prepare(p_hwfn); 4488 4489 /* Validate that chip access is feasible */ 4490 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 4491 DP_ERR(p_hwfn, 4492 "Reading the ME register returns all Fs; Preventing further chip access\n"); 4493 return -EINVAL; 4494 } 4495 4496 get_function_id(p_hwfn); 4497 4498 /* Allocate PTT pool */ 4499 rc = qed_ptt_pool_alloc(p_hwfn); 4500 if (rc) 4501 goto err0; 4502 4503 /* Allocate the main PTT */ 4504 p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 4505 4506 /* First hwfn learns basic information, e.g., number of hwfns */ 4507 if (!p_hwfn->my_id) { 4508 rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); 4509 if (rc) 4510 goto err1; 4511 } 4512 4513 qed_hw_hwfn_prepare(p_hwfn); 4514 4515 /* Initialize MCP structure */ 4516 rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 4517 if (rc) { 4518 DP_NOTICE(p_hwfn, "Failed initializing mcp command\n"); 4519 goto err1; 4520 } 4521 4522 /* Read the device configuration information from the HW and SHMEM */ 4523 rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality); 4524 if (rc) { 4525 DP_NOTICE(p_hwfn, "Failed to get HW information\n"); 4526 goto err2; 4527 } 4528 4529 /* Sending a mailbox to the MFW should be done after qed_get_hw_info() 4530 * is called, as it sets the number of ports in an engine.
4531 */ 4532 if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) { 4533 rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 4534 if (rc) 4535 DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n"); 4536 } 4537 4538 /* NVRAM info initialization and population */ 4539 if (IS_LEAD_HWFN(p_hwfn)) { 4540 rc = qed_mcp_nvm_info_populate(p_hwfn); 4541 if (rc) { 4542 DP_NOTICE(p_hwfn, 4543 "Failed to populate nvm info shadow\n"); 4544 goto err2; 4545 } 4546 } 4547 4548 /* Allocate the init RT array and initialize the init-ops engine */ 4549 rc = qed_init_alloc(p_hwfn); 4550 if (rc) 4551 goto err3; 4552 4553 return rc; 4554 err3: 4555 if (IS_LEAD_HWFN(p_hwfn)) 4556 qed_nvm_info_free(p_hwfn); 4557 err2: 4558 if (IS_LEAD_HWFN(p_hwfn)) 4559 qed_iov_free_hw_info(p_hwfn->cdev); 4560 qed_mcp_free(p_hwfn); 4561 err1: 4562 qed_hw_hwfn_free(p_hwfn); 4563 err0: 4564 return rc; 4565 } 4566 4567 int qed_hw_prepare(struct qed_dev *cdev, 4568 int personality) 4569 { 4570 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 4571 int rc; 4572 4573 /* Store the precompiled init data ptrs */ 4574 if (IS_PF(cdev)) 4575 qed_init_iro_array(cdev); 4576 4577 /* Initialize the first hwfn - will learn number of hwfns */ 4578 rc = qed_hw_prepare_single(p_hwfn, 4579 cdev->regview, 4580 cdev->doorbells, 4581 cdev->db_phys_addr, 4582 personality); 4583 if (rc) 4584 return rc; 4585 4586 personality = p_hwfn->hw_info.personality; 4587 4588 /* Initialize the rest of the hwfns */ 4589 if (cdev->num_hwfns > 1) { 4590 void __iomem *p_regview, *p_doorbell; 4591 u64 db_phys_addr; 4592 u32 offset; 4593 4594 /* adjust bar offset for second engine */ 4595 offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, 4596 BAR_ID_0) / 2; 4597 p_regview = cdev->regview + offset; 4598 4599 offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, 4600 BAR_ID_1) / 2; 4601 4602 p_doorbell = cdev->doorbells + offset; 4603 4604 db_phys_addr = cdev->db_phys_addr + offset; 4605 4606 /* prepare second hw function */ 4607 rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, 4608 p_doorbell, db_phys_addr, 4609 personality); 4610 4611 /* in case of error, need to free the previously 4612 * initialized hwfn 0.
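 * The teardown below mirrors, in reverse order, what
 * qed_hw_prepare_single() set up for hwfn 0: the init-ops RT array
 * (qed_init_free), the NVM info shadow (qed_nvm_info_free), the MCP
 * structures (qed_mcp_free) and the PTT pool (qed_hw_hwfn_free).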
4613 */ 4614 if (rc) { 4615 if (IS_PF(cdev)) { 4616 qed_init_free(p_hwfn); 4617 qed_nvm_info_free(p_hwfn); 4618 qed_mcp_free(p_hwfn); 4619 qed_hw_hwfn_free(p_hwfn); 4620 } 4621 } 4622 } 4623 4624 return rc; 4625 } 4626 4627 void qed_hw_remove(struct qed_dev *cdev) 4628 { 4629 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 4630 int i; 4631 4632 if (IS_PF(cdev)) 4633 qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 4634 QED_OV_DRIVER_STATE_NOT_LOADED); 4635 4636 for_each_hwfn(cdev, i) { 4637 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 4638 4639 if (IS_VF(cdev)) { 4640 qed_vf_pf_release(p_hwfn); 4641 continue; 4642 } 4643 4644 qed_init_free(p_hwfn); 4645 qed_hw_hwfn_free(p_hwfn); 4646 qed_mcp_free(p_hwfn); 4647 } 4648 4649 qed_iov_free_hw_info(cdev); 4650 4651 qed_nvm_info_free(p_hwfn); 4652 } 4653 4654 static void qed_chain_free_next_ptr(struct qed_dev *cdev, 4655 struct qed_chain *p_chain) 4656 { 4657 void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL; 4658 dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 4659 struct qed_chain_next *p_next; 4660 u32 size, i; 4661 4662 if (!p_virt) 4663 return; 4664 4665 size = p_chain->elem_size * p_chain->usable_per_page; 4666 4667 for (i = 0; i < p_chain->page_cnt; i++) { 4668 if (!p_virt) 4669 break; 4670 4671 p_next = (struct qed_chain_next *)((u8 *)p_virt + size); 4672 p_virt_next = p_next->next_virt; 4673 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 4674 4675 dma_free_coherent(&cdev->pdev->dev, 4676 QED_CHAIN_PAGE_SIZE, p_virt, p_phys); 4677 4678 p_virt = p_virt_next; 4679 p_phys = p_phys_next; 4680 } 4681 } 4682 4683 static void qed_chain_free_single(struct qed_dev *cdev, 4684 struct qed_chain *p_chain) 4685 { 4686 if (!p_chain->p_virt_addr) 4687 return; 4688 4689 dma_free_coherent(&cdev->pdev->dev, 4690 QED_CHAIN_PAGE_SIZE, 4691 p_chain->p_virt_addr, p_chain->p_phys_addr); 4692 } 4693 4694 static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) 4695 { 4696 struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl; 4697 u32 page_cnt = p_chain->page_cnt, i, pbl_size; 4698 4699 if (!pp_addr_tbl) 4700 return; 4701 4702 for (i = 0; i < page_cnt; i++) { 4703 if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map) 4704 break; 4705 4706 dma_free_coherent(&cdev->pdev->dev, 4707 QED_CHAIN_PAGE_SIZE, 4708 pp_addr_tbl[i].virt_addr, 4709 pp_addr_tbl[i].dma_map); 4710 } 4711 4712 pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; 4713 4714 if (!p_chain->b_external_pbl) 4715 dma_free_coherent(&cdev->pdev->dev, 4716 pbl_size, 4717 p_chain->pbl_sp.p_virt_table, 4718 p_chain->pbl_sp.p_phys_table); 4719 4720 vfree(p_chain->pbl.pp_addr_tbl); 4721 p_chain->pbl.pp_addr_tbl = NULL; 4722 } 4723 4724 void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) 4725 { 4726 switch (p_chain->mode) { 4727 case QED_CHAIN_MODE_NEXT_PTR: 4728 qed_chain_free_next_ptr(cdev, p_chain); 4729 break; 4730 case QED_CHAIN_MODE_SINGLE: 4731 qed_chain_free_single(cdev, p_chain); 4732 break; 4733 case QED_CHAIN_MODE_PBL: 4734 qed_chain_free_pbl(cdev, p_chain); 4735 break; 4736 } 4737 } 4738 4739 static int 4740 qed_chain_alloc_sanity_check(struct qed_dev *cdev, 4741 enum qed_chain_cnt_type cnt_type, 4742 size_t elem_size, u32 page_cnt) 4743 { 4744 u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 4745 4746 /* The actual chain size can be larger than the maximal possible value 4747 * after rounding up the requested elements number to pages, and after 4748 * taking into account the unusable elements (next-ptr elements).
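 * Worked example (illustrative, assuming the usual 4KB
 * QED_CHAIN_PAGE_SIZE): with elem_size = 16 a page holds 256
 * elements, so page_cnt = 300 yields chain_size = 300 * 256 = 76800,
 * which no u16 producer/consumer index could address.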
* The size of a "u16" chain can be (U16_MAX + 1) since the chain 4750 * size/capacity fields are of a u32 type. 4751 */ 4752 if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 && 4753 chain_size > ((u32)U16_MAX + 1)) || 4754 (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) { 4755 DP_NOTICE(cdev, 4756 "The actual chain size (0x%llx) is larger than the maximal possible value\n", 4757 chain_size); 4758 return -EINVAL; 4759 } 4760 4761 return 0; 4762 } 4763 4764 static int 4765 qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain) 4766 { 4767 void *p_virt = NULL, *p_virt_prev = NULL; 4768 dma_addr_t p_phys = 0; 4769 u32 i; 4770 4771 for (i = 0; i < p_chain->page_cnt; i++) { 4772 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 4773 QED_CHAIN_PAGE_SIZE, 4774 &p_phys, GFP_KERNEL); 4775 if (!p_virt) 4776 return -ENOMEM; 4777 4778 if (i == 0) { 4779 qed_chain_init_mem(p_chain, p_virt, p_phys); 4780 qed_chain_reset(p_chain); 4781 } else { 4782 qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4783 p_virt, p_phys); 4784 } 4785 4786 p_virt_prev = p_virt; 4787 } 4788 /* Last page's next element should point to the beginning of the 4789 * chain. 4790 */ 4791 qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, 4792 p_chain->p_virt_addr, 4793 p_chain->p_phys_addr); 4794 4795 return 0; 4796 } 4797 4798 static int 4799 qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain) 4800 { 4801 dma_addr_t p_phys = 0; 4802 void *p_virt = NULL; 4803 4804 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 4805 QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL); 4806 if (!p_virt) 4807 return -ENOMEM; 4808 4809 qed_chain_init_mem(p_chain, p_virt, p_phys); 4810 qed_chain_reset(p_chain); 4811 4812 return 0; 4813 } 4814 4815 static int 4816 qed_chain_alloc_pbl(struct qed_dev *cdev, 4817 struct qed_chain *p_chain, 4818 struct qed_chain_ext_pbl *ext_pbl) 4819 { 4820 u32 page_cnt = p_chain->page_cnt, size, i; 4821 dma_addr_t p_phys = 0, p_pbl_phys = 0; 4822 struct addr_tbl_entry *pp_addr_tbl; 4823 u8 *p_pbl_virt = NULL; 4824 void *p_virt = NULL; 4825 4826 size = page_cnt * sizeof(*pp_addr_tbl); 4827 pp_addr_tbl = vzalloc(size); 4828 if (!pp_addr_tbl) 4829 return -ENOMEM; 4830 4831 /* The allocation of the PBL table is done with its full size, since it 4832 * is expected to be contiguous. 4833 * qed_chain_init_pbl_mem() is called even in case of an allocation 4834 * failure, since tbl was previously allocated, and it 4835 * should be saved to allow its freeing during the error flow.
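 * Layout note (added for clarity): the PBL is a flat array of
 * page_cnt DMA addresses, QED_CHAIN_PBL_ENTRY_SIZE bytes per entry;
 * the loop below writes one physical page address per entry while the
 * pp_addr_tbl shadow keeps the matching virtual addresses so the
 * pages can be freed later.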
4836 */ 4837 size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; 4838 4839 if (!ext_pbl) { 4840 p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, 4841 size, &p_pbl_phys, GFP_KERNEL); 4842 } else { 4843 p_pbl_virt = ext_pbl->p_pbl_virt; 4844 p_pbl_phys = ext_pbl->p_pbl_phys; 4845 p_chain->b_external_pbl = true; 4846 } 4847 4848 qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl); 4849 if (!p_pbl_virt) 4850 return -ENOMEM; 4851 4852 for (i = 0; i < page_cnt; i++) { 4853 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 4854 QED_CHAIN_PAGE_SIZE, 4855 &p_phys, GFP_KERNEL); 4856 if (!p_virt) 4857 return -ENOMEM; 4858 4859 if (i == 0) { 4860 qed_chain_init_mem(p_chain, p_virt, p_phys); 4861 qed_chain_reset(p_chain); 4862 } 4863 4864 /* Fill the PBL table with the physical address of the page */ 4865 *(dma_addr_t *)p_pbl_virt = p_phys; 4866 /* Keep the virtual address of the page */ 4867 p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt; 4868 p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys; 4869 4870 p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; 4871 } 4872 4873 return 0; 4874 } 4875 4876 int qed_chain_alloc(struct qed_dev *cdev, 4877 enum qed_chain_use_mode intended_use, 4878 enum qed_chain_mode mode, 4879 enum qed_chain_cnt_type cnt_type, 4880 u32 num_elems, 4881 size_t elem_size, 4882 struct qed_chain *p_chain, 4883 struct qed_chain_ext_pbl *ext_pbl) 4884 { 4885 u32 page_cnt; 4886 int rc = 0; 4887 4888 if (mode == QED_CHAIN_MODE_SINGLE) 4889 page_cnt = 1; 4890 else 4891 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 4892 4893 rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); 4894 if (rc) { 4895 DP_NOTICE(cdev, 4896 "Cannot allocate a chain with the given arguments:\n"); 4897 DP_NOTICE(cdev, 4898 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 4899 intended_use, mode, cnt_type, num_elems, elem_size); 4900 return rc; 4901 } 4902 4903 qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use, 4904 mode, cnt_type); 4905 4906 switch (mode) { 4907 case QED_CHAIN_MODE_NEXT_PTR: 4908 rc = qed_chain_alloc_next_ptr(cdev, p_chain); 4909 break; 4910 case QED_CHAIN_MODE_SINGLE: 4911 rc = qed_chain_alloc_single(cdev, p_chain); 4912 break; 4913 case QED_CHAIN_MODE_PBL: 4914 rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl); 4915 break; 4916 } 4917 if (rc) 4918 goto nomem; 4919 4920 return 0; 4921 4922 nomem: 4923 qed_chain_free(cdev, p_chain); 4924 return rc; 4925 } 4926 4927 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) 4928 { 4929 if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { 4930 u16 min, max; 4931 4932 min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); 4933 max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); 4934 DP_NOTICE(p_hwfn, 4935 "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 4936 src_id, min, max); 4937 4938 return -EINVAL; 4939 } 4940 4941 *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id; 4942 4943 return 0; 4944 } 4945 4946 int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) 4947 { 4948 if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { 4949 u8 min, max; 4950 4951 min = (u8)RESC_START(p_hwfn, QED_VPORT); 4952 max = min + RESC_NUM(p_hwfn, QED_VPORT); 4953 DP_NOTICE(p_hwfn, 4954 "vport id [%d] is not valid, available indices [%d - %d]\n", 4955 src_id, min, max); 4956 4957 return -EINVAL; 4958 } 4959 4960 *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id; 4961 4962 return 0; 4963 } 4964 4965 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) 4966 { 4967 if (src_id >= RESC_NUM(p_hwfn, 
QED_RSS_ENG)) { 4968 u8 min, max; 4969 4970 min = (u8)RESC_START(p_hwfn, QED_RSS_ENG); 4971 max = min + RESC_NUM(p_hwfn, QED_RSS_ENG); 4972 DP_NOTICE(p_hwfn, 4973 "rss_eng id [%d] is not valid, available indices [%d - %d]\n", 4974 src_id, min, max); 4975 4976 return -EINVAL; 4977 } 4978 4979 *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id; 4980 4981 return 0; 4982 } 4983 4984 static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 4985 u32 hw_addr, void *p_eth_qzone, 4986 size_t eth_qzone_size, u8 timeset) 4987 { 4988 struct coalescing_timeset *p_coal_timeset; 4989 4990 if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) { 4991 DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n"); 4992 return -EINVAL; 4993 } 4994 4995 p_coal_timeset = p_eth_qzone; 4996 memset(p_eth_qzone, 0, eth_qzone_size); 4997 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); 4998 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); 4999 qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); 5000 5001 return 0; 5002 } 5003 5004 int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle) 5005 { 5006 struct qed_queue_cid *p_cid = p_handle; 5007 struct qed_hwfn *p_hwfn; 5008 struct qed_ptt *p_ptt; 5009 int rc = 0; 5010 5011 p_hwfn = p_cid->p_owner; 5012 5013 if (IS_VF(p_hwfn->cdev)) 5014 return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid); 5015 5016 p_ptt = qed_ptt_acquire(p_hwfn); 5017 if (!p_ptt) 5018 return -EAGAIN; 5019 5020 if (rx_coal) { 5021 rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 5022 if (rc) 5023 goto out; 5024 p_hwfn->cdev->rx_coalesce_usecs = rx_coal; 5025 } 5026 5027 if (tx_coal) { 5028 rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); 5029 if (rc) 5030 goto out; 5031 p_hwfn->cdev->tx_coalesce_usecs = tx_coal; 5032 } 5033 out: 5034 qed_ptt_release(p_hwfn, p_ptt); 5035 return rc; 5036 } 5037 5038 int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, 5039 struct qed_ptt *p_ptt, 5040 u16 coalesce, struct qed_queue_cid *p_cid) 5041 { 5042 struct ustorm_eth_queue_zone eth_qzone; 5043 u8 timeset, timer_res; 5044 u32 address; 5045 int rc; 5046 5047 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5048 if (coalesce <= 0x7F) { 5049 timer_res = 0; 5050 } else if (coalesce <= 0xFF) { 5051 timer_res = 1; 5052 } else if (coalesce <= 0x1FF) { 5053 timer_res = 2; 5054 } else { 5055 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5056 return -EINVAL; 5057 } 5058 timeset = (u8)(coalesce >> timer_res); 5059 5060 rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5061 p_cid->sb_igu_id, false); 5062 if (rc) 5063 goto out; 5064 5065 address = BAR0_MAP_REG_USDM_RAM + 5066 USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5067 5068 rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, 5069 sizeof(struct ustorm_eth_queue_zone), timeset); 5070 if (rc) 5071 goto out; 5072 5073 out: 5074 return rc; 5075 } 5076 5077 int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, 5078 struct qed_ptt *p_ptt, 5079 u16 coalesce, struct qed_queue_cid *p_cid) 5080 { 5081 struct xstorm_eth_queue_zone eth_qzone; 5082 u8 timeset, timer_res; 5083 u32 address; 5084 int rc; 5085 5086 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 5087 if (coalesce <= 0x7F) { 5088 timer_res = 0; 5089 } else if (coalesce <= 0xFF) { 5090 timer_res = 1; 5091 } else if (coalesce <= 0x1FF) { 5092 timer_res = 2; 5093 } else { 5094 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 5095 return -EINVAL; 5096 }
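/* Worked example (illustrative): a request of coalesce = 200 usec
 * falls in the 0x80..0xFF bucket above, so timer_res = 1 and
 * timeset = 200 >> 1 = 100; the hardware then applies
 * timeset << timer_res = 200 usec. The Rx path above derives its
 * timeset the same way.
 */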
5097 timeset = (u8)(coalesce >> timer_res); 5098 5099 rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, 5100 p_cid->sb_igu_id, true); 5101 if (rc) 5102 goto out; 5103 5104 address = BAR0_MAP_REG_XSDM_RAM + 5105 XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 5106 5107 rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, 5108 sizeof(struct xstorm_eth_queue_zone), timeset); 5109 out: 5110 return rc; 5111 } 5112 5113 /* Calculate final WFQ values for all vports and configure them. 5114 * After this configuration each vport will have 5115 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) 5116 */ 5117 static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn, 5118 struct qed_ptt *p_ptt, 5119 u32 min_pf_rate) 5120 { 5121 struct init_qm_vport_params *vport_params; 5122 int i; 5123 5124 vport_params = p_hwfn->qm_info.qm_vport_params; 5125 5126 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5127 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5128 5129 vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) / 5130 min_pf_rate; 5131 qed_init_vport_wfq(p_hwfn, p_ptt, 5132 vport_params[i].first_tx_pq_id, 5133 vport_params[i].wfq); 5134 } 5135 } 5136 5137 static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn, 5138 u32 min_pf_rate) 5139 5140 { 5141 int i; 5142 5143 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) 5144 p_hwfn->qm_info.qm_vport_params[i].wfq = 1; 5145 } 5146 5147 static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, 5148 struct qed_ptt *p_ptt, 5149 u32 min_pf_rate) 5150 { 5151 struct init_qm_vport_params *vport_params; 5152 int i; 5153 5154 vport_params = p_hwfn->qm_info.qm_vport_params; 5155 5156 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5157 qed_init_wfq_default_param(p_hwfn, min_pf_rate); 5158 qed_init_vport_wfq(p_hwfn, p_ptt, 5159 vport_params[i].first_tx_pq_id, 5160 vport_params[i].wfq); 5161 } 5162 } 5163 5164 /* This function performs several validations for WFQ 5165 * configuration and required min rate for a given vport 5166 * 1. req_rate must be greater than one percent of min_pf_rate. 5167 * 2. req_rate should not cause other vports [not configured for WFQ explicitly] 5168 * rates to get less than one percent of min_pf_rate. 5169 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
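 *
 * Worked example (illustrative): with min_pf_rate = 10000 Mbps the
 * one-percent floor is 100 Mbps, so a vport requesting 50 Mbps fails
 * check (1); and if the bandwidth left over for the unconfigured
 * vports comes to less than 100 Mbps each, check (2) fails as well.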
5170 */ 5171 static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, 5172 u16 vport_id, u32 req_rate, u32 min_pf_rate) 5173 { 5174 u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 5175 int non_requested_count = 0, req_count = 0, i, num_vports; 5176 5177 num_vports = p_hwfn->qm_info.num_vports; 5178 5179 /* Accounting for the vports which are configured for WFQ explicitly */ 5180 for (i = 0; i < num_vports; i++) { 5181 u32 tmp_speed; 5182 5183 if ((i != vport_id) && 5184 p_hwfn->qm_info.wfq_data[i].configured) { 5185 req_count++; 5186 tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 5187 total_req_min_rate += tmp_speed; 5188 } 5189 } 5190 5191 /* Include current vport data as well */ 5192 req_count++; 5193 total_req_min_rate += req_rate; 5194 non_requested_count = num_vports - req_count; 5195 5196 if (req_rate < min_pf_rate / QED_WFQ_UNIT) { 5197 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 5198 "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5199 vport_id, req_rate, min_pf_rate); 5200 return -EINVAL; 5201 } 5202 5203 if (num_vports > QED_WFQ_UNIT) { 5204 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 5205 "Number of vports is greater than %d\n", 5206 QED_WFQ_UNIT); 5207 return -EINVAL; 5208 } 5209 5210 if (total_req_min_rate > min_pf_rate) { 5211 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 5212 "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 5213 total_req_min_rate, min_pf_rate); 5214 return -EINVAL; 5215 } 5216 5217 total_left_rate = min_pf_rate - total_req_min_rate; 5218 5219 left_rate_per_vp = total_left_rate / non_requested_count; 5220 if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) { 5221 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 5222 "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 5223 left_rate_per_vp, min_pf_rate); 5224 return -EINVAL; 5225 } 5226 5227 p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; 5228 p_hwfn->qm_info.wfq_data[vport_id].configured = true; 5229 5230 for (i = 0; i < num_vports; i++) { 5231 if (p_hwfn->qm_info.wfq_data[i].configured) 5232 continue; 5233 5234 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; 5235 } 5236 5237 return 0; 5238 } 5239 5240 static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, 5241 struct qed_ptt *p_ptt, u16 vp_id, u32 rate) 5242 { 5243 struct qed_mcp_link_state *p_link; 5244 int rc = 0; 5245 5246 p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output; 5247 5248 if (!p_link->min_pf_rate) { 5249 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; 5250 p_hwfn->qm_info.wfq_data[vp_id].configured = true; 5251 return rc; 5252 } 5253 5254 rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); 5255 5256 if (!rc) 5257 qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, 5258 p_link->min_pf_rate); 5259 else 5260 DP_NOTICE(p_hwfn, 5261 "Validation failed while configuring min rate\n"); 5262 5263 return rc; 5264 } 5265 5266 static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, 5267 struct qed_ptt *p_ptt, 5268 u32 min_pf_rate) 5269 { 5270 bool use_wfq = false; 5271 int rc = 0; 5272 u16 i; 5273 5274 /* Validate all pre configured vports for wfq */ 5275 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 5276 u32 rate; 5277 5278 if (!p_hwfn->qm_info.wfq_data[i].configured) 5279 continue; 5280 5281 rate = p_hwfn->qm_info.wfq_data[i].min_speed; 5282 use_wfq = true; 5283 5284 rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate); 5285 if (rc) { 5286 
DP_NOTICE(p_hwfn, 5287 "WFQ validation failed while configuring min rate\n"); 5288 break; 5289 } 5290 } 5291 5292 if (!rc && use_wfq) 5293 qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5294 else 5295 qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 5296 5297 return rc; 5298 } 5299 5300 /* Main API for qed clients to configure vport min rate. 5301 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] 5302 * rate - Speed in Mbps needs to be assigned to a given vport. 5303 */ 5304 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) 5305 { 5306 int i, rc = -EINVAL; 5307 5308 /* Currently not supported; Might change in future */ 5309 if (cdev->num_hwfns > 1) { 5310 DP_NOTICE(cdev, 5311 "WFQ configuration is not supported for this device\n"); 5312 return rc; 5313 } 5314 5315 for_each_hwfn(cdev, i) { 5316 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 5317 struct qed_ptt *p_ptt; 5318 5319 p_ptt = qed_ptt_acquire(p_hwfn); 5320 if (!p_ptt) 5321 return -EBUSY; 5322 5323 rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); 5324 5325 if (rc) { 5326 qed_ptt_release(p_hwfn, p_ptt); 5327 return rc; 5328 } 5329 5330 qed_ptt_release(p_hwfn, p_ptt); 5331 } 5332 5333 return rc; 5334 } 5335 5336 /* API to configure WFQ from mcp link change */ 5337 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, 5338 struct qed_ptt *p_ptt, u32 min_pf_rate) 5339 { 5340 int i; 5341 5342 if (cdev->num_hwfns > 1) { 5343 DP_VERBOSE(cdev, 5344 NETIF_MSG_LINK, 5345 "WFQ configuration is not supported for this device\n"); 5346 return; 5347 } 5348 5349 for_each_hwfn(cdev, i) { 5350 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 5351 5352 __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, 5353 min_pf_rate); 5354 } 5355 } 5356 5357 int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, 5358 struct qed_ptt *p_ptt, 5359 struct qed_mcp_link_state *p_link, 5360 u8 max_bw) 5361 { 5362 int rc = 0; 5363 5364 p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; 5365 5366 if (!p_link->line_speed && (max_bw != 100)) 5367 return rc; 5368 5369 p_link->speed = (p_link->line_speed * max_bw) / 100; 5370 p_hwfn->qm_info.pf_rl = p_link->speed; 5371 5372 /* Since the limiter also affects Tx-switched traffic, we don't want it 5373 * to limit such traffic in case there's no actual limit. 5374 * In that case, set limit to imaginary high boundary. 
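 * For example (illustrative): with line_speed = 25000 Mbps and
 * max_bw = 40, p_link->speed and the PF rate limit become 10000 Mbps;
 * with max_bw = 100 the limiter is parked at 100000 Mbps below, so
 * Tx-switched traffic is effectively unthrottled.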
5375 */ 5376 if (max_bw == 100) 5377 p_hwfn->qm_info.pf_rl = 100000; 5378 5379 rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, 5380 p_hwfn->qm_info.pf_rl); 5381 5382 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 5383 "Configured MAX bandwidth to be %08x Mb/sec\n", 5384 p_link->speed); 5385 5386 return rc; 5387 } 5388 5389 /* Main API to configure PF max bandwidth where bw range is [1 - 100] */ 5390 int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw) 5391 { 5392 int i, rc = -EINVAL; 5393 5394 if (max_bw < 1 || max_bw > 100) { 5395 DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n"); 5396 return rc; 5397 } 5398 5399 for_each_hwfn(cdev, i) { 5400 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 5401 struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); 5402 struct qed_mcp_link_state *p_link; 5403 struct qed_ptt *p_ptt; 5404 5405 p_link = &p_lead->mcp_info->link_output; 5406 5407 p_ptt = qed_ptt_acquire(p_hwfn); 5408 if (!p_ptt) 5409 return -EBUSY; 5410 5411 rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, 5412 p_link, max_bw); 5413 5414 qed_ptt_release(p_hwfn, p_ptt); 5415 5416 if (rc) 5417 break; 5418 } 5419 5420 return rc; 5421 } 5422 5423 int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, 5424 struct qed_ptt *p_ptt, 5425 struct qed_mcp_link_state *p_link, 5426 u8 min_bw) 5427 { 5428 int rc = 0; 5429 5430 p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; 5431 p_hwfn->qm_info.pf_wfq = min_bw; 5432 5433 if (!p_link->line_speed) 5434 return rc; 5435 5436 p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; 5437 5438 rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); 5439 5440 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 5441 "Configured MIN bandwidth to be %d Mb/sec\n", 5442 p_link->min_pf_rate); 5443 5444 return rc; 5445 } 5446 5447 /* Main API to configure PF min bandwidth where bw range is [1-100] */ 5448 int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw) 5449 { 5450 int i, rc = -EINVAL; 5451 5452 if (min_bw < 1 || min_bw > 100) { 5453 DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n"); 5454 return rc; 5455 } 5456 5457 for_each_hwfn(cdev, i) { 5458 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 5459 struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); 5460 struct qed_mcp_link_state *p_link; 5461 struct qed_ptt *p_ptt; 5462 5463 p_link = &p_lead->mcp_info->link_output; 5464 5465 p_ptt = qed_ptt_acquire(p_hwfn); 5466 if (!p_ptt) 5467 return -EBUSY; 5468 5469 rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, 5470 p_link, min_bw); 5471 if (rc) { 5472 qed_ptt_release(p_hwfn, p_ptt); 5473 return rc; 5474 } 5475 5476 if (p_link->min_pf_rate) { 5477 u32 min_rate = p_link->min_pf_rate; 5478 5479 rc = __qed_configure_vp_wfq_on_link_change(p_hwfn, 5480 p_ptt, 5481 min_rate); 5482 } 5483 5484 qed_ptt_release(p_hwfn, p_ptt); 5485 } 5486 5487 return rc; 5488 } 5489 5490 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 5491 { 5492 struct qed_mcp_link_state *p_link; 5493 5494 p_link = &p_hwfn->mcp_info->link_output; 5495 5496 if (p_link->min_pf_rate) 5497 qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, 5498 p_link->min_pf_rate); 5499 5500 memset(p_hwfn->qm_info.wfq_data, 0, 5501 sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); 5502 } 5503 5504 int qed_device_num_ports(struct qed_dev *cdev) 5505 { 5506 return cdev->num_ports; 5507 } 5508 5509 void qed_set_fw_mac_addr(__le16 *fw_msb, 5510 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac) 5511 { 5512 ((u8 *)fw_msb)[0] = mac[1]; 5513 ((u8 *)fw_msb)[1] = mac[0]; 5514 ((u8 *)fw_mid)[0] = 
mac[3]; 5515 ((u8 *)fw_mid)[1] = mac[2]; 5516 ((u8 *)fw_lsb)[0] = mac[5]; 5517 ((u8 *)fw_lsb)[1] = mac[4]; 5518 } 5519
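/* Worked example for qed_set_fw_mac_addr() (illustrative): for
 * mac = aa:bb:cc:dd:ee:ff the bytes are swapped pairwise, so memory
 * holds fw_msb = {0xbb, 0xaa}, fw_mid = {0xdd, 0xcc} and
 * fw_lsb = {0xff, 0xee}; read as little-endian 16-bit words these are
 * 0xaabb, 0xccdd and 0xeeff, i.e. the firmware sees the MAC as three
 * big-endian byte pairs packed into little-endian words.
 */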