/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static spinlock_t qm_lock;
static bool qm_lock_init = false;

/* API common to all protocols */
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};

static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
			   enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->cdev))
		return 1 << 17;

	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* An old MFW initialized the above register only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
}
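/* Illustration (not part of the original sources): the BAR size register used
 * above encodes the size as a power of two, size = 1 << (val + 15) bytes.
 * For example, val = 1 -> 64kB, val = 3 -> 256kB, val = 4 -> 512kB; a VF
 * always reports 1 << 17 = 128kB.
 */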

void qed_init_dp(struct qed_dev *cdev,
		 u32 dp_module, u8 dp_level)
{
	u32 i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}

void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->cdev = cdev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
	kfree(qm_info->wfq_data);
	qm_info->wfq_data = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		kfree(p_hwfn->p_tx_cids);
		p_hwfn->p_tx_cids = NULL;
		kfree(p_hwfn->p_rx_cids);
		p_hwfn->p_rx_cids = NULL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn, p_hwfn->p_eq);
		qed_consq_free(p_hwfn, p_hwfn->p_consq);
		qed_int_free(p_hwfn);
		qed_iov_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
	}
}

static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_port_params *p_qm_port;
	u16 num_pqs, multi_cos_tcs = 1;
	u8 pf_wfq = qm_info->pf_wfq;
	u32 pf_rl = qm_info->pf_rl;
	u16 num_vfs = 0;

#ifdef CONFIG_QED_SRIOV
	if (p_hwfn->cdev->p_iov_info)
		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
#endif
	memset(qm_info, 0, sizeof(*qm_info));

	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

	/* Sanity check that the setup requests a legal number of resources */
	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
		DP_ERR(p_hwfn,
		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
		return -EINVAL;
	}

	/* PQs will be arranged as follows: first the per-TC PQs, then the
	 * pure-LB queue.
	 */
	qm_info->qm_pq_params = kcalloc(num_pqs,
					sizeof(struct init_qm_pq_params),
					b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kcalloc(num_vports,
					   sizeof(struct init_qm_vport_params),
					   b_sleepable ? GFP_KERNEL
						       : GFP_ATOMIC);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
					  sizeof(struct init_qm_port_params),
					  b_sleepable ? GFP_KERNEL
						      : GFP_ATOMIC);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
				    b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	if (!qm_info->wfq_data)
		goto alloc_err;

	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

	/* First init per-TC PQs */
	for (i = 0; i < multi_cos_tcs; i++) {
		struct init_qm_pq_params *params =
		    &qm_info->qm_pq_params[curr_queue++];

		if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
			params->vport_id = vport_id;
			params->tc_id = p_hwfn->hw_info.non_offload_tc;
			params->wrr_group = 1;
		} else {
			params->vport_id = vport_id;
			params->tc_id = p_hwfn->hw_info.offload_tc;
			params->wrr_group = 1;
		}
	}

	/* Then init pure-LB PQ */
	qm_info->pure_lb_pq = curr_queue;
	qm_info->qm_pq_params[curr_queue].vport_id =
	    (u8)RESC_START(p_hwfn, QED_VPORT);
	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
	curr_queue++;

	qm_info->offload_pq = 0;
	/* Then init per-VF PQs */
	vf_offset = curr_queue;
	for (i = 0; i < num_vfs; i++) {
		/* First vport is used by the PF */
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.non_offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	qm_info->vf_queues_offset = vf_offset;
	qm_info->num_pqs = num_pqs;
	qm_info->num_vports = num_vports;

	/* Initialize qm port parameters */
	num_ports = p_hwfn->cdev->num_ports_in_engines;
	for (i = 0; i < num_ports; i++) {
		p_qm_port = &qm_info->qm_port_params[i];
		p_qm_port->active = 1;
		p_qm_port->num_active_phys_tcs = 4;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}

	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

	qm_info->num_vf_pqs = num_vfs;
	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);

	for (i = 0; i < qm_info->num_vports; i++)
		qm_info->qm_vport_params[i].vport_wfq = 1;

	qm_info->vport_rl_en = 1;
	qm_info->vport_wfq_en = 1;
	qm_info->pf_rl = pf_rl;
	qm_info->pf_wfq = pf_wfq;

	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}
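/* Illustrative layout of the qm_pq_params[] array built above (added note,
 * not part of the original sources): indices 0..multi_cos_tcs-1 hold the
 * per-TC PQs, the next entry is the pure-LB PQ, and the remaining num_vfs
 * entries are the per-VF PQs starting at vf_queues_offset. With the current
 * multi_cos_tcs = 1 this is simply [TC PQ][pure-LB PQ][VF0]...[VFn-1].
 */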

/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	int rc;

	/* qm_info is allocated in qed_init_qm_info() which is already called
	 * from qed_resc_alloc() or a previous call of qed_qm_reconf().
	 * The allocated size may change each init, so we free it before the
	 * next allocation.
	 */
	qed_qm_info_free(p_hwfn);

	/* initialize qed's qm data structure */
	rc = qed_init_qm_info(p_hwfn, false);
	if (rc)
		return rc;

	/* stop PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	/* clear the QM_PF runtime phase leftovers from previous init */
	qed_init_clear_rt_data(p_hwfn);

	/* prepare QM portion of runtime array */
	qed_qm_init_pf(p_hwfn);

	/* activate init tool on runtime array */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			  p_hwfn->hw_info.hw_mode);
	if (rc)
		return rc;

	/* start PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	return 0;
}

int qed_resc_alloc(struct qed_dev *cdev)
{
	struct qed_consq *p_consq;
	struct qed_eq *p_eq;
	int i, rc = 0;

	if (IS_VF(cdev))
		return rc;

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	/* Allocate Memory for the Queue->CID mapping */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		int tx_size = sizeof(struct qed_hw_cid_data) *
				RESC_NUM(p_hwfn, QED_L2_QUEUE);
		int rx_size = sizeof(struct qed_hw_cid_data) *
				RESC_NUM(p_hwfn, QED_L2_QUEUE);

		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
		if (!p_hwfn->p_tx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Tx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}

		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
		if (!p_hwfn->p_rx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Rx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager).
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Prepare and process QM requirements */
		rc = qed_init_qm_info(p_hwfn, true);
		if (rc)
			goto alloc_err;

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		p_eq = qed_eq_alloc(p_hwfn, 256);
		if (!p_eq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_eq = p_eq;

		p_consq = qed_consq_alloc(p_hwfn);
		if (!p_consq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_consq = p_consq;

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}

		/* DCBX initialization */
		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dcbx structure\n");
			goto alloc_err;
		}
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats) {
		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
		rc = -ENOMEM;
		goto alloc_err;
	}

	return 0;

alloc_err:
	qed_resc_free(cdev);
	return rc;
}

void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
		qed_consq_setup(p_hwfn, p_hwfn->p_consq);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
	}
}

#define FINAL_CLEANUP_POLL_CNT	(100)
#define FINAL_CLEANUP_POLL_TIME	(10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GTT_BAR0_MAP_REG_USDM_RAM +
		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

	if (is_vf)
		id += 0x10;

	command |= X_FINAL_CLEANUP_AGG_INT <<
		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		msleep(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = 0;
	else
		DP_NOTICE(p_hwfn,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}
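/* Note (added clarification, not in the original sources): the polling loop
 * above waits at most FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME =
 * 100 * 10 ms, i.e. roughly one second, for the firmware acknowledgement
 * before giving up with -EBUSY.
 */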

static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
	int hw_mode = 0;

	hw_mode = (1 << MODE_BB_B0);

	switch (p_hwfn->cdev->num_ports_in_engines) {
	case 1:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
		break;
	case 2:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
		break;
	case 4:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
		break;
	default:
		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);
		return;
	}

	switch (p_hwfn->cdev->mf_mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	case QED_MF_OVLAN:
		hw_mode |= 1 << MODE_MF_SD;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		hw_mode |= 1 << MODE_MF_SI;
	}

	hw_mode |= 1 << MODE_ASIC;

	if (p_hwfn->cdev->num_hwfns > 1)
		hw_mode |= 1 << MODE_100G;

	p_hwfn->hw_info.hw_mode = hw_mode;

	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
		   "Configuring function for hw_mode: 0x%08x\n",
		   p_hwfn->hw_info.hw_mode);
}
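/* Illustration (not part of the original sources): hw_mode is a bitmask of
 * MODE_* flags. A single-hwfn NPAR device with two ports per engine, for
 * instance, ends up with (1 << MODE_BB_B0) | (1 << MODE_PORTS_PER_ENG_2) |
 * (1 << MODE_MF_SI) | (1 << MODE_ASIC); CMT devices additionally set
 * (1 << MODE_100G).
 */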

/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, sb_id;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_igu_info *p_igu_info;
		struct qed_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
		     sb_id++) {
			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
			if (!p_block->is_pf)
				continue;

			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_block->function_id,
					      0, 0);
			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
					 sb_entry);
		}
	}
}

static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 concrete_fid;
	int rc = 0;
	u8 vf_id;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc != 0)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	/* Disable relaxed ordering in the PCI config space */
	qed_wr(p_hwfn, p_ptt, 0x20b4,
	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
		qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
	}
	/* pretend to original PF */
	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return rc;
}

static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    int hw_mode)
{
	int rc = 0;

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
			  hw_mode);
	return rc;
}

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunn_start_params *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}

	qed_cxt_hw_init_pf(p_hwfn);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & (1 << MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & (1 << MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
	if (rc != 0)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
				     allow_npar_tx_switch);
		if (rc)
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
	}
	return rc;
}

static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u8 enable)
{
	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

	/* Change PF in PXP */
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

	/* wait until value is set - try for 1 second every 50us */
	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
		val = qed_rd(p_hwfn, p_ptt,
			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
		if (val == set_val)
			break;

		usleep_range(50, 60);
	}

	if (val != set_val) {
		DP_NOTICE(p_hwfn,
			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
		return -EAGAIN;
	}

	return 0;
}

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_main_ptt)
{
	/* Read shadow of current MFW mailbox */
	qed_mcp_read_mb(p_hwfn, p_main_ptt);
	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
	       p_hwfn->mcp_info->mfw_mb_cur,
	       p_hwfn->mcp_info->mfw_mb_length);
}

int qed_hw_init(struct qed_dev *cdev,
		struct qed_tunn_start_params *p_tunn,
		bool b_hw_start,
		enum qed_int_mode int_mode,
		bool allow_npar_tx_switch,
		const u8 *bin_fw_data)
{
	u32 load_code, param;
	int rc, mfw_rc, i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	if (IS_PF(cdev)) {
		rc = qed_init_fw_data(cdev, bin_fw_data);
		if (rc != 0)
			return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			p_hwfn->b_int_enabled = 1;
			continue;
		}

		/* Enable DMAE in PXP */
		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

		qed_calc_hw_mode(p_hwfn);

		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
				      &load_code);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
			return rc;
		}

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
			   rc, load_code);

		p_hwfn->first_on_engine = (load_code ==
					   FW_MSG_CODE_DRV_LOAD_ENGINE);

		if (!qm_lock_init) {
			spin_lock_init(&qm_lock);
			qm_lock_init = true;
		}

		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc)
				break;
		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode);
			if (rc)
				break;

		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					    p_tunn, p_hwfn->hw_info.hw_mode,
					    b_hw_start, int_mode,
					    allow_npar_tx_switch);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		if (rc)
			DP_NOTICE(p_hwfn,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				  load_code, rc);

		/* ACK mfw regardless of success or failure of initialization */
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_LOAD_DONE,
				     0, &load_code, &param);
		if (rc)
			return rc;
		if (mfw_rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
			return mfw_rc;
		}

		/* send DCBX attention request command */
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DCB,
			   "sending phony dcbx set command to trigger DCBx attention handling\n");
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_SET_DCBX,
				     1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
				     &load_code, &param);
		if (mfw_rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send DCBX attention request\n");
			return mfw_rc;
		}

		p_hwfn->hw_init_done = true;
	}

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static inline void qed_hw_timers_stop(struct qed_dev *cdev,
				      struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt)
{
	int i;

	/* close timers */
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);

	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
		if ((!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
		    (!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_TASK)))
			break;

		/* Dependent on number of connection/tasks, possibly
		 * 1ms sleep is required between polls
		 */
		usleep_range(1000, 2000);
	}

	if (i < QED_HW_STOP_RETRY_LIMIT)
		return;

	DP_NOTICE(p_hwfn,
		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}

void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
	}
}
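/* Note (added clarification, not in the original sources): the scan-poll in
 * qed_hw_timers_stop() above retries QED_HW_STOP_RETRY_LIMIT times with a
 * 1-2 ms sleep per iteration, i.e. the wait for the timer linear scans is
 * bounded at roughly 10-20 ms before the warning is printed.
 */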

int qed_hw_stop(struct qed_dev *cdev)
{
	int rc = 0, t_rc;
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		rc = qed_sp_pf_stop(p_hwfn);
		if (rc)
			DP_NOTICE(p_hwfn,
				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}

	if (IS_PF(cdev)) {
		/* Disable DMAE in PXP - in CMT, this should only be done for
		 * first hw-function, and only after all transactions have
		 * stopped for all active hw-functions.
		 */
		t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
					   cdev->hwfns[0].p_main_ptt, false);
		if (t_rc != 0)
			rc = t_rc;
	}

	return rc;
}

void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}

		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFDOWN,
			   "Shutting down the fastpath\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}
}

void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
	if (IS_VF(p_hwfn->cdev))
		return;

	/* Re-open incoming traffic */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}

static int qed_reg_assert(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt, u32 reg,
			  bool expected)
{
	u32 assert_val = qed_rd(hwfn, ptt, reg);

	if (assert_val != expected) {
		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
			  reg, expected);
		return -EINVAL;
	}

	return 0;
}

int qed_hw_reset(struct qed_dev *cdev)
{
	int rc = 0;
	u32 unload_resp, unload_param;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_reset(p_hwfn);
			if (rc)
				return rc;
			continue;
		}

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

		/* Check for incorrect states */
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_TX, 0);
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_OTHER, 0);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       TCFC_REG_STRONG_ENABLE_PF, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       CCFC_REG_STRONG_ENABLE_PF, 0);

		/* Send unload command to MCP */
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_REQ,
				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
				 &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
		}

		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_DONE,
				 0, &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
			return rc;
		}
	}

	return rc;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);
}

/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	/* clear indirect access */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

	/* Clean Previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
	       1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}

static void get_function_id(struct qed_hwfn *p_hwfn)
{
	/* ME Register */
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);
}

static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	int num_features = 1;

	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
					num_features,
					RESC_NUM(p_hwfn, QED_L2_QUEUE));
	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
		   num_features);
}
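/* Note (added clarification, not in the original sources): the per-PF
 * resource split below is a plain even division of the engine totals by the
 * number of enabled PFs, with each PF's range starting at
 * resc_num[i] * rel_pf_id. For example, with 8 PFs on the engine each PF
 * gets MAX_NUM_L2_QUEUES_BB / 8 L2 queues and MAX_QM_TX_QUEUES_BB / 8 PQs.
 */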

static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
	u32 *resc_start = p_hwfn->hw_info.resc_start;
	u8 num_funcs = p_hwfn->num_funcs_on_engine;
	u32 *resc_num = p_hwfn->hw_info.resc_num;
	struct qed_sb_cnt_info sb_cnt_info;
	int i, max_vf_vlan_filters;

	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));

#ifdef CONFIG_QED_SRIOV
	max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
#else
	max_vf_vlan_filters = 0;
#endif

	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);

	resc_num[QED_SB] = min_t(u32,
				 (MAX_SB_PER_PATH_BB / num_funcs),
				 sb_cnt_info.sb_cnt);
	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
	resc_num[QED_RL] = 8;
	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
			     num_funcs;
	resc_num[QED_ILT] = 950;

	for (i = 0; i < QED_MAX_RESC; i++)
		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

	qed_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "The numbers for each resource are:\n"
		   "SB = %d start = %d\n"
		   "L2_QUEUE = %d start = %d\n"
		   "VPORT = %d start = %d\n"
		   "PQ = %d start = %d\n"
		   "RL = %d start = %d\n"
		   "MAC = %d start = %d\n"
		   "VLAN = %d start = %d\n"
		   "ILT = %d start = %d\n",
		   p_hwfn->hw_info.resc_num[QED_SB],
		   p_hwfn->hw_info.resc_start[QED_SB],
		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_num[QED_VPORT],
		   p_hwfn->hw_info.resc_start[QED_VPORT],
		   p_hwfn->hw_info.resc_num[QED_PQ],
		   p_hwfn->hw_info.resc_start[QED_PQ],
		   p_hwfn->hw_info.resc_num[QED_RL],
		   p_hwfn->hw_info.resc_start[QED_RL],
		   p_hwfn->hw_info.resc_num[QED_MAC],
		   p_hwfn->hw_info.resc_start[QED_MAC],
		   p_hwfn->hw_info.resc_num[QED_VLAN],
		   p_hwfn->hw_info.resc_start[QED_VLAN],
		   p_hwfn->hw_info.resc_num[QED_ILT],
		   p_hwfn->hw_info.resc_start[QED_ILT]);
}

static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
			  core_cfg);
		break;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link->speed.advertised_speeds =
		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
		link->speed.advertised_speeds;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
			  link_temp);
	}

	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read device capabilities information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
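/* Worked example (added, not in the original sources) for the function-count
 * derivation below: if MISCS_REG_FUNCTION_HIDE reads 0x0001 on a single-hwfn
 * device (bypass bit set, no other function hidden), then
 * tmp = ~0x0001 & 0x5554 = 0x5554, whose seven set bits are added to the
 * initial count of one, giving num_funcs_on_engine = 8.
 */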

static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 reg_function_hide, tmp, eng_mask;
	u8 num_funcs;

	num_funcs = MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and is
	 * enabled).
	 * In case of CMT, only the "even" functions are enabled, and thus the
	 * number of functions for both hwfns is learnt from the same bits.
	 */
	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
			num_funcs = 0;
			eng_mask = 0xaaaa;
		} else {
			num_funcs = 1;
			eng_mask = 0x5554;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id,
		   p_hwfn->abs_pf_id,
		   p_hwfn->num_funcs_on_engine);
}

static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	u32 port_mode;
	int rc;

	/* Since all information is common, only the first hwfn should do this */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_iov_hw_info(p_hwfn);
		if (rc)
			return rc;
	}

	/* Read the port mode */
	port_mode = qed_rd(p_hwfn, p_ptt,
			   CNIG_REG_NW_PORT_MODE_BB_B0);

	if (port_mode < 3) {
		p_hwfn->cdev->num_ports_in_engines = 1;
	} else if (port_mode <= 5) {
		p_hwfn->cdev->num_ports_in_engines = 2;
	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
			  port_mode);

		/* Default num_ports_in_engines to something */
		p_hwfn->cdev->num_ports_in_engines = 1;
	}

	qed_hw_get_nvm_info(p_hwfn, p_ptt);

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}

	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	qed_get_num_funcs(p_hwfn, p_ptt);

	qed_hw_get_resc(p_hwfn);

	return rc;
}

static int qed_get_dev_info(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 tmp;

	/* Read Vendor Id / Device Id */
	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
			     &cdev->vendor_id);
	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
			     &cdev->device_id);
	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_REV);
	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	cdev->type = QED_DEV_TYPE_BB;
	/* Learn number of HW-functions */
	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
		     MISCS_REG_CMT_ENABLED_FOR_PAIR);

	if (tmp & (1 << p_hwfn->rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				       MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

	DP_INFO(cdev->hwfns,
		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		cdev->chip_num, cdev->chip_rev,
		cdev->chip_bond_id, cdev->chip_metal);

	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
		DP_NOTICE(cdev->hwfns,
			  "The chip type/rev (BB A0) is not supported!\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_prepare(p_hwfn);

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
		goto err0;
	}

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id) {
		rc = qed_get_dev_info(p_hwfn->cdev);
		if (rc != 0)
			goto err1;
	}

	qed_hw_hwfn_prepare(p_hwfn);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
		goto err2;
	}

	return rc;
err2:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_iov_free_hw_info(p_hwfn->cdev);
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}

int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	if (IS_PF(cdev))
		qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u8 __iomem *addr;

		/* adjust bar offset for second engine */
		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
		p_regview = addr;

		/* adjust doorbell bar offset for second engine */
		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
		p_doorbell = addr;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, personality);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			if (IS_PF(cdev)) {
				qed_init_free(p_hwfn);
				qed_mcp_free(p_hwfn);
				qed_hw_hwfn_free(p_hwfn);
			}
		}
	}

	return rc;
}

void qed_hw_remove(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			qed_vf_pf_release(p_hwfn);
			continue;
		}

		qed_init_free(p_hwfn);
		qed_hw_hwfn_free(p_hwfn);
		qed_mcp_free(p_hwfn);
	}

	qed_iov_free_hw_info(cdev);
}

int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    u16 num_elems,
		    size_t elem_size,
		    struct qed_chain *p_chain)
{
	dma_addr_t p_pbl_phys = 0;
	void *p_pbl_virt = NULL;
	dma_addr_t p_phys = 0;
	void *p_virt = NULL;
	u16 page_cnt = 0;
	size_t size;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	size = page_cnt * QED_CHAIN_PAGE_SIZE;
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    size, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
		goto nomem;
	}

	if (mode == QED_CHAIN_MODE_PBL) {
		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
						size, &p_pbl_phys,
						GFP_KERNEL);
		if (!p_pbl_virt) {
			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
			goto nomem;
		}

		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
				   (u8)elem_size, intended_use,
				   p_pbl_phys, p_pbl_virt);
	} else {
		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
			       (u8)elem_size, intended_use, mode);
	}

	return 0;

nomem:
	dma_free_coherent(&cdev->pdev->dev,
			  page_cnt * QED_CHAIN_PAGE_SIZE,
			  p_virt, p_phys);
	dma_free_coherent(&cdev->pdev->dev,
			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
			  p_pbl_virt, p_pbl_phys);

	return -ENOMEM;
}

void qed_chain_free(struct qed_dev *cdev,
		    struct qed_chain *p_chain)
{
	size_t size;

	if (!p_chain->p_virt_addr)
		return;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
		dma_free_coherent(&cdev->pdev->dev, size,
				  p_chain->pbl.p_virt_table,
				  p_chain->pbl.p_phys_table);
	}

	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
	dma_free_coherent(&cdev->pdev->dev, size,
			  p_chain->p_virt_addr,
			  p_chain->p_phys_addr);
}

int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
		    u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}
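/* Note (added clarification, not in the original sources): qed_fw_l2_queue()
 * above and the helpers below translate a PF-relative index into the
 * absolute, firmware-visible index: dst_id = RESC_START(resource) + src_id.
 * For example, with a (hypothetical) RESC_START of 32 for QED_L2_QUEUE,
 * relative queue 5 maps to absolute queue 37.
 */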

int qed_fw_vport(struct qed_hwfn *p_hwfn,
		 u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_VPORT);
		max = min + RESC_NUM(p_hwfn, QED_VPORT);
		DP_NOTICE(p_hwfn,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

	return 0;
}

int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
		   u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
		DP_NOTICE(p_hwfn,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
}

/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
 */
static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
					    min_pf_rate;
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}

static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}

static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}
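/* Worked example (added, not in the original sources) for the vport_wfq
 * formula used in qed_configure_wfq_for_all_vports() above, treating
 * QED_WFQ_UNIT as one-percent granularity as the checks below do: with
 * min_pf_rate = 10000 Mbps and a vport min_speed of 2500 Mbps,
 * vport_wfq = (2500 * QED_WFQ_UNIT) / 10000 = 25, i.e. roughly 25% of the
 * PF minimum rate is reserved for that vport.
 */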

/* This function performs several validations for WFQ
 * configuration and required min rate for a given vport
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause other vports [not configured for WFQ
 *    explicitly] rates to get less than one percent of min_pf_rate.
 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed
 *    min_pf_rate.
 */
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
			      u16 vport_id, u32 req_rate,
			      u32 min_pf_rate)
{
	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
		u32 tmp_speed;

		if ((i != vport_id) &&
		    p_hwfn->qm_info.wfq_data[i].configured) {
			req_count++;
			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
			total_req_min_rate += tmp_speed;
		}
	}

	/* Include current vport data as well */
	req_count++;
	total_req_min_rate += req_rate;
	non_requested_count = num_vports - req_count;

	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return -EINVAL;
	}

	if (num_vports > QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Number of vports is greater than %d\n",
			   QED_WFQ_UNIT);
		return -EINVAL;
	}

	if (total_req_min_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   total_req_min_rate, min_pf_rate);
		return -EINVAL;
	}

	total_left_rate = min_pf_rate - total_req_min_rate;

	left_rate_per_vp = total_left_rate / non_requested_count;
	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   left_rate_per_vp, min_pf_rate);
		return -EINVAL;
	}

	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
	p_hwfn->qm_info.wfq_data[vport_id].configured = true;

	for (i = 0; i < num_vports; i++) {
		if (p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
	}

	return 0;
}

static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
	struct qed_mcp_link_state *p_link;
	int rc = 0;

	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (rc == 0)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn,
			  "Validation failed while configuring min rate\n");

	return rc;
}

static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = 0;
	u16 i;

	/* Validate all pre configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (!rc && use_wfq)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}

/* Main API for qed clients to configure vport min rate.
 * vp_id - vport id in PF. Range [0 - (total_num_vports_per_pf - 1)]
 * rate - Speed in Mbps to be assigned to the given vport.
 */
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
	int i, rc = -EINVAL;

	/* Currently not supported; might change in the future */
	if (cdev->num_hwfns > 1) {
		DP_NOTICE(cdev,
			  "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		if (!rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

/* API to configure WFQ on an MCP link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{
	int i;

	if (cdev->num_hwfns > 1) {
		DP_VERBOSE(cdev,
			   NETIF_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		__qed_configure_vp_wfq_on_link_change(p_hwfn,
						      p_hwfn->p_dpc_ptt,
						      min_pf_rate);
	}
}

int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set the limit to an artificially high boundary.
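	 *
	 * Illustrative example (hypothetical numbers): with a line_speed of
	 * 25000 Mbps, max_bw = 40 programs a rate limit of 10000 Mbps, while
	 * max_bw = 100 keeps the 100000 Mbps placeholder below so that
	 * Tx-switched traffic is effectively not rate limited.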
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %d Mb/sec\n",
		   p_link->speed);

	return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
	int i, rc = -EINVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						      p_link, max_bw);

		qed_ptt_release(p_hwfn, p_ptt);

		if (rc)
			break;
	}

	return rc;
}

int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						      p_link, min_bw);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
								   p_ptt,
								   min_rate);
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
					       p_link->min_pf_rate);

	memset(p_hwfn->qm_info.wfq_data, 0,
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}
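
/* Illustrative note (hypothetical numbers): on a 25000 Mbps link,
 * qed_configure_pf_min_bandwidth() with min_bw = 20 yields a min_pf_rate of
 * 5000 Mbps and re-runs the per-vport WFQ validation against it, while
 * qed_configure_pf_max_bandwidth() with max_bw = 40 programs a PF rate
 * limit of 10000 Mbps.
 */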