/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_fcdiag,
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	&hal_mod_dconf,
	NULL
};

/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_fcdiag_intr,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};

/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};


static void
bfa_com_port_attach(struct bfa_s *bfa)
{
	struct bfa_port_s *port = &bfa->modules.port;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);

	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}

/*
 * ablk module attach
 */
static void
bfa_com_ablk_attach(struct bfa_s *bfa)
{
	struct bfa_ablk_s *ablk = &bfa->modules.ablk;
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}
static void
bfa_com_cee_attach(struct bfa_s *bfa)
{
	struct bfa_cee_s *cee = &bfa->modules.cee;
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);

	cee->trcmod = bfa->trcmod;
	bfa_cee_attach(cee, &bfa->ioc, bfa);
	bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
}

static void
bfa_com_sfp_attach(struct bfa_s *bfa)
{
	struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);

	bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
	bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
}

static void
bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_flash_s *flash = BFA_FLASH(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);

	bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_flash_memclaim(flash, flash_dma->kva_curp,
			   flash_dma->dma_curp, mincfg);
}

static void
bfa_com_diag_attach(struct bfa_s *bfa)
{
	struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);

	bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
	bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
}

static void
bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_phy_s *phy = BFA_PHY(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

#define DEF_CFG_NUM_FABRICS	1
#define DEF_CFG_NUM_LPORTS	256
#define DEF_CFG_NUM_CQS		4
#define DEF_CFG_NUM_IOIM_REQS	(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS	128
#define DEF_CFG_NUM_FCXP_REQS	64
#define DEF_CFG_NUM_UF_BUFS	64
#define DEF_CFG_NUM_RPORTS	1024
#define DEF_CFG_NUM_ITNIMS	(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS	256

#define DEF_CFG_NUM_SGPGS	2048
#define DEF_CFG_NUM_REQQ_ELEMS	256
#define DEF_CFG_NUM_RSPQ_ELEMS	64
#define DEF_CFG_NUM_SBOOT_TGTS	16
#define DEF_CFG_NUM_SBOOT_LUNS	16

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}
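/*
 * Usage sketch (illustrative only; not part of the driver): how a
 * submodule typically pairs with bfa_reqq_resume() above -- try to
 * claim a request-queue element, and when the queue is full, park a
 * caller-owned wait element whose qresume callback fires once space
 * frees up. The helper name is hypothetical; wqe->qresume and
 * wqe->cbarg are assumed to be set by the caller.
 */
static void __maybe_unused
bfa_reqq_post_or_wait_sketch(struct bfa_s *bfa, int qid,
			     struct bfa_reqq_wait_s *wqe)
{
	struct bfi_msg_s *m = bfa_reqq_next(bfa, qid);

	if (!m) {
		/* No room: queue the wait element for bfa_reqq_resume(). */
		list_add_tail(&wqe->qe, bfa_reqq(bfa, qid));
		return;
	}

	/* ... build the request in *m ..., then ring the doorbell. */
	bfa_reqq_produce(bfa, qid, m->mhdr);
}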
static inline void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class](bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * acknowledge RME completions and update CI
	 */
	bfa_isr_rspq_ack(bfa, qid, ci);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
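/*
 * Illustrative helper (a sketch; nothing in the driver calls it): the
 * occupancy of a circular queue whose producer/consumer indices wrap
 * at q_depth, as bfa_isr_rspq() above maintains them via CQ_INCR().
 * ci == pi means empty, so at most q_depth - 1 entries are pending.
 */
static inline u32 __maybe_unused
bfa_cq_pending_sketch(u32 pi, u32 ci, u32 q_depth)
{
	return (pi >= ci) ? (pi - ci) : (q_depth - ci + pi);
}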
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return;

	bfa_msix_lpu_err(bfa, intr);
}

bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * Unconditional RME completion queue interrupt
	 */
	if (bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}

void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
			(intr & __HFN_INT_LL_HALT) : 0;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If the LL_HALT bit is set, the FW Init Halt LL Port
			 * Register needs to be cleared as well so the
			 * Interrupt Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * The ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared, so the driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
			       bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing only on the driver init path,
	 * not on the ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
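/*
 * The CFG_REQ mailbox message above carries only the DMA address of
 * the config-info page; the firmware fetches the bfi_iocfc_cfg_s
 * payload itself via DMA. That page, and the cfgrsp page it points
 * back to, are carved out of the IOCFC DMA segment in
 * bfa_iocfc_mem_claim() below.
 */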
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8 *dm_kva = NULL;
	u64 dm_pa = 0;
	int i, per_reqq_sz, per_rspq_sz, dbgsz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
		bfa_mem_kva_curp(iocfc) += dbgsz;
	}
}
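/*
 * The carving order above must stay in lockstep with the sizing done
 * in bfa_iocfc_meminfo() below: two cachelines of shadow CI/PI per CQ,
 * then the cacheline-rounded config-info page, then the
 * cacheline-rounded config-response page.
 */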
/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->enable_comp);
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/**
 * configure queue registers from firmware response
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}

static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	/* num_cqs is a single-byte field and needs no byte swap */
	fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Re-configure resources as learnt from firmware
	 */
	bfa_iocfc_res_recfg(bfa, fwcfg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	/*
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT) {
		if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
			bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
	} else {
		if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
				     bfa_iocfc_enable_cb, bfa);
		bfa_iocfc_start_submod(bfa);
	}
}

void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}
/* Fabric Assigned Address specific functions */

/*
 * Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
	u32 card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		if (!bfa_ioc_is_acq_addr(&bfa->ioc))
			return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_en_dis_s faa_enable_req;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	bfa_status_t status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
		return BFA_STATUS_FAA_ENABLED;

	if (bfa_fcport_is_trunk_enabled(bfa))
		return BFA_STATUS_ERROR_TRUNK_ENABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
			  sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
		void *cbarg)
{
	struct bfi_faa_en_dis_s faa_disable_req;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	bfa_status_t status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
		return BFA_STATUS_FAA_DISABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
			  sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
	      bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s faa_attr_req;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	bfa_status_t status;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
			  sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}

/*
 * FAA enable response
 */
static void
bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
		     struct bfi_faa_en_dis_rsp_s *rsp)
{
	void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
	bfa_status_t status = rsp->status;

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * FAA disable response
 */
static void
bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
		      struct bfi_faa_en_dis_rsp_s *rsp)
{
	void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
	bfa_status_t status = rsp->status;

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		    bfi_faa_query_rsp_t *rsp)
{
	void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}
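/*
 * All three FAA requests above follow the same asynchronous pattern:
 * validate the IOC state, reject overlapping requests via the single
 * faa_args.busy flag, fire a mailbox command, and complete through the
 * saved faa_cbfn when the matching *_RSP message arrives in
 * bfa_iocfc_isr(). Only one FAA operation can be in flight at a time.
 */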
/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status == BFA_STATUS_FAA_ACQ_ADDR) {
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
			     bfa_iocfc_init_cb, bfa);
		return;
	}

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
				     bfa_iocfc_enable_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
	bfa_dconf_modinit(bfa);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}


/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32 dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				  per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				  per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC */
	bfa_mem_kva_setup(meminfo, iocfc_kva,
			  ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}
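/*
 * Worked example of the IOCFC DMA sizing above, with the default
 * config (DEF_CFG_NUM_CQS = 4):
 *
 *	dm_len = 4 * 2 * BFA_CACHELINE_SZ	(shadow CI/PI)
 *	       + roundup(sizeof(struct bfi_iocfc_cfg_s), cacheline)
 *	       + roundup(sizeof(struct bfi_iocfc_cfgrsp_s), cacheline)
 *
 * which is exactly what bfa_iocfc_mem_claim() carves back out, in the
 * same order.
 */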
/*
 * IOCFC attach: set up IOC callbacks, claim memory, and initialize the
 * request/response queue infrastructure.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}

/*
 * Kick off IOC enable; configuration continues in
 * bfa_iocfc_enable_cbfn() once the IOC comes up.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->queue_process = BFA_FALSE;
	bfa_dconf_modexit(bfa);
	if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
		bfa_ioc_disable(&bfa->ioc);
}

void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
		bfa_faa_enable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
		bfa_faa_disable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
		be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
		be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
		be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
		be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}

bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}

void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}

/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
	bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}


bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}
/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library.
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @param[in] bfa -	pointer to the bfa structure, used while fetching the
 *			dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		    struct bfa_s *bfa)
{
	int i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* dma info setup */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
	bfa_mem_dma_setup(meminfo, phy_dma,
			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
}
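/*
 * Driver-side sketch (illustrative; the real allocator lives in the
 * bfad layer, and error unwinding is elided): after
 * bfa_cfg_get_meminfo() fills @meminfo, walk the DMA and KVA element
 * queues and satisfy each request. "dev" is a hypothetical struct
 * device for the HBA's PCI function.
 */
static int __maybe_unused
bfa_meminfo_alloc_sketch(struct device *dev, struct bfa_meminfo_s *meminfo)
{
	struct bfa_mem_dma_s *dma_elem;
	struct bfa_mem_kva_s *kva_elem;

	list_for_each_entry(dma_elem, &meminfo->dma_info.qe, qe) {
		dma_addr_t pa;

		dma_elem->kva = dma_alloc_coherent(dev, dma_elem->mem_len,
						   &pa, GFP_KERNEL);
		if (!dma_elem->kva)
			return -ENOMEM;
		dma_elem->dma = pa;
		memset(dma_elem->kva, 0, dma_elem->mem_len);
	}

	list_for_each_entry(kva_elem, &meminfo->kva_info.qe, qe) {
		kva_elem->kva = vzalloc(kva_elem->mem_len);
		if (!kva_elem->kva)
			return -ENOMEM;
	}

	return 0;
}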
/*
 * Use this function to attach the driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in the bfa_init() call).
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library.
 *
 * @param[out] bfa	Pointer to bfa_t.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_diag_attach(bfa);
	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
}

/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);
	bfa_ioc_detach(&bfa->ioc);
}

void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;
	bfa_cb_cbfn_status_t cbfn;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		if (hcb_qe->pre_rmv) {
			/* qe is invalid after return, dequeue before cbfn() */
			list_del(qe);
			cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
			cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
		} else
			hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		WARN_ON(hcb_qe->pre_rmv);
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}

void
bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
{
	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
		if (bfa->iocfc.cfgdone == BFA_TRUE)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
	}
}
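/*
 * Driver-layer sketch (illustrative; "drv_lock" is a hypothetical
 * stand-in for whatever lock the bfad layer uses to serialize access
 * to bfa): completions are spliced off under the lock with
 * bfa_comp_deq(), then processed without it, since the queued
 * callbacks may re-enter bfa.
 */
static void __maybe_unused
bfa_comp_drain_sketch(struct bfa_s *bfa, spinlock_t *drv_lock)
{
	struct list_head comp_q;
	unsigned long flags;

	spin_lock_irqsave(drv_lock, flags);
	bfa_comp_deq(bfa, &comp_q);
	spin_unlock_irqrestore(drv_lock, flags);

	bfa_comp_process(bfa, &comp_q);
}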
/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}

/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}

void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}