/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfa_modules.h"
#include "bfi_ctreg.h"
#include "bfad_drv.h"

BFA_TRC_FILE(HAL, CORE);

/**
 * BFA IOC FC related definitions
 */

/**
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};

#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16

/**
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/**
 * BFA Interrupt handling functions
 */
static void
bfa_msix_errint(struct bfa_s *bfa, u32 intr)
{
	bfa_ioc_error_isr(&bfa->ioc);
}

static void
bfa_msix_lpu(struct bfa_s *bfa)
{
	bfa_ioc_mbox_isr(&bfa->ioc);
}

static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/**
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}

/**
 *  hal_intr_api
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/**
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_RME_Q0 << queue))
			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/**
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr &
		    (__HFN_INT_CPE_Q0 << queue))
			bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

void
bfa_intx_enable(struct bfa_s *bfa)
{
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask);
}

void
bfa_intx_disable(struct bfa_s *bfa)
{
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
}

void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 intr_unmask;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_install(bfa);
	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
		       __HFN_INT_LL_HALT);

	if (pci_func == 0)
		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				__HFN_INT_MBOX_LPU0);
	else
		intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				__HFN_INT_MBOX_LPU1);

	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
	bfa->iocfc.intr_mask = ~intr_unmask;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
	bfa_msix_uninstall(bfa);
}

void
bfa_msix_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);

	/**
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	bfa_assert(0);
	bfa_trc_stop(bfa->trcmod);
}

void
bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa_trc_fp(bfa, qid);

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	bfa_trc_fp(bfa, ci);
	bfa_trc_fp(bfa, pi);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, qid, ci);
			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);

			bfa_isrs[m->mhdr.msg_class] (bfa, m);

			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/**
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
	mmiowb();

	/**
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;

	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);

	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
		bfa_msix_lpu(bfa);

	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);

	if (intr) {
		if (intr & __HFN_INT_LL_HALT) {
			/**
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
		}

		if (intr & __HFN_INT_ERR_PSS) {
			/**
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared, so the driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = bfa_reg_read(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			curr_value &= __PSS_ERR_STATUS_SET;
			bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
				      curr_value);
		}

		bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
		bfa_msix_errint(bfa, intr);
	}
}

void
bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
{
	bfa_isrs[mc] = isr_func;
}

/**
 * BFA IOC FC related functions
 */

/**
 *  hal_ioc_pvt BFA IOC private functions
 */

static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	int i, per_reqq_sz, per_rspq_sz;

	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	/*
	 * Calculate CQ size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		*dm_len = *dm_len + per_reqq_sz;
		*dm_len = *dm_len + per_rspq_sz;
	}

	/*
	 * Calculate Shadow CI/PI size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++)
		*dm_len += (2 * BFA_CACHELINE_SZ);
}

static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
}

/**
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/**
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

	/**
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_rspq_elems);
	}

	/**
	 * Enable interrupt coalescing if it is the driver init path
	 * and not the ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/**
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	bfa_os_assign(iocfc->cfg, *cfg);

	/**
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;
	int i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	int dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += bfa_ioc_meminfo();
	dm_pa += bfa_ioc_meminfo();

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		bfa_os_memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		bfa_os_memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}

/**
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->rme_process = BFA_TRUE;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}

/**
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	fwcfg->num_cqs = fwcfg->num_cqs;
	fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
	fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
	fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/**
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else
		bfa_iocfc_start_submod(bfa);
}

void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/**
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}

/**
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}

/**
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->rme_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}

/**
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}

/**
 *  hal_ioc_public
 */

/**
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		  u32 *dm_len)
{
	/* dma memory for IOC */
	*dm_len += bfa_ioc_meminfo();

	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
	bfa_iocfc_cqs_sz(cfg, dm_len);
	*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
}

/**
 * Attach the IOC FC sub-module: set up IOC callbacks and chip specific
 * handlers, and claim the DMA/KVA memory carved out for it.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	/**
	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
	 */
	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
		bfa_ioc_set_fcmode(&bfa->ioc);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	bfa_timer_init(&bfa->timer_mod);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}

/**
 * Detach the IOC FC sub-module.
 */
void
bfa_iocfc_detach(struct bfa_s *bfa)
{
	bfa_ioc_detach(&bfa->ioc);
}

/**
 * Kick off IOC FC initialization by enabling the IOC.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}

/**
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}

/**
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		iocfc->cfg_reply = &msg->cfg_reply;
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	default:
		bfa_assert(0);
	}
}

void
bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
{
	bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
}

u64
bfa_adapter_get_id(struct bfa_s *bfa)
{
	return bfa_ioc_get_adid(&bfa->ioc);
}

void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
		bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
		bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
		bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
		bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}

bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
	iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}

void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}

/**
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}

/**
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

void
bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
}

int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}

/**
 *  hal_api
 */

/**
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail, if the cap is out of range compared to pre-defined
 * values within the BFA library
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
{
	int i;
	u32 km_len = 0, dm_len = 0;

	bfa_assert((cfg != NULL) && (meminfo != NULL));

	bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
		BFA_MEM_TYPE_KVA;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
		BFA_MEM_TYPE_DMA;

	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);

	dm_len += bfa_port_meminfo();

	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
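
/**
 * A minimal driver-side sketch, not part of the BFA library, of the
 * bfa_cfg_get_meminfo()/bfa_attach() sequence described above.  The
 * allocation calls and error handling are assumptions for illustration
 * only (unwinding on failure is omitted); a real driver uses its own
 * structures and cleanup paths.
 */
#if 0	/* illustrative only, not compiled */
static bfa_status_t
example_bfa_attach(struct bfa_s *bfa, struct bfad_s *bfad,
		   struct pci_dev *pdev, struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_cfg_s cfg;
	struct bfa_meminfo_s meminfo;
	struct bfa_mem_elem_s *melem;
	dma_addr_t pa;
	int i;

	/* Start from the compiled-in defaults, then query memory needs. */
	bfa_cfg_get_default(&cfg);
	bfa_cfg_get_meminfo(&cfg, &meminfo);

	/* Allocate each memory block and record its starting address. */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo.meminfo + i;
		if (!melem->mem_len)
			continue;
		if (melem->mem_type == BFA_MEM_TYPE_KVA) {
			melem->kva = vmalloc(melem->mem_len);
		} else {	/* BFA_MEM_TYPE_DMA */
			melem->kva = dma_alloc_coherent(&pdev->dev,
					melem->mem_len, &pa, GFP_KERNEL);
			melem->dma = pa;
		}
		if (!melem->kva)
			return BFA_STATUS_FAILED;
	}

	/* Hand the populated meminfo back to the library. */
	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
	return BFA_STATUS_OK;
}
#endif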

/**
 * Use this function to attach the driver instance with the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in bfa_init() call)
 *
 * This call will fail, if the cap is out of range compared to
 * pre-defined values within the BFA library
 *
 * @param[out]	bfa	Pointer to bfa_t.
 * @param[in]	bfad	Opaque handle back to the driver's IOC structure
 * @param[in]	cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in]	meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in]	pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_elem_s *melem;

	bfa->fcs = BFA_FALSE;

	bfa_assert((cfg != NULL) && (meminfo != NULL));

	/**
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	bfa_com_port_attach(bfa, meminfo);
}

/**
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);

	bfa_iocfc_detach(bfa);
}

void
bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod)
{
	bfa->trcmod = trcmod;
}

void
bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
{
	bfa->plog = plog;
}

/**
 * Initialize IOC.
 *
 * This function will return immediately; when the IOC initialization is
 * completed, bfa_cb_init() will be called.
 *
 * @param[in] bfa	instance
 *
 * @return void
 *
 * Special Considerations:
 *
 * @note
 * When this function returns, the driver should register the interrupt service
 * routine(s) and enable the device interrupts. If this is not done,
 * bfa_cb_init() will never get called
 */
void
bfa_init(struct bfa_s *bfa)
{
	bfa_iocfc_init(bfa);
}

/**
 * Use this function to initiate the IOC configuration setup. This function
 * will return immediately.
 *
 * @param[in] bfa	instance
 *
 * @return None
 */
void
bfa_start(struct bfa_s *bfa)
{
	bfa_iocfc_start(bfa);
}
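
/**
 * A minimal driver-side sketch, not part of the BFA library, of the
 * interrupt hookup that the bfa_init() note above expects: register an
 * ISR that funnels INTx interrupts into bfa_intx(), enable device
 * interrupts, and only then kick off initialization.  The request_irq()
 * wiring and function names are assumptions for illustration; the real
 * driver also drains completions via bfa_comp_deq()/bfa_comp_process()
 * and holds its own lock around bfa_intx().
 */
#if 0	/* illustrative only, not compiled */
static irqreturn_t
example_bfa_intx_handler(int irq, void *dev_id)
{
	struct bfa_s *bfa = dev_id;

	if (!bfa_intx(bfa))
		return IRQ_NONE;	/* interrupt was not ours */

	return IRQ_HANDLED;
}

static int
example_bfa_hw_bringup(struct bfa_s *bfa, struct pci_dev *pdev)
{
	int rc;

	rc = request_irq(pdev->irq, example_bfa_intx_handler, IRQF_SHARED,
			 "bfa-example", bfa);
	if (rc)
		return rc;

	bfa_isr_enable(bfa);	/* unmask device interrupts */
	bfa_init(bfa);		/* bfa_cb_init() fires once config completes */

	/*
	 * The driver then waits for bfa_cb_init(BFA_STATUS_OK) before
	 * calling bfa_start() to bring the submodules online.
	 */
	return 0;
}
#endif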

/**
 * Use this function to quiesce the IOC. This function will return
 * immediately; when the IOC is actually stopped, the bfad->comp will be set.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return None
 *
 * Special Considerations:
 * bfad->comp can be set before or after bfa_stop() returns.
 *
 * @note
 * In case of any failure, we could handle it automatically by doing a
 * reset and then succeed the bfa_stop() call.
 */
void
bfa_stop(struct bfa_s *bfa)
{
	bfa_iocfc_stop(bfa);
}

void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}

void
bfa_attach_fcs(struct bfa_s *bfa)
{
	bfa->fcs = BFA_TRUE;
}

/**
 * Periodic timer heart beat from driver
 */
void
bfa_timer_tick(struct bfa_s *bfa)
{
	bfa_timer_beat(&bfa->timer_mod);
}

/**
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}

/**
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}

void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}

void
bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
{
	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
}

/**
 * Retrieve firmware trace information on IOC failure.
 */
bfa_status_t
bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
{
	return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
}

/**
 * Clear the saved firmware trace information of an IOC.
 */
void
bfa_debug_fwsave_clear(struct bfa_s *bfa)
{
	bfa_ioc_debug_fwsave_clear(&bfa->ioc);
}

/**
 * Fetch firmware trace data.
 *
 * @param[in]		bfa	BFA instance
 * @param[out]		trcdata	Firmware trace buffer
 * @param[in,out]	trclen	Firmware trace buffer len
 *
 * @retval BFA_STATUS_OK		Firmware trace is fetched.
 * @retval BFA_STATUS_INPROGRESS	Firmware trace fetch is in progress.
 */
bfa_status_t
bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
{
	return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
}

/**
 * Dump firmware memory.
 *
 * @param[in]		bfa	BFA instance
 * @param[out]		buf	buffer for dump
 * @param[in,out]	offset	smem offset to start read
 * @param[in,out]	buflen	length of buffer
 *
 * @retval BFA_STATUS_OK		Firmware memory is dumped.
 * @retval BFA_STATUS_INPROGRESS	Firmware memory dump is in progress.
 */
bfa_status_t
bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
{
	return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
}

/**
 * Reset hw semaphore & usage cnt regs and initialize.
 */
void
bfa_chip_reset(struct bfa_s *bfa)
{
	bfa_ioc_ownership_reset(&bfa->ioc);
	bfa_ioc_pll_init(&bfa->ioc);
}
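
/**
 * A minimal sketch, not part of the BFA library, showing how a driver
 * might take the defaults from bfa_cfg_get_default() above and overwrite
 * selected entries (e.g. from module parameters) before the
 * bfa_cfg_get_meminfo()/bfa_attach() sequence.  The parameter names are
 * assumptions for illustration only.
 */
#if 0	/* illustrative only, not compiled */
static void
example_bfa_cfg_setup(struct bfa_iocfc_cfg_s *cfg, int user_num_ioim_reqs,
		      bfa_boolean_t user_ioc_recover)
{
	bfa_cfg_get_default(cfg);

	/* Clamp a user-supplied I/O request count to the supported range. */
	if (user_num_ioim_reqs >= BFA_IOIM_MIN &&
	    user_num_ioim_reqs <= BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = user_num_ioim_reqs;

	cfg->drvcfg.ioc_recover = user_ioc_recover;
}
#endif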

/**
 * Fetch firmware statistics data.
 *
 * @param[in]	bfa	BFA instance
 * @param[out]	data	Firmware stats buffer
 *
 * @retval BFA_STATUS_OK	Firmware stats are fetched.
 */
bfa_status_t
bfa_fw_stats_get(struct bfa_s *bfa, void *data)
{
	return bfa_ioc_fw_stats_get(&bfa->ioc, data);
}

bfa_status_t
bfa_fw_stats_clear(struct bfa_s *bfa)
{
	return bfa_ioc_fw_stats_clear(&bfa->ioc);
}