// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_fcdiag_intr,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_FCPORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};


void
__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
{
	int tail = trcm->tail;
	struct bfa_trc_s *trc = &trcm->trc[tail];

	if (trcm->stopped)
		return;

	trc->fileno = (u16) fileno;
	trc->line = (u16) line;
	trc->data.u64 = data;
	trc->timestamp = BFA_TRC_TS(trcm);

	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
	if (trcm->tail == trcm->head)
		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
}

static void
bfa_com_port_attach(struct bfa_s *bfa)
{
	struct bfa_port_s *port = &bfa->modules.port;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);

	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}

/*
 * ablk module attach
 */
static void
bfa_com_ablk_attach(struct bfa_s *bfa)
{
	struct bfa_ablk_s *ablk = &bfa->modules.ablk;
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}

static void
bfa_com_cee_attach(struct bfa_s *bfa)
{
	struct bfa_cee_s *cee = &bfa->modules.cee;
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);

	cee->trcmod = bfa->trcmod;
	bfa_cee_attach(cee, &bfa->ioc, bfa);
	bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
}

static void
bfa_com_sfp_attach(struct bfa_s *bfa)
{
	struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);

	bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
	bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
}

static void
bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_flash_s *flash = BFA_FLASH(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);

	bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_flash_memclaim(flash, flash_dma->kva_curp,
			   flash_dma->dma_curp, mincfg);
}

static void
bfa_com_diag_attach(struct bfa_s *bfa)
{
	struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);

	bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
	bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
}

static void
bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_phy_s *phy = BFA_PHY(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
}

static void
bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_fru_s *fru = BFA_FRU(bfa);
	struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);

	bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg);
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16

/*
 * IOCFC state machine definitions/declarations
 */
bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, operational,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_write,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_failed,
		   struct bfa_iocfc_s, enum iocfc_event);

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_start_submod(struct bfa_s *bfa);
static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
static void bfa_iocfc_send_cfg(void *bfa_arg);
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);

static void
bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
{
}

static void
bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_INIT:
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modinit(iocfc->bfa);
}

static void
bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

static void
bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
{
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_START:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_fcport_init(iocfc->bfa);
	bfa_iocfc_start_submod(iocfc->bfa);
}

static void
bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modexit(iocfc->bfa);
}

static void
bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_isr_disable(iocfc->bfa);
		bfa_iocfc_disable_submod(iocfc->bfa);
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
			     bfa_iocfc_stop_cb, iocfc->bfa);
		break;

	case IOCFC_E_IOC_ENABLED:
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_CFG_DONE:
		break;

	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);

		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

static void
bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
		break;
	case IOCFC_E_IOC_ENABLED:
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_CFG_DONE:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
		     bfa_iocfc_disable_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
}

static void
bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;
	case IOCFC_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_ioc_disable(&iocfc->bfa->ioc);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
			     bfa_iocfc_disable_cb, iocfc->bfa);
		break;
	case IOCFC_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

static bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;
	bfa_boolean_t ret;

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	ret = (ci != pi);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * acknowledge RME completions and update CI
	 */
	bfa_isr_rspq_ack(bfa, qid, ci);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);

	return ret;
}

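/*
 * Editor's note, a short worked example of the ring arithmetic above
 * (the exact CQ_INCR() definition lives in the bfa headers and is
 * assumed here to be a modulo/mask increment over a power-of-two
 * queue depth): with the default of 64 response queue elements
 * (DEF_CFG_NUM_RSPQ_ELEMS),
 *
 *	ci = 63;
 *	CQ_INCR(ci, 64);	// ci wraps back to 0
 *
 * bfa_isr_rspq() thus drains every element between the consumer index
 * CI and the producer index PI, acks the hardware with the final CI,
 * and returns BFA_TRUE only if at least one completion was consumed.
 */
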
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return;

	bfa_msix_lpu_err(bfa, intr);
}

bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;
	bfa_boolean_t rspq_comp = BFA_FALSE;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * Unconditional RME completion queue interrupt
	 */
	if (bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			if (bfa_isr_rspq(bfa, queue))
				rspq_comp = BFA_TRUE;
	}

	if (!intr)
		return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	if (bfa->intr_enabled)
		bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int port_id = bfa_ioc_portid(&bfa->ioc);

	bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc));
	bfa_trc(bfa, port_id);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= port_id == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= port_id == 0 ?
			__HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);

	/*
	 * Set the flag indicating successful enabling of interrupts
	 */
	bfa->intr_enabled = BFA_TRUE;
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa->intr_enabled = BFA_FALSE;
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}

void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
			(intr & __HFN_INT_LL_HALT) : 0;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If the LL_HALT bit is set, the FW Init Halt LL Port
			 * Register needs to be cleared as well so the
			 * Interrupt Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * The ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared, so the driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
			       bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}

/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa,
					      cfg->fwcfg.num_ioim_reqs));
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing on the driver init path, but not
	 * on the ioc disable/enable path.
	 */
	if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8 *dm_kva = NULL;
	u64 dm_pa = 0;
	int i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
	bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));

	bfa_fcport_start(bfa);
	bfa_uf_start(bfa);
	/*
	 * bfa_init() with flash read is complete. now invalidate the stale
	 * content of the lun mask, such as unit attention, rp tag and lp tag.
	 */
	bfa_ioim_lm_init(BFA_FCP_MOD(bfa)->bfa);

	bfa->iocfc.submod_enabled = BFA_TRUE;
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	if (bfa->iocfc.submod_enabled == BFA_FALSE)
		return;

	bfa_fcdiag_iocdisable(bfa);
	bfa_fcport_iocdisable(bfa);
	bfa_fcxp_iocdisable(bfa);
	bfa_lps_iocdisable(bfa);
	bfa_rport_iocdisable(bfa);
	bfa_fcp_iocdisable(bfa);
	bfa_dconf_iocdisable(bfa);

	bfa->iocfc.submod_enabled = BFA_FALSE;
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete)
		bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
}

static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->enable_comp);
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/*
 * configure queue registers from firmware response
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}

static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;

	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs),
			  fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Re-configure resources as learnt from Firmware
	 */
	bfa_iocfc_res_recfg(bfa, fwcfg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
		bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
		bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
		bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
	}
}

void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/*
 * Process FAA pwwn msg from fw.
 */
static void
bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
	cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;

	bfa->ioc.attr->pwwn = msg->pwwn;
	bfa->ioc.attr->nwwn = msg->nwwn;
	bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
}

/* Fabric Assigned Address specific functions */

/*
 * Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
	u32 card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
	      bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s faa_attr_req;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	bfa_status_t status;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
			  sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}

/*
 * FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		    bfi_faa_query_rsp_t *rsp)
{
	void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}

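/*
 * Editor's note, a usage sketch: bfa_faa_query() completes
 * asynchronously through the supplied callback, as wired up by
 * bfa_faa_query_reply() above. The names my_faa_done and my_attr
 * below are illustrative, not part of this driver:
 *
 *	static void my_faa_done(void *cbarg, enum bfa_status status)
 *	{
 *		struct bfa_faa_attr_s *attr = cbarg;
 *
 *		if (status == BFA_STATUS_OK)
 *			... attr->faa and attr->faa_state are now valid ...
 *	}
 *
 *	if (bfa_faa_query(bfa, &my_attr, my_faa_done, &my_attr) !=
 *	    BFA_STATUS_OK)
 *		... IOC not ready, unsupported, or a query in flight ...
 */
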
/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status == BFA_STATUS_OK)
		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
	else
		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32 dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				  per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				  per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC */
	bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
}

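/*
 * Editor's note, a worked example of the sizing above. The constants
 * are assumptions taken from the bfi/bfa headers (BFI_LMSG_SZ assumed
 * 128 bytes, BFA_DMA_ALIGN_SZ assumed 256): with the defaults
 * DEF_CFG_NUM_REQQ_ELEMS = 256 and DEF_CFG_NUM_RSPQ_ELEMS = 64,
 *
 *	per_reqq_sz = BFA_ROUNDUP(256 * 128, 256) = 32768 bytes
 *	per_rspq_sz = BFA_ROUNDUP(64 * 128, 256)  = 8192 bytes
 *
 * and each of the num_cqs queue pairs additionally contributes two
 * cache lines of IOCFC DMA memory for its shadow CI/PI words.
 */
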
/*
 * Attach the IOCFC module: set up IOC callbacks, claim memory and
 * move the state machine to its initial (stopped) state.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);

	bfa->iocfc.cb_reqd = BFA_FALSE;
	bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa->iocfc.submod_enabled = BFA_FALSE;

	bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
}

/*
 * Kick off IOC FC initialization.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
}

void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_ADDR_MSG:
		bfa_iocfc_process_faa_addr(bfa,
					   (struct bfi_faa_addr_msg_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}

void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
		be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
		be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
		be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
		be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}

bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}

void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}
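/*
 * Editor's note, a usage sketch for bfa_iocfc_israttr_set(): a driver
 * can retune interrupt coalescing at runtime. The values below are
 * illustrative only:
 *
 *	struct bfa_iocfc_intr_attr_s attr;
 *
 *	attr.coalesce = BFA_TRUE;
 *	attr.delay = 1125;	// usecs, illustrative
 *	attr.latency = 225;	// usecs, illustrative
 *
 *	if (bfa_iocfc_israttr_set(bfa, &attr) == BFA_STATUS_DEVBUSY)
 *		... request queue full, retry later ...
 *
 * If the IOC is not yet operational the new values are only cached in
 * cfginfo and are carried down with the next BFI_IOCFC_H2I_CFG_REQ.
 */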
/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.cb_reqd = BFA_TRUE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");

	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) &&
		bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}


/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required by the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to pre-defined
 * values within the BFA library.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *		    its configuration in this structure.
 *		    The default values for struct bfa_iocfc_cfg_s can be
 *		    fetched using bfa_cfg_get_default() API.
 *
 *		    If cap's boundary check fails, the library will use
 *		    the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *		    indicates the memory type (see bfa_mem_type_t) and
 *		    amount of memory required.
 *
 *		    Driver should allocate the memory, populate the
 *		    starting address for each block and provide the same
 *		    structure as input parameter to bfa_attach() call.
 *
 * @param[in] bfa - pointer to the bfa structure, used while fetching the
 *		    dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		    struct bfa_s *bfa)
{
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
	struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	bfa_iocfc_meminfo(cfg, meminfo, bfa);
	bfa_sgpg_meminfo(cfg, meminfo, bfa);
	bfa_fcport_meminfo(cfg, meminfo, bfa);
	bfa_fcxp_meminfo(cfg, meminfo, bfa);
	bfa_lps_meminfo(cfg, meminfo, bfa);
	bfa_uf_meminfo(cfg, meminfo, bfa);
	bfa_rport_meminfo(cfg, meminfo, bfa);
	bfa_fcp_meminfo(cfg, meminfo, bfa);
	bfa_dconf_meminfo(cfg, meminfo, bfa);

	/* dma info setup */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
	bfa_mem_dma_setup(meminfo, phy_dma,
			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, fru_dma,
			  bfa_fru_meminfo(cfg->drvcfg.min_cfg));
}

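/*
 * Editor's note, a sketch of the typical driver-side bring-up order,
 * modeled on how the bfad layer uses this API. Allocation of the
 * blocks queued in meminfo is elided, and bfad/pcidev are the
 * caller's own objects:
 *
 *	struct bfa_iocfc_cfg_s cfg;
 *	struct bfa_meminfo_s meminfo;
 *
 *	bfa_cfg_get_default(&cfg);
 *	... optionally overwrite cfg.fwcfg / cfg.drvcfg entries ...
 *	bfa_cfg_get_meminfo(&cfg, &meminfo, bfa);
 *	... allocate every DMA and KVA element queued in meminfo ...
 *	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 *	bfa_iocfc_init(bfa);
 *	...
 *	bfa_iocfc_start(bfa);
 *
 * Per the doc comments in this file, the last two steps correspond to
 * the bfa_init()/bfa_start() calls the comments refer to.
 */
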
/*
 * Use this function to attach the driver instance with the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in bfa_init() call)
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library
 *
 * @param[out] bfa	Pointer to bfa_t.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
	bfa_fcdiag_attach(bfa, bfad, cfg, pcidev);
	bfa_sgpg_attach(bfa, bfad, cfg, pcidev);
	bfa_fcport_attach(bfa, bfad, cfg, pcidev);
	bfa_fcxp_attach(bfa, bfad, cfg, pcidev);
	bfa_lps_attach(bfa, bfad, cfg, pcidev);
	bfa_uf_attach(bfa, bfad, cfg, pcidev);
	bfa_rport_attach(bfa, bfad, cfg, pcidev);
	bfa_fcp_attach(bfa, bfad, cfg, pcidev);
	bfa_dconf_attach(bfa, bfad, cfg);
	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_diag_attach(bfa);
	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg);
}

/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	bfa_ioc_detach(&bfa->ioc);
}

void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		if (hcb_qe->pre_rmv) {
			/* qe is invalid after return, dequeue before cbfn() */
			list_del(qe);
			hcb_qe->cbfn_status(hcb_qe->cbarg, hcb_qe->fw_status);
		} else
			hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		WARN_ON(hcb_qe->pre_rmv);
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}

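/*
 * Editor's note, a usage sketch: bfa_comp_deq()/bfa_comp_process()
 * exist so a driver can drain queued completions outside its
 * spinlock, in the style of the bfad layer. The lock name below is
 * illustrative:
 *
 *	struct list_head doneq;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&bfad->bfad_lock, flags);
 *	bfa_comp_deq(bfa, &doneq);
 *	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 *
 *	bfa_comp_process(bfa, &doneq);	// callbacks run unlocked
 *
 * bfa_comp_free() is the matching teardown path: it discards the
 * dequeued entries by invoking each callback with BFA_FALSE.
 */
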
/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}