1 /* 2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 3 * All rights reserved 4 * www.brocade.com 5 * 6 * Linux driver for Brocade Fibre Channel Host Bus Adapter. 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License (GPL) Version 2 as 10 * published by the Free Software Foundation 11 * 12 * This program is distributed in the hope that it will be useful, but 13 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * General Public License for more details. 16 */ 17 18 /** 19 * bfad.c Linux driver PCI interface module. 20 */ 21 22 #include <linux/slab.h> 23 #include <linux/module.h> 24 #include <linux/kthread.h> 25 #include "bfad_drv.h" 26 #include "bfad_im.h" 27 #include "bfad_tm.h" 28 #include "bfad_ipfc.h" 29 #include "bfad_trcmod.h" 30 #include <fcb/bfa_fcb_vf.h> 31 #include <fcb/bfa_fcb_rport.h> 32 #include <fcb/bfa_fcb_port.h> 33 #include <fcb/bfa_fcb.h> 34 35 BFA_TRC_FILE(LDRV, BFAD); 36 static DEFINE_MUTEX(bfad_mutex); 37 LIST_HEAD(bfad_list); 38 static int bfad_inst; 39 int bfad_supported_fc4s; 40 41 static char *host_name; 42 static char *os_name; 43 static char *os_patch; 44 static int num_rports; 45 static int num_ios; 46 static int num_tms; 47 static int num_fcxps; 48 static int num_ufbufs; 49 static int reqq_size; 50 static int rspq_size; 51 static int num_sgpgs; 52 static int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; 53 static int bfa_io_max_sge = BFAD_IO_MAX_SGE; 54 static int log_level = BFA_LOG_WARNING; 55 static int ioc_auto_recover = BFA_TRUE; 56 static int ipfc_enable = BFA_FALSE; 57 static int ipfc_mtu = -1; 58 static int fdmi_enable = BFA_TRUE; 59 int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; 60 int bfa_linkup_delay = -1; 61 62 module_param(os_name, charp, S_IRUGO | S_IWUSR); 63 module_param(os_patch, charp, S_IRUGO | S_IWUSR); 64 module_param(host_name, charp, S_IRUGO | S_IWUSR); 65 module_param(num_rports, int, S_IRUGO | S_IWUSR); 66 module_param(num_ios, int, S_IRUGO | S_IWUSR); 67 module_param(num_tms, int, S_IRUGO | S_IWUSR); 68 module_param(num_fcxps, int, S_IRUGO | S_IWUSR); 69 module_param(num_ufbufs, int, S_IRUGO | S_IWUSR); 70 module_param(reqq_size, int, S_IRUGO | S_IWUSR); 71 module_param(rspq_size, int, S_IRUGO | S_IWUSR); 72 module_param(num_sgpgs, int, S_IRUGO | S_IWUSR); 73 module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR); 74 module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR); 75 module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR); 76 module_param(log_level, int, S_IRUGO | S_IWUSR); 77 module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); 78 module_param(ipfc_enable, int, S_IRUGO | S_IWUSR); 79 module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR); 80 module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); 81 module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); 82 83 /* 84 * Stores the module parm num_sgpgs value; 85 * used to reset for bfad next instance. 86 */ 87 static int num_sgpgs_parm; 88 89 static bfa_status_t 90 bfad_fc4_probe(struct bfad_s *bfad) 91 { 92 int rc; 93 94 rc = bfad_im_probe(bfad); 95 if (rc != BFA_STATUS_OK) 96 goto ext; 97 98 bfad_tm_probe(bfad); 99 100 if (ipfc_enable) 101 bfad_ipfc_probe(bfad); 102 103 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; 104 ext: 105 return rc; 106 } 107 108 static void 109 bfad_fc4_probe_undo(struct bfad_s *bfad) 110 { 111 bfad_im_probe_undo(bfad); 112 bfad_tm_probe_undo(bfad); 113 if (ipfc_enable) 114 bfad_ipfc_probe_undo(bfad); 115 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; 116 } 117 118 static void 119 bfad_fc4_probe_post(struct bfad_s *bfad) 120 { 121 if (bfad->im) 122 bfad_im_probe_post(bfad->im); 123 124 bfad_tm_probe_post(bfad); 125 if (ipfc_enable) 126 bfad_ipfc_probe_post(bfad); 127 } 128 129 static bfa_status_t 130 bfad_fc4_port_new(struct bfad_s *bfad, struct bfad_port_s *port, int roles) 131 { 132 int rc = BFA_STATUS_FAILED; 133 134 if (roles & BFA_PORT_ROLE_FCP_IM) 135 rc = bfad_im_port_new(bfad, port); 136 if (rc != BFA_STATUS_OK) 137 goto ext; 138 139 if (roles & BFA_PORT_ROLE_FCP_TM) 140 rc = bfad_tm_port_new(bfad, port); 141 if (rc != BFA_STATUS_OK) 142 goto ext; 143 144 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) 145 rc = bfad_ipfc_port_new(bfad, port, port->pvb_type); 146 ext: 147 return rc; 148 } 149 150 static void 151 bfad_fc4_port_delete(struct bfad_s *bfad, struct bfad_port_s *port, int roles) 152 { 153 if (roles & BFA_PORT_ROLE_FCP_IM) 154 bfad_im_port_delete(bfad, port); 155 156 if (roles & BFA_PORT_ROLE_FCP_TM) 157 bfad_tm_port_delete(bfad, port); 158 159 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) 160 bfad_ipfc_port_delete(bfad, port); 161 } 162 163 /** 164 * BFA callbacks 165 */ 166 void 167 bfad_hcb_comp(void *arg, bfa_status_t status) 168 { 169 struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg; 170 171 fcomp->status = status; 172 complete(&fcomp->comp); 173 } 174 175 /** 176 * bfa_init callback 177 */ 178 void 179 bfa_cb_init(void *drv, bfa_status_t init_status) 180 { 181 struct bfad_s *bfad = drv; 182 183 if (init_status == BFA_STATUS_OK) { 184 bfad->bfad_flags |= BFAD_HAL_INIT_DONE; 185 186 /* If BFAD_HAL_INIT_FAIL flag is set: 187 * Wake up the kernel thread to start 188 * the bfad operations after HAL init done 189 */ 190 if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) { 191 bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL; 192 wake_up_process(bfad->bfad_tsk); 193 } 194 } 195 196 complete(&bfad->comp); 197 } 198 199 200 201 /** 202 * BFA_FCS callbacks 203 */ 204 static struct bfad_port_s * 205 bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv, 206 struct bfad_vport_s *vp_drv) 207 { 208 return (vp_drv) ? (&(vp_drv)->drv_port) 209 : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport)); 210 } 211 212 struct bfad_port_s * 213 bfa_fcb_port_new(struct bfad_s *bfad, struct bfa_fcs_port_s *port, 214 enum bfa_port_role roles, struct bfad_vf_s *vf_drv, 215 struct bfad_vport_s *vp_drv) 216 { 217 bfa_status_t rc; 218 struct bfad_port_s *port_drv; 219 220 if (!vp_drv && !vf_drv) { 221 port_drv = &bfad->pport; 222 port_drv->pvb_type = BFAD_PORT_PHYS_BASE; 223 } else if (!vp_drv && vf_drv) { 224 port_drv = &vf_drv->base_port; 225 port_drv->pvb_type = BFAD_PORT_VF_BASE; 226 } else if (vp_drv && !vf_drv) { 227 port_drv = &vp_drv->drv_port; 228 port_drv->pvb_type = BFAD_PORT_PHYS_VPORT; 229 } else { 230 port_drv = &vp_drv->drv_port; 231 port_drv->pvb_type = BFAD_PORT_VF_VPORT; 232 } 233 234 port_drv->fcs_port = port; 235 port_drv->roles = roles; 236 rc = bfad_fc4_port_new(bfad, port_drv, roles); 237 if (rc != BFA_STATUS_OK) { 238 bfad_fc4_port_delete(bfad, port_drv, roles); 239 port_drv = NULL; 240 } 241 242 return port_drv; 243 } 244 245 void 246 bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles, 247 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) 248 { 249 struct bfad_port_s *port_drv; 250 251 /* 252 * this will be only called from rmmod context 253 */ 254 if (vp_drv && !vp_drv->comp_del) { 255 port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv); 256 bfa_trc(bfad, roles); 257 bfad_fc4_port_delete(bfad, port_drv, roles); 258 } 259 } 260 261 void 262 bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles, 263 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) 264 { 265 struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv); 266 267 if (roles & BFA_PORT_ROLE_FCP_IM) 268 bfad_im_port_online(bfad, port_drv); 269 270 if (roles & BFA_PORT_ROLE_FCP_TM) 271 bfad_tm_port_online(bfad, port_drv); 272 273 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) 274 bfad_ipfc_port_online(bfad, port_drv); 275 276 bfad->bfad_flags |= BFAD_PORT_ONLINE; 277 } 278 279 void 280 bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles, 281 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) 282 { 283 struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv); 284 285 if (roles & BFA_PORT_ROLE_FCP_IM) 286 bfad_im_port_offline(bfad, port_drv); 287 288 if (roles & BFA_PORT_ROLE_FCP_TM) 289 bfad_tm_port_offline(bfad, port_drv); 290 291 if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) 292 bfad_ipfc_port_offline(bfad, port_drv); 293 } 294 295 void 296 bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv) 297 { 298 if (vport_drv->comp_del) { 299 complete(vport_drv->comp_del); 300 return; 301 } 302 303 kfree(vport_drv); 304 } 305 306 /** 307 * FCS RPORT alloc callback, after successful PLOGI by FCS 308 */ 309 bfa_status_t 310 bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport, 311 struct bfad_rport_s **rport_drv) 312 { 313 bfa_status_t rc = BFA_STATUS_OK; 314 315 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC); 316 if (*rport_drv == NULL) { 317 rc = BFA_STATUS_ENOMEM; 318 goto ext; 319 } 320 321 *rport = &(*rport_drv)->fcs_rport; 322 323 ext: 324 return rc; 325 } 326 327 328 329 void 330 bfad_hal_mem_release(struct bfad_s *bfad) 331 { 332 int i; 333 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; 334 struct bfa_mem_elem_s *meminfo_elem; 335 336 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { 337 meminfo_elem = &hal_meminfo->meminfo[i]; 338 if (meminfo_elem->kva != NULL) { 339 switch (meminfo_elem->mem_type) { 340 case BFA_MEM_TYPE_KVA: 341 vfree(meminfo_elem->kva); 342 break; 343 case BFA_MEM_TYPE_DMA: 344 dma_free_coherent(&bfad->pcidev->dev, 345 meminfo_elem->mem_len, 346 meminfo_elem->kva, 347 (dma_addr_t) meminfo_elem->dma); 348 break; 349 default: 350 bfa_assert(0); 351 break; 352 } 353 } 354 } 355 356 memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s)); 357 } 358 359 void 360 bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg) 361 { 362 if (num_rports > 0) 363 bfa_cfg->fwcfg.num_rports = num_rports; 364 if (num_ios > 0) 365 bfa_cfg->fwcfg.num_ioim_reqs = num_ios; 366 if (num_tms > 0) 367 bfa_cfg->fwcfg.num_tskim_reqs = num_tms; 368 if (num_fcxps > 0) 369 bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps; 370 if (num_ufbufs > 0) 371 bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs; 372 if (reqq_size > 0) 373 bfa_cfg->drvcfg.num_reqq_elems = reqq_size; 374 if (rspq_size > 0) 375 bfa_cfg->drvcfg.num_rspq_elems = rspq_size; 376 if (num_sgpgs > 0) 377 bfa_cfg->drvcfg.num_sgpgs = num_sgpgs; 378 379 /* 380 * populate the hal values back to the driver for sysfs use. 381 * otherwise, the default values will be shown as 0 in sysfs 382 */ 383 num_rports = bfa_cfg->fwcfg.num_rports; 384 num_ios = bfa_cfg->fwcfg.num_ioim_reqs; 385 num_tms = bfa_cfg->fwcfg.num_tskim_reqs; 386 num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs; 387 num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs; 388 reqq_size = bfa_cfg->drvcfg.num_reqq_elems; 389 rspq_size = bfa_cfg->drvcfg.num_rspq_elems; 390 num_sgpgs = bfa_cfg->drvcfg.num_sgpgs; 391 } 392 393 bfa_status_t 394 bfad_hal_mem_alloc(struct bfad_s *bfad) 395 { 396 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; 397 struct bfa_mem_elem_s *meminfo_elem; 398 bfa_status_t rc = BFA_STATUS_OK; 399 dma_addr_t phys_addr; 400 int retry_count = 0; 401 int reset_value = 1; 402 int min_num_sgpgs = 512; 403 void *kva; 404 int i; 405 406 bfa_cfg_get_default(&bfad->ioc_cfg); 407 408 retry: 409 bfad_update_hal_cfg(&bfad->ioc_cfg); 410 bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs; 411 bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo); 412 413 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { 414 meminfo_elem = &hal_meminfo->meminfo[i]; 415 switch (meminfo_elem->mem_type) { 416 case BFA_MEM_TYPE_KVA: 417 kva = vmalloc(meminfo_elem->mem_len); 418 if (kva == NULL) { 419 bfad_hal_mem_release(bfad); 420 rc = BFA_STATUS_ENOMEM; 421 goto ext; 422 } 423 memset(kva, 0, meminfo_elem->mem_len); 424 meminfo_elem->kva = kva; 425 break; 426 case BFA_MEM_TYPE_DMA: 427 kva = dma_alloc_coherent(&bfad->pcidev->dev, 428 meminfo_elem->mem_len, 429 &phys_addr, GFP_KERNEL); 430 if (kva == NULL) { 431 bfad_hal_mem_release(bfad); 432 /* 433 * If we cannot allocate with default 434 * num_sgpages try with half the value. 435 */ 436 if (num_sgpgs > min_num_sgpgs) { 437 printk(KERN_INFO "bfad[%d]: memory" 438 " allocation failed with" 439 " num_sgpgs: %d\n", 440 bfad->inst_no, num_sgpgs); 441 nextLowerInt(&num_sgpgs); 442 printk(KERN_INFO "bfad[%d]: trying to" 443 " allocate memory with" 444 " num_sgpgs: %d\n", 445 bfad->inst_no, num_sgpgs); 446 retry_count++; 447 goto retry; 448 } else { 449 if (num_sgpgs_parm > 0) 450 num_sgpgs = num_sgpgs_parm; 451 else { 452 reset_value = 453 (1 << retry_count); 454 num_sgpgs *= reset_value; 455 } 456 rc = BFA_STATUS_ENOMEM; 457 goto ext; 458 } 459 } 460 461 if (num_sgpgs_parm > 0) 462 num_sgpgs = num_sgpgs_parm; 463 else { 464 reset_value = (1 << retry_count); 465 num_sgpgs *= reset_value; 466 } 467 468 memset(kva, 0, meminfo_elem->mem_len); 469 meminfo_elem->kva = kva; 470 meminfo_elem->dma = phys_addr; 471 break; 472 default: 473 break; 474 475 } 476 } 477 ext: 478 return rc; 479 } 480 481 /** 482 * Create a vport under a vf. 483 */ 484 bfa_status_t 485 bfad_vport_create(struct bfad_s *bfad, u16 vf_id, 486 struct bfa_port_cfg_s *port_cfg) 487 { 488 struct bfad_vport_s *vport; 489 int rc = BFA_STATUS_OK; 490 unsigned long flags; 491 struct completion fcomp; 492 493 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); 494 if (!vport) { 495 rc = BFA_STATUS_ENOMEM; 496 goto ext; 497 } 498 499 vport->drv_port.bfad = bfad; 500 spin_lock_irqsave(&bfad->bfad_lock, flags); 501 rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id, 502 port_cfg, vport); 503 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 504 505 if (rc != BFA_STATUS_OK) 506 goto ext_free_vport; 507 508 if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) { 509 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port); 510 if (rc != BFA_STATUS_OK) 511 goto ext_free_fcs_vport; 512 } 513 514 spin_lock_irqsave(&bfad->bfad_lock, flags); 515 bfa_fcs_vport_start(&vport->fcs_vport); 516 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 517 518 return BFA_STATUS_OK; 519 520 ext_free_fcs_vport: 521 spin_lock_irqsave(&bfad->bfad_lock, flags); 522 vport->comp_del = &fcomp; 523 init_completion(vport->comp_del); 524 bfa_fcs_vport_delete(&vport->fcs_vport); 525 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 526 wait_for_completion(vport->comp_del); 527 ext_free_vport: 528 kfree(vport); 529 ext: 530 return rc; 531 } 532 533 /** 534 * Create a vf and its base vport implicitely. 535 */ 536 bfa_status_t 537 bfad_vf_create(struct bfad_s *bfad, u16 vf_id, 538 struct bfa_port_cfg_s *port_cfg) 539 { 540 struct bfad_vf_s *vf; 541 int rc = BFA_STATUS_OK; 542 543 vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL); 544 if (!vf) { 545 rc = BFA_STATUS_FAILED; 546 goto ext; 547 } 548 549 rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg, 550 vf); 551 if (rc != BFA_STATUS_OK) 552 kfree(vf); 553 ext: 554 return rc; 555 } 556 557 void 558 bfad_bfa_tmo(unsigned long data) 559 { 560 struct bfad_s *bfad = (struct bfad_s *)data; 561 unsigned long flags; 562 struct list_head doneq; 563 564 spin_lock_irqsave(&bfad->bfad_lock, flags); 565 566 bfa_timer_tick(&bfad->bfa); 567 568 bfa_comp_deq(&bfad->bfa, &doneq); 569 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 570 571 if (!list_empty(&doneq)) { 572 bfa_comp_process(&bfad->bfa, &doneq); 573 spin_lock_irqsave(&bfad->bfad_lock, flags); 574 bfa_comp_free(&bfad->bfa, &doneq); 575 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 576 } 577 578 mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); 579 } 580 581 void 582 bfad_init_timer(struct bfad_s *bfad) 583 { 584 init_timer(&bfad->hal_tmo); 585 bfad->hal_tmo.function = bfad_bfa_tmo; 586 bfad->hal_tmo.data = (unsigned long)bfad; 587 588 mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); 589 } 590 591 int 592 bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) 593 { 594 unsigned long bar0_len; 595 int rc = -ENODEV; 596 597 if (pci_enable_device(pdev)) { 598 BFA_PRINTF(BFA_ERR, "pci_enable_device fail %p\n", pdev); 599 goto out; 600 } 601 602 if (pci_request_regions(pdev, BFAD_DRIVER_NAME)) 603 goto out_disable_device; 604 605 pci_set_master(pdev); 606 607 608 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 609 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 610 BFA_PRINTF(BFA_ERR, "pci_set_dma_mask fail %p\n", pdev); 611 goto out_release_region; 612 } 613 614 bfad->pci_bar0_map = pci_resource_start(pdev, 0); 615 bar0_len = pci_resource_len(pdev, 0); 616 bfad->pci_bar0_kva = ioremap(bfad->pci_bar0_map, bar0_len); 617 618 if (bfad->pci_bar0_kva == NULL) { 619 BFA_PRINTF(BFA_ERR, "Fail to map bar0\n"); 620 goto out_release_region; 621 } 622 623 bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn); 624 bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn); 625 bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva; 626 bfad->hal_pcidev.device_id = pdev->device; 627 bfad->pci_name = pci_name(pdev); 628 629 bfad->pci_attr.vendor_id = pdev->vendor; 630 bfad->pci_attr.device_id = pdev->device; 631 bfad->pci_attr.ssid = pdev->subsystem_device; 632 bfad->pci_attr.ssvid = pdev->subsystem_vendor; 633 bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn); 634 635 bfad->pcidev = pdev; 636 return 0; 637 638 out_release_region: 639 pci_release_regions(pdev); 640 out_disable_device: 641 pci_disable_device(pdev); 642 out: 643 return rc; 644 } 645 646 void 647 bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) 648 { 649 #if defined(__ia64__) 650 pci_iounmap(pdev, bfad->pci_bar0_kva); 651 #else 652 iounmap(bfad->pci_bar0_kva); 653 #endif 654 pci_release_regions(pdev); 655 pci_disable_device(pdev); 656 pci_set_drvdata(pdev, NULL); 657 } 658 659 void 660 bfad_fcs_port_cfg(struct bfad_s *bfad) 661 { 662 struct bfa_port_cfg_s port_cfg; 663 struct bfa_pport_attr_s attr; 664 char symname[BFA_SYMNAME_MAXLEN]; 665 666 sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no); 667 memcpy(port_cfg.sym_name.symname, symname, strlen(symname)); 668 bfa_fcport_get_attr(&bfad->bfa, &attr); 669 port_cfg.nwwn = attr.nwwn; 670 port_cfg.pwwn = attr.pwwn; 671 672 bfa_fcs_cfg_base_port(&bfad->bfa_fcs, &port_cfg); 673 } 674 675 bfa_status_t 676 bfad_drv_init(struct bfad_s *bfad) 677 { 678 bfa_status_t rc; 679 unsigned long flags; 680 struct bfa_fcs_driver_info_s driver_info; 681 682 bfad->cfg_data.rport_del_timeout = rport_del_timeout; 683 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; 684 bfad->cfg_data.io_max_sge = bfa_io_max_sge; 685 bfad->cfg_data.binding_method = FCP_PWWN_BINDING; 686 687 rc = bfad_hal_mem_alloc(bfad); 688 if (rc != BFA_STATUS_OK) { 689 printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", 690 bfad->inst_no); 691 printk(KERN_WARNING 692 "Not enough memory to attach all Brocade HBA ports," 693 " System may need more memory.\n"); 694 goto out_hal_mem_alloc_failure; 695 } 696 697 bfa_init_log(&bfad->bfa, bfad->logmod); 698 bfa_init_trc(&bfad->bfa, bfad->trcmod); 699 bfa_init_aen(&bfad->bfa, bfad->aen); 700 memset(bfad->file_map, 0, sizeof(bfad->file_map)); 701 bfa_init_plog(&bfad->bfa, &bfad->plog_buf); 702 bfa_plog_init(&bfad->plog_buf); 703 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, 704 0, "Driver Attach"); 705 706 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, 707 &bfad->hal_pcidev); 708 709 init_completion(&bfad->comp); 710 711 /* 712 * Enable Interrupt and wait bfa_init completion 713 */ 714 if (bfad_setup_intr(bfad)) { 715 printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n", 716 bfad->inst_no); 717 goto out_setup_intr_failure; 718 } 719 720 spin_lock_irqsave(&bfad->bfad_lock, flags); 721 bfa_init(&bfad->bfa); 722 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 723 724 /* 725 * Set up interrupt handler for each vectors 726 */ 727 if ((bfad->bfad_flags & BFAD_MSIX_ON) 728 && bfad_install_msix_handler(bfad)) { 729 printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", 730 __func__, bfad->inst_no); 731 } 732 733 bfad_init_timer(bfad); 734 735 wait_for_completion(&bfad->comp); 736 737 memset(&driver_info, 0, sizeof(driver_info)); 738 strncpy(driver_info.version, BFAD_DRIVER_VERSION, 739 sizeof(driver_info.version) - 1); 740 if (host_name) 741 strncpy(driver_info.host_machine_name, host_name, 742 sizeof(driver_info.host_machine_name) - 1); 743 if (os_name) 744 strncpy(driver_info.host_os_name, os_name, 745 sizeof(driver_info.host_os_name) - 1); 746 if (os_patch) 747 strncpy(driver_info.host_os_patch, os_patch, 748 sizeof(driver_info.host_os_patch) - 1); 749 750 strncpy(driver_info.os_device_name, bfad->pci_name, 751 sizeof(driver_info.os_device_name - 1)); 752 753 /* 754 * FCS INIT 755 */ 756 spin_lock_irqsave(&bfad->bfad_lock, flags); 757 bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod); 758 bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); 759 bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen); 760 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); 761 762 /* Do FCS init only when HAL init is done */ 763 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { 764 bfa_fcs_init(&bfad->bfa_fcs); 765 bfad->bfad_flags |= BFAD_FCS_INIT_DONE; 766 } 767 768 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); 769 bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable); 770 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 771 772 bfad->bfad_flags |= BFAD_DRV_INIT_DONE; 773 return BFA_STATUS_OK; 774 775 out_setup_intr_failure: 776 bfa_detach(&bfad->bfa); 777 bfad_hal_mem_release(bfad); 778 out_hal_mem_alloc_failure: 779 return BFA_STATUS_FAILED; 780 } 781 782 void 783 bfad_drv_uninit(struct bfad_s *bfad) 784 { 785 unsigned long flags; 786 787 spin_lock_irqsave(&bfad->bfad_lock, flags); 788 init_completion(&bfad->comp); 789 bfa_stop(&bfad->bfa); 790 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 791 wait_for_completion(&bfad->comp); 792 793 del_timer_sync(&bfad->hal_tmo); 794 bfa_isr_disable(&bfad->bfa); 795 bfa_detach(&bfad->bfa); 796 bfad_remove_intr(bfad); 797 bfad_hal_mem_release(bfad); 798 799 bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; 800 } 801 802 void 803 bfad_drv_start(struct bfad_s *bfad) 804 { 805 unsigned long flags; 806 807 spin_lock_irqsave(&bfad->bfad_lock, flags); 808 bfa_start(&bfad->bfa); 809 bfa_fcs_start(&bfad->bfa_fcs); 810 bfad->bfad_flags |= BFAD_HAL_START_DONE; 811 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 812 813 bfad_fc4_probe_post(bfad); 814 } 815 816 void 817 bfad_drv_stop(struct bfad_s *bfad) 818 { 819 unsigned long flags; 820 821 spin_lock_irqsave(&bfad->bfad_lock, flags); 822 init_completion(&bfad->comp); 823 bfad->pport.flags |= BFAD_PORT_DELETE; 824 bfa_fcs_exit(&bfad->bfa_fcs); 825 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 826 wait_for_completion(&bfad->comp); 827 828 spin_lock_irqsave(&bfad->bfad_lock, flags); 829 init_completion(&bfad->comp); 830 bfa_stop(&bfad->bfa); 831 bfad->bfad_flags &= ~BFAD_HAL_START_DONE; 832 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 833 wait_for_completion(&bfad->comp); 834 } 835 836 bfa_status_t 837 bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role) 838 { 839 int rc = BFA_STATUS_OK; 840 841 /* 842 * Allocate scsi_host for the physical port 843 */ 844 if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM) 845 && (role & BFA_PORT_ROLE_FCP_IM)) { 846 if (bfad->pport.im_port == NULL) { 847 rc = BFA_STATUS_FAILED; 848 goto out; 849 } 850 851 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port); 852 if (rc != BFA_STATUS_OK) 853 goto out; 854 855 bfad->pport.roles |= BFA_PORT_ROLE_FCP_IM; 856 } 857 858 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE; 859 860 out: 861 return rc; 862 } 863 864 void 865 bfad_uncfg_pport(struct bfad_s *bfad) 866 { 867 if ((bfad->pport.roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) { 868 bfad_ipfc_port_delete(bfad, &bfad->pport); 869 bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IPFC; 870 } 871 872 if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM) 873 && (bfad->pport.roles & BFA_PORT_ROLE_FCP_IM)) { 874 bfad_im_scsi_host_free(bfad, bfad->pport.im_port); 875 bfad_im_port_clean(bfad->pport.im_port); 876 kfree(bfad->pport.im_port); 877 bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IM; 878 } 879 880 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE; 881 } 882 883 void 884 bfad_drv_log_level_set(struct bfad_s *bfad) 885 { 886 if (log_level > BFA_LOG_INVALID && log_level <= BFA_LOG_LEVEL_MAX) 887 bfa_log_set_level_all(&bfad->log_data, log_level); 888 } 889 890 bfa_status_t 891 bfad_start_ops(struct bfad_s *bfad) 892 { 893 int retval; 894 895 /* PPORT FCS config */ 896 bfad_fcs_port_cfg(bfad); 897 898 retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM); 899 if (retval != BFA_STATUS_OK) 900 goto out_cfg_pport_failure; 901 902 /* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */ 903 retval = bfad_fc4_probe(bfad); 904 if (retval != BFA_STATUS_OK) { 905 printk(KERN_WARNING "bfad_fc4_probe failed\n"); 906 goto out_fc4_probe_failure; 907 } 908 909 bfad_drv_start(bfad); 910 911 /* 912 * If bfa_linkup_delay is set to -1 default; try to retrive the 913 * value using the bfad_os_get_linkup_delay(); else use the 914 * passed in module param value as the bfa_linkup_delay. 915 */ 916 if (bfa_linkup_delay < 0) { 917 918 bfa_linkup_delay = bfad_os_get_linkup_delay(bfad); 919 bfad_os_rport_online_wait(bfad); 920 bfa_linkup_delay = -1; 921 922 } else { 923 bfad_os_rport_online_wait(bfad); 924 } 925 926 bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name); 927 928 return BFA_STATUS_OK; 929 930 out_fc4_probe_failure: 931 bfad_fc4_probe_undo(bfad); 932 bfad_uncfg_pport(bfad); 933 out_cfg_pport_failure: 934 return BFA_STATUS_FAILED; 935 } 936 937 int 938 bfad_worker (void *ptr) 939 { 940 struct bfad_s *bfad; 941 unsigned long flags; 942 943 bfad = (struct bfad_s *)ptr; 944 945 while (!kthread_should_stop()) { 946 947 /* Check if the FCS init is done from bfad_drv_init; 948 * if not done do FCS init and set the flag. 949 */ 950 if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) { 951 spin_lock_irqsave(&bfad->bfad_lock, flags); 952 bfa_fcs_init(&bfad->bfa_fcs); 953 bfad->bfad_flags |= BFAD_FCS_INIT_DONE; 954 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 955 } 956 957 /* Start the bfad operations after HAL init done */ 958 bfad_start_ops(bfad); 959 960 spin_lock_irqsave(&bfad->bfad_lock, flags); 961 bfad->bfad_tsk = NULL; 962 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 963 964 break; 965 } 966 967 return 0; 968 } 969 970 /* 971 * PCI_entry PCI driver entries * { 972 */ 973 974 /** 975 * PCI probe entry. 976 */ 977 int 978 bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) 979 { 980 struct bfad_s *bfad; 981 int error = -ENODEV, retval; 982 983 /* 984 * For single port cards - only claim function 0 985 */ 986 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) 987 && (PCI_FUNC(pdev->devfn) != 0)) 988 return -ENODEV; 989 990 BFA_TRACE(BFA_INFO, "bfad_pci_probe entry"); 991 992 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL); 993 if (!bfad) { 994 error = -ENOMEM; 995 goto out; 996 } 997 998 bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL); 999 if (!bfad->trcmod) { 1000 printk(KERN_WARNING "Error alloc trace buffer!\n"); 1001 error = -ENOMEM; 1002 goto out_alloc_trace_failure; 1003 } 1004 1005 /* 1006 * LOG/TRACE INIT 1007 */ 1008 bfa_trc_init(bfad->trcmod); 1009 bfa_trc(bfad, bfad_inst); 1010 1011 bfad->logmod = &bfad->log_data; 1012 bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf); 1013 1014 bfad_drv_log_level_set(bfad); 1015 1016 bfad->aen = &bfad->aen_buf; 1017 1018 if (!(bfad_load_fwimg(pdev))) { 1019 printk(KERN_WARNING "bfad_load_fwimg failure!\n"); 1020 kfree(bfad->trcmod); 1021 goto out_alloc_trace_failure; 1022 } 1023 1024 retval = bfad_pci_init(pdev, bfad); 1025 if (retval) { 1026 printk(KERN_WARNING "bfad_pci_init failure!\n"); 1027 error = retval; 1028 goto out_pci_init_failure; 1029 } 1030 1031 mutex_lock(&bfad_mutex); 1032 bfad->inst_no = bfad_inst++; 1033 list_add_tail(&bfad->list_entry, &bfad_list); 1034 mutex_unlock(&bfad_mutex); 1035 1036 spin_lock_init(&bfad->bfad_lock); 1037 pci_set_drvdata(pdev, bfad); 1038 1039 bfad->ref_count = 0; 1040 bfad->pport.bfad = bfad; 1041 1042 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s", 1043 "bfad_worker"); 1044 if (IS_ERR(bfad->bfad_tsk)) { 1045 printk(KERN_INFO "bfad[%d]: Kernel thread" 1046 " creation failed!\n", 1047 bfad->inst_no); 1048 goto out_kthread_create_failure; 1049 } 1050 1051 retval = bfad_drv_init(bfad); 1052 if (retval != BFA_STATUS_OK) 1053 goto out_drv_init_failure; 1054 if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { 1055 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; 1056 printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no); 1057 goto ok; 1058 } 1059 1060 retval = bfad_start_ops(bfad); 1061 if (retval != BFA_STATUS_OK) 1062 goto out_start_ops_failure; 1063 1064 kthread_stop(bfad->bfad_tsk); 1065 bfad->bfad_tsk = NULL; 1066 1067 ok: 1068 return 0; 1069 1070 out_start_ops_failure: 1071 bfad_drv_uninit(bfad); 1072 out_drv_init_failure: 1073 kthread_stop(bfad->bfad_tsk); 1074 out_kthread_create_failure: 1075 mutex_lock(&bfad_mutex); 1076 bfad_inst--; 1077 list_del(&bfad->list_entry); 1078 mutex_unlock(&bfad_mutex); 1079 bfad_pci_uninit(pdev, bfad); 1080 out_pci_init_failure: 1081 kfree(bfad->trcmod); 1082 out_alloc_trace_failure: 1083 kfree(bfad); 1084 out: 1085 return error; 1086 } 1087 1088 /** 1089 * PCI remove entry. 1090 */ 1091 void 1092 bfad_pci_remove(struct pci_dev *pdev) 1093 { 1094 struct bfad_s *bfad = pci_get_drvdata(pdev); 1095 unsigned long flags; 1096 1097 bfa_trc(bfad, bfad->inst_no); 1098 1099 spin_lock_irqsave(&bfad->bfad_lock, flags); 1100 if (bfad->bfad_tsk != NULL) 1101 kthread_stop(bfad->bfad_tsk); 1102 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1103 1104 if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE) 1105 && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { 1106 1107 spin_lock_irqsave(&bfad->bfad_lock, flags); 1108 init_completion(&bfad->comp); 1109 bfa_stop(&bfad->bfa); 1110 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1111 wait_for_completion(&bfad->comp); 1112 1113 bfad_remove_intr(bfad); 1114 del_timer_sync(&bfad->hal_tmo); 1115 goto hal_detach; 1116 } else if (!(bfad->bfad_flags & BFAD_DRV_INIT_DONE)) { 1117 goto remove_sysfs; 1118 } 1119 1120 if (bfad->bfad_flags & BFAD_HAL_START_DONE) { 1121 bfad_drv_stop(bfad); 1122 } else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) { 1123 /* Invoking bfa_stop() before bfa_detach 1124 * when HAL and DRV init are success 1125 * but HAL start did not occur. 1126 */ 1127 spin_lock_irqsave(&bfad->bfad_lock, flags); 1128 init_completion(&bfad->comp); 1129 bfa_stop(&bfad->bfa); 1130 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1131 wait_for_completion(&bfad->comp); 1132 } 1133 1134 bfad_remove_intr(bfad); 1135 del_timer_sync(&bfad->hal_tmo); 1136 1137 if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) 1138 bfad_fc4_probe_undo(bfad); 1139 1140 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) 1141 bfad_uncfg_pport(bfad); 1142 1143 hal_detach: 1144 spin_lock_irqsave(&bfad->bfad_lock, flags); 1145 bfa_detach(&bfad->bfa); 1146 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1147 bfad_hal_mem_release(bfad); 1148 remove_sysfs: 1149 1150 mutex_lock(&bfad_mutex); 1151 bfad_inst--; 1152 list_del(&bfad->list_entry); 1153 mutex_unlock(&bfad_mutex); 1154 bfad_pci_uninit(pdev, bfad); 1155 1156 kfree(bfad->trcmod); 1157 kfree(bfad); 1158 } 1159 1160 1161 static struct pci_device_id bfad_id_table[] = { 1162 { 1163 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1164 .device = BFA_PCI_DEVICE_ID_FC_8G2P, 1165 .subvendor = PCI_ANY_ID, 1166 .subdevice = PCI_ANY_ID, 1167 }, 1168 { 1169 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1170 .device = BFA_PCI_DEVICE_ID_FC_8G1P, 1171 .subvendor = PCI_ANY_ID, 1172 .subdevice = PCI_ANY_ID, 1173 }, 1174 { 1175 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1176 .device = BFA_PCI_DEVICE_ID_CT, 1177 .subvendor = PCI_ANY_ID, 1178 .subdevice = PCI_ANY_ID, 1179 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1180 .class_mask = ~0, 1181 }, 1182 1183 {0, 0}, 1184 }; 1185 1186 MODULE_DEVICE_TABLE(pci, bfad_id_table); 1187 1188 static struct pci_driver bfad_pci_driver = { 1189 .name = BFAD_DRIVER_NAME, 1190 .id_table = bfad_id_table, 1191 .probe = bfad_pci_probe, 1192 .remove = __devexit_p(bfad_pci_remove), 1193 }; 1194 1195 /** 1196 * Linux driver module functions 1197 */ 1198 bfa_status_t 1199 bfad_fc4_module_init(void) 1200 { 1201 int rc; 1202 1203 rc = bfad_im_module_init(); 1204 if (rc != BFA_STATUS_OK) 1205 goto ext; 1206 1207 bfad_tm_module_init(); 1208 if (ipfc_enable) 1209 bfad_ipfc_module_init(); 1210 ext: 1211 return rc; 1212 } 1213 1214 void 1215 bfad_fc4_module_exit(void) 1216 { 1217 if (ipfc_enable) 1218 bfad_ipfc_module_exit(); 1219 bfad_tm_module_exit(); 1220 bfad_im_module_exit(); 1221 } 1222 1223 /** 1224 * Driver module init. 1225 */ 1226 static int __init 1227 bfad_init(void) 1228 { 1229 int error = 0; 1230 1231 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n", 1232 BFAD_DRIVER_VERSION); 1233 1234 if (num_sgpgs > 0) 1235 num_sgpgs_parm = num_sgpgs; 1236 1237 error = bfad_fc4_module_init(); 1238 if (error) { 1239 error = -ENOMEM; 1240 printk(KERN_WARNING "bfad_fc4_module_init failure\n"); 1241 goto ext; 1242 } 1243 1244 if (!strcmp(FCPI_NAME, " fcpim")) 1245 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IM; 1246 if (!strcmp(FCPT_NAME, " fcptm")) 1247 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_TM; 1248 if (!strcmp(IPFC_NAME, " ipfc")) 1249 bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IPFC; 1250 1251 bfa_ioc_auto_recover(ioc_auto_recover); 1252 bfa_fcs_rport_set_del_timeout(rport_del_timeout); 1253 error = pci_register_driver(&bfad_pci_driver); 1254 1255 if (error) { 1256 printk(KERN_WARNING "bfad pci_register_driver failure\n"); 1257 goto ext; 1258 } 1259 1260 return 0; 1261 1262 ext: 1263 bfad_fc4_module_exit(); 1264 return error; 1265 } 1266 1267 /** 1268 * Driver module exit. 1269 */ 1270 static void __exit 1271 bfad_exit(void) 1272 { 1273 pci_unregister_driver(&bfad_pci_driver); 1274 bfad_fc4_module_exit(); 1275 bfad_free_fwimg(); 1276 } 1277 1278 #define BFAD_PROTO_NAME FCPI_NAME FCPT_NAME IPFC_NAME 1279 1280 module_init(bfad_init); 1281 module_exit(bfad_exit); 1282 MODULE_LICENSE("GPL"); 1283 MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME); 1284 MODULE_AUTHOR("Brocade Communications Systems, Inc."); 1285 MODULE_VERSION(BFAD_DRIVER_VERSION); 1286 1287 1288