/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/**
 * bfad.c Linux driver PCI interface module.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_tm.h"
#include "bfad_ipfc.h"
#include "bfad_trcmod.h"
#include <fcb/bfa_fcb_vf.h>
#include <fcb/bfa_fcb_rport.h>
#include <fcb/bfa_fcb_port.h>
#include <fcb/bfa_fcb.h>

BFA_TRC_FILE(LDRV, BFAD);
DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);
static int bfad_inst;
int bfad_supported_fc4s;

static char *host_name;
static char *os_name;
static char *os_patch;
static int num_rports;
static int num_ios;
static int num_tms;
static int num_fcxps;
static int num_ufbufs;
static int reqq_size;
static int rspq_size;
static int num_sgpgs;
static int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
static int bfa_io_max_sge = BFAD_IO_MAX_SGE;
static int log_level = BFA_LOG_WARNING;
static int ioc_auto_recover = BFA_TRUE;
static int ipfc_enable = BFA_FALSE;
static int ipfc_mtu = -1;
static int fdmi_enable = BFA_TRUE;
int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
int bfa_linkup_delay = -1;

module_param(os_name, charp, S_IRUGO | S_IWUSR);
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
module_param(host_name, charp, S_IRUGO | S_IWUSR);
module_param(num_rports, int, S_IRUGO | S_IWUSR);
module_param(num_ios, int, S_IRUGO | S_IWUSR);
module_param(num_tms, int, S_IRUGO | S_IWUSR);
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
module_param(log_level, int, S_IRUGO | S_IWUSR);
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR);
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);

/*
 * Stores the module param num_sgpgs value;
 * used to reset for the next bfad instance.
 */
static int num_sgpgs_parm;

static bfa_status_t
bfad_fc4_probe(struct bfad_s *bfad)
{
	int rc;

	rc = bfad_im_probe(bfad);
	if (rc != BFA_STATUS_OK)
		goto ext;

	bfad_tm_probe(bfad);

	if (ipfc_enable)
		bfad_ipfc_probe(bfad);

	bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
ext:
	return rc;
}

static void
bfad_fc4_probe_undo(struct bfad_s *bfad)
{
	bfad_im_probe_undo(bfad);
	bfad_tm_probe_undo(bfad);
	if (ipfc_enable)
		bfad_ipfc_probe_undo(bfad);
	bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
}

static void
bfad_fc4_probe_post(struct bfad_s *bfad)
{
	if (bfad->im)
		bfad_im_probe_post(bfad->im);

	bfad_tm_probe_post(bfad);
	if (ipfc_enable)
		bfad_ipfc_probe_post(bfad);
}

static bfa_status_t
bfad_fc4_port_new(struct bfad_s *bfad, struct bfad_port_s *port, int roles)
{
	int rc = BFA_STATUS_FAILED;

	if (roles & BFA_PORT_ROLE_FCP_IM)
		rc = bfad_im_port_new(bfad, port);
	if (rc != BFA_STATUS_OK)
		goto ext;

	if (roles & BFA_PORT_ROLE_FCP_TM)
		rc = bfad_tm_port_new(bfad, port);
	if (rc != BFA_STATUS_OK)
		goto ext;

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		rc = bfad_ipfc_port_new(bfad, port, port->pvb_type);
ext:
	return rc;
}

static void
bfad_fc4_port_delete(struct bfad_s *bfad, struct bfad_port_s *port, int roles)
{
	if (roles & BFA_PORT_ROLE_FCP_IM)
		bfad_im_port_delete(bfad, port);

	if (roles & BFA_PORT_ROLE_FCP_TM)
		bfad_tm_port_delete(bfad, port);

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		bfad_ipfc_port_delete(bfad, port);
}

/**
 * BFA callbacks
 */
void
bfad_hcb_comp(void *arg, bfa_status_t status)
{
	struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;

	fcomp->status = status;
	complete(&fcomp->comp);
}

/**
 * bfa_init callback
 */
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
	struct bfad_s *bfad = drv;

	if (init_status == BFA_STATUS_OK) {
		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

		/* If BFAD_HAL_INIT_FAIL flag is set:
		 * Wake up the kernel thread to start
		 * the bfad operations after HAL init done
		 */
		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
			wake_up_process(bfad->bfad_tsk);
		}
	}

	complete(&bfad->comp);
}

/**
 * BFA_FCS callbacks
 */
static struct bfad_port_s *
bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv,
		  struct bfad_vport_s *vp_drv)
{
	return (vp_drv) ? (&(vp_drv)->drv_port)
		: ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport));
}

struct bfad_port_s *
bfa_fcb_port_new(struct bfad_s *bfad, struct bfa_fcs_port_s *port,
		 enum bfa_port_role roles, struct bfad_vf_s *vf_drv,
		 struct bfad_vport_s *vp_drv)
{
	bfa_status_t rc;
	struct bfad_port_s *port_drv;

	if (!vp_drv && !vf_drv) {
		port_drv = &bfad->pport;
		port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
	} else if (!vp_drv && vf_drv) {
		port_drv = &vf_drv->base_port;
		port_drv->pvb_type = BFAD_PORT_VF_BASE;
	} else if (vp_drv && !vf_drv) {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
	} else {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_VF_VPORT;
	}

	port_drv->fcs_port = port;
	port_drv->roles = roles;
	rc = bfad_fc4_port_new(bfad, port_drv, roles);
	if (rc != BFA_STATUS_OK) {
		bfad_fc4_port_delete(bfad, port_drv, roles);
		port_drv = NULL;
	}

	return port_drv;
}

void
bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles,
		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s *port_drv;

	/*
	 * this will be only called from rmmod context
	 */
	if (vp_drv && !vp_drv->comp_del) {
		port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);
		bfa_trc(bfad, roles);
		bfad_fc4_port_delete(bfad, port_drv, roles);
	}
}

void
bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles,
		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);

	if (roles & BFA_PORT_ROLE_FCP_IM)
		bfad_im_port_online(bfad, port_drv);

	if (roles & BFA_PORT_ROLE_FCP_TM)
		bfad_tm_port_online(bfad, port_drv);

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		bfad_ipfc_port_online(bfad, port_drv);

	bfad->bfad_flags |= BFAD_PORT_ONLINE;
}

void
bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles,
		     struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);

	if (roles & BFA_PORT_ROLE_FCP_IM)
		bfad_im_port_offline(bfad, port_drv);

	if (roles & BFA_PORT_ROLE_FCP_TM)
		bfad_tm_port_offline(bfad, port_drv);

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		bfad_ipfc_port_offline(bfad, port_drv);
}

void
bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv)
{
	if (vport_drv->comp_del) {
		complete(vport_drv->comp_del);
		return;
	}
}

/**
 * FCS RPORT alloc callback, after successful PLOGI by FCS
 */
bfa_status_t
bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
		    struct bfad_rport_s **rport_drv)
{
	bfa_status_t rc = BFA_STATUS_OK;

	*rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
	if (*rport_drv == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	*rport = &(*rport_drv)->fcs_rport;

ext:
	return rc;
}

void
bfad_hal_mem_release(struct bfad_s *bfad)
{
	int i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		if (meminfo_elem->kva != NULL) {
			switch (meminfo_elem->mem_type) {
			case BFA_MEM_TYPE_KVA:
				vfree(meminfo_elem->kva);
				break;
			case BFA_MEM_TYPE_DMA:
				dma_free_coherent(&bfad->pcidev->dev,
						  meminfo_elem->mem_len,
						  meminfo_elem->kva,
						  (dma_addr_t) meminfo_elem->dma);
				break;
			default:
				bfa_assert(0);
				break;
			}
		}
	}

	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}

void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
	if (num_rports > 0)
		bfa_cfg->fwcfg.num_rports = num_rports;
	if (num_ios > 0)
		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
	if (num_tms > 0)
		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
	if (num_fcxps > 0)
		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
	if (num_ufbufs > 0)
		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
	if (reqq_size > 0)
		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
	if (rspq_size > 0)
		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
	if (num_sgpgs > 0)
		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

	/*
	 * populate the hal values back to the driver for sysfs use.
	 * otherwise, the default values will be shown as 0 in sysfs
	 */
	num_rports = bfa_cfg->fwcfg.num_rports;
	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}

bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;
	bfa_status_t rc = BFA_STATUS_OK;
	dma_addr_t phys_addr;
	int retry_count = 0;
	int reset_value = 1;
	int min_num_sgpgs = 512;
	void *kva;
	int i;

	bfa_cfg_get_default(&bfad->ioc_cfg);

retry:
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		switch (meminfo_elem->mem_type) {
		case BFA_MEM_TYPE_KVA:
			kva = vmalloc(meminfo_elem->mem_len);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				rc = BFA_STATUS_ENOMEM;
				goto ext;
			}
			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			break;
		case BFA_MEM_TYPE_DMA:
			kva = dma_alloc_coherent(&bfad->pcidev->dev,
						 meminfo_elem->mem_len,
						 &phys_addr, GFP_KERNEL);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				/*
				 * If we cannot allocate with the default
				 * num_sgpgs, try with half the value.
				 */
				if (num_sgpgs > min_num_sgpgs) {
					printk(KERN_INFO "bfad[%d]: memory"
						" allocation failed with"
						" num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					nextLowerInt(&num_sgpgs);
					printk(KERN_INFO "bfad[%d]: trying to"
						" allocate memory with"
						" num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					retry_count++;
					goto retry;
				} else {
					if (num_sgpgs_parm > 0)
						num_sgpgs = num_sgpgs_parm;
					else {
						reset_value =
							(1 << retry_count);
						num_sgpgs *= reset_value;
					}
					rc = BFA_STATUS_ENOMEM;
					goto ext;
				}
			}

			if (num_sgpgs_parm > 0)
				num_sgpgs = num_sgpgs_parm;
			else {
				reset_value = (1 << retry_count);
				num_sgpgs *= reset_value;
			}

			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			meminfo_elem->dma = phys_addr;
			break;
		default:
			break;

		}
	}
ext:
	return rc;
}

/**
 * Create a vport under a vf.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_port_cfg_s *port_cfg, struct device *dev)
{
	struct bfad_vport_s *vport;
	int rc = BFA_STATUS_OK;
	unsigned long flags;
	struct completion fcomp;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
					     dev);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}

/**
 * Create a vf and its base vport implicitly.
 */
bfa_status_t
bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
	       struct bfa_port_cfg_s *port_cfg)
{
	struct bfad_vf_s *vf;
	int rc = BFA_STATUS_OK;

	vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
	if (!vf) {
		rc = BFA_STATUS_FAILED;
		goto ext;
	}

	rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg,
			       vf);
	if (rc != BFA_STATUS_OK)
		kfree(vf);
ext:
	return rc;
}

void
bfad_bfa_tmo(unsigned long data)
{
	struct bfad_s *bfad = (struct bfad_s *)data;
	unsigned long flags;
	struct list_head doneq;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_timer_tick(&bfad->bfa);

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

void
bfad_init_timer(struct bfad_s *bfad)
{
	init_timer(&bfad->hal_tmo);
	bfad->hal_tmo.function = bfad_bfa_tmo;
	bfad->hal_tmo.data = (unsigned long)bfad;

	mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
	int rc = -ENODEV;

	if (pci_enable_device(pdev)) {
		BFA_PRINTF(BFA_ERR, "pci_enable_device fail %p\n", pdev);
		goto out;
	}

	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
		goto out_disable_device;

	pci_set_master(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			BFA_PRINTF(BFA_ERR, "pci_set_dma_mask fail %p\n", pdev);
			goto out_release_region;
		}

	bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));

	if (bfad->pci_bar0_kva == NULL) {
		BFA_PRINTF(BFA_ERR, "Fail to map bar0\n");
		goto out_release_region;
	}

	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
	bfad->hal_pcidev.device_id = pdev->device;
	bfad->pci_name = pci_name(pdev);

	bfad->pci_attr.vendor_id = pdev->vendor;
	bfad->pci_attr.device_id = pdev->device;
	bfad->pci_attr.ssid = pdev->subsystem_device;
	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);

	bfad->pcidev = pdev;
	return 0;

out_release_region:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return rc;
}

void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
	pci_iounmap(pdev, bfad->pci_bar0_kva);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

void
bfad_fcs_port_cfg(struct bfad_s *bfad)
{
	struct bfa_port_cfg_s port_cfg;
	struct bfa_pport_attr_s attr;
	char symname[BFA_SYMNAME_MAXLEN];

	sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
	memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
	bfa_fcport_get_attr(&bfad->bfa, &attr);
	port_cfg.nwwn = attr.nwwn;
	port_cfg.pwwn = attr.pwwn;

	bfa_fcs_cfg_base_port(&bfad->bfa_fcs, &port_cfg);
}

bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
	bfa_status_t rc;
	unsigned long flags;
	struct bfa_fcs_driver_info_s driver_info;

	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
	bfad->cfg_data.io_max_sge = bfa_io_max_sge;
	bfad->cfg_data.binding_method = FCP_PWWN_BINDING;

	rc = bfad_hal_mem_alloc(bfad);
	if (rc != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
		       bfad->inst_no);
		printk(KERN_WARNING
			"Not enough memory to attach all Brocade HBA ports,"
			" System may need more memory.\n");
		goto out_hal_mem_alloc_failure;
	}

	bfa_init_log(&bfad->bfa, bfad->logmod);
	bfa_init_trc(&bfad->bfa, bfad->trcmod);
	bfa_init_aen(&bfad->bfa, bfad->aen);
	memset(bfad->file_map, 0, sizeof(bfad->file_map));
	bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
	bfa_plog_init(&bfad->plog_buf);
	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
		     0, "Driver Attach");

	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
		   &bfad->hal_pcidev);

	init_completion(&bfad->comp);

	/*
	 * Enable Interrupt and wait bfa_init completion
	 */
	if (bfad_setup_intr(bfad)) {
		printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
		       bfad->inst_no);
		goto out_setup_intr_failure;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_init(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/*
	 * Set up interrupt handler for each vectors
	 */
	if ((bfad->bfad_flags & BFAD_MSIX_ON)
	    && bfad_install_msix_handler(bfad)) {
		printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
		       __func__, bfad->inst_no);
	}

	bfad_init_timer(bfad);

	wait_for_completion(&bfad->comp);

	memset(&driver_info, 0, sizeof(driver_info));
	strncpy(driver_info.version, BFAD_DRIVER_VERSION,
		sizeof(driver_info.version) - 1);
	if (host_name)
		strncpy(driver_info.host_machine_name, host_name,
			sizeof(driver_info.host_machine_name) - 1);
	if (os_name)
		strncpy(driver_info.host_os_name, os_name,
			sizeof(driver_info.host_os_name) - 1);
	if (os_patch)
		strncpy(driver_info.host_os_patch, os_patch,
			sizeof(driver_info.host_os_patch) - 1);

	strncpy(driver_info.os_device_name, bfad->pci_name,
		sizeof(driver_info.os_device_name) - 1);

	/*
	 * FCS INIT
	 */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod);
	bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
	bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen);
	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);

	/* Do FCS init only when HAL init is done */
	if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
		bfa_fcs_init(&bfad->bfa_fcs);
		bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
	}

	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
	bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
	return BFA_STATUS_OK;

out_setup_intr_failure:
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_hal_mem_alloc_failure:
	return BFA_STATUS_FAILED;
}

void
bfad_drv_uninit(struct bfad_s *bfad)
{
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_stop(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	del_timer_sync(&bfad->hal_tmo);
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);
	bfad_hal_mem_release(bfad);

	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}

void
bfad_drv_start(struct bfad_s *bfad)
{
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_start(&bfad->bfa);
	bfa_fcs_start(&bfad->bfa_fcs);
	bfad->bfad_flags |= BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad_fc4_probe_post(bfad);
}

void
bfad_drv_stop(struct bfad_s *bfad)
{
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_stop(&bfad->bfa);
	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);
}

bfa_status_t
bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
{
	int rc = BFA_STATUS_OK;

	/*
	 * Allocate scsi_host for the physical port
	 */
	if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM)
	    && (role & BFA_PORT_ROLE_FCP_IM)) {
		if (bfad->pport.im_port == NULL) {
			rc = BFA_STATUS_FAILED;
			goto out;
		}

		rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
					     &bfad->pcidev->dev);
		if (rc != BFA_STATUS_OK)
			goto out;

		bfad->pport.roles |= BFA_PORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;

out:
	return rc;
}

void
bfad_uncfg_pport(struct bfad_s *bfad)
{
	if ((bfad->pport.roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) {
		bfad_ipfc_port_delete(bfad, &bfad->pport);
		bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IPFC;
	}

	if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM)
	    && (bfad->pport.roles & BFA_PORT_ROLE_FCP_IM)) {
		bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
		bfad_im_port_clean(bfad->pport.im_port);
		kfree(bfad->pport.im_port);
		bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
}

void
bfad_drv_log_level_set(struct bfad_s *bfad)
{
	if (log_level > BFA_LOG_INVALID && log_level <= BFA_LOG_LEVEL_MAX)
		bfa_log_set_level_all(&bfad->log_data, log_level);
}

bfa_status_t
bfad_start_ops(struct bfad_s *bfad)
{
	int retval;

	/* PPORT FCS config */
	bfad_fcs_port_cfg(bfad);

	retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
	if (retval != BFA_STATUS_OK)
		goto out_cfg_pport_failure;

	/* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */
	retval = bfad_fc4_probe(bfad);
	if (retval != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad_fc4_probe failed\n");
		goto out_fc4_probe_failure;
	}

	bfad_drv_start(bfad);

	/*
	 * If bfa_linkup_delay is set to -1 (the default), retrieve the
	 * value using bfad_os_get_linkup_delay(); otherwise use the
	 * module parameter value passed in as the bfa_linkup_delay.
	 */
	if (bfa_linkup_delay < 0) {

		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
		bfad_os_rport_online_wait(bfad);
		bfa_linkup_delay = -1;

	} else {
		bfad_os_rport_online_wait(bfad);
	}

	bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);

	return BFA_STATUS_OK;

out_fc4_probe_failure:
	bfad_fc4_probe_undo(bfad);
	bfad_uncfg_pport(bfad);
out_cfg_pport_failure:
	return BFA_STATUS_FAILED;
}

int
bfad_worker(void *ptr)
{
	struct bfad_s *bfad;
	unsigned long flags;

	bfad = (struct bfad_s *)ptr;

	while (!kthread_should_stop()) {

		/* Check if the FCS init is done from bfad_drv_init;
		 * if not done do FCS init and set the flag.
		 */
		if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) {
			spin_lock_irqsave(&bfad->bfad_lock, flags);
			bfa_fcs_init(&bfad->bfa_fcs);
			bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		}

		/* Start the bfad operations after HAL init done */
		bfad_start_ops(bfad);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		break;
	}

	return 0;
}

/*
 *  PCI_entry PCI driver entries * {
 */

/**
 * PCI probe entry.
 */
int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct bfad_s *bfad;
	int error = -ENODEV, retval;

	/*
	 * For single port cards - only claim function 0
	 */
	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P)
	    && (PCI_FUNC(pdev->devfn) != 0))
		return -ENODEV;

	BFA_TRACE(BFA_INFO, "bfad_pci_probe entry");

	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
	if (!bfad) {
		error = -ENOMEM;
		goto out;
	}

	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
	if (!bfad->trcmod) {
		printk(KERN_WARNING "Error alloc trace buffer!\n");
		error = -ENOMEM;
		goto out_alloc_trace_failure;
	}

	/*
	 * LOG/TRACE INIT
	 */
	bfa_trc_init(bfad->trcmod);
	bfa_trc(bfad, bfad_inst);

	bfad->logmod = &bfad->log_data;
	bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf);

	bfad_drv_log_level_set(bfad);

	bfad->aen = &bfad->aen_buf;

	if (!(bfad_load_fwimg(pdev))) {
		printk(KERN_WARNING "bfad_load_fwimg failure!\n");
		kfree(bfad->trcmod);
		goto out_alloc_trace_failure;
	}

	retval = bfad_pci_init(pdev, bfad);
	if (retval) {
		printk(KERN_WARNING "bfad_pci_init failure!\n");
		error = retval;
		goto out_pci_init_failure;
	}

	mutex_lock(&bfad_mutex);
	bfad->inst_no = bfad_inst++;
	list_add_tail(&bfad->list_entry, &bfad_list);
	mutex_unlock(&bfad_mutex);

	spin_lock_init(&bfad->bfad_lock);
	pci_set_drvdata(pdev, bfad);

	bfad->ref_count = 0;
	bfad->pport.bfad = bfad;

	bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s",
					"bfad_worker");
	if (IS_ERR(bfad->bfad_tsk)) {
		printk(KERN_INFO "bfad[%d]: Kernel thread"
			" creation failed!\n",
			bfad->inst_no);
		goto out_kthread_create_failure;
	}

	retval = bfad_drv_init(bfad);
	if (retval != BFA_STATUS_OK)
		goto out_drv_init_failure;
	if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
		bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
		printk(KERN_WARNING "bfad%d: hal init failed\n",
		       bfad->inst_no);
		goto ok;
	}

	retval = bfad_start_ops(bfad);
	if (retval != BFA_STATUS_OK)
		goto out_start_ops_failure;

	kthread_stop(bfad->bfad_tsk);
	bfad->bfad_tsk = NULL;

ok:
	return 0;

out_start_ops_failure:
	bfad_drv_uninit(bfad);
out_drv_init_failure:
	kthread_stop(bfad->bfad_tsk);
out_kthread_create_failure:
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);
out_pci_init_failure:
	kfree(bfad->trcmod);
out_alloc_trace_failure:
	kfree(bfad);
out:
	return error;
}

/**
 * PCI remove entry.
 */
void
bfad_pci_remove(struct pci_dev *pdev)
{
	struct bfad_s *bfad = pci_get_drvdata(pdev);
	unsigned long flags;

	bfa_trc(bfad, bfad->inst_no);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfad->bfad_tsk != NULL)
		kthread_stop(bfad->bfad_tsk);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE)
	    && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		init_completion(&bfad->comp);
		bfa_stop(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		wait_for_completion(&bfad->comp);

		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		goto hal_detach;
	} else if (!(bfad->bfad_flags & BFAD_DRV_INIT_DONE)) {
		goto remove_sysfs;
	}

	if (bfad->bfad_flags & BFAD_HAL_START_DONE) {
		bfad_drv_stop(bfad);
	} else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) {
		/* Invoking bfa_stop() before bfa_detach
		 * when HAL and DRV init are successful
		 * but HAL start did not occur.
		 */
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		init_completion(&bfad->comp);
		bfa_stop(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		wait_for_completion(&bfad->comp);
	}

	bfad_remove_intr(bfad);
	del_timer_sync(&bfad->hal_tmo);

	if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE)
		bfad_fc4_probe_undo(bfad);

	if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
		bfad_uncfg_pport(bfad);

hal_detach:
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_detach(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfad_hal_mem_release(bfad);
remove_sysfs:

	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);

	kfree(bfad->trcmod);
	kfree(bfad);
}

static struct pci_device_id bfad_id_table[] = {
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},

	{0, 0},
};

MODULE_DEVICE_TABLE(pci, bfad_id_table);

static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};

/**
 * Linux driver module functions
 */
bfa_status_t
bfad_fc4_module_init(void)
{
	int rc;

	rc = bfad_im_module_init();
	if (rc != BFA_STATUS_OK)
		goto ext;

	bfad_tm_module_init();
	if (ipfc_enable)
		bfad_ipfc_module_init();
ext:
	return rc;
}

void
bfad_fc4_module_exit(void)
{
	if (ipfc_enable)
		bfad_ipfc_module_exit();
	bfad_tm_module_exit();
	bfad_im_module_exit();
}

/**
 * Driver module init.
 */
static int __init
bfad_init(void)
{
	int error = 0;

	printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
	       BFAD_DRIVER_VERSION);

	if (num_sgpgs > 0)
		num_sgpgs_parm = num_sgpgs;

	error = bfad_fc4_module_init();
	if (error) {
		error = -ENOMEM;
		printk(KERN_WARNING "bfad_fc4_module_init failure\n");
		goto ext;
	}

	if (!strcmp(FCPI_NAME, " fcpim"))
		bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IM;
	if (!strcmp(FCPT_NAME, " fcptm"))
		bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_TM;
	if (!strcmp(IPFC_NAME, " ipfc"))
		bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IPFC;

	bfa_ioc_auto_recover(ioc_auto_recover);
	bfa_fcs_rport_set_del_timeout(rport_del_timeout);
	error = pci_register_driver(&bfad_pci_driver);

	if (error) {
		printk(KERN_WARNING "bfad pci_register_driver failure\n");
		goto ext;
	}

	return 0;

ext:
	bfad_fc4_module_exit();
	return error;
}

/**
 * Driver module exit.
 */
static void __exit
bfad_exit(void)
{
	pci_unregister_driver(&bfad_pci_driver);
	bfad_fc4_module_exit();
	bfad_free_fwimg();
}

#define BFAD_PROTO_NAME FCPI_NAME FCPT_NAME IPFC_NAME

module_init(bfad_init);
module_exit(bfad_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
MODULE_AUTHOR("Brocade Communications Systems, Inc.");
MODULE_VERSION(BFAD_DRIVER_VERSION);