/*
 * Copyright 2017 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 *
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>
#include <linux/irq_poll.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");
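
/*
 * Illustrative usage (not from this file): the parameters above can be
 * set at module load time, e.g.
 *
 *	modprobe be2iscsi be_max_phys_size=64 enable_msix=1
 *
 * assuming the module name matches DRV_NAME ("be2iscsi" in be_main.h).
 */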
Range is 16 - 128"); 57 58 #define beiscsi_disp_param(_name)\ 59 static ssize_t \ 60 beiscsi_##_name##_disp(struct device *dev,\ 61 struct device_attribute *attrib, char *buf) \ 62 { \ 63 struct Scsi_Host *shost = class_to_shost(dev);\ 64 struct beiscsi_hba *phba = iscsi_host_priv(shost); \ 65 return snprintf(buf, PAGE_SIZE, "%d\n",\ 66 phba->attr_##_name);\ 67 } 68 69 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ 70 static int \ 71 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ 72 {\ 73 if (val >= _minval && val <= _maxval) {\ 74 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 75 "BA_%d : beiscsi_"#_name" updated "\ 76 "from 0x%x ==> 0x%x\n",\ 77 phba->attr_##_name, val); \ 78 phba->attr_##_name = val;\ 79 return 0;\ 80 } \ 81 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \ 82 "BA_%d beiscsi_"#_name" attribute "\ 83 "cannot be updated to 0x%x, "\ 84 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 85 return -EINVAL;\ 86 } 87 88 #define beiscsi_store_param(_name) \ 89 static ssize_t \ 90 beiscsi_##_name##_store(struct device *dev,\ 91 struct device_attribute *attr, const char *buf,\ 92 size_t count) \ 93 { \ 94 struct Scsi_Host *shost = class_to_shost(dev);\ 95 struct beiscsi_hba *phba = iscsi_host_priv(shost);\ 96 uint32_t param_val = 0;\ 97 if (!isdigit(buf[0]))\ 98 return -EINVAL;\ 99 if (sscanf(buf, "%i", ¶m_val) != 1)\ 100 return -EINVAL;\ 101 if (beiscsi_##_name##_change(phba, param_val) == 0) \ 102 return strlen(buf);\ 103 else \ 104 return -EINVAL;\ 105 } 106 107 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ 108 static int \ 109 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ 110 { \ 111 if (val >= _minval && val <= _maxval) {\ 112 phba->attr_##_name = val;\ 113 return 0;\ 114 } \ 115 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 116 "BA_%d beiscsi_"#_name" attribute " \ 117 "cannot be updated to 0x%x, "\ 118 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 119 phba->attr_##_name = _defval;\ 120 return -EINVAL;\ 121 } 122 123 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \ 124 static uint beiscsi_##_name = _defval;\ 125 module_param(beiscsi_##_name, uint, S_IRUGO);\ 126 MODULE_PARM_DESC(beiscsi_##_name, _descp);\ 127 beiscsi_disp_param(_name)\ 128 beiscsi_change_param(_name, _minval, _maxval, _defval)\ 129 beiscsi_store_param(_name)\ 130 beiscsi_init_param(_name, _minval, _maxval, _defval)\ 131 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\ 132 beiscsi_##_name##_disp, beiscsi_##_name##_store) 133 134 /* 135 * When new log level added update the 136 * the MAX allowed value for log_enable 137 */ 138 BEISCSI_RW_ATTR(log_enable, 0x00, 139 0xFF, 0x00, "Enable logging Bit Mask\n" 140 "\t\t\t\tInitialization Events : 0x01\n" 141 "\t\t\t\tMailbox Events : 0x02\n" 142 "\t\t\t\tMiscellaneous Events : 0x04\n" 143 "\t\t\t\tError Handling : 0x08\n" 144 "\t\t\t\tIO Path Events : 0x10\n" 145 "\t\t\t\tConfiguration Path : 0x20\n" 146 "\t\t\t\tiSCSI Protocol : 0x40\n"); 147 148 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 149 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 150 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); 151 DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); 152 DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, 153 beiscsi_active_session_disp, NULL); 154 DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, 155 beiscsi_free_session_disp, NULL); 156 struct device_attribute *beiscsi_attrs[] 
= { 157 &dev_attr_beiscsi_log_enable, 158 &dev_attr_beiscsi_drvr_ver, 159 &dev_attr_beiscsi_adapter_family, 160 &dev_attr_beiscsi_fw_ver, 161 &dev_attr_beiscsi_active_session_count, 162 &dev_attr_beiscsi_free_session_count, 163 &dev_attr_beiscsi_phys_port, 164 NULL, 165 }; 166 167 static char const *cqe_desc[] = { 168 "RESERVED_DESC", 169 "SOL_CMD_COMPLETE", 170 "SOL_CMD_KILLED_DATA_DIGEST_ERR", 171 "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL", 172 "CXN_KILLED_BURST_LEN_MISMATCH", 173 "CXN_KILLED_AHS_RCVD", 174 "CXN_KILLED_HDR_DIGEST_ERR", 175 "CXN_KILLED_UNKNOWN_HDR", 176 "CXN_KILLED_STALE_ITT_TTT_RCVD", 177 "CXN_KILLED_INVALID_ITT_TTT_RCVD", 178 "CXN_KILLED_RST_RCVD", 179 "CXN_KILLED_TIMED_OUT", 180 "CXN_KILLED_RST_SENT", 181 "CXN_KILLED_FIN_RCVD", 182 "CXN_KILLED_BAD_UNSOL_PDU_RCVD", 183 "CXN_KILLED_BAD_WRB_INDEX_ERROR", 184 "CXN_KILLED_OVER_RUN_RESIDUAL", 185 "CXN_KILLED_UNDER_RUN_RESIDUAL", 186 "CMD_KILLED_INVALID_STATSN_RCVD", 187 "CMD_KILLED_INVALID_R2T_RCVD", 188 "CMD_CXN_KILLED_LUN_INVALID", 189 "CMD_CXN_KILLED_ICD_INVALID", 190 "CMD_CXN_KILLED_ITT_INVALID", 191 "CMD_CXN_KILLED_SEQ_OUTOFORDER", 192 "CMD_CXN_KILLED_INVALID_DATASN_RCVD", 193 "CXN_INVALIDATE_NOTIFY", 194 "CXN_INVALIDATE_INDEX_NOTIFY", 195 "CMD_INVALIDATED_NOTIFY", 196 "UNSOL_HDR_NOTIFY", 197 "UNSOL_DATA_NOTIFY", 198 "UNSOL_DATA_DIGEST_ERROR_NOTIFY", 199 "DRIVERMSG_NOTIFY", 200 "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN", 201 "SOL_CMD_KILLED_DIF_ERR", 202 "CXN_KILLED_SYN_RCVD", 203 "CXN_KILLED_IMM_DATA_RCVD" 204 }; 205 206 static int beiscsi_slave_configure(struct scsi_device *sdev) 207 { 208 blk_queue_max_segment_size(sdev->request_queue, 65536); 209 return 0; 210 } 211 212 static int beiscsi_eh_abort(struct scsi_cmnd *sc) 213 { 214 struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr; 215 struct iscsi_cls_session *cls_session; 216 struct beiscsi_io_task *abrt_io_task; 217 struct beiscsi_conn *beiscsi_conn; 218 struct iscsi_session *session; 219 struct invldt_cmd_tbl inv_tbl; 220 struct beiscsi_hba *phba; 221 struct iscsi_conn *conn; 222 int rc; 223 224 cls_session = starget_to_session(scsi_target(sc->device)); 225 session = cls_session->dd_data; 226 227 /* check if we raced, task just got cleaned up under us */ 228 spin_lock_bh(&session->back_lock); 229 if (!abrt_task || !abrt_task->sc) { 230 spin_unlock_bh(&session->back_lock); 231 return SUCCESS; 232 } 233 /* get a task ref till FW processes the req for the ICD used */ 234 __iscsi_get_task(abrt_task); 235 abrt_io_task = abrt_task->dd_data; 236 conn = abrt_task->conn; 237 beiscsi_conn = conn->dd_data; 238 phba = beiscsi_conn->phba; 239 /* mark WRB invalid which have been not processed by FW yet */ 240 if (is_chip_be2_be3r(phba)) { 241 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 242 abrt_io_task->pwrb_handle->pwrb, 1); 243 } else { 244 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld, 245 abrt_io_task->pwrb_handle->pwrb, 1); 246 } 247 inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid; 248 inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index; 249 spin_unlock_bh(&session->back_lock); 250 251 rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1); 252 iscsi_put_task(abrt_task); 253 if (rc) { 254 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 255 "BM_%d : sc %p invalidation failed %d\n", 256 sc, rc); 257 return FAILED; 258 } 259 260 return iscsi_eh_abort(sc); 261 } 262 263 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) 264 { 265 struct beiscsi_invldt_cmd_tbl { 266 struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ]; 267 struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ]; 268 } 

static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct beiscsi_invldt_cmd_tbl {
		struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
		struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
	} *inv_tbl;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_session *session;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	struct iscsi_task *task;
	unsigned int i, nents;
	int rc, more = 0;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}

	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
	if (!inv_tbl) {
		spin_unlock_bh(&session->frwd_lock);
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : invldt_cmd_tbl alloc failed\n");
		return FAILED;
	}
	nents = 0;
	/* take back_lock to prevent task from getting cleaned up under us */
	spin_lock(&session->back_lock);
	for (i = 0; i < conn->session->cmds_max; i++) {
		task = conn->session->cmds[i];
		if (!task->sc)
			continue;

		if (sc->device->lun != task->sc->device->lun)
			continue;
		/*
		 * Can't fit in more cmds? Normally this won't happen because
		 * BEISCSI_CMD_PER_LUN is the same as BE_INVLDT_CMD_TBL_SZ.
		 */
		if (nents == BE_INVLDT_CMD_TBL_SZ) {
			more = 1;
			break;
		}

		/* get a task ref till FW processes the req for the ICD used */
		__iscsi_get_task(task);
		io_task = task->dd_data;
		/* mark WRB invalid if FW has not yet processed it */
		if (is_chip_be2_be3r(phba)) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
				      io_task->pwrb_handle->pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
				      io_task->pwrb_handle->pwrb, 1);
		}

		inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
		inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
		inv_tbl->task[nents] = task;
		nents++;
	}
	spin_unlock(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);

	rc = SUCCESS;
	if (!nents)
		goto end_reset;

	if (more) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : number of cmds exceeds size of invalidation table\n");
		rc = FAILED;
		goto end_reset;
	}

	if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : cid %u scmds invalidation failed\n",
			    beiscsi_conn->beiscsi_conn_cid);
		rc = FAILED;
	}

end_reset:
	for (i = 0; i < nents; i++)
		iscsi_put_task(inv_tbl->task[i]);
	kfree(inv_tbl);

	if (rc == SUCCESS)
		rc = iscsi_eh_device_reset(sc);
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
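
/*
 * The host template below wires the driver into libiscsi: command
 * submission uses the generic iscsi_queuecommand()/iscsi_target_alloc()
 * paths, while abort and LUN reset use the FW-assisted handlers defined
 * above.
 */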

static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_timed_out = iscsi_eh_cmd_timed_out,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	return phba;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
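
/*
 * BAR usage in beiscsi_map_pci_bars() above: BAR 2 carries the CSRs,
 * BAR 4 the doorbells (only the first 128 KB are mapped), and the PCI
 * config space shadow is BAR 1 on BE_GEN2 parts, BAR 0 otherwise.
 */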

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - request region failed\n");
		goto pci_dev_disable;
	}

	pci_set_master(pcidev);
	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
	if (ret) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		} else {
			ret = pci_set_consistent_dma_mask(pcidev,
							  DMA_BIT_MASK(32));
		}
	} else {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		}
	}
	return 0;

pci_region_release:
	pci_release_regions(pcidev);
pci_dev_disable:
	pci_disable_device(pcidev);

	return ret;
}

static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	mutex_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);

	return status;
}
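
/*
 * Worked example for the mailbox setup in be_ctrl_init() above: the MCC
 * mailbox is kept 16-byte aligned, so sizeof(mailbox) + 16 bytes are
 * allocated and PTR_ALIGN() rounds va/dma up to the next multiple of 16.
 * An allocation landing at ...0x1004, say, yields an aligned base of
 * ...0x1010 with the whole mailbox still inside the buffer.
 */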

/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr to device priv structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned ICD per page posting */
			if (icd_start % icd_post_per_page) {
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						   ~(align_mask));
				phba->fw_config.iscsi_icd_start[ulp_num] =
								icd_start_align;
			}

			icd_count_align = (icd_count & ~align_mask);

			/* ICD discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
						icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : Aligned ICD values\n"
				    "\t ICD Start : %d\n"
				    "\t ICD Count : %d\n"
				    "\t ICD Discarded : %d\n",
				    phba->fw_config.iscsi_icd_start[ulp_num],
				    phba->fw_config.iscsi_icd_count[ulp_num],
				    icd_count_unavailable);
			break;
		}
	}

	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				     (total_cid_count +
				      BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}

static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		 DB_EQ_RING_ID_HIGH_MASK)
		<< DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - The isr routine for MCC interrupts.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
				       resource_id) / 32] &
		      EQE_RESID_MASK) >> 16) == mcc->id) {
			mcc_events++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}

	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
	}
	return IRQ_HANDLED;
}

/**
 * be_isr_msix - The isr routine for IO-path MSI-X interrupts.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_queue_info *eq;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;

	phba = pbe_eq->phba;
	/* disable interrupt till iopoll completes */
	hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
	irq_poll_sched(&pbe_eq->iopoll);

	return IRQ_HANDLED;
}

/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events, io_events;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr, rearm;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	io_events = 0;
	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
				       resource_id) / 32] &
		      EQE_RESID_MASK) >> 16) == mcc->id)
			mcc_events++;
		else
			io_events++;
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (!io_events && !mcc_events)
		return IRQ_NONE;

	/* no need to rearm if interrupt is only for IOs */
	rearm = 0;
	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		/* rearm for MCCQ */
		rearm = 1;
	}
	if (io_events)
		irq_poll_sched(&pbe_eq->iopoll);
	hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
	return IRQ_HANDLED;
}

static void beiscsi_free_irqs(struct beiscsi_hba *phba)
{
	struct hwi_context_memory *phwi_context;
	int i;

	if (!phba->pcidev->msix_enabled) {
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
		return;
	}

	phwi_context = phba->phwi_ctrlr->phwi_ctxt;
	for (i = 0; i <= phba->num_cpus; i++) {
		free_irq(pci_irq_vector(phba->pcidev, i),
			 &phwi_context->be_eq[i]);
		kfree(phba->msi_name[i]);
	}
}

static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (pcidev->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kasprintf(GFP_KERNEL,
						      "beiscsi_%02x_%02x",
						      phba->shost->host_no, i);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			ret = request_irq(pci_irq_vector(pcidev, i),
					  be_isr_msix, 0, phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to "
					    "register msix for i = %d\n",
					    i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
					      phba->shost->host_no);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
				  phba->msi_name[i], &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		/* free the vector actually requested for EQ j */
		free_irq(pci_irq_vector(pcidev, j), &phwi_context->be_eq[j]);
		kfree(phba->msi_name[j]);
	}
	return ret;
}

void hwi_ring_cq_db(struct beiscsi_hba *phba,
		    unsigned int id, unsigned int num_processed,
		    unsigned char rearm)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;

	/* Setting lower order CQ_ID Bits */
	val |= (id & DB_CQ_RING_ID_LOW_MASK);

	/* Setting Higher order CQ_ID Bits */
	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
		 DB_CQ_RING_ID_HIGH_MASK)
		<< DB_CQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index ==
		    (phba->params.ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
	return psgl_handle;
}
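
/*
 * The IO SGL handles live in a circular pool of ios_per_ctrl slots:
 * alloc_io_sgl_handle() above takes the entry at io_sgl_alloc_index and
 * NULLs the slot, while free_io_sgl_handle() below returns a handle at
 * io_sgl_free_index; a non-NULL slot at free time therefore indicates a
 * double free.
 */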

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
			    phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
		spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}

static inline struct wrb_handle *
beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       unsigned int wrbs_per_cxn)
{
	struct wrb_handle *pwrb_handle;
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	if (!pwrb_context->wrb_handles_available) {
		spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
		return NULL;
	}
	pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
	pwrb_context->wrb_handles_available--;
	if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
		pwrb_context->alloc_index = 0;
	else
		pwrb_context->alloc_index++;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);

	if (pwrb_handle)
		memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));

	return pwrb_handle;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @pcontext: ptr to ptr to wrb context
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    struct hwi_wrb_context **pcontext)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	/* return the context address */
	*pcontext = pwrb_context;
	return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
}

static inline void
beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       struct wrb_handle *pwrb_handle,
		       unsigned int wrbs_per_cxn)
{
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;
	pwrb_handle->pio_handle = NULL;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	beiscsi_put_wrb_handle(pwrb_context,
			       pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle, "
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL, "
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}
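
/*
 * be_complete_io() below translates a solicited CQE into the SCSI
 * mid-layer result: DID_ERROR on a failed iSCSI response, residual
 * bookkeeping for under/overflow, and on CHECK CONDITION the sense data
 * is copied out of the BHS, where the first two bytes hold the
 * big-endian sense length.
 */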

static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task,
	       struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	if (io_task->scsi_cmnd) {
		scsi_dma_unmap(io_task->scsi_cmnd);
		io_task->scsi_cmnd = NULL;
	}
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task,
		   struct common_sol_cqe *csol_cqe)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task,
		struct common_sol_cqe *csol_cqe)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	uint16_t wrb_index, cid, cri_index;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_session *session;
	struct iscsi_task *task;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (is_chip_be2_be3r(phba)) {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
				    cid, psol);
	} else {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
				    cid, psol);
	}

	cri_index = BE_GET_CRI_FROM_CID(cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
	session = beiscsi_conn->conn->session;
	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (task)
		__iscsi_put_task(task);
	spin_unlock_bh(&session->back_lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task,
		       struct common_sol_cqe *csol_cqe)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = csol_cqe->i_flags;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
				struct sol_cqe *psol,
				struct common_sol_cqe *csol_cqe)
{
	if (is_chip_be2_be3r(phba)) {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_res_cnt, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_cmd_wnd, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						 hw_sts, psol);
		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
						 i_resp, psol);
		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						i_sts, psol);
		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_flags, psol);
	} else {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_res_cnt, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						 hw_sts, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_cmd_wnd, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  cmd_cmpl, psol))
			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							i_sts, psol);
		else
			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							 i_sts, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  u, psol))
			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;

		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  o, psol))
			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
	}
}

static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct common_sol_cqe csol_cqe = {0};
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_task *task;
	uint16_t cri_index = 0;
	uint8_t type;

	phwi_ctrlr = phba->phwi_ctrlr;

	/* Copy the elements to a common structure */
	adapter_get_sol_cqe(phba, psol, &csol_cqe);

	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	pwrb_handle = pwrb_context->pwrb_handle_basestd[csol_cqe.wrb_index];

	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (!task) {
		spin_unlock_bh(&session->back_lock);
		return;
	}
	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;

	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_io(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " hwi_complete_cmd- Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", type,
			    csol_cqe.wrb_index,
			    csol_cqe.cid);
		break;
	}

	spin_unlock_bh(&session->back_lock);
}

/*
 * ASYNC PDUs include
 * a. Unsolicited NOP-In (target initiated NOP-In)
 * b. ASYNC Messages
 * c. Reject PDU
 * d. Login response
 * These headers arrive unprocessed by the EP firmware.
 * iSCSI layer processes them.
 */
static unsigned int
beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct pdu_base *phdr, void *pdata, unsigned int dlen)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;
	struct iscsi_task *task;
	u8 code;

	code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
	switch (code) {
	case ISCSI_OP_NOOP_IN:
		pdata = NULL;
		dlen = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pdata);
		WARN_ON(!(dlen == 48));
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)phdr;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : unrecognized async PDU opcode 0x%x\n",
			    code);
		return 1;
	}
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
	return 0;
}

static inline void
beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	pasync_handle->is_final = 0;
	pasync_handle->buffer_len = 0;
	pasync_handle->in_use = 0;
	list_del_init(&pasync_handle->link);
}

static void
beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
			  struct hd_async_context *pasync_ctx,
			  u16 cri)
{
	struct hd_async_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link)
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
}

static struct hd_async_handle *
beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct i_t_dpdu_cqe *pdpdu_cqe,
		       u8 *header)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle;
	struct be_bus_address phys_addr;
	u16 cid, code, ci, cri;
	u8 final, error = 0;
	u32 dpl;

	cid = beiscsi_conn->beiscsi_conn_cid;
	cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
	/*
	 * This function is invoked to get the right async_handle structure
	 * from a given DEF PDU CQ entry.
	 *
	 * - index in CQ entry gives the vertical index
	 * - address in CQ entry is the offset where the DMA last ended
	 * - final - no more notifications for this PDU
	 */
	if (is_chip_be2_be3r(phba)) {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      final, pdpdu_cqe);
	} else {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      final, pdpdu_cqe);
	}

	/*
	 * DB addr Hi/Lo is same for BE and SKH.
	 * Subtract the dataplacementlength to get to the base.
	 */
	phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_lo, pdpdu_cqe);
	phys_addr.u.a32.address_lo -= dpl;
	phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_hi, pdpdu_cqe);

	code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
	switch (code) {
	case UNSOL_HDR_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].header;
		*header = 1;
		break;
	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
		error = 1;
		/* fall through */
	case UNSOL_DATA_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].data;
		break;
	/* called only for above codes */
	default:
		return NULL;
	}

	if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
	    pasync_handle->index != ci) {
		/* driver bug - if ci does not match async handle index */
		error = 1;
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
			    cid, pasync_handle->is_header ? 'H' : 'D',
			    pasync_handle->pa.u.a64.address,
			    pasync_handle->index,
			    phys_addr.u.a64.address, ci);
		/* FW has stale address - attempt continuing by dropping */
	}

	/*
	 * DEF PDU header and data buffers with errors should be simply
	 * dropped as there are no consumers for it.
	 */
	if (error) {
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
		return NULL;
	}

	if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n",
			    cid, code, ci, phys_addr.u.a64.address);
		beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	}

	list_del_init(&pasync_handle->link);
	/*
	 * Each CID is associated with unique CRI.
	 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totally different.
	 */
	pasync_handle->cri = cri;
	pasync_handle->is_final = final;
	pasync_handle->buffer_len = dpl;
	pasync_handle->in_use = 1;

	return pasync_handle;
}
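
/*
 * beiscsi_hdl_fwd_pdu() assembles one PDU from the per-CRI wait queue:
 * the first queued handle carries the header, the first data handle's
 * buffer is reused to gather the remaining fragments, and the result is
 * pushed up to libiscsi through beiscsi_complete_pdu().
 */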

static unsigned int
beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
		    struct hd_async_context *pasync_ctx,
		    u16 cri)
{
	struct iscsi_session *session = beiscsi_conn->conn->session;
	struct hd_async_handle *pasync_handle, *plast_handle;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	void *phdr = NULL, *pdata = NULL;
	u32 dlen = 0, status = 0;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	plast_handle = NULL;
	list_for_each_entry(pasync_handle, plist, link) {
		plast_handle = pasync_handle;
		/* get the header, the first entry */
		if (!phdr) {
			phdr = pasync_handle->pbuffer;
			continue;
		}
		/* use first buffer to collect all the data */
		if (!pdata) {
			pdata = pasync_handle->pbuffer;
			dlen = pasync_handle->buffer_len;
			continue;
		}
		if (!pasync_handle->buffer_len ||
		    (dlen + pasync_handle->buffer_len) >
		    pasync_ctx->async_data.buffer_size)
			break;
		memcpy(pdata + dlen, pasync_handle->pbuffer,
		       pasync_handle->buffer_len);
		dlen += pasync_handle->buffer_len;
	}

	if (!plast_handle->is_final) {
		/* last handle should have final PDU notification from FW */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n",
			    beiscsi_conn->beiscsi_conn_cid, plast_handle,
			    AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr),
			    pasync_ctx->async_entry[cri].wq.hdr_len,
			    pasync_ctx->async_entry[cri].wq.bytes_needed,
			    pasync_ctx->async_entry[cri].wq.bytes_received);
	}
	spin_lock_bh(&session->back_lock);
	status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
	spin_unlock_bh(&session->back_lock);
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	return status;
}
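
/*
 * beiscsi_hdl_gather_pdu() tracks reassembly per CRI: a header fragment
 * records bytes_needed from the PDU's data segment length, data
 * fragments accumulate bytes_received, and the PDU is forwarded once the
 * two match; anything inconsistent drops the whole wait queue.
 */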

static unsigned int
beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	unsigned int bytes_needed = 0, status = 0;
	u16 cri = pasync_handle->cri;
	struct cri_wait_queue *wq;
	struct beiscsi_hba *phba;
	struct pdu_base *ppdu;
	char *err = "";

	phba = beiscsi_conn->phba;
	wq = &pasync_ctx->async_entry[cri].wq;
	if (pasync_handle->is_header) {
		/* check if PDU hdr is rcv'd when old hdr not completed */
		if (wq->hdr_len) {
			err = "incomplete";
			goto drop_pdu;
		}
		ppdu = pasync_handle->pbuffer;
		bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
					     data_len_hi, ppdu);
		bytes_needed <<= 16;
		bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
							  data_len_lo, ppdu));
		wq->hdr_len = pasync_handle->buffer_len;
		wq->bytes_received = 0;
		wq->bytes_needed = bytes_needed;
		list_add_tail(&pasync_handle->link, &wq->list);
		if (!bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	} else {
		/* check if data received has header and is needed */
		if (!wq->hdr_len || !wq->bytes_needed) {
			err = "header less";
			goto drop_pdu;
		}
		wq->bytes_received += pasync_handle->buffer_len;
		/* Something got overwritten? Better catch it here. */
		if (wq->bytes_received > wq->bytes_needed) {
			err = "overflow";
			goto drop_pdu;
		}
		list_add_tail(&pasync_handle->link, &wq->list);
		if (wq->bytes_received == wq->bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	}
	return status;

drop_pdu:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
		    "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
		    beiscsi_conn->beiscsi_conn_cid, err,
		    pasync_handle->is_header ? 'H' : 'D',
		    wq->hdr_len, wq->bytes_needed,
		    pasync_handle->buffer_len);
	/* discard this handle */
	beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
	/* free all the other handles in cri_wait_queue */
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	/* try continuing */
	return status;
}
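
/*
 * beiscsi_hdq_post_handles() below replenishes a DEF PDU header or data
 * ring: it advances the producer index over nbuf handles (writing the
 * ring SGEs only on the very first, full-ring post) and then rings the
 * doorbell with the number of entries made available.
 */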

static void
beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
			 u8 header, u8 ulp_num, u16 nbuf)
{
	struct hd_async_handle *pasync_handle;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	struct phys_addr *pasync_sge;
	u32 ring_id, doorbell = 0;
	u32 doorbell_offset;
	u16 prod, pi;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	if (header) {
		pasync_sge = pasync_ctx->async_header.ring_base;
		pi = pasync_ctx->async_header.pi;
		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
		doorbell_offset =
			phwi_ctrlr->default_pdu_hdr[ulp_num].doorbell_offset;
	} else {
		pasync_sge = pasync_ctx->async_data.ring_base;
		pi = pasync_ctx->async_data.pi;
		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
		doorbell_offset =
			phwi_ctrlr->default_pdu_data[ulp_num].doorbell_offset;
	}

	for (prod = 0; prod < nbuf; prod++) {
		if (header)
			pasync_handle = pasync_ctx->async_entry[pi].header;
		else
			pasync_handle = pasync_ctx->async_entry[pi].data;
		WARN_ON(pasync_handle->is_header != header);
		WARN_ON(pasync_handle->index != pi);
		/* setup the ring only once */
		if (nbuf == pasync_ctx->num_entries) {
			/* note hi is lo */
			pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi;
		}
		if (++pi == pasync_ctx->num_entries)
			pi = 0;
	}

	if (header)
		pasync_ctx->async_header.pi = pi;
	else
		pasync_ctx->async_data.pi = pi;

	doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
	doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
	doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
	doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
	iowrite32(doorbell, phba->db_va + doorbell_offset);
}

static void
beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
			  struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle = NULL;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	u8 ulp_num, consumed, header = 0;
	u16 cid_cri;

	phwi_ctrlr = phba->phwi_ctrlr;
	cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
	ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
					       pdpdu_cqe, &header);
	if (is_chip_be2_be3r(phba))
		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
					 num_cons, pdpdu_cqe);
	else
		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
					 num_cons, pdpdu_cqe);
	if (pasync_handle)
		beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
	/* num_cons indicates number of 8 RQEs consumed */
	if (consumed)
		beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed);
}

void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return;

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
				       num_processed, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			beiscsi_process_async_event(phba, mcc_compl);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
		}

		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
}
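
/*
 * CQ doorbell batching, as used above and in beiscsi_process_cq() below:
 * every 32 consumed entries the CQ doorbell is rung with rearm=0 so the
 * FW can reuse the slots, and a final ring with rearm=1 re-enables event
 * generation for the queue.
 */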
(!beiscsi_hba_in_error(phba)) 1843 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 1844 } 1845 1846 /** 1847 * beiscsi_process_cq()- Process the Completion Queue 1848 * @pbe_eq: Event Q on which the Completion has come 1849 * @budget: Max number of events to be processed 1850 * 1851 * return 1852 * Number of Completion Entries processed. 1853 **/ 1854 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget) 1855 { 1856 struct be_queue_info *cq; 1857 struct sol_cqe *sol; 1858 struct dmsg_cqe *dmsg; 1859 unsigned int total = 0; 1860 unsigned int num_processed = 0; 1861 unsigned short code = 0, cid = 0; 1862 uint16_t cri_index = 0; 1863 struct beiscsi_conn *beiscsi_conn; 1864 struct beiscsi_endpoint *beiscsi_ep; 1865 struct iscsi_endpoint *ep; 1866 struct beiscsi_hba *phba; 1867 1868 cq = pbe_eq->cq; 1869 sol = queue_tail_node(cq); 1870 phba = pbe_eq->phba; 1871 1872 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 1873 CQE_VALID_MASK) { 1874 if (beiscsi_hba_in_error(phba)) 1875 return 0; 1876 1877 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 1878 1879 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] & 1880 CQE_CODE_MASK); 1881 1882 /* Get the CID */ 1883 if (is_chip_be2_be3r(phba)) { 1884 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); 1885 } else { 1886 if ((code == DRIVERMSG_NOTIFY) || 1887 (code == UNSOL_HDR_NOTIFY) || 1888 (code == UNSOL_DATA_NOTIFY)) 1889 cid = AMAP_GET_BITS( 1890 struct amap_i_t_dpdu_cqe_v2, 1891 cid, sol); 1892 else 1893 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1894 cid, sol); 1895 } 1896 1897 cri_index = BE_GET_CRI_FROM_CID(cid); 1898 ep = phba->ep_array[cri_index]; 1899 1900 if (ep == NULL) { 1901 /* connection has already been freed 1902 * just move on to next one 1903 */ 1904 beiscsi_log(phba, KERN_WARNING, 1905 BEISCSI_LOG_INIT, 1906 "BM_%d : proc cqe of disconn ep: cid %d\n", 1907 cid); 1908 goto proc_next_cqe; 1909 } 1910 1911 beiscsi_ep = ep->dd_data; 1912 beiscsi_conn = beiscsi_ep->conn; 1913 1914 /* replenish cq */ 1915 if (num_processed == 32) { 1916 hwi_ring_cq_db(phba, cq->id, 32, 0); 1917 num_processed = 0; 1918 } 1919 total++; 1920 1921 switch (code) { 1922 case SOL_CMD_COMPLETE: 1923 hwi_complete_cmd(beiscsi_conn, phba, sol); 1924 break; 1925 case DRIVERMSG_NOTIFY: 1926 beiscsi_log(phba, KERN_INFO, 1927 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1928 "BM_%d : Received %s[%d] on CID : %d\n", 1929 cqe_desc[code], code, cid); 1930 1931 dmsg = (struct dmsg_cqe *)sol; 1932 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 1933 break; 1934 case UNSOL_HDR_NOTIFY: 1935 beiscsi_log(phba, KERN_INFO, 1936 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1937 "BM_%d : Received %s[%d] on CID : %d\n", 1938 cqe_desc[code], code, cid); 1939 1940 spin_lock_bh(&phba->async_pdu_lock); 1941 beiscsi_hdq_process_compl(beiscsi_conn, 1942 (struct i_t_dpdu_cqe *)sol); 1943 spin_unlock_bh(&phba->async_pdu_lock); 1944 break; 1945 case UNSOL_DATA_NOTIFY: 1946 beiscsi_log(phba, KERN_INFO, 1947 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1948 "BM_%d : Received %s[%d] on CID : %d\n", 1949 cqe_desc[code], code, cid); 1950 1951 spin_lock_bh(&phba->async_pdu_lock); 1952 beiscsi_hdq_process_compl(beiscsi_conn, 1953 (struct i_t_dpdu_cqe *)sol); 1954 spin_unlock_bh(&phba->async_pdu_lock); 1955 break; 1956 case CXN_INVALIDATE_INDEX_NOTIFY: 1957 case CMD_INVALIDATED_NOTIFY: 1958 case CXN_INVALIDATE_NOTIFY: 1959 beiscsi_log(phba, KERN_ERR, 1960 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1961 "BM_%d : Ignoring %s[%d] on CID : %d\n", 1962 cqe_desc[code], code, cid); 1963 break; 1964
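/*
 * The remaining cases are error notifications. The digest and
 * command-level kill codes below (CMD_KILLED_*, CMD_CXN_KILLED_*)
 * are only logged; the failed task completes with an error status
 * and the iSCSI midlayer decides what to do, so the connection
 * itself survives. Only the connection-level CXN_KILLED_* group
 * further down escalates to iscsi_conn_failure(conn,
 * ISCSI_ERR_CONN_FAILED), which tears the connection down and lets
 * the initiator re-login. Rough severity ladder implied by this
 * switch (a reading of the code, not an exhaustive firmware
 * contract):
 *
 *	digest/command errors  -> log only, task fails
 *	connection-kill events -> log + iscsi_conn_failure()
 */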
case CXN_KILLED_HDR_DIGEST_ERR: 1965 case SOL_CMD_KILLED_DATA_DIGEST_ERR: 1966 beiscsi_log(phba, KERN_ERR, 1967 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1968 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 1969 cqe_desc[code], code, cid); 1970 break; 1971 case CMD_KILLED_INVALID_STATSN_RCVD: 1972 case CMD_KILLED_INVALID_R2T_RCVD: 1973 case CMD_CXN_KILLED_LUN_INVALID: 1974 case CMD_CXN_KILLED_ICD_INVALID: 1975 case CMD_CXN_KILLED_ITT_INVALID: 1976 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 1977 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 1978 beiscsi_log(phba, KERN_ERR, 1979 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1980 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 1981 cqe_desc[code], code, cid); 1982 break; 1983 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 1984 beiscsi_log(phba, KERN_ERR, 1985 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1986 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 1987 cqe_desc[code], code, cid); 1988 spin_lock_bh(&phba->async_pdu_lock); 1989 /* driver consumes the entry and drops the contents */ 1990 beiscsi_hdq_process_compl(beiscsi_conn, 1991 (struct i_t_dpdu_cqe *)sol); 1992 spin_unlock_bh(&phba->async_pdu_lock); 1993 break; 1994 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 1995 case CXN_KILLED_BURST_LEN_MISMATCH: 1996 case CXN_KILLED_AHS_RCVD: 1997 case CXN_KILLED_UNKNOWN_HDR: 1998 case CXN_KILLED_STALE_ITT_TTT_RCVD: 1999 case CXN_KILLED_INVALID_ITT_TTT_RCVD: 2000 case CXN_KILLED_TIMED_OUT: 2001 case CXN_KILLED_FIN_RCVD: 2002 case CXN_KILLED_RST_SENT: 2003 case CXN_KILLED_RST_RCVD: 2004 case CXN_KILLED_BAD_UNSOL_PDU_RCVD: 2005 case CXN_KILLED_BAD_WRB_INDEX_ERROR: 2006 case CXN_KILLED_OVER_RUN_RESIDUAL: 2007 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2008 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2009 beiscsi_log(phba, KERN_ERR, 2010 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2011 "BM_%d : Event %s[%d] received on CID : %d\n", 2012 cqe_desc[code], code, cid); 2013 if (beiscsi_conn) 2014 iscsi_conn_failure(beiscsi_conn->conn, 2015 ISCSI_ERR_CONN_FAILED); 2016 break; 2017 default: 2018 beiscsi_log(phba, KERN_ERR, 2019 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2020 "BM_%d : Invalid CQE Event Received Code : %d" 2021 "CID 0x%x...\n", 2022 code, cid); 2023 break; 2024 } 2025 2026 proc_next_cqe: 2027 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2028 queue_tail_inc(cq); 2029 sol = queue_tail_node(cq); 2030 num_processed++; 2031 if (total == budget) 2032 break; 2033 } 2034 2035 hwi_ring_cq_db(phba, cq->id, num_processed, 1); 2036 return total; 2037 } 2038 2039 static int be_iopoll(struct irq_poll *iop, int budget) 2040 { 2041 unsigned int ret, io_events; 2042 struct beiscsi_hba *phba; 2043 struct be_eq_obj *pbe_eq; 2044 struct be_eq_entry *eqe = NULL; 2045 struct be_queue_info *eq; 2046 2047 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2048 phba = pbe_eq->phba; 2049 if (beiscsi_hba_in_error(phba)) { 2050 irq_poll_complete(iop); 2051 return 0; 2052 } 2053 2054 io_events = 0; 2055 eq = &pbe_eq->q; 2056 eqe = queue_tail_node(eq); 2057 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & 2058 EQE_VALID_MASK) { 2059 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 2060 queue_tail_inc(eq); 2061 eqe = queue_tail_node(eq); 2062 io_events++; 2063 } 2064 hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1); 2065 2066 ret = beiscsi_process_cq(pbe_eq, budget); 2067 pbe_eq->cq_count += ret; 2068 if (ret < budget) { 2069 irq_poll_complete(iop); 2070 beiscsi_log(phba, KERN_INFO, 2071 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2072 "BM_%d : rearm pbe_eq->q.id =%d ret %d\n", 2073 pbe_eq->q.id, ret); 2074 if 
(!beiscsi_hba_in_error(phba)) 2075 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2076 } 2077 return ret; 2078 } 2079 2080 static void 2081 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2082 unsigned int num_sg, struct beiscsi_io_task *io_task) 2083 { 2084 struct iscsi_sge *psgl; 2085 unsigned int sg_len, index; 2086 unsigned int sge_len = 0; 2087 unsigned long long addr; 2088 struct scatterlist *l_sg; 2089 unsigned int offset; 2090 2091 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, 2092 io_task->bhs_pa.u.a32.address_lo); 2093 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, 2094 io_task->bhs_pa.u.a32.address_hi); 2095 2096 l_sg = sg; 2097 for (index = 0; (index < num_sg) && (index < 2); index++, 2098 sg = sg_next(sg)) { 2099 if (index == 0) { 2100 sg_len = sg_dma_len(sg); 2101 addr = (u64) sg_dma_address(sg); 2102 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2103 sge0_addr_lo, pwrb, 2104 lower_32_bits(addr)); 2105 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2106 sge0_addr_hi, pwrb, 2107 upper_32_bits(addr)); 2108 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2109 sge0_len, pwrb, 2110 sg_len); 2111 sge_len = sg_len; 2112 } else { 2113 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, 2114 pwrb, sge_len); 2115 sg_len = sg_dma_len(sg); 2116 addr = (u64) sg_dma_address(sg); 2117 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2118 sge1_addr_lo, pwrb, 2119 lower_32_bits(addr)); 2120 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2121 sge1_addr_hi, pwrb, 2122 upper_32_bits(addr)); 2123 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2124 sge1_len, pwrb, 2125 sg_len); 2126 } 2127 } 2128 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2129 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2130 2131 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2132 2133 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2134 io_task->bhs_pa.u.a32.address_hi); 2135 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2136 io_task->bhs_pa.u.a32.address_lo); 2137 2138 if (num_sg == 1) { 2139 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2140 1); 2141 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2142 0); 2143 } else if (num_sg == 2) { 2144 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2145 0); 2146 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2147 1); 2148 } else { 2149 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2150 0); 2151 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2152 0); 2153 } 2154 2155 sg = l_sg; 2156 psgl++; 2157 psgl++; 2158 offset = 0; 2159 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2160 sg_len = sg_dma_len(sg); 2161 addr = (u64) sg_dma_address(sg); 2162 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2163 lower_32_bits(addr)); 2164 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2165 upper_32_bits(addr)); 2166 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2167 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2168 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2169 offset += sg_len; 2170 } 2171 psgl--; 2172 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2173 } 2174 2175 static void 2176 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2177 unsigned int num_sg, struct beiscsi_io_task *io_task) 2178 { 2179 struct iscsi_sge *psgl; 2180 unsigned int sg_len, index; 2181 unsigned int sge_len = 0; 2182 unsigned long long addr; 2183 struct scatterlist *l_sg; 2184 unsigned int offset; 2185 2186 
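/*
 * Legacy (BE2/BE3-R) counterpart of hwi_write_sgl_v2() above: the
 * first two SGEs are inlined into the WRB and the full scatterlist
 * is then replayed into the ICD SGL pointed to by psgl. The only
 * real difference is that the 64-bit DMA address is split with an
 * explicit mask and shift rather than with lower_32_bits() and
 * upper_32_bits(); the result is identical. For example, with a
 * hypothetical mapping addr = 0x0000001234567890ULL:
 *
 *	(u32)(addr & 0xFFFFFFFF)  ->  0x34567890  (sge0_addr_lo)
 *	(u32)(addr >> 32)         ->  0x00000012  (sge0_addr_hi)
 */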
AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2187 io_task->bhs_pa.u.a32.address_lo); 2188 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2189 io_task->bhs_pa.u.a32.address_hi); 2190 2191 l_sg = sg; 2192 for (index = 0; (index < num_sg) && (index < 2); index++, 2193 sg = sg_next(sg)) { 2194 if (index == 0) { 2195 sg_len = sg_dma_len(sg); 2196 addr = (u64) sg_dma_address(sg); 2197 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2198 ((u32)(addr & 0xFFFFFFFF))); 2199 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2200 ((u32)(addr >> 32))); 2201 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2202 sg_len); 2203 sge_len = sg_len; 2204 } else { 2205 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2206 pwrb, sge_len); 2207 sg_len = sg_dma_len(sg); 2208 addr = (u64) sg_dma_address(sg); 2209 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2210 ((u32)(addr & 0xFFFFFFFF))); 2211 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2212 ((u32)(addr >> 32))); 2213 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2214 sg_len); 2215 } 2216 } 2217 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2218 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2219 2220 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2221 2222 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2223 io_task->bhs_pa.u.a32.address_hi); 2224 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2225 io_task->bhs_pa.u.a32.address_lo); 2226 2227 if (num_sg == 1) { 2228 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2229 1); 2230 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2231 0); 2232 } else if (num_sg == 2) { 2233 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2234 0); 2235 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2236 1); 2237 } else { 2238 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2239 0); 2240 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2241 0); 2242 } 2243 sg = l_sg; 2244 psgl++; 2245 psgl++; 2246 offset = 0; 2247 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2248 sg_len = sg_dma_len(sg); 2249 addr = (u64) sg_dma_address(sg); 2250 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2251 (addr & 0xFFFFFFFF)); 2252 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2253 (addr >> 32)); 2254 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2255 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2256 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2257 offset += sg_len; 2258 } 2259 psgl--; 2260 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2261 } 2262 2263 /** 2264 * hwi_write_buffer()- Populate the WRB with task info 2265 * @pwrb: ptr to the WRB entry 2266 * @task: iscsi task which is to be executed 2267 **/ 2268 static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2269 { 2270 struct iscsi_sge *psgl; 2271 struct beiscsi_io_task *io_task = task->dd_data; 2272 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2273 struct beiscsi_hba *phba = beiscsi_conn->phba; 2274 uint8_t dsp_value = 0; 2275 2276 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2277 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2278 io_task->bhs_pa.u.a32.address_lo); 2279 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2280 io_task->bhs_pa.u.a32.address_hi); 2281 2282 if (task->data) { 2283 2284 /* Check for the data_count */ 2285 dsp_value = (task->data_count) ? 
1 : 0; 2286 2287 if (is_chip_be2_be3r(phba)) 2288 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2289 pwrb, dsp_value); 2290 else 2291 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2292 pwrb, dsp_value); 2293 2294 /* Map addr only if there is data_count */ 2295 if (dsp_value) { 2296 io_task->mtask_addr = pci_map_single(phba->pcidev, 2297 task->data, 2298 task->data_count, 2299 PCI_DMA_TODEVICE); 2300 if (pci_dma_mapping_error(phba->pcidev, 2301 io_task->mtask_addr)) 2302 return -ENOMEM; 2303 io_task->mtask_data_count = task->data_count; 2304 } else 2305 io_task->mtask_addr = 0; 2306 2307 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2308 lower_32_bits(io_task->mtask_addr)); 2309 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2310 upper_32_bits(io_task->mtask_addr)); 2311 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2312 task->data_count); 2313 2314 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2315 } else { 2316 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2317 io_task->mtask_addr = 0; 2318 } 2319 2320 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2321 2322 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); 2323 2324 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2325 io_task->bhs_pa.u.a32.address_hi); 2326 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2327 io_task->bhs_pa.u.a32.address_lo); 2328 if (task->data) { 2329 psgl++; 2330 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2331 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2332 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2333 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2334 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2335 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2336 2337 psgl++; 2338 if (task->data) { 2339 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2340 lower_32_bits(io_task->mtask_addr)); 2341 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2342 upper_32_bits(io_task->mtask_addr)); 2343 } 2344 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2345 } 2346 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2347 return 0; 2348 } 2349 2350 /** 2351 * beiscsi_find_mem_req()- Find mem needed 2352 * @phba: ptr to HBA struct 2353 **/ 2354 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2355 { 2356 uint8_t mem_descr_index, ulp_num; 2357 unsigned int num_async_pdu_buf_pages; 2358 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2359 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2360 2361 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2362 2363 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2364 BE_ISCSI_PDU_HEADER_SIZE; 2365 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2366 sizeof(struct hwi_context_memory); 2367 2368 2369 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2370 * (phba->params.wrbs_per_cxn) 2371 * phba->params.cxns_per_ctrl; 2372 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2373 (phba->params.wrbs_per_cxn); 2374 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2375 phba->params.cxns_per_ctrl); 2376 2377 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2378 phba->params.icds_per_ctrl; 2379 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2380 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2381 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2382 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2383 2384 num_async_pdu_buf_sgl_pages = 2385 
PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2386 phba, ulp_num) * 2387 sizeof(struct phys_addr)); 2388 2389 num_async_pdu_buf_pages = 2390 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2391 phba, ulp_num) * 2392 phba->params.defpdu_hdr_sz); 2393 2394 num_async_pdu_data_pages = 2395 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2396 phba, ulp_num) * 2397 phba->params.defpdu_data_sz); 2398 2399 num_async_pdu_data_sgl_pages = 2400 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2401 phba, ulp_num) * 2402 sizeof(struct phys_addr)); 2403 2404 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + 2405 (ulp_num * MEM_DESCR_OFFSET)); 2406 phba->mem_req[mem_descr_index] = 2407 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2408 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; 2409 2410 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2411 (ulp_num * MEM_DESCR_OFFSET)); 2412 phba->mem_req[mem_descr_index] = 2413 num_async_pdu_buf_pages * 2414 PAGE_SIZE; 2415 2416 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2417 (ulp_num * MEM_DESCR_OFFSET)); 2418 phba->mem_req[mem_descr_index] = 2419 num_async_pdu_data_pages * 2420 PAGE_SIZE; 2421 2422 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2423 (ulp_num * MEM_DESCR_OFFSET)); 2424 phba->mem_req[mem_descr_index] = 2425 num_async_pdu_buf_sgl_pages * 2426 PAGE_SIZE; 2427 2428 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + 2429 (ulp_num * MEM_DESCR_OFFSET)); 2430 phba->mem_req[mem_descr_index] = 2431 num_async_pdu_data_sgl_pages * 2432 PAGE_SIZE; 2433 2434 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2435 (ulp_num * MEM_DESCR_OFFSET)); 2436 phba->mem_req[mem_descr_index] = 2437 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2438 sizeof(struct hd_async_handle); 2439 2440 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2441 (ulp_num * MEM_DESCR_OFFSET)); 2442 phba->mem_req[mem_descr_index] = 2443 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2444 sizeof(struct hd_async_handle); 2445 2446 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2447 (ulp_num * MEM_DESCR_OFFSET)); 2448 phba->mem_req[mem_descr_index] = 2449 sizeof(struct hd_async_context) + 2450 (BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2451 sizeof(struct hd_async_entry)); 2452 } 2453 } 2454 } 2455 2456 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2457 { 2458 dma_addr_t bus_add; 2459 struct hwi_controller *phwi_ctrlr; 2460 struct be_mem_descriptor *mem_descr; 2461 struct mem_array *mem_arr, *mem_arr_orig; 2462 unsigned int i, j, alloc_size, curr_alloc_size; 2463 2464 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2465 if (!phba->phwi_ctrlr) 2466 return -ENOMEM; 2467 2468 /* Allocate memory for wrb_context */ 2469 phwi_ctrlr = phba->phwi_ctrlr; 2470 phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl, 2471 sizeof(struct hwi_wrb_context), 2472 GFP_KERNEL); 2473 if (!phwi_ctrlr->wrb_context) { 2474 kfree(phba->phwi_ctrlr); 2475 return -ENOMEM; 2476 } 2477 2478 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2479 GFP_KERNEL); 2480 if (!phba->init_mem) { 2481 kfree(phwi_ctrlr->wrb_context); 2482 kfree(phba->phwi_ctrlr); 2483 return -ENOMEM; 2484 } 2485 2486 mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT, 2487 sizeof(*mem_arr_orig), 2488 GFP_KERNEL); 2489 if (!mem_arr_orig) { 2490 kfree(phba->init_mem); 2491 kfree(phwi_ctrlr->wrb_context); 2492 kfree(phba->phwi_ctrlr); 2493 return -ENOMEM; 2494 } 2495 2496 mem_descr = phba->init_mem; 2497 for (i = 0; i < SE_MEM_MAX; i++) { 2498 if (!phba->mem_req[i]) { 2499 mem_descr->mem_array = NULL; 2500 mem_descr++; 2501 continue; 2502 } 2503 2504 j = 0; 2505 mem_arr 
= mem_arr_orig; 2506 alloc_size = phba->mem_req[i]; 2507 memset(mem_arr, 0, sizeof(struct mem_array) * 2508 BEISCSI_MAX_FRAGS_INIT); 2509 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2510 do { 2511 mem_arr->virtual_address = pci_alloc_consistent( 2512 phba->pcidev, 2513 curr_alloc_size, 2514 &bus_add); 2515 if (!mem_arr->virtual_address) { 2516 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2517 goto free_mem; 2518 if (curr_alloc_size - 2519 rounddown_pow_of_two(curr_alloc_size)) 2520 curr_alloc_size = rounddown_pow_of_two 2521 (curr_alloc_size); 2522 else 2523 curr_alloc_size = curr_alloc_size / 2; 2524 } else { 2525 mem_arr->bus_address.u. 2526 a64.address = (__u64) bus_add; 2527 mem_arr->size = curr_alloc_size; 2528 alloc_size -= curr_alloc_size; 2529 curr_alloc_size = min(be_max_phys_size * 2530 1024, alloc_size); 2531 j++; 2532 mem_arr++; 2533 } 2534 } while (alloc_size); 2535 mem_descr->num_elements = j; 2536 mem_descr->size_in_bytes = phba->mem_req[i]; 2537 mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr), 2538 GFP_KERNEL); 2539 if (!mem_descr->mem_array) 2540 goto free_mem; 2541 2542 memcpy(mem_descr->mem_array, mem_arr_orig, 2543 sizeof(struct mem_array) * j); 2544 mem_descr++; 2545 } 2546 kfree(mem_arr_orig); 2547 return 0; 2548 free_mem: 2549 mem_descr->num_elements = j; 2550 while ((i) || (j)) { 2551 for (j = mem_descr->num_elements; j > 0; j--) { 2552 pci_free_consistent(phba->pcidev, 2553 mem_descr->mem_array[j - 1].size, 2554 mem_descr->mem_array[j - 1]. 2555 virtual_address, 2556 (unsigned long)mem_descr-> 2557 mem_array[j - 1]. 2558 bus_address.u.a64.address); 2559 } 2560 if (i) { 2561 i--; 2562 kfree(mem_descr->mem_array); 2563 mem_descr--; 2564 } 2565 } 2566 kfree(mem_arr_orig); 2567 kfree(phba->init_mem); 2568 kfree(phba->phwi_ctrlr->wrb_context); 2569 kfree(phba->phwi_ctrlr); 2570 return -ENOMEM; 2571 } 2572 2573 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2574 { 2575 beiscsi_find_mem_req(phba); 2576 return beiscsi_alloc_mem(phba); 2577 } 2578 2579 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2580 { 2581 struct pdu_data_out *pdata_out; 2582 struct pdu_nop_out *pnop_out; 2583 struct be_mem_descriptor *mem_descr; 2584 2585 mem_descr = phba->init_mem; 2586 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2587 pdata_out = 2588 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2589 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2590 2591 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2592 IIOC_SCSI_DATA); 2593 2594 pnop_out = 2595 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 
2596 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2597 2598 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2599 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2600 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2601 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2602 } 2603 2604 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2605 { 2606 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2607 struct hwi_context_memory *phwi_ctxt; 2608 struct wrb_handle *pwrb_handle = NULL; 2609 struct hwi_controller *phwi_ctrlr; 2610 struct hwi_wrb_context *pwrb_context; 2611 struct iscsi_wrb *pwrb = NULL; 2612 unsigned int num_cxn_wrbh = 0; 2613 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2614 2615 mem_descr_wrbh = phba->init_mem; 2616 mem_descr_wrbh += HWI_MEM_WRBH; 2617 2618 mem_descr_wrb = phba->init_mem; 2619 mem_descr_wrb += HWI_MEM_WRB; 2620 phwi_ctrlr = phba->phwi_ctrlr; 2621 2622 /* Allocate memory for WRBQ */ 2623 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2624 phwi_ctxt->be_wrbq = kcalloc(phba->params.cxns_per_ctrl, 2625 sizeof(struct be_queue_info), 2626 GFP_KERNEL); 2627 if (!phwi_ctxt->be_wrbq) { 2628 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2629 "BM_%d : WRBQ Mem Alloc Failed\n"); 2630 return -ENOMEM; 2631 } 2632 2633 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2634 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2635 pwrb_context->pwrb_handle_base = 2636 kcalloc(phba->params.wrbs_per_cxn, 2637 sizeof(struct wrb_handle *), 2638 GFP_KERNEL); 2639 if (!pwrb_context->pwrb_handle_base) { 2640 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2641 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2642 goto init_wrb_hndl_failed; 2643 } 2644 pwrb_context->pwrb_handle_basestd = 2645 kcalloc(phba->params.wrbs_per_cxn, 2646 sizeof(struct wrb_handle *), 2647 GFP_KERNEL); 2648 if (!pwrb_context->pwrb_handle_basestd) { 2649 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2650 "BM_%d : Mem Alloc Failed. 
Failing to load\n"); 2651 goto init_wrb_hndl_failed; 2652 } 2653 if (!num_cxn_wrbh) { 2654 pwrb_handle = 2655 mem_descr_wrbh->mem_array[idx].virtual_address; 2656 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2657 ((sizeof(struct wrb_handle)) * 2658 phba->params.wrbs_per_cxn)); 2659 idx++; 2660 } 2661 pwrb_context->alloc_index = 0; 2662 pwrb_context->wrb_handles_available = 0; 2663 pwrb_context->free_index = 0; 2664 2665 if (num_cxn_wrbh) { 2666 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2667 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2668 pwrb_context->pwrb_handle_basestd[j] = 2669 pwrb_handle; 2670 pwrb_context->wrb_handles_available++; 2671 pwrb_handle->wrb_index = j; 2672 pwrb_handle++; 2673 } 2674 num_cxn_wrbh--; 2675 } 2676 spin_lock_init(&pwrb_context->wrb_lock); 2677 } 2678 idx = 0; 2679 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2680 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2681 if (!num_cxn_wrb) { 2682 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2683 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2684 ((sizeof(struct iscsi_wrb) * 2685 phba->params.wrbs_per_cxn)); 2686 idx++; 2687 } 2688 2689 if (num_cxn_wrb) { 2690 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2691 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2692 pwrb_handle->pwrb = pwrb; 2693 pwrb++; 2694 } 2695 num_cxn_wrb--; 2696 } 2697 } 2698 return 0; 2699 init_wrb_hndl_failed: 2700 for (j = index; j > 0; j--) { 2701 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2702 kfree(pwrb_context->pwrb_handle_base); 2703 kfree(pwrb_context->pwrb_handle_basestd); 2704 } 2705 return -ENOMEM; 2706 } 2707 2708 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2709 { 2710 uint8_t ulp_num; 2711 struct hwi_controller *phwi_ctrlr; 2712 struct hba_parameters *p = &phba->params; 2713 struct hd_async_context *pasync_ctx; 2714 struct hd_async_handle *pasync_header_h, *pasync_data_h; 2715 unsigned int index, idx, num_per_mem, num_async_data; 2716 struct be_mem_descriptor *mem_descr; 2717 2718 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2719 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2720 /* get async_ctx for each ULP */ 2721 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2722 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2723 (ulp_num * MEM_DESCR_OFFSET)); 2724 2725 phwi_ctrlr = phba->phwi_ctrlr; 2726 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2727 (struct hd_async_context *) 2728 mem_descr->mem_array[0].virtual_address; 2729 2730 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2731 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2732 2733 pasync_ctx->async_entry = 2734 (struct hd_async_entry *) 2735 ((long unsigned int)pasync_ctx + 2736 sizeof(struct hd_async_context)); 2737 2738 pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba, 2739 ulp_num); 2740 /* setup header buffers */ 2741 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2742 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2743 (ulp_num * MEM_DESCR_OFFSET); 2744 if (mem_descr->mem_array[0].virtual_address) { 2745 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2746 "BM_%d : hwi_init_async_pdu_ctx" 2747 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", 2748 ulp_num, 2749 mem_descr->mem_array[0]. 
2750 virtual_address); 2751 } else 2752 beiscsi_log(phba, KERN_WARNING, 2753 BEISCSI_LOG_INIT, 2754 "BM_%d : No Virtual address for ULP : %d\n", 2755 ulp_num); 2756 2757 pasync_ctx->async_header.pi = 0; 2758 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz; 2759 pasync_ctx->async_header.va_base = 2760 mem_descr->mem_array[0].virtual_address; 2761 2762 pasync_ctx->async_header.pa_base.u.a64.address = 2763 mem_descr->mem_array[0]. 2764 bus_address.u.a64.address; 2765 2766 /* setup header buffer sgls */ 2767 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2768 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2769 (ulp_num * MEM_DESCR_OFFSET); 2770 if (mem_descr->mem_array[0].virtual_address) { 2771 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2772 "BM_%d : hwi_init_async_pdu_ctx" 2773 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", 2774 ulp_num, 2775 mem_descr->mem_array[0]. 2776 virtual_address); 2777 } else 2778 beiscsi_log(phba, KERN_WARNING, 2779 BEISCSI_LOG_INIT, 2780 "BM_%d : No Virtual address for ULP : %d\n", 2781 ulp_num); 2782 2783 pasync_ctx->async_header.ring_base = 2784 mem_descr->mem_array[0].virtual_address; 2785 2786 /* setup header buffer handles */ 2787 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2788 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2789 (ulp_num * MEM_DESCR_OFFSET); 2790 if (mem_descr->mem_array[0].virtual_address) { 2791 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2792 "BM_%d : hwi_init_async_pdu_ctx" 2793 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", 2794 ulp_num, 2795 mem_descr->mem_array[0]. 2796 virtual_address); 2797 } else 2798 beiscsi_log(phba, KERN_WARNING, 2799 BEISCSI_LOG_INIT, 2800 "BM_%d : No Virtual address for ULP : %d\n", 2801 ulp_num); 2802 2803 pasync_ctx->async_header.handle_base = 2804 mem_descr->mem_array[0].virtual_address; 2805 2806 /* setup data buffer sgls */ 2807 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2808 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 2809 (ulp_num * MEM_DESCR_OFFSET); 2810 if (mem_descr->mem_array[0].virtual_address) { 2811 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2812 "BM_%d : hwi_init_async_pdu_ctx" 2813 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", 2814 ulp_num, 2815 mem_descr->mem_array[0]. 
2816 virtual_address); 2817 } else 2818 beiscsi_log(phba, KERN_WARNING, 2819 BEISCSI_LOG_INIT, 2820 "BM_%d : No Virtual address for ULP : %d\n", 2821 ulp_num); 2822 2823 pasync_ctx->async_data.ring_base = 2824 mem_descr->mem_array[0].virtual_address; 2825 2826 /* setup data buffer handles */ 2827 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2828 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2829 (ulp_num * MEM_DESCR_OFFSET); 2830 if (!mem_descr->mem_array[0].virtual_address) 2831 beiscsi_log(phba, KERN_WARNING, 2832 BEISCSI_LOG_INIT, 2833 "BM_%d : No Virtual address for ULP : %d\n", 2834 ulp_num); 2835 2836 pasync_ctx->async_data.handle_base = 2837 mem_descr->mem_array[0].virtual_address; 2838 2839 pasync_header_h = 2840 (struct hd_async_handle *) 2841 pasync_ctx->async_header.handle_base; 2842 pasync_data_h = 2843 (struct hd_async_handle *) 2844 pasync_ctx->async_data.handle_base; 2845 2846 /* setup data buffers */ 2847 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2848 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2849 (ulp_num * MEM_DESCR_OFFSET); 2850 if (mem_descr->mem_array[0].virtual_address) { 2851 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2852 "BM_%d : hwi_init_async_pdu_ctx" 2853 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", 2854 ulp_num, 2855 mem_descr->mem_array[0]. 2856 virtual_address); 2857 } else 2858 beiscsi_log(phba, KERN_WARNING, 2859 BEISCSI_LOG_INIT, 2860 "BM_%d : No Virtual address for ULP : %d\n", 2861 ulp_num); 2862 2863 idx = 0; 2864 pasync_ctx->async_data.pi = 0; 2865 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz; 2866 pasync_ctx->async_data.va_base = 2867 mem_descr->mem_array[idx].virtual_address; 2868 pasync_ctx->async_data.pa_base.u.a64.address = 2869 mem_descr->mem_array[idx]. 2870 bus_address.u.a64.address; 2871 2872 num_async_data = ((mem_descr->mem_array[idx].size) / 2873 phba->params.defpdu_data_sz); 2874 num_per_mem = 0; 2875 2876 for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE 2877 (phba, ulp_num); index++) { 2878 pasync_header_h->cri = -1; 2879 pasync_header_h->is_header = 1; 2880 pasync_header_h->index = index; 2881 INIT_LIST_HEAD(&pasync_header_h->link); 2882 pasync_header_h->pbuffer = 2883 (void *)((unsigned long) 2884 (pasync_ctx-> 2885 async_header.va_base) + 2886 (p->defpdu_hdr_sz * index)); 2887 2888 pasync_header_h->pa.u.a64.address = 2889 pasync_ctx->async_header.pa_base.u.a64. 2890 address + (p->defpdu_hdr_sz * index); 2891 2892 pasync_ctx->async_entry[index].header = 2893 pasync_header_h; 2894 pasync_header_h++; 2895 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2896 wq.list); 2897 2898 pasync_data_h->cri = -1; 2899 pasync_data_h->is_header = 0; 2900 pasync_data_h->index = index; 2901 INIT_LIST_HEAD(&pasync_data_h->link); 2902 2903 if (!num_async_data) { 2904 num_per_mem = 0; 2905 idx++; 2906 pasync_ctx->async_data.va_base = 2907 mem_descr->mem_array[idx]. 2908 virtual_address; 2909 pasync_ctx->async_data.pa_base.u. 2910 a64.address = 2911 mem_descr->mem_array[idx]. 2912 bus_address.u.a64.address; 2913 num_async_data = 2914 ((mem_descr->mem_array[idx]. 2915 size) / 2916 phba->params.defpdu_data_sz); 2917 } 2918 pasync_data_h->pbuffer = 2919 (void *)((unsigned long) 2920 (pasync_ctx->async_data.va_base) + 2921 (p->defpdu_data_sz * num_per_mem)); 2922 2923 pasync_data_h->pa.u.a64.address = 2924 pasync_ctx->async_data.pa_base.u.a64. 
2925 address + (p->defpdu_data_sz * 2926 num_per_mem); 2927 num_per_mem++; 2928 num_async_data--; 2929 2930 pasync_ctx->async_entry[index].data = 2931 pasync_data_h; 2932 pasync_data_h++; 2933 } 2934 } 2935 } 2936 2937 return 0; 2938 } 2939 2940 static int 2941 be_sgl_create_contiguous(void *virtual_address, 2942 u64 physical_address, u32 length, 2943 struct be_dma_mem *sgl) 2944 { 2945 WARN_ON(!virtual_address); 2946 WARN_ON(!physical_address); 2947 WARN_ON(!length); 2948 WARN_ON(!sgl); 2949 2950 sgl->va = virtual_address; 2951 sgl->dma = (unsigned long)physical_address; 2952 sgl->size = length; 2953 2954 return 0; 2955 } 2956 2957 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) 2958 { 2959 memset(sgl, 0, sizeof(*sgl)); 2960 } 2961 2962 static void 2963 hwi_build_be_sgl_arr(struct beiscsi_hba *phba, 2964 struct mem_array *pmem, struct be_dma_mem *sgl) 2965 { 2966 if (sgl->va) 2967 be_sgl_destroy_contiguous(sgl); 2968 2969 be_sgl_create_contiguous(pmem->virtual_address, 2970 pmem->bus_address.u.a64.address, 2971 pmem->size, sgl); 2972 } 2973 2974 static void 2975 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, 2976 struct mem_array *pmem, struct be_dma_mem *sgl) 2977 { 2978 if (sgl->va) 2979 be_sgl_destroy_contiguous(sgl); 2980 2981 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, 2982 pmem->bus_address.u.a64.address, 2983 pmem->size, sgl); 2984 } 2985 2986 static int be_fill_queue(struct be_queue_info *q, 2987 u16 len, u16 entry_size, void *vaddress) 2988 { 2989 struct be_dma_mem *mem = &q->dma_mem; 2990 2991 memset(q, 0, sizeof(*q)); 2992 q->len = len; 2993 q->entry_size = entry_size; 2994 mem->size = len * entry_size; 2995 mem->va = vaddress; 2996 if (!mem->va) 2997 return -ENOMEM; 2998 memset(mem->va, 0, mem->size); 2999 return 0; 3000 } 3001 3002 static int beiscsi_create_eqs(struct beiscsi_hba *phba, 3003 struct hwi_context_memory *phwi_context) 3004 { 3005 int ret = -ENOMEM, eq_for_mcc; 3006 unsigned int i, num_eq_pages; 3007 struct be_queue_info *eq; 3008 struct be_dma_mem *mem; 3009 void *eq_vaddress; 3010 dma_addr_t paddr; 3011 3012 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ 3013 sizeof(struct be_eq_entry)); 3014 3015 if (phba->pcidev->msix_enabled) 3016 eq_for_mcc = 1; 3017 else 3018 eq_for_mcc = 0; 3019 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3020 eq = &phwi_context->be_eq[i].q; 3021 mem = &eq->dma_mem; 3022 phwi_context->be_eq[i].phba = phba; 3023 eq_vaddress = pci_alloc_consistent(phba->pcidev, 3024 num_eq_pages * PAGE_SIZE, 3025 &paddr); 3026 if (!eq_vaddress) { 3027 ret = -ENOMEM; 3028 goto create_eq_error; 3029 } 3030 3031 mem->va = eq_vaddress; 3032 ret = be_fill_queue(eq, phba->params.num_eq_entries, 3033 sizeof(struct be_eq_entry), eq_vaddress); 3034 if (ret) { 3035 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3036 "BM_%d : be_fill_queue Failed for EQ\n"); 3037 goto create_eq_error; 3038 } 3039 3040 mem->dma = paddr; 3041 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 3042 BEISCSI_EQ_DELAY_DEF); 3043 if (ret) { 3044 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3045 "BM_%d : beiscsi_cmd_eq_create" 3046 "Failed for EQ\n"); 3047 goto create_eq_error; 3048 } 3049 3050 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3051 "BM_%d : eqid = %d\n", 3052 phwi_context->be_eq[i].q.id); 3053 } 3054 return 0; 3055 3056 create_eq_error: 3057 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3058 eq = &phwi_context->be_eq[i].q; 3059 mem = &eq->dma_mem; 3060 if (mem->va) 3061 pci_free_consistent(phba->pcidev, 
num_eq_pages 3062 * PAGE_SIZE, 3063 mem->va, mem->dma); 3064 } 3065 return ret; 3066 } 3067 3068 static int beiscsi_create_cqs(struct beiscsi_hba *phba, 3069 struct hwi_context_memory *phwi_context) 3070 { 3071 unsigned int i, num_cq_pages; 3072 struct be_queue_info *cq, *eq; 3073 struct be_dma_mem *mem; 3074 struct be_eq_obj *pbe_eq; 3075 void *cq_vaddress; 3076 int ret = -ENOMEM; 3077 dma_addr_t paddr; 3078 3079 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 3080 sizeof(struct sol_cqe)); 3081 3082 for (i = 0; i < phba->num_cpus; i++) { 3083 cq = &phwi_context->be_cq[i]; 3084 eq = &phwi_context->be_eq[i].q; 3085 pbe_eq = &phwi_context->be_eq[i]; 3086 pbe_eq->cq = cq; 3087 pbe_eq->phba = phba; 3088 mem = &cq->dma_mem; 3089 cq_vaddress = pci_alloc_consistent(phba->pcidev, 3090 num_cq_pages * PAGE_SIZE, 3091 &paddr); 3092 if (!cq_vaddress) { 3093 ret = -ENOMEM; 3094 goto create_cq_error; 3095 } 3096 3097 ret = be_fill_queue(cq, phba->params.num_cq_entries, 3098 sizeof(struct sol_cqe), cq_vaddress); 3099 if (ret) { 3100 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3101 "BM_%d : be_fill_queue Failed " 3102 "for ISCSI CQ\n"); 3103 goto create_cq_error; 3104 } 3105 3106 mem->dma = paddr; 3107 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, 3108 false, 0); 3109 if (ret) { 3110 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3111 "BM_%d : beiscsi_cmd_eq_create" 3112 "Failed for ISCSI CQ\n"); 3113 goto create_cq_error; 3114 } 3115 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3116 "BM_%d : iscsi cq_id is %d for eq_id %d\n" 3117 "iSCSI CQ CREATED\n", cq->id, eq->id); 3118 } 3119 return 0; 3120 3121 create_cq_error: 3122 for (i = 0; i < phba->num_cpus; i++) { 3123 cq = &phwi_context->be_cq[i]; 3124 mem = &cq->dma_mem; 3125 if (mem->va) 3126 pci_free_consistent(phba->pcidev, num_cq_pages 3127 * PAGE_SIZE, 3128 mem->va, mem->dma); 3129 } 3130 return ret; 3131 } 3132 3133 static int 3134 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 3135 struct hwi_context_memory *phwi_context, 3136 struct hwi_controller *phwi_ctrlr, 3137 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3138 { 3139 unsigned int idx; 3140 int ret; 3141 struct be_queue_info *dq, *cq; 3142 struct be_dma_mem *mem; 3143 struct be_mem_descriptor *mem_descr; 3144 void *dq_vaddress; 3145 3146 idx = 0; 3147 dq = &phwi_context->be_def_hdrq[ulp_num]; 3148 cq = &phwi_context->be_cq[0]; 3149 mem = &dq->dma_mem; 3150 mem_descr = phba->init_mem; 3151 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 3152 (ulp_num * MEM_DESCR_OFFSET); 3153 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3154 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 3155 sizeof(struct phys_addr), 3156 sizeof(struct phys_addr), dq_vaddress); 3157 if (ret) { 3158 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3159 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", 3160 ulp_num); 3161 3162 return ret; 3163 } 3164 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
3165 bus_address.u.a64.address; 3166 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3167 def_pdu_ring_sz, 3168 phba->params.defpdu_hdr_sz, 3169 BEISCSI_DEFQ_HDR, ulp_num); 3170 if (ret) { 3171 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3172 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", 3173 ulp_num); 3174 3175 return ret; 3176 } 3177 3178 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3179 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3180 ulp_num, 3181 phwi_context->be_def_hdrq[ulp_num].id); 3182 return 0; 3183 } 3184 3185 static int 3186 beiscsi_create_def_data(struct beiscsi_hba *phba, 3187 struct hwi_context_memory *phwi_context, 3188 struct hwi_controller *phwi_ctrlr, 3189 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3190 { 3191 unsigned int idx; 3192 int ret; 3193 struct be_queue_info *dataq, *cq; 3194 struct be_dma_mem *mem; 3195 struct be_mem_descriptor *mem_descr; 3196 void *dq_vaddress; 3197 3198 idx = 0; 3199 dataq = &phwi_context->be_def_dataq[ulp_num]; 3200 cq = &phwi_context->be_cq[0]; 3201 mem = &dataq->dma_mem; 3202 mem_descr = phba->init_mem; 3203 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3204 (ulp_num * MEM_DESCR_OFFSET); 3205 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3206 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3207 sizeof(struct phys_addr), 3208 sizeof(struct phys_addr), dq_vaddress); 3209 if (ret) { 3210 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3211 "BM_%d : be_fill_queue Failed for DEF PDU " 3212 "DATA on ULP : %d\n", 3213 ulp_num); 3214 3215 return ret; 3216 } 3217 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3218 bus_address.u.a64.address; 3219 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3220 def_pdu_ring_sz, 3221 phba->params.defpdu_data_sz, 3222 BEISCSI_DEFQ_DATA, ulp_num); 3223 if (ret) { 3224 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3225 "BM_%d be_cmd_create_default_pdu_queue" 3226 " Failed for DEF PDU DATA on ULP : %d\n", 3227 ulp_num); 3228 return ret; 3229 } 3230 3231 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3232 "BM_%d : iscsi def data id on ULP : %d is %d\n", 3233 ulp_num, 3234 phwi_context->be_def_dataq[ulp_num].id); 3235 3236 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3237 "BM_%d : DEFAULT PDU DATA RING CREATED" 3238 "on ULP : %d\n", ulp_num); 3239 return 0; 3240 } 3241 3242 3243 static int 3244 beiscsi_post_template_hdr(struct beiscsi_hba *phba) 3245 { 3246 struct be_mem_descriptor *mem_descr; 3247 struct mem_array *pm_arr; 3248 struct be_dma_mem sgl; 3249 int status, ulp_num; 3250 3251 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3252 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3253 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3254 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + 3255 (ulp_num * MEM_DESCR_OFFSET); 3256 pm_arr = mem_descr->mem_array; 3257 3258 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3259 status = be_cmd_iscsi_post_template_hdr( 3260 &phba->ctrl, &sgl); 3261 3262 if (status != 0) { 3263 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3264 "BM_%d : Post Template HDR Failed for" 3265 "ULP_%d\n", ulp_num); 3266 return status; 3267 } 3268 3269 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3270 "BM_%d : Template HDR Pages Posted for" 3271 "ULP_%d\n", ulp_num); 3272 } 3273 } 3274 return 0; 3275 } 3276 3277 static int 3278 beiscsi_post_pages(struct beiscsi_hba *phba) 3279 { 3280 struct be_mem_descriptor *mem_descr; 3281 struct mem_array *pm_arr; 3282 unsigned int page_offset, i; 3283 
struct be_dma_mem sgl; 3284 int status, ulp_num = 0; 3285 3286 mem_descr = phba->init_mem; 3287 mem_descr += HWI_MEM_SGE; 3288 pm_arr = mem_descr->mem_array; 3289 3290 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3291 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3292 break; 3293 3294 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3295 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; 3296 for (i = 0; i < mem_descr->num_elements; i++) { 3297 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3298 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3299 page_offset, 3300 (pm_arr->size / PAGE_SIZE)); 3301 page_offset += pm_arr->size / PAGE_SIZE; 3302 if (status != 0) { 3303 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3304 "BM_%d : post sgl failed.\n"); 3305 return status; 3306 } 3307 pm_arr++; 3308 } 3309 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3310 "BM_%d : POSTED PAGES\n"); 3311 return 0; 3312 } 3313 3314 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3315 { 3316 struct be_dma_mem *mem = &q->dma_mem; 3317 if (mem->va) { 3318 pci_free_consistent(phba->pcidev, mem->size, 3319 mem->va, mem->dma); 3320 mem->va = NULL; 3321 } 3322 } 3323 3324 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3325 u16 len, u16 entry_size) 3326 { 3327 struct be_dma_mem *mem = &q->dma_mem; 3328 3329 memset(q, 0, sizeof(*q)); 3330 q->len = len; 3331 q->entry_size = entry_size; 3332 mem->size = len * entry_size; 3333 mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); 3334 if (!mem->va) 3335 return -ENOMEM; 3336 return 0; 3337 } 3338 3339 static int 3340 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3341 struct hwi_context_memory *phwi_context, 3342 struct hwi_controller *phwi_ctrlr) 3343 { 3344 unsigned int num_wrb_rings; 3345 u64 pa_addr_lo; 3346 unsigned int idx, num, i, ulp_num; 3347 struct mem_array *pwrb_arr; 3348 void *wrb_vaddr; 3349 struct be_dma_mem sgl; 3350 struct be_mem_descriptor *mem_descr; 3351 struct hwi_wrb_context *pwrb_context; 3352 int status; 3353 uint8_t ulp_count = 0, ulp_base_num = 0; 3354 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; 3355 3356 idx = 0; 3357 mem_descr = phba->init_mem; 3358 mem_descr += HWI_MEM_WRB; 3359 pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl, 3360 sizeof(*pwrb_arr), 3361 GFP_KERNEL); 3362 if (!pwrb_arr) { 3363 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3364 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3365 return -ENOMEM; 3366 } 3367 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3368 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3369 num_wrb_rings = mem_descr->mem_array[idx].size / 3370 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3371 3372 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3373 if (num_wrb_rings) { 3374 pwrb_arr[num].virtual_address = wrb_vaddr; 3375 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3376 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3377 sizeof(struct iscsi_wrb); 3378 wrb_vaddr += pwrb_arr[num].size; 3379 pa_addr_lo += pwrb_arr[num].size; 3380 num_wrb_rings--; 3381 } else { 3382 idx++; 3383 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3384 pa_addr_lo = mem_descr->mem_array[idx].\ 3385 bus_address.u.a64.address; 3386 num_wrb_rings = mem_descr->mem_array[idx].size / 3387 (phba->params.wrbs_per_cxn * 3388 sizeof(struct iscsi_wrb)); 3389 pwrb_arr[num].virtual_address = wrb_vaddr; 3390 
pwrb_arr[num].bus_address.u.a64.address\ 3391 = pa_addr_lo; 3392 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3393 sizeof(struct iscsi_wrb); 3394 wrb_vaddr += pwrb_arr[num].size; 3395 pa_addr_lo += pwrb_arr[num].size; 3396 num_wrb_rings--; 3397 } 3398 } 3399 3400 /* Get the ULP Count */ 3401 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3402 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3403 ulp_count++; 3404 ulp_base_num = ulp_num; 3405 cid_count_ulp[ulp_num] = 3406 BEISCSI_GET_CID_COUNT(phba, ulp_num); 3407 } 3408 3409 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3410 if (ulp_count > 1) { 3411 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; 3412 3413 if (!cid_count_ulp[ulp_base_num]) 3414 ulp_base_num = (ulp_base_num + 1) % 3415 BEISCSI_ULP_COUNT; 3416 3417 cid_count_ulp[ulp_base_num]--; 3418 } 3419 3420 3421 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3422 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3423 &phwi_context->be_wrbq[i], 3424 &phwi_ctrlr->wrb_context[i], 3425 ulp_base_num); 3426 if (status != 0) { 3427 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3428 "BM_%d : wrbq create failed."); 3429 kfree(pwrb_arr); 3430 return status; 3431 } 3432 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3433 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3434 } 3435 kfree(pwrb_arr); 3436 return 0; 3437 } 3438 3439 static void free_wrb_handles(struct beiscsi_hba *phba) 3440 { 3441 unsigned int index; 3442 struct hwi_controller *phwi_ctrlr; 3443 struct hwi_wrb_context *pwrb_context; 3444 3445 phwi_ctrlr = phba->phwi_ctrlr; 3446 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 3447 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3448 kfree(pwrb_context->pwrb_handle_base); 3449 kfree(pwrb_context->pwrb_handle_basestd); 3450 } 3451 } 3452 3453 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3454 { 3455 struct be_ctrl_info *ctrl = &phba->ctrl; 3456 struct be_dma_mem *ptag_mem; 3457 struct be_queue_info *q; 3458 int i, tag; 3459 3460 q = &phba->ctrl.mcc_obj.q; 3461 for (i = 0; i < MAX_MCC_CMD; i++) { 3462 tag = i + 1; 3463 if (!test_bit(MCC_TAG_STATE_RUNNING, 3464 &ctrl->ptag_state[tag].tag_state)) 3465 continue; 3466 3467 if (test_bit(MCC_TAG_STATE_TIMEOUT, 3468 &ctrl->ptag_state[tag].tag_state)) { 3469 ptag_mem = &ctrl->ptag_state[tag].tag_mem_state; 3470 if (ptag_mem->size) { 3471 pci_free_consistent(ctrl->pdev, 3472 ptag_mem->size, 3473 ptag_mem->va, 3474 ptag_mem->dma); 3475 ptag_mem->size = 0; 3476 } 3477 continue; 3478 } 3479 /** 3480 * If MCC is still active and waiting then wake up the process. 3481 * We are here only because port is going offline. The process 3482 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is 3483 * returned for the operation and allocated memory cleaned up. 3484 */ 3485 if (waitqueue_active(&ctrl->mcc_wait[tag])) { 3486 ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED; 3487 ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK; 3488 wake_up_interruptible(&ctrl->mcc_wait[tag]); 3489 /* 3490 * Control tag info gets reinitialized in enable 3491 * so wait for the process to clear running state. 3492 */ 3493 while (test_bit(MCC_TAG_STATE_RUNNING, 3494 &ctrl->ptag_state[tag].tag_state)) 3495 schedule_timeout_uninterruptible(HZ); 3496 } 3497 /** 3498 * For MCC with tag_states MCC_TAG_STATE_ASYNC and 3499 * MCC_TAG_STATE_IGNORE nothing needs to done. 
3500 */ 3501 } 3502 if (q->created) { 3503 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); 3504 be_queue_free(phba, q); 3505 } 3506 3507 q = &phba->ctrl.mcc_obj.cq; 3508 if (q->created) { 3509 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3510 be_queue_free(phba, q); 3511 } 3512 } 3513 3514 static int be_mcc_queues_create(struct beiscsi_hba *phba, 3515 struct hwi_context_memory *phwi_context) 3516 { 3517 struct be_queue_info *q, *cq; 3518 struct be_ctrl_info *ctrl = &phba->ctrl; 3519 3520 /* Alloc MCC compl queue */ 3521 cq = &phba->ctrl.mcc_obj.cq; 3522 if (be_queue_alloc(phba, cq, MCC_CQ_LEN, 3523 sizeof(struct be_mcc_compl))) 3524 goto err; 3525 /* Ask BE to create MCC compl queue */ 3526 if (phba->pcidev->msix_enabled) { 3527 if (beiscsi_cmd_cq_create(ctrl, cq, 3528 &phwi_context->be_eq[phba->num_cpus].q, 3529 false, true, 0)) 3530 goto mcc_cq_free; 3531 } else { 3532 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, 3533 false, true, 0)) 3534 goto mcc_cq_free; 3535 } 3536 3537 /* Alloc MCC queue */ 3538 q = &phba->ctrl.mcc_obj.q; 3539 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) 3540 goto mcc_cq_destroy; 3541 3542 /* Ask BE to create MCC queue */ 3543 if (beiscsi_cmd_mccq_create(phba, q, cq)) 3544 goto mcc_q_free; 3545 3546 return 0; 3547 3548 mcc_q_free: 3549 be_queue_free(phba, q); 3550 mcc_cq_destroy: 3551 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); 3552 mcc_cq_free: 3553 be_queue_free(phba, cq); 3554 err: 3555 return -ENOMEM; 3556 } 3557 3558 static void be2iscsi_enable_msix(struct beiscsi_hba *phba) 3559 { 3560 int nvec = 1; 3561 3562 switch (phba->generation) { 3563 case BE_GEN2: 3564 case BE_GEN3: 3565 nvec = BEISCSI_MAX_NUM_CPUS + 1; 3566 break; 3567 case BE_GEN4: 3568 nvec = phba->fw_config.eqid_count; 3569 break; 3570 default: 3571 nvec = 2; 3572 break; 3573 } 3574 3575 /* if eqid_count == 1 fall back to INTX */ 3576 if (enable_msix && nvec > 1) { 3577 const struct irq_affinity desc = { .post_vectors = 1 }; 3578 3579 if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, 3580 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) >= 0) { 3581 phba->num_cpus = nvec - 1; 3582 return; 3583 } 3584 } 3585 3586 phba->num_cpus = 1; 3587 } 3588 3589 static void hwi_purge_eq(struct beiscsi_hba *phba) 3590 { 3591 struct hwi_controller *phwi_ctrlr; 3592 struct hwi_context_memory *phwi_context; 3593 struct be_queue_info *eq; 3594 struct be_eq_entry *eqe = NULL; 3595 int i, eq_msix; 3596 unsigned int num_processed; 3597 3598 if (beiscsi_hba_in_error(phba)) 3599 return; 3600 3601 phwi_ctrlr = phba->phwi_ctrlr; 3602 phwi_context = phwi_ctrlr->phwi_ctxt; 3603 if (phba->pcidev->msix_enabled) 3604 eq_msix = 1; 3605 else 3606 eq_msix = 0; 3607 3608 for (i = 0; i < (phba->num_cpus + eq_msix); i++) { 3609 eq = &phwi_context->be_eq[i].q; 3610 eqe = queue_tail_node(eq); 3611 num_processed = 0; 3612 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 3613 & EQE_VALID_MASK) { 3614 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 3615 queue_tail_inc(eq); 3616 eqe = queue_tail_node(eq); 3617 num_processed++; 3618 } 3619 3620 if (num_processed) 3621 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); 3622 } 3623 } 3624 3625 static void hwi_cleanup_port(struct beiscsi_hba *phba) 3626 { 3627 struct be_queue_info *q; 3628 struct be_ctrl_info *ctrl = &phba->ctrl; 3629 struct hwi_controller *phwi_ctrlr; 3630 struct hwi_context_memory *phwi_context; 3631 int i, eq_for_mcc, ulp_num; 3632 3633 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3634 if (test_bit(ulp_num, 
&phba->fw_config.ulp_supported)) 3635 beiscsi_cmd_iscsi_cleanup(phba, ulp_num); 3636 3637 /** 3638 * Purge all EQ entries that may have been left out. This is to 3639 * workaround a problem we've seen occasionally where driver gets an 3640 * interrupt with EQ entry bit set after stopping the controller. 3641 */ 3642 hwi_purge_eq(phba); 3643 3644 phwi_ctrlr = phba->phwi_ctrlr; 3645 phwi_context = phwi_ctrlr->phwi_ctxt; 3646 3647 be_cmd_iscsi_remove_template_hdr(ctrl); 3648 3649 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3650 q = &phwi_context->be_wrbq[i]; 3651 if (q->created) 3652 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3653 } 3654 kfree(phwi_context->be_wrbq); 3655 free_wrb_handles(phba); 3656 3657 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3658 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3659 3660 q = &phwi_context->be_def_hdrq[ulp_num]; 3661 if (q->created) 3662 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3663 3664 q = &phwi_context->be_def_dataq[ulp_num]; 3665 if (q->created) 3666 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3667 } 3668 } 3669 3670 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3671 3672 for (i = 0; i < (phba->num_cpus); i++) { 3673 q = &phwi_context->be_cq[i]; 3674 if (q->created) { 3675 be_queue_free(phba, q); 3676 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3677 } 3678 } 3679 3680 be_mcc_queues_destroy(phba); 3681 if (phba->pcidev->msix_enabled) 3682 eq_for_mcc = 1; 3683 else 3684 eq_for_mcc = 0; 3685 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3686 q = &phwi_context->be_eq[i].q; 3687 if (q->created) { 3688 be_queue_free(phba, q); 3689 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3690 } 3691 } 3692 /* this ensures complete FW cleanup */ 3693 beiscsi_cmd_function_reset(phba); 3694 /* last communication, indicate driver is unloading */ 3695 beiscsi_cmd_special_wrb(&phba->ctrl, 0); 3696 } 3697 3698 static int hwi_init_port(struct beiscsi_hba *phba) 3699 { 3700 struct hwi_controller *phwi_ctrlr; 3701 struct hwi_context_memory *phwi_context; 3702 unsigned int def_pdu_ring_sz; 3703 struct be_ctrl_info *ctrl = &phba->ctrl; 3704 int status, ulp_num; 3705 u16 nbufs; 3706 3707 phwi_ctrlr = phba->phwi_ctrlr; 3708 phwi_context = phwi_ctrlr->phwi_ctxt; 3709 /* set port optic state to unknown */ 3710 phba->optic_state = 0xff; 3711 3712 status = beiscsi_create_eqs(phba, phwi_context); 3713 if (status != 0) { 3714 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3715 "BM_%d : EQ not created\n"); 3716 goto error; 3717 } 3718 3719 status = be_mcc_queues_create(phba, phwi_context); 3720 if (status != 0) 3721 goto error; 3722 3723 status = beiscsi_check_supported_fw(ctrl, phba); 3724 if (status != 0) { 3725 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3726 "BM_%d : Unsupported fw version\n"); 3727 goto error; 3728 } 3729 3730 status = beiscsi_create_cqs(phba, phwi_context); 3731 if (status != 0) { 3732 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3733 "BM_%d : CQ not created\n"); 3734 goto error; 3735 } 3736 3737 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3738 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3739 nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries; 3740 def_pdu_ring_sz = nbufs * sizeof(struct phys_addr); 3741 3742 status = beiscsi_create_def_hdr(phba, phwi_context, 3743 phwi_ctrlr, 3744 def_pdu_ring_sz, 3745 ulp_num); 3746 if (status != 0) { 3747 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3748 "BM_%d : Default Header not created for ULP : %d\n", 3749 ulp_num); 3750 goto error; 3751 } 3752 3753 
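/*
 * The data ring is created next, mirroring the header ring above
 * with the same def_pdu_ring_sz: each ring entry is one
 * struct phys_addr, i.e. a 64-bit bus address split into two 32-bit
 * words (8 bytes). As an illustrative sizing (numbers hypothetical,
 * not taken from firmware): an HDQ with nbufs = 512 entries needs
 * 512 * 8 = 4096 bytes, exactly one 4K page per default PDU ring.
 */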
			status = beiscsi_create_def_data(phba, phwi_context,
							 phwi_ctrlr,
							 def_pdu_ring_sz,
							 ulp_num);
			if (status != 0) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Default Data not created for ULP : %d\n",
					    ulp_num);
				goto error;
			}
			/**
			 * Now that the default PDU rings have been created,
			 * let EP know about it.
			 */
			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
						 ulp_num, nbufs);
			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
						 ulp_num, nbufs);
		}
	}

	status = beiscsi_post_pages(phba);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Post SGL Pages Failed\n");
		goto error;
	}

	status = beiscsi_post_template_hdr(phba);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Template HDR Posting for CXN Failed\n");
	}

	status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : WRB Rings not created\n");
		goto error;
	}

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint16_t async_arr_idx = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			uint16_t cri = 0;
			struct hd_async_context *pasync_ctx;

			pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
			for (cri = 0; cri < phba->params.cxns_per_ctrl; cri++) {
				if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
					       (phwi_ctrlr, cri))
					pasync_ctx->cid_to_async_cri_map[
					phwi_ctrlr->wrb_context[cri].cid] =
					async_arr_idx++;
			}
		}
	}

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : hwi_init_port success\n");
	return 0;

error:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : hwi_init_port failed\n");
	hwi_cleanup_port(phba);
	return status;
}

static int hwi_init_controller(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
			    phwi_ctrlr->phwi_ctxt);
	} else {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
			    "than one element. Failing to load\n");
		return -ENOMEM;
	}

	iscsi_init_global_templates(phba);
	if (beiscsi_init_wrb_handle(phba))
		return -ENOMEM;

	if (hwi_init_async_pdu_ctx(phba)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : hwi_init_async_pdu_ctx failed\n");
		return -ENOMEM;
	}

	if (hwi_init_port(phba) != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : hwi_init_controller failed\n");
		return -ENOMEM;
	}
	return 0;
}

static void beiscsi_free_mem(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	int i, j;

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			pci_free_consistent(phba->pcidev,
			    mem_descr->mem_array[j - 1].size,
			    mem_descr->mem_array[j - 1].virtual_address,
			    (unsigned
long)mem_descr->mem_array[j - 1]. 3876 bus_address.u.a64.address); 3877 } 3878 3879 kfree(mem_descr->mem_array); 3880 mem_descr++; 3881 } 3882 kfree(phba->init_mem); 3883 kfree(phba->phwi_ctrlr->wrb_context); 3884 kfree(phba->phwi_ctrlr); 3885 } 3886 3887 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3888 { 3889 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3890 struct sgl_handle *psgl_handle; 3891 struct iscsi_sge *pfrag; 3892 unsigned int arr_index, i, idx; 3893 unsigned int ulp_icd_start, ulp_num = 0; 3894 3895 phba->io_sgl_hndl_avbl = 0; 3896 phba->eh_sgl_hndl_avbl = 0; 3897 3898 mem_descr_sglh = phba->init_mem; 3899 mem_descr_sglh += HWI_MEM_SGLH; 3900 if (1 == mem_descr_sglh->num_elements) { 3901 phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl, 3902 sizeof(struct sgl_handle *), 3903 GFP_KERNEL); 3904 if (!phba->io_sgl_hndl_base) { 3905 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3906 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3907 return -ENOMEM; 3908 } 3909 phba->eh_sgl_hndl_base = 3910 kcalloc(phba->params.icds_per_ctrl - 3911 phba->params.ios_per_ctrl, 3912 sizeof(struct sgl_handle *), GFP_KERNEL); 3913 if (!phba->eh_sgl_hndl_base) { 3914 kfree(phba->io_sgl_hndl_base); 3915 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3916 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3917 return -ENOMEM; 3918 } 3919 } else { 3920 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3921 "BM_%d : HWI_MEM_SGLH is more than one element." 3922 "Failing to load\n"); 3923 return -ENOMEM; 3924 } 3925 3926 arr_index = 0; 3927 idx = 0; 3928 while (idx < mem_descr_sglh->num_elements) { 3929 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 3930 3931 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 3932 sizeof(struct sgl_handle)); i++) { 3933 if (arr_index < phba->params.ios_per_ctrl) { 3934 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 3935 phba->io_sgl_hndl_avbl++; 3936 arr_index++; 3937 } else { 3938 phba->eh_sgl_hndl_base[arr_index - 3939 phba->params.ios_per_ctrl] = 3940 psgl_handle; 3941 arr_index++; 3942 phba->eh_sgl_hndl_avbl++; 3943 } 3944 psgl_handle++; 3945 } 3946 idx++; 3947 } 3948 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3949 "BM_%d : phba->io_sgl_hndl_avbl=%d" 3950 "phba->eh_sgl_hndl_avbl=%d\n", 3951 phba->io_sgl_hndl_avbl, 3952 phba->eh_sgl_hndl_avbl); 3953 3954 mem_descr_sg = phba->init_mem; 3955 mem_descr_sg += HWI_MEM_SGE; 3956 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3957 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 3958 mem_descr_sg->num_elements); 3959 3960 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3961 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3962 break; 3963 3964 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 3965 3966 arr_index = 0; 3967 idx = 0; 3968 while (idx < mem_descr_sg->num_elements) { 3969 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 3970 3971 for (i = 0; 3972 i < (mem_descr_sg->mem_array[idx].size) / 3973 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 3974 i++) { 3975 if (arr_index < phba->params.ios_per_ctrl) 3976 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 3977 else 3978 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 3979 phba->params.ios_per_ctrl]; 3980 psgl_handle->pfrag = pfrag; 3981 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 3982 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 3983 pfrag += phba->params.num_sge_per_io; 3984 psgl_handle->sgl_index = ulp_icd_start + arr_index++; 3985 } 3986 idx++; 3987 } 
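	/*
	 * The SGL handle pools built above are used as rings: the alloc and
	 * free indices below wrap around the io/eh arrays as handles are
	 * taken and returned.
	 */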
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}

static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
{
	int ret;
	uint16_t i, ulp_num;
	struct ulp_cid_info *ptr_cid_info = NULL;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
					       GFP_KERNEL);

			if (!ptr_cid_info) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Failed to allocate memory "
					    "for ULP_CID_INFO for ULP : %d\n",
					    ulp_num);
				ret = -ENOMEM;
				goto free_memory;
			}

			/* Allocate memory for CID array */
			ptr_cid_info->cid_array =
				kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num),
					sizeof(*ptr_cid_info->cid_array),
					GFP_KERNEL);
			if (!ptr_cid_info->cid_array) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Failed to allocate memory "
					    "for CID_ARRAY for ULP : %d\n",
					    ulp_num);
				kfree(ptr_cid_info);
				ptr_cid_info = NULL;
				ret = -ENOMEM;
				goto free_memory;
			}
			ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
						   phba, ulp_num);

			/* Save the cid_info_array ptr */
			phba->cid_array_info[ulp_num] = ptr_cid_info;
		}
	}
	phba->ep_array = kcalloc(phba->params.cxns_per_ctrl,
				 sizeof(struct iscsi_endpoint *),
				 GFP_KERNEL);
	if (!phba->ep_array) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Failed to allocate memory in "
			    "hba_setup_cid_tbls\n");
		ret = -ENOMEM;
		goto free_memory;
	}

	phba->conn_table = kcalloc(phba->params.cxns_per_ctrl,
				   sizeof(struct beiscsi_conn *),
				   GFP_KERNEL);
	if (!phba->conn_table) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Failed to allocate memory in "
			    "hba_setup_cid_tbls\n");
		kfree(phba->ep_array);
		phba->ep_array = NULL;
		ret = -ENOMEM;
		goto free_memory;
	}

	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;

		ptr_cid_info = phba->cid_array_info[ulp_num];
		ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
			phba->phwi_ctrlr->wrb_context[i].cid;
	}

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = phba->cid_array_info[ulp_num];

			ptr_cid_info->cid_alloc = 0;
			ptr_cid_info->cid_free = 0;
		}
	}
	return 0;

free_memory:
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = phba->cid_array_info[ulp_num];

			if (ptr_cid_info) {
				kfree(ptr_cid_info->cid_array);
				kfree(ptr_cid_info);
				phba->cid_array_info[ulp_num] = NULL;
			}
		}
	}

	return ret;
}

static void hwi_enable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	u8 __iomem *addr;
	u32 reg, i;
	u32 enabled;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	reg = ioread32(addr);

	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (!enabled) {
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : reg = 0x%08x addr = %p\n", reg, addr);
		iowrite32(reg, addr);
	}

	if (!phba->pcidev->msix_enabled) {
		eq = &phwi_context->be_eq[0].q;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : eq->id=%d\n", eq->id);

		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
	} else {
		for (i = 0; i <= phba->num_cpus; i++) {
			eq = &phwi_context->be_eq[i].q;
			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : eq->id=%d\n", eq->id);
			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
		}
	}
}

static void hwi_disable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;

	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);

	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (enabled) {
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		iowrite32(reg, addr);
	} else {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : In hwi_disable_intr, Already Disabled\n");
	}
}

static int beiscsi_init_port(struct beiscsi_hba *phba)
{
	int ret;

	ret = hwi_init_controller(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : init controller failed\n");
		return ret;
	}
	ret = beiscsi_init_sgl_handle(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : init sgl handles failed\n");
		goto cleanup_port;
	}

	ret = hba_setup_cid_tbls(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : setup CID table failed\n");
		kfree(phba->io_sgl_hndl_base);
		kfree(phba->eh_sgl_hndl_base);
		goto cleanup_port;
	}
	return ret;

cleanup_port:
	hwi_cleanup_port(phba);
	return ret;
}

static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
{
	struct ulp_cid_info *ptr_cid_info = NULL;
	int ulp_num;

	kfree(phba->io_sgl_hndl_base);
	kfree(phba->eh_sgl_hndl_base);
	kfree(phba->ep_array);
	kfree(phba->conn_table);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = phba->cid_array_info[ulp_num];

			if (ptr_cid_info) {
				kfree(ptr_cid_info->cid_array);
				kfree(ptr_cid_info);
				phba->cid_array_info[ulp_num] = NULL;
			}
		}
	}
}

/**
 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
 * @beiscsi_conn: ptr to the conn to be cleaned up
 * @task: ptr to iscsi_task resource to be freed.
 *
 * Free driver mgmt resources bound to the connection.
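 * The WRB and SGL handles are returned to their pools and any mapped
 * mgmt buffer is unmapped before the pointers are cleared.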
4219 **/ 4220 void 4221 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 4222 struct iscsi_task *task) 4223 { 4224 struct beiscsi_io_task *io_task; 4225 struct beiscsi_hba *phba = beiscsi_conn->phba; 4226 struct hwi_wrb_context *pwrb_context; 4227 struct hwi_controller *phwi_ctrlr; 4228 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4229 beiscsi_conn->beiscsi_conn_cid); 4230 4231 phwi_ctrlr = phba->phwi_ctrlr; 4232 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4233 4234 io_task = task->dd_data; 4235 4236 if (io_task->pwrb_handle) { 4237 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4238 io_task->pwrb_handle = NULL; 4239 } 4240 4241 if (io_task->psgl_handle) { 4242 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4243 io_task->psgl_handle = NULL; 4244 } 4245 4246 if (io_task->mtask_addr) { 4247 pci_unmap_single(phba->pcidev, 4248 io_task->mtask_addr, 4249 io_task->mtask_data_count, 4250 PCI_DMA_TODEVICE); 4251 io_task->mtask_addr = 0; 4252 } 4253 } 4254 4255 /** 4256 * beiscsi_cleanup_task()- Free driver resources of the task 4257 * @task: ptr to the iscsi task 4258 * 4259 **/ 4260 static void beiscsi_cleanup_task(struct iscsi_task *task) 4261 { 4262 struct beiscsi_io_task *io_task = task->dd_data; 4263 struct iscsi_conn *conn = task->conn; 4264 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4265 struct beiscsi_hba *phba = beiscsi_conn->phba; 4266 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4267 struct hwi_wrb_context *pwrb_context; 4268 struct hwi_controller *phwi_ctrlr; 4269 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4270 beiscsi_conn->beiscsi_conn_cid); 4271 4272 phwi_ctrlr = phba->phwi_ctrlr; 4273 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4274 4275 if (io_task->cmd_bhs) { 4276 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4277 io_task->bhs_pa.u.a64.address); 4278 io_task->cmd_bhs = NULL; 4279 task->hdr = NULL; 4280 } 4281 4282 if (task->sc) { 4283 if (io_task->pwrb_handle) { 4284 free_wrb_handle(phba, pwrb_context, 4285 io_task->pwrb_handle); 4286 io_task->pwrb_handle = NULL; 4287 } 4288 4289 if (io_task->psgl_handle) { 4290 free_io_sgl_handle(phba, io_task->psgl_handle); 4291 io_task->psgl_handle = NULL; 4292 } 4293 4294 if (io_task->scsi_cmnd) { 4295 if (io_task->num_sg) 4296 scsi_dma_unmap(io_task->scsi_cmnd); 4297 io_task->scsi_cmnd = NULL; 4298 } 4299 } else { 4300 if (!beiscsi_conn->login_in_progress) 4301 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4302 } 4303 } 4304 4305 void 4306 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4307 struct beiscsi_offload_params *params) 4308 { 4309 struct wrb_handle *pwrb_handle; 4310 struct hwi_wrb_context *pwrb_context = NULL; 4311 struct beiscsi_hba *phba = beiscsi_conn->phba; 4312 struct iscsi_task *task = beiscsi_conn->task; 4313 struct iscsi_session *session = task->conn->session; 4314 u32 doorbell = 0; 4315 4316 /* 4317 * We can always use 0 here because it is reserved by libiscsi for 4318 * login/startup related tasks. 
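	 * The login task's WRB and SGL handles are freed below and a
	 * CONTEXT_UPDATE WRB is posted to move the connection to offload mode.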
4319 */ 4320 beiscsi_conn->login_in_progress = 0; 4321 spin_lock_bh(&session->back_lock); 4322 beiscsi_cleanup_task(task); 4323 spin_unlock_bh(&session->back_lock); 4324 4325 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 4326 &pwrb_context); 4327 4328 /* Check for the adapter family */ 4329 if (is_chip_be2_be3r(phba)) 4330 beiscsi_offload_cxn_v0(params, pwrb_handle, 4331 phba->init_mem, 4332 pwrb_context); 4333 else 4334 beiscsi_offload_cxn_v2(params, pwrb_handle, 4335 pwrb_context); 4336 4337 be_dws_le_to_cpu(pwrb_handle->pwrb, 4338 sizeof(struct iscsi_target_context_update_wrb)); 4339 4340 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4341 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4342 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4343 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4344 iowrite32(doorbell, phba->db_va + 4345 beiscsi_conn->doorbell_offset); 4346 4347 /* 4348 * There is no completion for CONTEXT_UPDATE. The completion of next 4349 * WRB posted guarantees FW's processing and DMA'ing of it. 4350 * Use beiscsi_put_wrb_handle to put it back in the pool which makes 4351 * sure zero'ing or reuse of the WRB only after wrbs_per_cxn. 4352 */ 4353 beiscsi_put_wrb_handle(pwrb_context, pwrb_handle, 4354 phba->params.wrbs_per_cxn); 4355 beiscsi_log(phba, KERN_INFO, 4356 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4357 "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n", 4358 pwrb_handle, pwrb_context->free_index, 4359 pwrb_context->wrb_handles_available); 4360 } 4361 4362 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4363 int *index, int *age) 4364 { 4365 *index = (int)itt; 4366 if (age) 4367 *age = conn->session->age; 4368 } 4369 4370 /** 4371 * beiscsi_alloc_pdu - allocates pdu and related resources 4372 * @task: libiscsi task 4373 * @opcode: opcode of pdu for task 4374 * 4375 * This is called with the session lock held. It will allocate 4376 * the wrb and sgl if needed for the command. And it will prep 4377 * the pdu's itt. beiscsi_parse_pdu will later translate 4378 * the pdu itt to the libiscsi task itt. 
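 * The adapter ITT is encoded as (wrb_index << 16) | sgl_index, e.g.
 * wrb_index 5 with sgl_index 3 gives ITT 0x00050003 (before the
 * cpu_to_be32() swap), so completion handling can recover both handles.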
 */
static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	itt_t itt;
	uint16_t cri_index = 0;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	dma_addr_t paddr;

	io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool,
					  GFP_ATOMIC, &paddr);
	if (!io_task->cmd_bhs)
		return -ENOMEM;
	io_task->bhs_pa.u.a64.address = paddr;
	io_task->libiscsi_itt = (itt_t)task->itt;
	io_task->conn = beiscsi_conn;

	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
	task->hdr_max = sizeof(struct be_cmd_bhs);
	io_task->psgl_handle = NULL;
	io_task->pwrb_handle = NULL;

	if (task->sc) {
		io_task->psgl_handle = alloc_io_sgl_handle(phba);
		if (!io_task->psgl_handle) {
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Alloc of IO_SGL_ICD Failed "
				    "for the CID : %d\n",
				    beiscsi_conn->beiscsi_conn_cid);
			goto free_hndls;
		}
		io_task->pwrb_handle = alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid,
					&io_task->pwrb_context);
		if (!io_task->pwrb_handle) {
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Alloc of WRB_HANDLE Failed "
				    "for the CID : %d\n",
				    beiscsi_conn->beiscsi_conn_cid);
			goto free_io_hndls;
		}
	} else {
		io_task->scsi_cmnd = NULL;
		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
			beiscsi_conn->task = task;
			if (!beiscsi_conn->login_in_progress) {
				io_task->psgl_handle = (struct sgl_handle *)
						alloc_mgmt_sgl_handle(phba);
				if (!io_task->psgl_handle) {
					beiscsi_log(phba, KERN_ERR,
						    BEISCSI_LOG_IO |
						    BEISCSI_LOG_CONFIG,
						    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
						    "for the CID : %d\n",
						    beiscsi_conn->beiscsi_conn_cid);
					goto free_hndls;
				}

				beiscsi_conn->login_in_progress = 1;
				beiscsi_conn->plogin_sgl_handle =
							io_task->psgl_handle;
				io_task->pwrb_handle =
					alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid,
					&io_task->pwrb_context);
				if (!io_task->pwrb_handle) {
					beiscsi_log(phba, KERN_ERR,
						    BEISCSI_LOG_IO |
						    BEISCSI_LOG_CONFIG,
						    "BM_%d : Alloc of WRB_HANDLE Failed "
						    "for the CID : %d\n",
						    beiscsi_conn->beiscsi_conn_cid);
					goto free_mgmt_hndls;
				}
				beiscsi_conn->plogin_wrb_handle =
							io_task->pwrb_handle;

			} else {
				io_task->psgl_handle =
						beiscsi_conn->plogin_sgl_handle;
				io_task->pwrb_handle =
						beiscsi_conn->plogin_wrb_handle;
			}
		} else {
			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
			if (!io_task->psgl_handle) {
				beiscsi_log(phba, KERN_ERR,
					    BEISCSI_LOG_IO |
					    BEISCSI_LOG_CONFIG,
					    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
					    "for the CID : %d\n",
					    beiscsi_conn->beiscsi_conn_cid);
				goto free_hndls;
			}
			io_task->pwrb_handle =
					alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid,
					&io_task->pwrb_context);
			if (!io_task->pwrb_handle) {
				beiscsi_log(phba, KERN_ERR,
					    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
					    "BM_%d : Alloc of WRB_HANDLE Failed "
					    "for the CID : %d\n",
beiscsi_conn->beiscsi_conn_cid); 4493 goto free_mgmt_hndls; 4494 } 4495 4496 } 4497 } 4498 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4499 wrb_index << 16) | (unsigned int) 4500 (io_task->psgl_handle->sgl_index)); 4501 io_task->pwrb_handle->pio_handle = task; 4502 4503 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4504 return 0; 4505 4506 free_io_hndls: 4507 free_io_sgl_handle(phba, io_task->psgl_handle); 4508 goto free_hndls; 4509 free_mgmt_hndls: 4510 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4511 io_task->psgl_handle = NULL; 4512 free_hndls: 4513 phwi_ctrlr = phba->phwi_ctrlr; 4514 cri_index = BE_GET_CRI_FROM_CID( 4515 beiscsi_conn->beiscsi_conn_cid); 4516 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4517 if (io_task->pwrb_handle) 4518 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4519 io_task->pwrb_handle = NULL; 4520 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4521 io_task->bhs_pa.u.a64.address); 4522 io_task->cmd_bhs = NULL; 4523 return -ENOMEM; 4524 } 4525 static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4526 unsigned int num_sg, unsigned int xferlen, 4527 unsigned int writedir) 4528 { 4529 4530 struct beiscsi_io_task *io_task = task->dd_data; 4531 struct iscsi_conn *conn = task->conn; 4532 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4533 struct beiscsi_hba *phba = beiscsi_conn->phba; 4534 struct iscsi_wrb *pwrb = NULL; 4535 unsigned int doorbell = 0; 4536 4537 pwrb = io_task->pwrb_handle->pwrb; 4538 4539 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4540 4541 if (writedir) { 4542 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4543 INI_WR_CMD); 4544 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4545 } else { 4546 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4547 INI_RD_CMD); 4548 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4549 } 4550 4551 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4552 type, pwrb); 4553 4554 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4555 cpu_to_be16(*(unsigned short *) 4556 &io_task->cmd_bhs->iscsi_hdr.lun)); 4557 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4558 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4559 io_task->pwrb_handle->wrb_index); 4560 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4561 be32_to_cpu(task->cmdsn)); 4562 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4563 io_task->psgl_handle->sgl_index); 4564 4565 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4566 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4567 io_task->pwrb_handle->wrb_index); 4568 if (io_task->pwrb_context->plast_wrb) 4569 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4570 io_task->pwrb_context->plast_wrb, 4571 io_task->pwrb_handle->wrb_index); 4572 io_task->pwrb_context->plast_wrb = pwrb; 4573 4574 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4575 4576 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4577 doorbell |= (io_task->pwrb_handle->wrb_index & 4578 DB_DEF_PDU_WRB_INDEX_MASK) << 4579 DB_DEF_PDU_WRB_INDEX_SHIFT; 4580 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4581 iowrite32(doorbell, phba->db_va + 4582 beiscsi_conn->doorbell_offset); 4583 return 0; 4584 } 4585 4586 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4587 unsigned int num_sg, unsigned int xferlen, 4588 unsigned int writedir) 4589 { 4590 4591 struct beiscsi_io_task *io_task = task->dd_data; 4592 struct iscsi_conn *conn = task->conn; 4593 struct 
beiscsi_conn *beiscsi_conn = conn->dd_data; 4594 struct beiscsi_hba *phba = beiscsi_conn->phba; 4595 struct iscsi_wrb *pwrb = NULL; 4596 unsigned int doorbell = 0; 4597 4598 pwrb = io_task->pwrb_handle->pwrb; 4599 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4600 4601 if (writedir) { 4602 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4603 INI_WR_CMD); 4604 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4605 } else { 4606 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4607 INI_RD_CMD); 4608 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4609 } 4610 4611 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4612 type, pwrb); 4613 4614 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4615 cpu_to_be16(*(unsigned short *) 4616 &io_task->cmd_bhs->iscsi_hdr.lun)); 4617 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4618 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4619 io_task->pwrb_handle->wrb_index); 4620 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4621 be32_to_cpu(task->cmdsn)); 4622 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4623 io_task->psgl_handle->sgl_index); 4624 4625 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4626 4627 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4628 io_task->pwrb_handle->wrb_index); 4629 if (io_task->pwrb_context->plast_wrb) 4630 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4631 io_task->pwrb_context->plast_wrb, 4632 io_task->pwrb_handle->wrb_index); 4633 io_task->pwrb_context->plast_wrb = pwrb; 4634 4635 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4636 4637 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4638 doorbell |= (io_task->pwrb_handle->wrb_index & 4639 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4640 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4641 4642 iowrite32(doorbell, phba->db_va + 4643 beiscsi_conn->doorbell_offset); 4644 return 0; 4645 } 4646 4647 static int beiscsi_mtask(struct iscsi_task *task) 4648 { 4649 struct beiscsi_io_task *io_task = task->dd_data; 4650 struct iscsi_conn *conn = task->conn; 4651 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4652 struct beiscsi_hba *phba = beiscsi_conn->phba; 4653 struct iscsi_wrb *pwrb = NULL; 4654 unsigned int doorbell = 0; 4655 unsigned int cid; 4656 unsigned int pwrb_typeoffset = 0; 4657 int ret = 0; 4658 4659 cid = beiscsi_conn->beiscsi_conn_cid; 4660 pwrb = io_task->pwrb_handle->pwrb; 4661 4662 if (is_chip_be2_be3r(phba)) { 4663 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4664 be32_to_cpu(task->cmdsn)); 4665 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4666 io_task->pwrb_handle->wrb_index); 4667 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4668 io_task->psgl_handle->sgl_index); 4669 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 4670 task->data_count); 4671 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4672 io_task->pwrb_handle->wrb_index); 4673 if (io_task->pwrb_context->plast_wrb) 4674 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4675 io_task->pwrb_context->plast_wrb, 4676 io_task->pwrb_handle->wrb_index); 4677 io_task->pwrb_context->plast_wrb = pwrb; 4678 4679 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4680 } else { 4681 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4682 be32_to_cpu(task->cmdsn)); 4683 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4684 io_task->pwrb_handle->wrb_index); 4685 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4686 io_task->psgl_handle->sgl_index); 4687 AMAP_SET_BITS(struct 
amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 4688 task->data_count); 4689 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4690 io_task->pwrb_handle->wrb_index); 4691 if (io_task->pwrb_context->plast_wrb) 4692 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4693 io_task->pwrb_context->plast_wrb, 4694 io_task->pwrb_handle->wrb_index); 4695 io_task->pwrb_context->plast_wrb = pwrb; 4696 4697 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 4698 } 4699 4700 4701 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 4702 case ISCSI_OP_LOGIN: 4703 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 4704 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4705 ret = hwi_write_buffer(pwrb, task); 4706 break; 4707 case ISCSI_OP_NOOP_OUT: 4708 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4709 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4710 if (is_chip_be2_be3r(phba)) 4711 AMAP_SET_BITS(struct amap_iscsi_wrb, 4712 dmsg, pwrb, 1); 4713 else 4714 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4715 dmsg, pwrb, 1); 4716 } else { 4717 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 4718 if (is_chip_be2_be3r(phba)) 4719 AMAP_SET_BITS(struct amap_iscsi_wrb, 4720 dmsg, pwrb, 0); 4721 else 4722 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4723 dmsg, pwrb, 0); 4724 } 4725 ret = hwi_write_buffer(pwrb, task); 4726 break; 4727 case ISCSI_OP_TEXT: 4728 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4729 ret = hwi_write_buffer(pwrb, task); 4730 break; 4731 case ISCSI_OP_SCSI_TMFUNC: 4732 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 4733 ret = hwi_write_buffer(pwrb, task); 4734 break; 4735 case ISCSI_OP_LOGOUT: 4736 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 4737 ret = hwi_write_buffer(pwrb, task); 4738 break; 4739 4740 default: 4741 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4742 "BM_%d : opcode =%d Not supported\n", 4743 task->hdr->opcode & ISCSI_OPCODE_MASK); 4744 4745 return -EINVAL; 4746 } 4747 4748 if (ret) 4749 return ret; 4750 4751 /* Set the task type */ 4752 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 4753 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 4754 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 4755 4756 doorbell |= cid & DB_WRB_POST_CID_MASK; 4757 doorbell |= (io_task->pwrb_handle->wrb_index & 4758 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4759 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4760 iowrite32(doorbell, phba->db_va + 4761 beiscsi_conn->doorbell_offset); 4762 return 0; 4763 } 4764 4765 static int beiscsi_task_xmit(struct iscsi_task *task) 4766 { 4767 struct beiscsi_io_task *io_task = task->dd_data; 4768 struct scsi_cmnd *sc = task->sc; 4769 struct beiscsi_hba *phba; 4770 struct scatterlist *sg; 4771 int num_sg; 4772 unsigned int writedir = 0, xferlen = 0; 4773 4774 phba = io_task->conn->phba; 4775 /** 4776 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be 4777 * operational if FW still gets heartbeat from EP FW. Is management 4778 * path really needed to continue further? 
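	 * For now any task submitted while the HBA is not online is simply
	 * failed with -EIO.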
4779 */ 4780 if (!beiscsi_hba_is_online(phba)) 4781 return -EIO; 4782 4783 if (!io_task->conn->login_in_progress) 4784 task->hdr->exp_statsn = 0; 4785 4786 if (!sc) 4787 return beiscsi_mtask(task); 4788 4789 io_task->scsi_cmnd = sc; 4790 io_task->num_sg = 0; 4791 num_sg = scsi_dma_map(sc); 4792 if (num_sg < 0) { 4793 beiscsi_log(phba, KERN_ERR, 4794 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 4795 "BM_%d : scsi_dma_map Failed " 4796 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 4797 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 4798 io_task->libiscsi_itt, scsi_bufflen(sc)); 4799 4800 return num_sg; 4801 } 4802 /** 4803 * For scsi cmd task, check num_sg before unmapping in cleanup_task. 4804 * For management task, cleanup_task checks mtask_addr before unmapping. 4805 */ 4806 io_task->num_sg = num_sg; 4807 xferlen = scsi_bufflen(sc); 4808 sg = scsi_sglist(sc); 4809 if (sc->sc_data_direction == DMA_TO_DEVICE) 4810 writedir = 1; 4811 else 4812 writedir = 0; 4813 4814 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 4815 } 4816 4817 /** 4818 * beiscsi_bsg_request - handle bsg request from ISCSI transport 4819 * @job: job to handle 4820 */ 4821 static int beiscsi_bsg_request(struct bsg_job *job) 4822 { 4823 struct Scsi_Host *shost; 4824 struct beiscsi_hba *phba; 4825 struct iscsi_bsg_request *bsg_req = job->request; 4826 int rc = -EINVAL; 4827 unsigned int tag; 4828 struct be_dma_mem nonemb_cmd; 4829 struct be_cmd_resp_hdr *resp; 4830 struct iscsi_bsg_reply *bsg_reply = job->reply; 4831 unsigned short status, extd_status; 4832 4833 shost = iscsi_job_to_shost(job); 4834 phba = iscsi_host_priv(shost); 4835 4836 if (!beiscsi_hba_is_online(phba)) { 4837 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 4838 "BM_%d : HBA in error 0x%lx\n", phba->state); 4839 return -ENXIO; 4840 } 4841 4842 switch (bsg_req->msgcode) { 4843 case ISCSI_BSG_HST_VENDOR: 4844 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 4845 job->request_payload.payload_len, 4846 &nonemb_cmd.dma); 4847 if (nonemb_cmd.va == NULL) { 4848 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4849 "BM_%d : Failed to allocate memory for " 4850 "beiscsi_bsg_request\n"); 4851 return -ENOMEM; 4852 } 4853 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 4854 &nonemb_cmd); 4855 if (!tag) { 4856 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4857 "BM_%d : MBX Tag Allocation Failed\n"); 4858 4859 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4860 nonemb_cmd.va, nonemb_cmd.dma); 4861 return -EAGAIN; 4862 } 4863 4864 rc = wait_event_interruptible_timeout( 4865 phba->ctrl.mcc_wait[tag], 4866 phba->ctrl.mcc_tag_status[tag], 4867 msecs_to_jiffies( 4868 BEISCSI_HOST_MBX_TIMEOUT)); 4869 4870 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 4871 clear_bit(MCC_TAG_STATE_RUNNING, 4872 &phba->ctrl.ptag_state[tag].tag_state); 4873 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4874 nonemb_cmd.va, nonemb_cmd.dma); 4875 return -EIO; 4876 } 4877 extd_status = (phba->ctrl.mcc_tag_status[tag] & 4878 CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; 4879 status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; 4880 free_mcc_wrb(&phba->ctrl, tag); 4881 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 4882 sg_copy_from_buffer(job->reply_payload.sg_list, 4883 job->reply_payload.sg_cnt, 4884 nonemb_cmd.va, (resp->response_length 4885 + sizeof(*resp))); 4886 bsg_reply->reply_payload_rcv_len = resp->response_length; 4887 bsg_reply->result = status; 4888 bsg_job_done(job, bsg_reply->result, 4889 bsg_reply->reply_payload_rcv_len); 4890 
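		/* reply has been copied to the bsg job; release the DMA buffer */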
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4891 nonemb_cmd.va, nonemb_cmd.dma); 4892 if (status || extd_status) { 4893 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4894 "BM_%d : MBX Cmd Failed" 4895 " status = %d extd_status = %d\n", 4896 status, extd_status); 4897 4898 return -EIO; 4899 } else { 4900 rc = 0; 4901 } 4902 break; 4903 4904 default: 4905 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4906 "BM_%d : Unsupported bsg command: 0x%x\n", 4907 bsg_req->msgcode); 4908 break; 4909 } 4910 4911 return rc; 4912 } 4913 4914 static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 4915 { 4916 /* Set the logging parameter */ 4917 beiscsi_log_enable_init(phba, beiscsi_log_enable); 4918 } 4919 4920 void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle) 4921 { 4922 if (phba->boot_struct.boot_kset) 4923 return; 4924 4925 /* skip if boot work is already in progress */ 4926 if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) 4927 return; 4928 4929 phba->boot_struct.retry = 3; 4930 phba->boot_struct.tag = 0; 4931 phba->boot_struct.s_handle = s_handle; 4932 phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE; 4933 schedule_work(&phba->boot_work); 4934 } 4935 4936 /** 4937 * Boot flag info for iscsi-utilities 4938 * Bit 0 Block valid flag 4939 * Bit 1 Firmware booting selected 4940 */ 4941 #define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3 4942 4943 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 4944 { 4945 struct beiscsi_hba *phba = data; 4946 struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess; 4947 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 4948 char *str = buf; 4949 int rc = -EPERM; 4950 4951 switch (type) { 4952 case ISCSI_BOOT_TGT_NAME: 4953 rc = sprintf(buf, "%.*s\n", 4954 (int)strlen(boot_sess->target_name), 4955 (char *)&boot_sess->target_name); 4956 break; 4957 case ISCSI_BOOT_TGT_IP_ADDR: 4958 if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4) 4959 rc = sprintf(buf, "%pI4\n", 4960 (char *)&boot_conn->dest_ipaddr.addr); 4961 else 4962 rc = sprintf(str, "%pI6\n", 4963 (char *)&boot_conn->dest_ipaddr.addr); 4964 break; 4965 case ISCSI_BOOT_TGT_PORT: 4966 rc = sprintf(str, "%d\n", boot_conn->dest_port); 4967 break; 4968 4969 case ISCSI_BOOT_TGT_CHAP_NAME: 4970 rc = sprintf(str, "%.*s\n", 4971 boot_conn->negotiated_login_options.auth_data.chap. 4972 target_chap_name_length, 4973 (char *)&boot_conn->negotiated_login_options. 4974 auth_data.chap.target_chap_name); 4975 break; 4976 case ISCSI_BOOT_TGT_CHAP_SECRET: 4977 rc = sprintf(str, "%.*s\n", 4978 boot_conn->negotiated_login_options.auth_data.chap. 4979 target_secret_length, 4980 (char *)&boot_conn->negotiated_login_options. 4981 auth_data.chap.target_secret); 4982 break; 4983 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 4984 rc = sprintf(str, "%.*s\n", 4985 boot_conn->negotiated_login_options.auth_data.chap. 4986 intr_chap_name_length, 4987 (char *)&boot_conn->negotiated_login_options. 4988 auth_data.chap.intr_chap_name); 4989 break; 4990 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 4991 rc = sprintf(str, "%.*s\n", 4992 boot_conn->negotiated_login_options.auth_data.chap. 4993 intr_secret_length, 4994 (char *)&boot_conn->negotiated_login_options. 
4995 auth_data.chap.intr_secret); 4996 break; 4997 case ISCSI_BOOT_TGT_FLAGS: 4998 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 4999 break; 5000 case ISCSI_BOOT_TGT_NIC_ASSOC: 5001 rc = sprintf(str, "0\n"); 5002 break; 5003 } 5004 return rc; 5005 } 5006 5007 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 5008 { 5009 struct beiscsi_hba *phba = data; 5010 char *str = buf; 5011 int rc = -EPERM; 5012 5013 switch (type) { 5014 case ISCSI_BOOT_INI_INITIATOR_NAME: 5015 rc = sprintf(str, "%s\n", 5016 phba->boot_struct.boot_sess.initiator_iscsiname); 5017 break; 5018 } 5019 return rc; 5020 } 5021 5022 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 5023 { 5024 struct beiscsi_hba *phba = data; 5025 char *str = buf; 5026 int rc = -EPERM; 5027 5028 switch (type) { 5029 case ISCSI_BOOT_ETH_FLAGS: 5030 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 5031 break; 5032 case ISCSI_BOOT_ETH_INDEX: 5033 rc = sprintf(str, "0\n"); 5034 break; 5035 case ISCSI_BOOT_ETH_MAC: 5036 rc = beiscsi_get_macaddr(str, phba); 5037 break; 5038 } 5039 return rc; 5040 } 5041 5042 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 5043 { 5044 umode_t rc = 0; 5045 5046 switch (type) { 5047 case ISCSI_BOOT_TGT_NAME: 5048 case ISCSI_BOOT_TGT_IP_ADDR: 5049 case ISCSI_BOOT_TGT_PORT: 5050 case ISCSI_BOOT_TGT_CHAP_NAME: 5051 case ISCSI_BOOT_TGT_CHAP_SECRET: 5052 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5053 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5054 case ISCSI_BOOT_TGT_NIC_ASSOC: 5055 case ISCSI_BOOT_TGT_FLAGS: 5056 rc = S_IRUGO; 5057 break; 5058 } 5059 return rc; 5060 } 5061 5062 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 5063 { 5064 umode_t rc = 0; 5065 5066 switch (type) { 5067 case ISCSI_BOOT_INI_INITIATOR_NAME: 5068 rc = S_IRUGO; 5069 break; 5070 } 5071 return rc; 5072 } 5073 5074 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 5075 { 5076 umode_t rc = 0; 5077 5078 switch (type) { 5079 case ISCSI_BOOT_ETH_FLAGS: 5080 case ISCSI_BOOT_ETH_MAC: 5081 case ISCSI_BOOT_ETH_INDEX: 5082 rc = S_IRUGO; 5083 break; 5084 } 5085 return rc; 5086 } 5087 5088 static void beiscsi_boot_kobj_release(void *data) 5089 { 5090 struct beiscsi_hba *phba = data; 5091 5092 scsi_host_put(phba->shost); 5093 } 5094 5095 static int beiscsi_boot_create_kset(struct beiscsi_hba *phba) 5096 { 5097 struct boot_struct *bs = &phba->boot_struct; 5098 struct iscsi_boot_kobj *boot_kobj; 5099 5100 if (bs->boot_kset) { 5101 __beiscsi_log(phba, KERN_ERR, 5102 "BM_%d: boot_kset already created\n"); 5103 return 0; 5104 } 5105 5106 bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 5107 if (!bs->boot_kset) { 5108 __beiscsi_log(phba, KERN_ERR, 5109 "BM_%d: boot_kset alloc failed\n"); 5110 return -ENOMEM; 5111 } 5112 5113 /* get shost ref because the show function will refer phba */ 5114 if (!scsi_host_get(phba->shost)) 5115 goto free_kset; 5116 5117 boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba, 5118 beiscsi_show_boot_tgt_info, 5119 beiscsi_tgt_get_attr_visibility, 5120 beiscsi_boot_kobj_release); 5121 if (!boot_kobj) 5122 goto put_shost; 5123 5124 if (!scsi_host_get(phba->shost)) 5125 goto free_kset; 5126 5127 boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba, 5128 beiscsi_show_boot_ini_info, 5129 beiscsi_ini_get_attr_visibility, 5130 beiscsi_boot_kobj_release); 5131 if (!boot_kobj) 5132 goto put_shost; 5133 5134 if (!scsi_host_get(phba->shost)) 5135 goto free_kset; 5136 5137 boot_kobj = 
iscsi_boot_create_ethernet(bs->boot_kset, 0, phba, 5138 beiscsi_show_boot_eth_info, 5139 beiscsi_eth_get_attr_visibility, 5140 beiscsi_boot_kobj_release); 5141 if (!boot_kobj) 5142 goto put_shost; 5143 5144 return 0; 5145 5146 put_shost: 5147 scsi_host_put(phba->shost); 5148 free_kset: 5149 iscsi_boot_destroy_kset(bs->boot_kset); 5150 bs->boot_kset = NULL; 5151 return -ENOMEM; 5152 } 5153 5154 static void beiscsi_boot_work(struct work_struct *work) 5155 { 5156 struct beiscsi_hba *phba = 5157 container_of(work, struct beiscsi_hba, boot_work); 5158 struct boot_struct *bs = &phba->boot_struct; 5159 unsigned int tag = 0; 5160 5161 if (!beiscsi_hba_is_online(phba)) 5162 return; 5163 5164 beiscsi_log(phba, KERN_INFO, 5165 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 5166 "BM_%d : %s action %d\n", 5167 __func__, phba->boot_struct.action); 5168 5169 switch (phba->boot_struct.action) { 5170 case BEISCSI_BOOT_REOPEN_SESS: 5171 tag = beiscsi_boot_reopen_sess(phba); 5172 break; 5173 case BEISCSI_BOOT_GET_SHANDLE: 5174 tag = __beiscsi_boot_get_shandle(phba, 1); 5175 break; 5176 case BEISCSI_BOOT_GET_SINFO: 5177 tag = beiscsi_boot_get_sinfo(phba); 5178 break; 5179 case BEISCSI_BOOT_LOGOUT_SESS: 5180 tag = beiscsi_boot_logout_sess(phba); 5181 break; 5182 case BEISCSI_BOOT_CREATE_KSET: 5183 beiscsi_boot_create_kset(phba); 5184 /** 5185 * updated boot_kset is made visible to all before 5186 * ending the boot work. 5187 */ 5188 mb(); 5189 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5190 return; 5191 } 5192 if (!tag) { 5193 if (bs->retry--) 5194 schedule_work(&phba->boot_work); 5195 else 5196 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5197 } 5198 } 5199 5200 static void beiscsi_eqd_update_work(struct work_struct *work) 5201 { 5202 struct hwi_context_memory *phwi_context; 5203 struct be_set_eqd set_eqd[MAX_CPUS]; 5204 struct hwi_controller *phwi_ctrlr; 5205 struct be_eq_obj *pbe_eq; 5206 struct beiscsi_hba *phba; 5207 unsigned int pps, delta; 5208 struct be_aic_obj *aic; 5209 int eqd, i, num = 0; 5210 unsigned long now; 5211 5212 phba = container_of(work, struct beiscsi_hba, eqd_update.work); 5213 if (!beiscsi_hba_is_online(phba)) 5214 return; 5215 5216 phwi_ctrlr = phba->phwi_ctrlr; 5217 phwi_context = phwi_ctrlr->phwi_ctxt; 5218 5219 for (i = 0; i <= phba->num_cpus; i++) { 5220 aic = &phba->aic_obj[i]; 5221 pbe_eq = &phwi_context->be_eq[i]; 5222 now = jiffies; 5223 if (!aic->jiffies || time_before(now, aic->jiffies) || 5224 pbe_eq->cq_count < aic->eq_prev) { 5225 aic->jiffies = now; 5226 aic->eq_prev = pbe_eq->cq_count; 5227 continue; 5228 } 5229 delta = jiffies_to_msecs(now - aic->jiffies); 5230 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); 5231 eqd = (pps / 1500) << 2; 5232 5233 if (eqd < 8) 5234 eqd = 0; 5235 eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX); 5236 eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN); 5237 5238 aic->jiffies = now; 5239 aic->eq_prev = pbe_eq->cq_count; 5240 5241 if (eqd != aic->prev_eqd) { 5242 set_eqd[num].delay_multiplier = (eqd * 65)/100; 5243 set_eqd[num].eq_id = pbe_eq->q.id; 5244 aic->prev_eqd = eqd; 5245 num++; 5246 } 5247 } 5248 if (num) 5249 /* completion of this is ignored */ 5250 beiscsi_modify_eq_delay(phba, set_eqd, num); 5251 5252 schedule_delayed_work(&phba->eqd_update, 5253 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5254 } 5255 5256 static void beiscsi_hw_tpe_check(struct timer_list *t) 5257 { 5258 struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5259 u32 wait; 5260 5261 /* if not TPE, do nothing */ 5262 if (!beiscsi_detect_tpe(phba)) 
		return;

	/* wait default 4000ms before recovering */
	wait = 4000;
	if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
		wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
	queue_delayed_work(phba->wq, &phba->recover_port,
			   msecs_to_jiffies(wait));
}

static void beiscsi_hw_health_check(struct timer_list *t)
{
	struct beiscsi_hba *phba = from_timer(phba, t, hw_check);

	if (beiscsi_detect_ue(phba)) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : port in error: %lx\n", phba->state);
		/* sessions are no longer valid, so first fail the sessions */
		queue_work(phba->wq, &phba->sess_work);

		/* detect UER supported */
		if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
			return;
		/* modify this timer to check TPE */
		phba->hw_check.function = beiscsi_hw_tpe_check;
	}

	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
}

/*
 * beiscsi_enable_port()- Enables the disabled port.
 * Only port resources freed in disable function are reallocated.
 * This is called in HBA error handling path.
 *
 * @phba: Instance of driver private structure
 *
 **/
static int beiscsi_enable_port(struct beiscsi_hba *phba)
{
	struct hwi_context_memory *phwi_context;
	struct hwi_controller *phwi_ctrlr;
	struct be_eq_obj *pbe_eq;
	int ret, i;

	if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : %s : port is online %lx\n",
			      __func__, phba->state);
		return 0;
	}

	ret = beiscsi_init_sliport(phba);
	if (ret)
		return ret;

	be2iscsi_enable_msix(phba);

	beiscsi_get_params(phba);
	beiscsi_set_host_data(phba);
	/* Re-enable UER. If different TPE occurs then it is recoverable. */
	beiscsi_set_uer_feature(phba);

	phba->shost->max_id = phba->params.cxns_per_ctrl;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : init port failed\n");
		goto disable_msix;
	}

	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_tag_status[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
	}

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
	}

	i = (phba->pcidev->msix_enabled) ? i : 0;
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : setup IRQs failed %d\n", ret);
		goto cleanup_port;
	}
	hwi_enable_intr(phba);
	/* port operational: clear all error bits */
	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
	__beiscsi_log(phba, KERN_INFO,
		      "BM_%d : port online: 0x%lx\n", phba->state);

	/* start hw_check timer and eqd_update work */
	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));

	/**
	 * Timer function gets modified for TPE detection.
	 * Always reinit to do health check first.
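	 * beiscsi_hw_health_check() rearms this timer and switches it to
	 * beiscsi_hw_tpe_check() once a UE is detected on a UER-capable port.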
5375 */ 5376 phba->hw_check.function = beiscsi_hw_health_check; 5377 mod_timer(&phba->hw_check, 5378 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5379 return 0; 5380 5381 cleanup_port: 5382 for (i = 0; i < phba->num_cpus; i++) { 5383 pbe_eq = &phwi_context->be_eq[i]; 5384 irq_poll_disable(&pbe_eq->iopoll); 5385 } 5386 hwi_cleanup_port(phba); 5387 5388 disable_msix: 5389 pci_free_irq_vectors(phba->pcidev); 5390 return ret; 5391 } 5392 5393 /* 5394 * beiscsi_disable_port()- Disable port and cleanup driver resources. 5395 * This is called in HBA error handling and driver removal. 5396 * @phba: Instance Priv structure 5397 * @unload: indicate driver is unloading 5398 * 5399 * Free the OS and HW resources held by the driver 5400 **/ 5401 static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload) 5402 { 5403 struct hwi_context_memory *phwi_context; 5404 struct hwi_controller *phwi_ctrlr; 5405 struct be_eq_obj *pbe_eq; 5406 unsigned int i; 5407 5408 if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state)) 5409 return; 5410 5411 phwi_ctrlr = phba->phwi_ctrlr; 5412 phwi_context = phwi_ctrlr->phwi_ctxt; 5413 hwi_disable_intr(phba); 5414 beiscsi_free_irqs(phba); 5415 pci_free_irq_vectors(phba->pcidev); 5416 5417 for (i = 0; i < phba->num_cpus; i++) { 5418 pbe_eq = &phwi_context->be_eq[i]; 5419 irq_poll_disable(&pbe_eq->iopoll); 5420 } 5421 cancel_delayed_work_sync(&phba->eqd_update); 5422 cancel_work_sync(&phba->boot_work); 5423 /* WQ might be running cancel queued mcc_work if we are not exiting */ 5424 if (!unload && beiscsi_hba_in_error(phba)) { 5425 pbe_eq = &phwi_context->be_eq[i]; 5426 cancel_work_sync(&pbe_eq->mcc_work); 5427 } 5428 hwi_cleanup_port(phba); 5429 beiscsi_cleanup_port(phba); 5430 } 5431 5432 static void beiscsi_sess_work(struct work_struct *work) 5433 { 5434 struct beiscsi_hba *phba; 5435 5436 phba = container_of(work, struct beiscsi_hba, sess_work); 5437 /* 5438 * This work gets scheduled only in case of HBA error. 5439 * Old sessions are gone so need to be re-established. 5440 * iscsi_session_failure needs process context hence this work. 5441 */ 5442 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5443 } 5444 5445 static void beiscsi_recover_port(struct work_struct *work) 5446 { 5447 struct beiscsi_hba *phba; 5448 5449 phba = container_of(work, struct beiscsi_hba, recover_port.work); 5450 beiscsi_disable_port(phba, 0); 5451 beiscsi_enable_port(phba); 5452 } 5453 5454 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, 5455 pci_channel_state_t state) 5456 { 5457 struct beiscsi_hba *phba = NULL; 5458 5459 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5460 set_bit(BEISCSI_HBA_PCI_ERR, &phba->state); 5461 5462 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5463 "BM_%d : EEH error detected\n"); 5464 5465 /* first stop UE detection when PCI error detected */ 5466 del_timer_sync(&phba->hw_check); 5467 cancel_delayed_work_sync(&phba->recover_port); 5468 5469 /* sessions are no longer valid, so first fail the sessions */ 5470 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5471 beiscsi_disable_port(phba, 0); 5472 5473 if (state == pci_channel_io_perm_failure) { 5474 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5475 "BM_%d : EEH : State PERM Failure"); 5476 return PCI_ERS_RESULT_DISCONNECT; 5477 } 5478 5479 pci_disable_device(pdev); 5480 5481 /* The error could cause the FW to trigger a flash debug dump. 
5482 * Resetting the card while flash dump is in progress 5483 * can cause it not to recover; wait for it to finish. 5484 * Wait only for first function as it is needed only once per 5485 * adapter. 5486 **/ 5487 if (pdev->devfn == 0) 5488 ssleep(30); 5489 5490 return PCI_ERS_RESULT_NEED_RESET; 5491 } 5492 5493 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) 5494 { 5495 struct beiscsi_hba *phba = NULL; 5496 int status = 0; 5497 5498 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5499 5500 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5501 "BM_%d : EEH Reset\n"); 5502 5503 status = pci_enable_device(pdev); 5504 if (status) 5505 return PCI_ERS_RESULT_DISCONNECT; 5506 5507 pci_set_master(pdev); 5508 pci_set_power_state(pdev, PCI_D0); 5509 pci_restore_state(pdev); 5510 5511 status = beiscsi_check_fw_rdy(phba); 5512 if (status) { 5513 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5514 "BM_%d : EEH Reset Completed\n"); 5515 } else { 5516 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5517 "BM_%d : EEH Reset Completion Failure\n"); 5518 return PCI_ERS_RESULT_DISCONNECT; 5519 } 5520 5521 pci_cleanup_aer_uncorrect_error_status(pdev); 5522 return PCI_ERS_RESULT_RECOVERED; 5523 } 5524 5525 static void beiscsi_eeh_resume(struct pci_dev *pdev) 5526 { 5527 struct beiscsi_hba *phba; 5528 int ret; 5529 5530 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5531 pci_save_state(pdev); 5532 5533 ret = beiscsi_enable_port(phba); 5534 if (ret) 5535 __beiscsi_log(phba, KERN_ERR, 5536 "BM_%d : AER EEH resume failed\n"); 5537 } 5538 5539 static int beiscsi_dev_probe(struct pci_dev *pcidev, 5540 const struct pci_device_id *id) 5541 { 5542 struct hwi_context_memory *phwi_context; 5543 struct hwi_controller *phwi_ctrlr; 5544 struct beiscsi_hba *phba = NULL; 5545 struct be_eq_obj *pbe_eq; 5546 unsigned int s_handle; 5547 char wq_name[20]; 5548 int ret, i; 5549 5550 ret = beiscsi_enable_pci(pcidev); 5551 if (ret < 0) { 5552 dev_err(&pcidev->dev, 5553 "beiscsi_dev_probe - Failed to enable pci device\n"); 5554 return ret; 5555 } 5556 5557 phba = beiscsi_hba_alloc(pcidev); 5558 if (!phba) { 5559 dev_err(&pcidev->dev, 5560 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 5561 ret = -ENOMEM; 5562 goto disable_pci; 5563 } 5564 5565 /* Enable EEH reporting */ 5566 ret = pci_enable_pcie_error_reporting(pcidev); 5567 if (ret) 5568 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5569 "BM_%d : PCIe Error Reporting " 5570 "Enabling Failed\n"); 5571 5572 pci_save_state(pcidev); 5573 5574 /* Initialize Driver configuration Paramters */ 5575 beiscsi_hba_attrs_init(phba); 5576 5577 phba->mac_addr_set = false; 5578 5579 switch (pcidev->device) { 5580 case BE_DEVICE_ID1: 5581 case OC_DEVICE_ID1: 5582 case OC_DEVICE_ID2: 5583 phba->generation = BE_GEN2; 5584 phba->iotask_fn = beiscsi_iotask; 5585 dev_warn(&pcidev->dev, 5586 "Obsolete/Unsupported BE2 Adapter Family\n"); 5587 break; 5588 case BE_DEVICE_ID2: 5589 case OC_DEVICE_ID3: 5590 phba->generation = BE_GEN3; 5591 phba->iotask_fn = beiscsi_iotask; 5592 break; 5593 case OC_SKH_ID1: 5594 phba->generation = BE_GEN4; 5595 phba->iotask_fn = beiscsi_iotask_v2; 5596 break; 5597 default: 5598 phba->generation = 0; 5599 } 5600 5601 ret = be_ctrl_init(phba, pcidev); 5602 if (ret) { 5603 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5604 "BM_%d : be_ctrl_init failed\n"); 5605 goto free_hba; 5606 } 5607 5608 ret = beiscsi_init_sliport(phba); 5609 if (ret) 5610 goto free_hba; 5611 5612 spin_lock_init(&phba->io_sgl_lock); 5613 
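	/* the two locks below guard the mgmt SGL pool and async PDU handling */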
static int beiscsi_dev_probe(struct pci_dev *pcidev,
			     const struct pci_device_id *id)
{
	struct hwi_context_memory *phwi_context;
	struct hwi_controller *phwi_ctrlr;
	struct beiscsi_hba *phba = NULL;
	struct be_eq_obj *pbe_eq;
	unsigned int s_handle;
	char wq_name[20];
	int ret, i;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev,
			"beiscsi_dev_probe - Failed to enable pci device\n");
		return ret;
	}

	phba = beiscsi_hba_alloc(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev,
			"beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
		ret = -ENOMEM;
		goto disable_pci;
	}

	/* Enable EEH reporting */
	ret = pci_enable_pcie_error_reporting(pcidev);
	if (ret)
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : PCIe Error Reporting Enabling Failed\n");

	pci_save_state(pcidev);

	/* Initialize driver configuration parameters */
	beiscsi_hba_attrs_init(phba);

	phba->mac_addr_set = false;

	switch (pcidev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
	case OC_DEVICE_ID2:
		phba->generation = BE_GEN2;
		phba->iotask_fn = beiscsi_iotask;
		dev_warn(&pcidev->dev,
			 "Obsolete/Unsupported BE2 Adapter Family\n");
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID3:
		phba->generation = BE_GEN3;
		phba->iotask_fn = beiscsi_iotask;
		break;
	case OC_SKH_ID1:
		phba->generation = BE_GEN4;
		phba->iotask_fn = beiscsi_iotask_v2;
		break;
	default:
		phba->generation = 0;
	}

	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_ctrl_init failed\n");
		goto free_hba;
	}

	ret = beiscsi_init_sliport(phba);
	if (ret)
		goto free_hba;

	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->async_pdu_lock);
	ret = beiscsi_get_fw_config(&phba->ctrl, phba);
	if (ret != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Error getting fw config\n");
		goto free_port;
	}
	beiscsi_get_port_name(&phba->ctrl, phba);
	beiscsi_get_params(phba);
	beiscsi_set_host_data(phba);
	beiscsi_set_uer_feature(phba);

	be2iscsi_enable_msix(phba);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : num_cpus = %d\n",
		    phba->num_cpus);

	phba->shost->max_id = phba->params.cxns_per_ctrl;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_get_memory(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : alloc host mem failed\n");
		goto free_port;
	}

	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : init port failed\n");
		beiscsi_free_mem(phba);
		goto free_port;
	}

	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_tag_status[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
		       sizeof(struct be_dma_mem));
	}

	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

	snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
		 phba->shost->host_no);
	phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
	if (!phba->wq) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe - "
			    "Failed to allocate work queue\n");
		ret = -ENOMEM;
		goto free_twq;
	}

	INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
	}

	i = (phba->pcidev->msix_enabled) ? i : 0;
	/*
	 * Work item for MCC handling: with MSI-X the last EQ
	 * (be_eq[num_cpus]) owns it, otherwise EQ 0 does.
	 */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe - "
			    "beiscsi_init_irqs failed\n");
		goto disable_iopoll;
	}
	hwi_enable_intr(phba);

	ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
	if (ret)
		goto free_irqs;

	/* set online bit after port is operational */
	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
	__beiscsi_log(phba, KERN_INFO,
		      "BM_%d : port online: 0x%lx\n", phba->state);

	INIT_WORK(&phba->boot_work, beiscsi_boot_work);
	ret = beiscsi_boot_get_shandle(phba, &s_handle);
	if (ret > 0) {
		beiscsi_start_boot_work(phba, s_handle);
		/*
		 * Set this bit after starting the work to let probe
		 * handle it first. An ASYNC event can also schedule
		 * this work.
		 */
		set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
	}

	beiscsi_iface_create_default(phba);
	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));

	INIT_WORK(&phba->sess_work, beiscsi_sess_work);
	INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
	/*
	 * Start UE detection here. A UE before this point would stall
	 * the probe and eventually fail it.
	 */
	timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0);
	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_irqs:
	hwi_disable_intr(phba);
	beiscsi_free_irqs(phba);
disable_iopoll:
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
	destroy_workqueue(phba->wq);
free_twq:
	hwi_cleanup_port(phba);
	beiscsi_cleanup_port(phba);
	beiscsi_free_mem(phba);
free_port:
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
free_hba:
	pci_disable_msix(phba->pcidev);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_set_drvdata(pcidev, NULL);
disable_pci:
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
	return ret;
}
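/**
 * beiscsi_remove()- Detach the driver from one adapter function.
 * @pcidev: PCI device being removed
 *
 * Mirrors beiscsi_dev_probe(): stop the UE timer and worker threads
 * first so recovery cannot race with teardown, then remove the SCSI
 * host, disable the port (unload == 1) and release the remaining
 * memory and PCI resources.
 */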
static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;

	phba = pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	/* first stop UE detection before unloading */
	del_timer_sync(&phba->hw_check);
	cancel_delayed_work_sync(&phba->recover_port);
	cancel_work_sync(&phba->sess_work);

	beiscsi_iface_destroy_default(phba);
	iscsi_host_remove(phba->shost);
	beiscsi_disable_port(phba, 1);

	/* boot_work was cancelled in beiscsi_disable_port() above */
	iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);

	/* free all resources */
	destroy_workqueue(phba->wq);
	beiscsi_free_mem(phba);

	/* ctrl uninit */
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);

	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_disable_pcie_error_reporting(pcidev);
	pci_set_drvdata(pcidev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
}

static struct pci_error_handlers beiscsi_eeh_handlers = {
	.error_detected = beiscsi_eeh_err_detected,
	.slot_reset = beiscsi_eeh_reset,
	.resume = beiscsi_eeh_resume,
};
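/*
 * iSCSI transport template registered with the transport class in
 * beiscsi_module_init(). CAP_DATA_PATH_OFFLOAD advertises that the
 * data path runs on the adapter; entry points that need no offload
 * handling map directly to the libiscsi helpers.
 */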
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = beiscsi_attr_is_visible,
	.set_iface_param = beiscsi_iface_set_param,
	.get_iface_param = beiscsi_iface_get_param,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.bsg_request = beiscsi_bsg_request,
};

static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table,
	.err_handler = &beiscsi_eeh_handlers
};

static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
		iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi transport.\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
	       &beiscsi_iscsi_transport);

	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}

static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);