/*
 * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
 * Host Bus Adapters. Refer to the README file included with this package
 * for driver version and adapter compatibility.
 *
 * Copyright (c) 2018 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful. ALL EXPRESS
 * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
 * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
 * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
 * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>
#include <linux/irq_poll.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");

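/*
 * BEISCSI_RW_ATTR and the helper macros below generate, per attribute,
 * a module parameter plus sysfs show/store handlers and range-checked
 * change/init routines, all backed by the phba->attr_<name> field.
 */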
#define beiscsi_disp_param(_name)\
static ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
static int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
static ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			struct device_attribute *attr, const char *buf,\
			size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
static int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	    beiscsi_##_name##_disp, beiscsi_##_name##_store)

/*
 * When new log level added update MAX allowed value for log_enable
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events : 0x01\n"
		"\t\t\t\tMailbox Events : 0x02\n"
		"\t\t\t\tMiscellaneous Events : 0x04\n"
		"\t\t\t\tError Handling : 0x08\n"
		"\t\t\t\tIO Path Events : 0x10\n"
		"\t\t\t\tConfiguration Path : 0x20\n"
		"\t\t\t\tiSCSI Protocol : 0x40\n");

DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
	    beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
	    beiscsi_free_session_disp, NULL);
static struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	&dev_attr_beiscsi_fw_ver,
	&dev_attr_beiscsi_active_session_count,
	&dev_attr_beiscsi_free_session_count,
	&dev_attr_beiscsi_phys_port,
	NULL,
};

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

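/**
 * beiscsi_eh_abort - SCSI EH handler to abort a single command
 * @sc: command to be aborted
 *
 * Marks the WRB invalid and requests FW to invalidate the ICD used by
 * the command before deferring to libiscsi's iscsi_eh_abort().
 */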
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_io_task *abrt_io_task;
	struct beiscsi_conn *beiscsi_conn;
	struct iscsi_session *session;
	struct invldt_cmd_tbl inv_tbl;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	int rc;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	/* check if we raced, task just got cleaned up under us */
	spin_lock_bh(&session->back_lock);
	if (!abrt_task || !abrt_task->sc) {
		spin_unlock_bh(&session->back_lock);
		return SUCCESS;
	}
	/* get a task ref till FW processes the req for the ICD used */
	__iscsi_get_task(abrt_task);
	abrt_io_task = abrt_task->dd_data;
	conn = abrt_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	/* mark the WRB invalid if it has not been processed by FW yet */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	}
	inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
	spin_unlock_bh(&session->back_lock);

	rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
	iscsi_put_task(abrt_task);
	if (rc) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : sc %p invalidation failed %d\n",
			    sc, rc);
		return FAILED;
	}

	return iscsi_eh_abort(sc);
}

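/**
 * beiscsi_eh_device_reset - SCSI EH handler for LUN reset
 * @sc: command that triggered the reset
 *
 * Collects every outstanding command on the LUN, invalidates their ICDs
 * in FW, then lets libiscsi issue the TMF via iscsi_eh_device_reset().
 */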
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct beiscsi_invldt_cmd_tbl {
		struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
		struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
	} *inv_tbl;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_session *session;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	struct iscsi_task *task;
	unsigned int i, nents;
	int rc, more = 0;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}

	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
	if (!inv_tbl) {
		spin_unlock_bh(&session->frwd_lock);
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : invldt_cmd_tbl alloc failed\n");
		return FAILED;
	}
	nents = 0;
	/* take back_lock to prevent task from getting cleaned up under us */
	spin_lock(&session->back_lock);
	for (i = 0; i < conn->session->cmds_max; i++) {
		task = conn->session->cmds[i];
		if (!task->sc)
			continue;

		if (sc->device->lun != task->sc->device->lun)
			continue;
		/*
		 * Can't fit in more cmds? Normally this won't happen because
		 * BEISCSI_CMD_PER_LUN is the same as BE_INVLDT_CMD_TBL_SZ.
		 */
		if (nents == BE_INVLDT_CMD_TBL_SZ) {
			more = 1;
			break;
		}

		/* get a task ref till FW processes the req for the ICD used */
		__iscsi_get_task(task);
		io_task = task->dd_data;
		/* mark the WRB invalid if it has not been processed by FW yet */
		if (is_chip_be2_be3r(phba)) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
				      io_task->pwrb_handle->pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
				      io_task->pwrb_handle->pwrb, 1);
		}

		inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
		inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
		inv_tbl->task[nents] = task;
		nents++;
	}
	spin_unlock(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);

	rc = SUCCESS;
	if (!nents)
		goto end_reset;

	if (more) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : number of cmds exceeds size of invalidation table\n");
		rc = FAILED;
		goto end_reset;
	}

	if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : cid %u scmds invalidation failed\n",
			    beiscsi_conn->beiscsi_conn_cid);
		rc = FAILED;
	}

end_reset:
	for (i = 0; i < nents; i++)
		iscsi_put_task(inv_tbl->task[i]);
	kfree(inv_tbl);

	if (rc == SUCCESS)
		rc = iscsi_eh_device_reset(sc);
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);

static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.target_alloc = iscsi_target_alloc,
	.eh_timed_out = iscsi_eh_cmd_timed_out,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.max_segment_size = 65536,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

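/**
 * beiscsi_hba_alloc - allocate the Scsi_Host and driver private data
 * @pcidev: PCI device being probed
 *
 * Returns the zeroed beiscsi_hba hung off the new Scsi_Host, or NULL on
 * allocation failure. Takes a reference on @pcidev.
 */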
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->max_id = BE2_MAX_SESSIONS - 1;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	return phba;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap(pci_resource_start(pcidev, 2),
		       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;

	addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap(pci_resource_start(pcidev, pcicfg_reg),
		       pci_resource_len(pcidev, pcicfg_reg));
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - request region failed\n");
		goto pci_dev_disable;
	}

	pci_set_master(pcidev);
	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		}
	}
	return 0;

pci_region_release:
	pci_release_regions(pcidev);
pci_dev_disable:
	pci_disable_device(pcidev);

	return ret;
}

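/**
 * be_ctrl_init - map PCI BARs and set up the bootstrap mailbox
 * @phba: driver priv structure
 * @pdev: PCI device
 *
 * The mailbox is allocated 16 bytes larger than needed so the portion
 * handed to HW can be aligned on a 16-byte boundary.
 */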
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
			mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	mutex_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);

	return status;
}

/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr to device priv structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned to ICD per-page posting */
			if (icd_start % icd_post_per_page) {
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						   ~(align_mask));
				phba->fw_config.iscsi_icd_start[ulp_num] =
					icd_start_align;
			}

			icd_count_align = (icd_count & ~align_mask);

			/* ICD discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] =
				(icd_count - icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : Aligned ICD values\n"
				    "\t ICD Start : %d\n"
				    "\t ICD Count : %d\n"
				    "\t ICD Discarded : %d\n",
				    phba->fw_config.iscsi_icd_start[ulp_num],
				    phba->fw_config.iscsi_icd_count[ulp_num],
				    icd_count_unavailable);
			break;
		}
	}

	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				     (total_cid_count +
				      BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}

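/**
 * hwi_ring_eq_db - ring an event queue doorbell
 * @phba: driver priv structure
 * @id: EQ id, split across the low and high fields of the doorbell
 * @clr_interrupt: clear the interrupt for this EQ
 * @num_processed: number of EQEs to return as credits
 * @rearm: re-enable interrupts on this EQ
 * @event: set the event bit
 */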
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		 DB_EQ_RING_ID_HIGH_MASK)
		<< DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - ISR for the dedicated MCC event queue (MSI-X)
 * @irq: Not used
 * @dev_id: Pointer to the MCC EQ object
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			mcc_events++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}

	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
	}
	return IRQ_HANDLED;
}

/**
 * be_isr_msix - ISR for a per-CPU I/O event queue (MSI-X)
 * @irq: Not used
 * @dev_id: Pointer to the EQ object for this vector
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_queue_info *eq;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;

	phba = pbe_eq->phba;
	/* disable interrupt till iopoll completes */
	hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
	irq_poll_sched(&pbe_eq->iopoll);

	return IRQ_HANDLED;
}

/**
 * be_isr - legacy (INTx) ISR shared by the MCC and I/O event queues
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events, io_events;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr, rearm;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	io_events = 0;
	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		      resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
			mcc_events++;
		else
			io_events++;
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (!io_events && !mcc_events)
		return IRQ_NONE;

	/* no need to rearm if interrupt is only for IOs */
	rearm = 0;
	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		/* rearm for MCCQ */
		rearm = 1;
	}
	if (io_events)
		irq_poll_sched(&pbe_eq->iopoll);
	hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
	return IRQ_HANDLED;
}

static void beiscsi_free_irqs(struct beiscsi_hba *phba)
{
	struct hwi_context_memory *phwi_context;
	int i;

	if (!phba->pcidev->msix_enabled) {
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
		return;
	}

	phwi_context = phba->phwi_ctrlr->phwi_ctxt;
	for (i = 0; i <= phba->num_cpus; i++) {
		free_irq(pci_irq_vector(phba->pcidev, i),
			 &phwi_context->be_eq[i]);
		kfree(phba->msi_name[i]);
	}
}

static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (pcidev->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kasprintf(GFP_KERNEL,
						      "beiscsi_%02x_%02x",
						      phba->shost->host_no, i);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			ret = request_irq(pci_irq_vector(pcidev, i),
					  be_isr_msix, 0, phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : %s-Failed to register msix for i = %d\n",
					    __func__, i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
					      phba->shost->host_no);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
				  phba->msi_name[i], &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : %s-Failed to register beiscsi_msix_mcc\n",
				    __func__);
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : %s-Failed to register irq\n",
				    __func__);
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		/* unwind with the vector index, not the failing index */
		free_irq(pci_irq_vector(pcidev, j), &phwi_context->be_eq[j]);
		kfree(phba->msi_name[j]);
	}
	return ret;
}

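/**
 * hwi_ring_cq_db - ring a completion queue doorbell
 * @phba: driver priv structure
 * @id: CQ id, split across the low and high fields of the doorbell
 * @num_processed: number of CQEs to return as credits
 * @rearm: re-enable interrupts on this CQ
 */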
void hwi_ring_cq_db(struct beiscsi_hba *phba,
		    unsigned int id, unsigned int num_processed,
		    unsigned char rearm)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;

	/* Setting lower order CQ_ID Bits */
	val |= (id & DB_CQ_RING_ID_LOW_MASK);

	/* Setting Higher order CQ_ID Bits */
	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
		 DB_CQ_RING_ID_HIGH_MASK)
		<< DB_CQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

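/*
 * alloc_io_sgl_handle() pops the next free IO SGL handle under
 * io_sgl_lock; it returns NULL when the pool is exhausted. The alloc
 * and free cursors wrap at ios_per_ctrl.
 */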
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index ==
		    (phba->params.ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
			    phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
		spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}

static inline struct wrb_handle *
beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       unsigned int wrbs_per_cxn)
{
	struct wrb_handle *pwrb_handle;
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	if (!pwrb_context->wrb_handles_available) {
		spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
		return NULL;
	}
	pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
	pwrb_context->wrb_handles_available--;
	if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
		pwrb_context->alloc_index = 0;
	else
		pwrb_context->alloc_index++;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);

	if (pwrb_handle)
		memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));

	return pwrb_handle;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @pcontext: ptr to ptr to wrb context
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    struct hwi_wrb_context **pcontext)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	/* return the context address */
	*pcontext = pwrb_context;
	return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
}

static inline void
beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       struct wrb_handle *pwrb_handle,
		       unsigned int wrbs_per_cxn)
{
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;
	pwrb_handle->pio_handle = NULL;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	beiscsi_put_wrb_handle(pwrb_context,
			       pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle,"
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL ,"
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}

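/*
 * be_complete_io() translates a solicited CQE for a SCSI command:
 * response/status, residual and sense (from the status BHS) are copied
 * into the scsi_cmnd before the task is completed through
 * iscsi_complete_scsi_task().
 */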
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task,
	       struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	if (io_task->scsi_cmnd) {
		scsi_dma_unmap(io_task->scsi_cmnd);
		io_task->scsi_cmnd = NULL;
	}
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

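/*
 * The be_complete_{logout,tmf,nopin_resp} helpers below rebuild the
 * respective iSCSI response header from the CQE fields and hand it to
 * libiscsi via __iscsi_complete_pdu().
 */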
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task,
		   struct common_sol_cqe *csol_cqe)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task,
		struct common_sol_cqe *csol_cqe)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	uint16_t wrb_index, cid, cri_index;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_session *session;
	struct iscsi_task *task;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (is_chip_be2_be3r(phba)) {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
				    cid, psol);
	} else {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
				    cid, psol);
	}

	cri_index = BE_GET_CRI_FROM_CID(cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
	session = beiscsi_conn->conn->session;
	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (task)
		__iscsi_put_task(task);
	spin_unlock_bh(&session->back_lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task,
		       struct common_sol_cqe *csol_cqe)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = csol_cqe->i_flags;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

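/*
 * adapter_get_sol_cqe() hides the chip differences: BE2/BE3 and SKH
 * (v2) lay out the solicited CQE differently, so the fields of interest
 * are copied into a chip-independent common_sol_cqe first.
 */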
static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
				struct sol_cqe *psol,
				struct common_sol_cqe *csol_cqe)
{
	if (is_chip_be2_be3r(phba)) {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_res_cnt, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_cmd_wnd, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						 hw_sts, psol);
		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
						 i_resp, psol);
		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						i_sts, psol);
		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_flags, psol);
	} else {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_res_cnt, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						 hw_sts, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_cmd_wnd, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  cmd_cmpl, psol))
			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							i_sts, psol);
		else
			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							 i_sts, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  u, psol))
			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;

		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  o, psol))
			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
	}
}

static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct common_sol_cqe csol_cqe = {0};
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_task *task;
	uint16_t cri_index = 0;
	uint8_t type;

	phwi_ctrlr = phba->phwi_ctrlr;

	/* Copy the elements to a common structure */
	adapter_get_sol_cqe(phba, psol, &csol_cqe);

	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	pwrb_handle = pwrb_context->pwrb_handle_basestd[
		      csol_cqe.wrb_index];

	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (!task) {
		spin_unlock_bh(&session->back_lock);
		return;
	}
	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;

	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_io(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " %s- Solicited path\n", __func__);
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In %s, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", __func__, type,
			    csol_cqe.wrb_index,
			    csol_cqe.cid);
		break;
	}

	spin_unlock_bh(&session->back_lock);
}

/*
 * ASYNC PDUs include
 * a. Unsolicited NOP-In (target initiated NOP-In)
 * b. ASYNC Messages
 * c. Reject PDU
 * d. Login response
 * These headers arrive unprocessed by the EP firmware.
 * iSCSI layer processes them.
 */
static unsigned int
beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct pdu_base *phdr, void *pdata, unsigned int dlen)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;
	struct iscsi_task *task;
	u8 code;

	code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
	switch (code) {
	case ISCSI_OP_NOOP_IN:
		pdata = NULL;
		dlen = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pdata);
		WARN_ON(dlen != 48);
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)phdr;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : unrecognized async PDU opcode 0x%x\n",
			    code);
		return 1;
	}
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
	return 0;
}

static inline void
beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	pasync_handle->is_final = 0;
	pasync_handle->buffer_len = 0;
	pasync_handle->in_use = 0;
	list_del_init(&pasync_handle->link);
}

static void
beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
			  struct hd_async_context *pasync_ctx,
			  u16 cri)
{
	struct hd_async_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link)
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
}

static struct hd_async_handle *
beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct i_t_dpdu_cqe *pdpdu_cqe,
		       u8 *header)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle;
	struct be_bus_address phys_addr;
	u16 cid, code, ci, cri;
	u8 final, error = 0;
	u32 dpl;

	cid = beiscsi_conn->beiscsi_conn_cid;
	cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
	/*
	 * This function is invoked to get the right async_handle structure
	 * from a given DEF PDU CQ entry.
	 *
	 * - index in CQ entry gives the vertical index
	 * - address in CQ entry is the offset where the DMA last ended
	 * - final - no more notifications for this PDU
	 */
	if (is_chip_be2_be3r(phba)) {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      final, pdpdu_cqe);
	} else {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      final, pdpdu_cqe);
	}

	/*
	 * DB addr Hi/Lo is same for BE and SKH.
	 * Subtract the dataplacementlength to get to the base.
	 */
	phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_lo, pdpdu_cqe);
	phys_addr.u.a32.address_lo -= dpl;
	phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_hi, pdpdu_cqe);

	code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
	switch (code) {
	case UNSOL_HDR_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].header;
		*header = 1;
		break;
	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
		error = 1;
		fallthrough;
	case UNSOL_DATA_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].data;
		break;
	/* called only for above codes */
	default:
		return NULL;
	}

	if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
	    pasync_handle->index != ci) {
		/* driver bug - if ci does not match async handle index */
		error = 1;
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
			    cid, pasync_handle->is_header ? 'H' : 'D',
			    pasync_handle->pa.u.a64.address,
			    pasync_handle->index,
			    phys_addr.u.a64.address, ci);
		/* FW has stale address - attempt continuing by dropping */
	}

	/*
	 * DEF PDU header and data buffers with errors should be simply
	 * dropped as there are no consumers for it.
	 */
	if (error) {
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
		return NULL;
	}

	if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n",
			    cid, code, ci, phys_addr.u.a64.address);
		beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	}

	list_del_init(&pasync_handle->link);
	/*
	 * Each CID is associated with a unique CRI.
	 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totally different.
	 */
	pasync_handle->cri = cri;
	pasync_handle->is_final = final;
	pasync_handle->buffer_len = dpl;
	pasync_handle->in_use = 1;

	return pasync_handle;
}

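/*
 * beiscsi_hdl_fwd_pdu() forwards a reassembled async PDU for a CRI: the
 * first handle on the wait queue carries the header, the payload of the
 * remaining handles is gathered into the first data buffer, and the PDU
 * is then passed to beiscsi_complete_pdu().
 */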
static unsigned int
beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
		    struct hd_async_context *pasync_ctx,
		    u16 cri)
{
	struct iscsi_session *session = beiscsi_conn->conn->session;
	struct hd_async_handle *pasync_handle, *plast_handle;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	void *phdr = NULL, *pdata = NULL;
	u32 dlen = 0, status = 0;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	plast_handle = NULL;
	list_for_each_entry(pasync_handle, plist, link) {
		plast_handle = pasync_handle;
		/* get the header, the first entry */
		if (!phdr) {
			phdr = pasync_handle->pbuffer;
			continue;
		}
		/* use first buffer to collect all the data */
		if (!pdata) {
			pdata = pasync_handle->pbuffer;
			dlen = pasync_handle->buffer_len;
			continue;
		}
		if (!pasync_handle->buffer_len ||
		    (dlen + pasync_handle->buffer_len) >
		    pasync_ctx->async_data.buffer_size)
			break;
		memcpy(pdata + dlen, pasync_handle->pbuffer,
		       pasync_handle->buffer_len);
		dlen += pasync_handle->buffer_len;
	}

	if (!plast_handle->is_final) {
		/* last handle should have final PDU notification from FW */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n",
			    beiscsi_conn->beiscsi_conn_cid, plast_handle,
			    AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr),
			    pasync_ctx->async_entry[cri].wq.hdr_len,
			    pasync_ctx->async_entry[cri].wq.bytes_needed,
			    pasync_ctx->async_entry[cri].wq.bytes_received);
	}
	spin_lock_bh(&session->back_lock);
	status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
	spin_unlock_bh(&session->back_lock);
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	return status;
}

static unsigned int
beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	unsigned int bytes_needed = 0, status = 0;
	u16 cri = pasync_handle->cri;
	struct cri_wait_queue *wq;
	struct beiscsi_hba *phba;
	struct pdu_base *ppdu;
	char *err = "";

	phba = beiscsi_conn->phba;
	wq = &pasync_ctx->async_entry[cri].wq;
	if (pasync_handle->is_header) {
		/* check if PDU hdr is rcv'd when old hdr not completed */
		if (wq->hdr_len) {
			err = "incomplete";
			goto drop_pdu;
		}
		ppdu = pasync_handle->pbuffer;
		bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
					     data_len_hi, ppdu);
		bytes_needed <<= 16;
		bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
							  data_len_lo, ppdu));
		wq->hdr_len = pasync_handle->buffer_len;
		wq->bytes_received = 0;
		wq->bytes_needed = bytes_needed;
		list_add_tail(&pasync_handle->link, &wq->list);
		if (!bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	} else {
		/* check if data received has header and is needed */
		if (!wq->hdr_len || !wq->bytes_needed) {
			err = "header less";
			goto drop_pdu;
		}
		wq->bytes_received += pasync_handle->buffer_len;
		/* Something got overwritten? Better catch it here. */
		if (wq->bytes_received > wq->bytes_needed) {
			err = "overflow";
			goto drop_pdu;
		}
		list_add_tail(&pasync_handle->link, &wq->list);
		if (wq->bytes_received == wq->bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	}
	return status;

drop_pdu:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
		    "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
		    beiscsi_conn->beiscsi_conn_cid, err,
		    pasync_handle->is_header ? 'H' : 'D',
		    wq->hdr_len, wq->bytes_needed,
		    pasync_handle->buffer_len);
	/* discard this handle */
	beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
	/* free all the other handles in cri_wait_queue */
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	/* try continuing */
	return status;
}

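/*
 * beiscsi_hdq_post_handles() reposts nbuf buffers on the default PDU
 * header or data ring at the current producer index and rings the
 * doorbell. The SGEs are programmed only on the first full posting.
 */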
static void
beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
			 u8 header, u8 ulp_num, u16 nbuf)
{
	struct hd_async_handle *pasync_handle;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	struct phys_addr *pasync_sge;
	u32 ring_id, doorbell = 0;
	u32 doorbell_offset;
	u16 prod, pi;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	if (header) {
		pasync_sge = pasync_ctx->async_header.ring_base;
		pi = pasync_ctx->async_header.pi;
		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
		doorbell_offset =
			phwi_ctrlr->default_pdu_hdr[ulp_num].doorbell_offset;
	} else {
		pasync_sge = pasync_ctx->async_data.ring_base;
		pi = pasync_ctx->async_data.pi;
		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
		doorbell_offset =
			phwi_ctrlr->default_pdu_data[ulp_num].doorbell_offset;
	}

	for (prod = 0; prod < nbuf; prod++) {
		if (header)
			pasync_handle = pasync_ctx->async_entry[pi].header;
		else
			pasync_handle = pasync_ctx->async_entry[pi].data;
		WARN_ON(pasync_handle->is_header != header);
		WARN_ON(pasync_handle->index != pi);
		/* setup the ring only once */
		if (nbuf == pasync_ctx->num_entries) {
			/* note hi is lo */
			pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi;
		}
		if (++pi == pasync_ctx->num_entries)
			pi = 0;
	}

	if (header)
		pasync_ctx->async_header.pi = pi;
	else
		pasync_ctx->async_data.pi = pi;

	doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
	doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
	doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
	doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
	iowrite32(doorbell, phba->db_va + doorbell_offset);
}

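/*
 * beiscsi_hdq_process_compl() handles one default PDU CQE: it picks up
 * the matching async handle, feeds it to the gathering logic and
 * reposts the RQEs the FW reports as consumed (in units of 8).
 */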
static void
beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
			  struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle = NULL;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	u8 ulp_num, consumed, header = 0;
	u16 cid_cri;

	phwi_ctrlr = phba->phwi_ctrlr;
	cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
	ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
					       pdpdu_cqe, &header);
	if (is_chip_be2_be3r(phba))
		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
					 num_cons, pdpdu_cqe);
	else
		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
					 num_cons, pdpdu_cqe);
	if (pasync_handle)
		beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
	/* num_cons indicates the number of groups of 8 RQEs consumed */
	if (consumed)
		beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed);
}

void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return;

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
				       num_processed, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			beiscsi_process_async_event(phba, mcc_compl);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
		}

		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
}

static void beiscsi_mcc_work(struct work_struct *work)
{
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba;

	pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
	phba = pbe_eq->phba;
	beiscsi_process_mcc_cq(phba);
	/* rearm EQ for further interrupts */
	if (!beiscsi_hba_in_error(phba))
		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}

void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return;

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
				       num_processed, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			beiscsi_process_async_event(phba, mcc_compl);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
		}

		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
}

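/**
 * beiscsi_mcc_work()- Work item servicing the MCC EQ
 * @work: embedded in be_eq_obj by the EQ interrupt handler
 *
 * Processes the MCC CQ and then re-arms the owning EQ so further
 * interrupts can be raised, unless the HBA is in error state.
 */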
static void beiscsi_mcc_work(struct work_struct *work)
{
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba;

	pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
	phba = pbe_eq->phba;
	beiscsi_process_mcc_cq(phba);
	/* rearm EQ for further interrupts */
	if (!beiscsi_hba_in_error(phba))
		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}

/**
 * beiscsi_process_cq()- Process the Completion Queue
 * @pbe_eq: Event Q on which the Completion has come
 * @budget: Max number of events to be processed
 *
 * return
 *     Number of Completion Entries processed.
 **/
unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	unsigned int total = 0;
	unsigned int num_processed = 0;
	unsigned short code = 0, cid = 0;
	uint16_t cri_index = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return 0;

		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
			CQE_CODE_MASK);

		/* Get the CID */
		if (is_chip_be2_be3r(phba)) {
			cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
		} else {
			if ((code == DRIVERMSG_NOTIFY) ||
			    (code == UNSOL_HDR_NOTIFY) ||
			    (code == UNSOL_DATA_NOTIFY))
				cid = AMAP_GET_BITS(
						struct amap_i_t_dpdu_cqe_v2,
						cid, sol);
			else
				cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    cid, sol);
		}

		cri_index = BE_GET_CRI_FROM_CID(cid);
		ep = phba->ep_array[cri_index];

		if (ep == NULL) {
			/* connection has already been freed
			 * just move on to next one
			 */
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT,
				    "BM_%d : proc cqe of disconn ep: cid %d\n",
				    cid);
			goto proc_next_cqe;
		}

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* replenish cq */
		if (num_processed == 32) {
			hwi_ring_cq_db(phba, cq->id, 32, 0);
			num_processed = 0;
		}
		total++;

		switch (code) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case UNSOL_DATA_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Ignoring %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case CXN_KILLED_HDR_DIGEST_ERR:
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
				    cqe_desc[code], code, cid);
			spin_lock_bh(&phba->async_pdu_lock);
			/* driver consumes the entry and drops the contents */
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Event %s[%d] received on CID : %d\n",
				    cqe_desc[code], code, cid);
			if (beiscsi_conn)
				iscsi_conn_failure(beiscsi_conn->conn,
						   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n",
				    code, cid);
			break;
		}

proc_next_cqe:
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
		if (total == budget)
			break;
	}

	hwi_ring_cq_db(phba, cq->id, num_processed, 1);
	return total;
}

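/**
 * be_iopoll()- irq_poll callback for the I/O EQs
 * @iop: irq_poll context embedded in be_eq_obj
 * @budget: max number of CQ entries to process in this poll
 *
 * Invalidates the pending EQ entries, processes up to @budget CQ
 * entries, and if the CQ was fully drained completes the poll and
 * re-arms the EQ. Returning @budget keeps the instance scheduled.
 */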
static int be_iopoll(struct irq_poll *iop, int budget)
{
	unsigned int ret, io_events;
	struct beiscsi_hba *phba;
	struct be_eq_obj *pbe_eq;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;

	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
	phba = pbe_eq->phba;
	if (beiscsi_hba_in_error(phba)) {
		irq_poll_complete(iop);
		return 0;
	}

	io_events = 0;
	eq = &pbe_eq->q;
	eqe = queue_tail_node(eq);
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
	       EQE_VALID_MASK) {
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		io_events++;
	}
	hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);

	ret = beiscsi_process_cq(pbe_eq, budget);
	pbe_eq->cq_count += ret;
	if (ret < budget) {
		irq_poll_complete(iop);
		beiscsi_log(phba, KERN_INFO,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
			    pbe_eq->q.id, ret);
		if (!beiscsi_hba_in_error(phba))
			hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
	}
	return ret;
}

static void
hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
		 unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
			sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_addr_lo, pwrb,
				      lower_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_addr_hi, pwrb,
				      upper_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_len, pwrb,
				      sg_len);
			sge_len = sg_len;
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
				      pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_addr_lo, pwrb,
				      lower_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_addr_hi, pwrb,
				      upper_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_len, pwrb,
				      sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
			      1);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
			      0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
			      0);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
			      1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
			      0);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
			      0);
	}

	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      lower_32_bits(addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      upper_32_bits(addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}

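/*
 * hwi_write_sgl()- BE2/BE3-R counterpart of hwi_write_sgl_v2() above:
 * same layout logic, but using the original amap_iscsi_wrb format. The
 * first two SGEs are inlined in the WRB and the full scatterlist is
 * mirrored into the ISCSI SGL fragment, with last_sge set on the final
 * entry.
 */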
static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
			sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
				      ((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
				      ((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
				      sg_len);
			sge_len = sg_len;
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
				      pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
				      ((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
				      ((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
				      sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
			      1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
			      0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
			      0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
			      1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
			      0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
			      0);
	}
	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      (addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}

/**
 * hwi_write_buffer()- Populate the WRB with task info
 * @pwrb: ptr to the WRB entry
 * @task: iscsi task which is to be executed
 **/
static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
	struct iscsi_sge *psgl;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct beiscsi_conn *beiscsi_conn = io_task->conn;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	uint8_t dsp_value = 0;

	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	if (task->data) {

		/* Check for the data_count */
		dsp_value = (task->data_count) ? 1 : 0;

		if (is_chip_be2_be3r(phba))
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
				      pwrb, dsp_value);
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
				      pwrb, dsp_value);

		/* Map addr only if there is data_count */
		if (dsp_value) {
			io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
							     task->data,
							     task->data_count,
							     DMA_TO_DEVICE);
			if (dma_mapping_error(&phba->pcidev->dev,
					      io_task->mtask_addr))
				return -ENOMEM;
			io_task->mtask_data_count = task->data_count;
		} else
			io_task->mtask_addr = 0;

		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
			      lower_32_bits(io_task->mtask_addr));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
			      upper_32_bits(io_task->mtask_addr));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
			      task->data_count);

		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
		io_task->mtask_addr = 0;
	}

	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);
	if (task->data) {
		psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);

		psgl++;
		if (task->data) {
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
				      lower_32_bits(io_task->mtask_addr));
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
				      upper_32_bits(io_task->mtask_addr));
		}
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
	}
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
	return 0;
}

/**
 * beiscsi_find_mem_req()- Find mem needed
 * @phba: ptr to HBA struct
 **/
static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
{
	uint8_t mem_descr_index, ulp_num;
	unsigned int num_async_pdu_buf_pages;
	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;

	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);

	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
						 BE_ISCSI_PDU_HEADER_SIZE;
	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
					sizeof(struct hwi_context_memory);

	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
				     * (phba->params.wrbs_per_cxn)
				     * phba->params.cxns_per_ctrl;
	wrb_sz_per_cxn = sizeof(struct wrb_handle) *
			 (phba->params.wrbs_per_cxn);
	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
				      phba->params.cxns_per_ctrl);

	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
				      phba->params.icds_per_ctrl;
	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
				     phba->params.num_sge_per_io *
				     phba->params.icds_per_ctrl;
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {

			num_async_pdu_buf_sgl_pages =
				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
					       phba, ulp_num) *
					       sizeof(struct phys_addr));

			num_async_pdu_buf_pages =
				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
					       phba, ulp_num) *
					       phba->params.defpdu_hdr_sz);

			num_async_pdu_data_pages =
				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
					       phba, ulp_num) *
					       phba->params.defpdu_data_sz);

			num_async_pdu_data_sgl_pages =
				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
					       phba, ulp_num) *
					       sizeof(struct phys_addr));

			mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
				BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
					  num_async_pdu_buf_pages *
					  PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
					  num_async_pdu_data_pages *
					  PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
					  num_async_pdu_buf_sgl_pages *
					  PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
					  num_async_pdu_data_sgl_pages *
					  PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
				sizeof(struct hd_async_handle);

			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
				sizeof(struct hd_async_handle);

			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				sizeof(struct hd_async_context) +
				(BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
				 sizeof(struct hd_async_entry));
		}
	}
}

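/*
 * beiscsi_alloc_mem()- Back every entry in phba->mem_req with DMA memory.
 * Each requirement is satisfied with up to BEISCSI_MAX_FRAGS_INIT
 * dma_alloc_coherent() chunks: on allocation failure the chunk size is
 * rounded down to a power of two or halved before retrying, and the
 * resulting fragments are recorded in phba->init_mem. Everything is
 * unwound again on failure.
 */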
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
{
	dma_addr_t bus_add;
	struct hwi_controller *phwi_ctrlr;
	struct be_mem_descriptor *mem_descr;
	struct mem_array *mem_arr, *mem_arr_orig;
	unsigned int i, j, alloc_size, curr_alloc_size;

	phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
	if (!phba->phwi_ctrlr)
		return -ENOMEM;

	/* Allocate memory for wrb_context */
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl,
					  sizeof(struct hwi_wrb_context),
					  GFP_KERNEL);
	if (!phwi_ctrlr->wrb_context) {
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
				 GFP_KERNEL);
	if (!phba->init_mem) {
		kfree(phwi_ctrlr->wrb_context);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT,
				     sizeof(*mem_arr_orig),
				     GFP_KERNEL);
	if (!mem_arr_orig) {
		kfree(phba->init_mem);
		kfree(phwi_ctrlr->wrb_context);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		if (!phba->mem_req[i]) {
			mem_descr->mem_array = NULL;
			mem_descr++;
			continue;
		}

		j = 0;
		mem_arr = mem_arr_orig;
		alloc_size = phba->mem_req[i];
		memset(mem_arr, 0, sizeof(struct mem_array) *
		       BEISCSI_MAX_FRAGS_INIT);
		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
		do {
			mem_arr->virtual_address =
				dma_alloc_coherent(&phba->pcidev->dev,
					curr_alloc_size, &bus_add, GFP_KERNEL);
			if (!mem_arr->virtual_address) {
				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
					goto free_mem;
				if (curr_alloc_size -
					rounddown_pow_of_two(curr_alloc_size))
					curr_alloc_size = rounddown_pow_of_two
							     (curr_alloc_size);
				else
					curr_alloc_size = curr_alloc_size / 2;
			} else {
				mem_arr->bus_address.u.
				    a64.address = (__u64) bus_add;
				mem_arr->size = curr_alloc_size;
				alloc_size -= curr_alloc_size;
				curr_alloc_size = min(be_max_phys_size *
						      1024, alloc_size);
				j++;
				mem_arr++;
			}
		} while (alloc_size);
		mem_descr->num_elements = j;
		mem_descr->size_in_bytes = phba->mem_req[i];
		mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr),
						     GFP_KERNEL);
		if (!mem_descr->mem_array)
			goto free_mem;

		memcpy(mem_descr->mem_array, mem_arr_orig,
		       sizeof(struct mem_array) * j);
		mem_descr++;
	}
	kfree(mem_arr_orig);
	return 0;
free_mem:
	mem_descr->num_elements = j;
	while ((i) || (j)) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			dma_free_coherent(&phba->pcidev->dev,
					  mem_descr->mem_array[j - 1].size,
					  mem_descr->mem_array[j - 1].
					  virtual_address,
					  (unsigned long)mem_descr->
					  mem_array[j - 1].
					  bus_address.u.a64.address);
		}
		if (i) {
			i--;
			kfree(mem_descr->mem_array);
			mem_descr--;
		}
	}
	kfree(mem_arr_orig);
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr->wrb_context);
	kfree(phba->phwi_ctrlr);
	return -ENOMEM;
}

static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	beiscsi_find_mem_req(phba);
	return beiscsi_alloc_mem(phba);
}

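/*
 * iscsi_init_global_templates()- Seed the two global PDU template
 * headers (a SCSI Data-Out and a NOP-Out with TTT 0xFFFFFFFF) in the
 * ISCSI_MEM_GLOBAL_HEADER region; the adapter presumably stamps these
 * when it generates such PDUs on the driver's behalf.
 */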
static void iscsi_init_global_templates(struct beiscsi_hba *phba)
{
	struct pdu_data_out *pdata_out;
	struct pdu_nop_out *pnop_out;
	struct be_mem_descriptor *mem_descr;

	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
	pdata_out =
	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);

	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
		      IIOC_SCSI_DATA);

	pnop_out =
	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);

	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
}

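/*
 * beiscsi_init_wrb_handle()- Carve the HWI_MEM_WRBH and HWI_MEM_WRB
 * regions into per-connection WRB handle arrays and rings. Each
 * connection gets wrbs_per_cxn handles, indexed both by allocation
 * order (pwrb_handle_base) and by free-list position
 * (pwrb_handle_basestd), with every handle pointed at its WRB.
 */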
Failing to load\n"); 2640 goto init_wrb_hndl_failed; 2641 } 2642 if (!num_cxn_wrbh) { 2643 pwrb_handle = 2644 mem_descr_wrbh->mem_array[idx].virtual_address; 2645 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2646 ((sizeof(struct wrb_handle)) * 2647 phba->params.wrbs_per_cxn)); 2648 idx++; 2649 } 2650 pwrb_context->alloc_index = 0; 2651 pwrb_context->wrb_handles_available = 0; 2652 pwrb_context->free_index = 0; 2653 2654 if (num_cxn_wrbh) { 2655 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2656 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2657 pwrb_context->pwrb_handle_basestd[j] = 2658 pwrb_handle; 2659 pwrb_context->wrb_handles_available++; 2660 pwrb_handle->wrb_index = j; 2661 pwrb_handle++; 2662 } 2663 num_cxn_wrbh--; 2664 } 2665 spin_lock_init(&pwrb_context->wrb_lock); 2666 } 2667 idx = 0; 2668 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2669 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2670 if (!num_cxn_wrb) { 2671 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2672 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2673 ((sizeof(struct iscsi_wrb) * 2674 phba->params.wrbs_per_cxn)); 2675 idx++; 2676 } 2677 2678 if (num_cxn_wrb) { 2679 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2680 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2681 pwrb_handle->pwrb = pwrb; 2682 pwrb++; 2683 } 2684 num_cxn_wrb--; 2685 } 2686 } 2687 return 0; 2688 init_wrb_hndl_failed: 2689 for (j = index; j > 0; j--) { 2690 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2691 kfree(pwrb_context->pwrb_handle_base); 2692 kfree(pwrb_context->pwrb_handle_basestd); 2693 } 2694 return -ENOMEM; 2695 } 2696 2697 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2698 { 2699 uint8_t ulp_num; 2700 struct hwi_controller *phwi_ctrlr; 2701 struct hba_parameters *p = &phba->params; 2702 struct hd_async_context *pasync_ctx; 2703 struct hd_async_handle *pasync_header_h, *pasync_data_h; 2704 unsigned int index, idx, num_per_mem, num_async_data; 2705 struct be_mem_descriptor *mem_descr; 2706 2707 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2708 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2709 /* get async_ctx for each ULP */ 2710 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2711 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2712 (ulp_num * MEM_DESCR_OFFSET)); 2713 2714 phwi_ctrlr = phba->phwi_ctrlr; 2715 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2716 (struct hd_async_context *) 2717 mem_descr->mem_array[0].virtual_address; 2718 2719 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2720 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2721 2722 pasync_ctx->async_entry = 2723 (struct hd_async_entry *) 2724 ((long unsigned int)pasync_ctx + 2725 sizeof(struct hd_async_context)); 2726 2727 pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba, 2728 ulp_num); 2729 /* setup header buffers */ 2730 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2731 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2732 (ulp_num * MEM_DESCR_OFFSET); 2733 if (mem_descr->mem_array[0].virtual_address) { 2734 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2735 "BM_%d : hwi_init_async_pdu_ctx" 2736 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", 2737 ulp_num, 2738 mem_descr->mem_array[0]. 
static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	uint8_t ulp_num;
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hd_async_context *pasync_ctx;
	struct hd_async_handle *pasync_header_h, *pasync_data_h;
	unsigned int index, idx, num_per_mem, num_async_data;
	struct be_mem_descriptor *mem_descr;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			/* get async_ctx for each ULP */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
				      (ulp_num * MEM_DESCR_OFFSET));

			phwi_ctrlr = phba->phwi_ctrlr;
			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
				(struct hd_async_context *)
				 mem_descr->mem_array[0].virtual_address;

			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
			memset(pasync_ctx, 0, sizeof(*pasync_ctx));

			pasync_ctx->async_entry =
					(struct hd_async_entry *)
					((long unsigned int)pasync_ctx +
					sizeof(struct hd_async_context));

			pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba,
						  ulp_num);
			/* setup header buffers */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.pi = 0;
			pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
			pasync_ctx->async_header.va_base =
				mem_descr->mem_array[0].virtual_address;

			pasync_ctx->async_header.pa_base.u.a64.address =
				mem_descr->mem_array[0].
				bus_address.u.a64.address;

			/* setup header buffer sgls */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.ring_base =
				mem_descr->mem_array[0].virtual_address;

			/* setup header buffer handles */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.handle_base =
				mem_descr->mem_array[0].virtual_address;

			/* setup data buffer sgls */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_data.ring_base =
				mem_descr->mem_array[0].virtual_address;

			/* setup data buffer handles */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (!mem_descr->mem_array[0].virtual_address)
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_data.handle_base =
				mem_descr->mem_array[0].virtual_address;

			pasync_header_h =
				(struct hd_async_handle *)
				pasync_ctx->async_header.handle_base;
			pasync_data_h =
				(struct hd_async_handle *)
				pasync_ctx->async_data.handle_base;

			/* setup data buffers */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			idx = 0;
			pasync_ctx->async_data.pi = 0;
			pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
			pasync_ctx->async_data.va_base =
				mem_descr->mem_array[idx].virtual_address;
			pasync_ctx->async_data.pa_base.u.a64.address =
				mem_descr->mem_array[idx].
				bus_address.u.a64.address;

			num_async_data = ((mem_descr->mem_array[idx].size) /
					phba->params.defpdu_data_sz);
			num_per_mem = 0;

			for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE
					(phba, ulp_num); index++) {
				pasync_header_h->cri = -1;
				pasync_header_h->is_header = 1;
				pasync_header_h->index = index;
				INIT_LIST_HEAD(&pasync_header_h->link);
				pasync_header_h->pbuffer =
					(void *)((unsigned long)
						 (pasync_ctx->
						  async_header.va_base) +
						 (p->defpdu_hdr_sz * index));

				pasync_header_h->pa.u.a64.address =
					pasync_ctx->async_header.pa_base.u.a64.
					address + (p->defpdu_hdr_sz * index);

				pasync_ctx->async_entry[index].header =
					pasync_header_h;
				pasync_header_h++;
				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
					       wq.list);

				pasync_data_h->cri = -1;
				pasync_data_h->is_header = 0;
				pasync_data_h->index = index;
				INIT_LIST_HEAD(&pasync_data_h->link);

				if (!num_async_data) {
					num_per_mem = 0;
					idx++;
					pasync_ctx->async_data.va_base =
						mem_descr->mem_array[idx].
						virtual_address;
					pasync_ctx->async_data.pa_base.u.
						a64.address =
						mem_descr->mem_array[idx].
						bus_address.u.a64.address;
					num_async_data =
						((mem_descr->mem_array[idx].
						  size) /
						 phba->params.defpdu_data_sz);
				}
				pasync_data_h->pbuffer =
					(void *)((unsigned long)
					(pasync_ctx->async_data.va_base) +
					(p->defpdu_data_sz * num_per_mem));

				pasync_data_h->pa.u.a64.address =
					pasync_ctx->async_data.pa_base.u.a64.
					address + (p->defpdu_data_sz *
					num_per_mem);
				num_per_mem++;
				num_async_data--;

				pasync_ctx->async_entry[index].data =
					pasync_data_h;
				pasync_data_h++;
			}
		}
	}

	return 0;
}

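/*
 * Small helpers for describing already-allocated memory to mailbox
 * commands: be_sgl_create_contiguous() fills a be_dma_mem from one
 * mem_array fragment, be_sgl_destroy_contiguous() clears it, and the
 * two hwi_build_be_sgl_* wrappers (identical apart from a cast) rebuild
 * such a descriptor from a fragment.
 */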
static int
be_sgl_create_contiguous(void *virtual_address,
			 u64 physical_address, u32 length,
			 struct be_dma_mem *sgl)
{
	WARN_ON(!virtual_address);
	WARN_ON(!physical_address);
	WARN_ON(!length);
	WARN_ON(!sgl);

	sgl->va = virtual_address;
	sgl->dma = (unsigned long)physical_address;
	sgl->size = length;

	return 0;
}

static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
{
	memset(sgl, 0, sizeof(*sgl));
}

static void
hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
		     struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous(pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}

static void
hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
			   struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}

static int be_fill_queue(struct be_queue_info *q,
			 u16 len, u16 entry_size, void *vaddress)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = vaddress;
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

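/*
 * beiscsi_create_eqs()- Allocate DMA memory for, and create, one event
 * queue per CPU, plus one extra EQ for the MCC when MSI-X is enabled.
 * On any failure every EQ allocated so far is freed again.
 */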
static int beiscsi_create_eqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	int ret = -ENOMEM, eq_for_mcc;
	unsigned int i, num_eq_pages;
	struct be_queue_info *eq;
	struct be_dma_mem *mem;
	void *eq_vaddress;
	dma_addr_t paddr;

	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
				      sizeof(struct be_eq_entry));

	if (phba->pcidev->msix_enabled)
		eq_for_mcc = 1;
	else
		eq_for_mcc = 0;
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		eq = &phwi_context->be_eq[i].q;
		mem = &eq->dma_mem;
		phwi_context->be_eq[i].phba = phba;
		eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
						 num_eq_pages * PAGE_SIZE,
						 &paddr, GFP_KERNEL);
		if (!eq_vaddress) {
			ret = -ENOMEM;
			goto create_eq_error;
		}

		mem->va = eq_vaddress;
		ret = be_fill_queue(eq, phba->params.num_eq_entries,
				    sizeof(struct be_eq_entry), eq_vaddress);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : be_fill_queue Failed for EQ\n");
			goto create_eq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
					    BEISCSI_EQ_DELAY_DEF);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_cmd_eq_create Failed for EQ\n");
			goto create_eq_error;
		}

		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : eqid = %d\n",
			    phwi_context->be_eq[i].q.id);
	}
	return 0;

create_eq_error:
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		eq = &phwi_context->be_eq[i].q;
		mem = &eq->dma_mem;
		if (mem->va)
			dma_free_coherent(&phba->pcidev->dev, num_eq_pages
					  * PAGE_SIZE,
					  mem->va, mem->dma);
	}
	return ret;
}

static int beiscsi_create_cqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	unsigned int i, num_cq_pages;
	struct be_queue_info *cq, *eq;
	struct be_dma_mem *mem;
	struct be_eq_obj *pbe_eq;
	void *cq_vaddress;
	int ret = -ENOMEM;
	dma_addr_t paddr;

	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
				      sizeof(struct sol_cqe));

	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		eq = &phwi_context->be_eq[i].q;
		pbe_eq = &phwi_context->be_eq[i];
		pbe_eq->cq = cq;
		pbe_eq->phba = phba;
		mem = &cq->dma_mem;
		cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
						 num_cq_pages * PAGE_SIZE,
						 &paddr, GFP_KERNEL);
		if (!cq_vaddress) {
			ret = -ENOMEM;
			goto create_cq_error;
		}

		ret = be_fill_queue(cq, phba->params.num_cq_entries,
				    sizeof(struct sol_cqe), cq_vaddress);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : be_fill_queue Failed for ISCSI CQ\n");
			goto create_cq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
					    false, 0);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_cmd_cq_create Failed for ISCSI CQ\n");
			goto create_cq_error;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : iscsi cq_id is %d for eq_id %d\n"
			    "iSCSI CQ CREATED\n", cq->id, eq->id);
	}
	return 0;

create_cq_error:
	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		mem = &cq->dma_mem;
		if (mem->va)
			dma_free_coherent(&phba->pcidev->dev, num_cq_pages
					  * PAGE_SIZE,
					  mem->va, mem->dma);
	}
	return ret;
}

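/*
 * beiscsi_create_def_hdr() and beiscsi_create_def_data() below bring up
 * the default PDU header and data rings of a ULP on top of the
 * HWI_MEM_ASYNC_*_RING_ULP regions sized earlier; both rings complete
 * onto the first iSCSI CQ (be_cq[0]).
 */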
static int
beiscsi_create_def_hdr(struct beiscsi_hba *phba,
		       struct hwi_context_memory *phwi_context,
		       struct hwi_controller *phwi_ctrlr,
		       unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dq = &phwi_context->be_def_hdrq[ulp_num];
	cq = &phwi_context->be_cq[0];
	mem = &dq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
		     (ulp_num * MEM_DESCR_OFFSET);
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
			    ulp_num);

		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
		   bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_hdr_sz,
					      BEISCSI_DEFQ_HDR, ulp_num);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
			    ulp_num);

		return ret;
	}

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
		    ulp_num,
		    phwi_context->be_def_hdrq[ulp_num].id);
	return 0;
}

static int
beiscsi_create_def_data(struct beiscsi_hba *phba,
			struct hwi_context_memory *phwi_context,
			struct hwi_controller *phwi_ctrlr,
			unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dataq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dataq = &phwi_context->be_def_dataq[ulp_num];
	cq = &phwi_context->be_cq[0];
	mem = &dataq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
		     (ulp_num * MEM_DESCR_OFFSET);
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_fill_queue Failed for DEF PDU "
			    "DATA on ULP : %d\n",
			    ulp_num);

		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
		   bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_data_sz,
					      BEISCSI_DEFQ_DATA, ulp_num);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d be_cmd_create_default_pdu_queue"
			    " Failed for DEF PDU DATA on ULP : %d\n",
			    ulp_num);
		return ret;
	}

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : iscsi def data id on ULP : %d is %d\n",
		    ulp_num,
		    phwi_context->be_def_dataq[ulp_num].id);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : DEFAULT PDU DATA RING CREATED on ULP : %d\n",
		    ulp_num);
	return 0;
}

static int
beiscsi_post_template_hdr(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	struct mem_array *pm_arr;
	struct be_dma_mem sgl;
	int status, ulp_num;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			pm_arr = mem_descr->mem_array;

			hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
			status = be_cmd_iscsi_post_template_hdr(
				 &phba->ctrl, &sgl);

			if (status != 0) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Post Template HDR Failed for "
					    "ULP_%d\n", ulp_num);
				return status;
			}

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : Template HDR Pages Posted for "
				    "ULP_%d\n", ulp_num);
		}
	}
	return 0;
}

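/*
 * beiscsi_post_pages()- Hand the HWI_MEM_SGE region to the adapter so
 * it can resolve ICD SGL references; page_offset places the pages at
 * the ICD range of the first supported ULP (iscsi_icd_start).
 */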
static int
beiscsi_post_pages(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	struct mem_array *pm_arr;
	unsigned int page_offset, i;
	struct be_dma_mem sgl;
	int status, ulp_num = 0;

	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_SGE;
	pm_arr = mem_descr->mem_array;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;

	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
			phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
	for (i = 0; i < mem_descr->num_elements; i++) {
		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
						     page_offset,
						     (pm_arr->size / PAGE_SIZE));
		page_offset += pm_arr->size / PAGE_SIZE;
		if (status != 0) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : post sgl failed.\n");
			return status;
		}
		pm_arr++;
	}
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : POSTED PAGES\n");
	return 0;
}

static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&phba->pcidev->dev, mem->size,
				  mem->va, mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

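/*
 * beiscsi_create_wrb_rings()- Slice the HWI_MEM_WRB region into one WRB
 * ring per connection and issue a WRBQ-create for each. With more than
 * one supported ULP the connections are spread round-robin across ULPs
 * until a ULP runs out of CIDs.
 */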
static int
beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
			 struct hwi_context_memory *phwi_context,
			 struct hwi_controller *phwi_ctrlr)
{
	unsigned int num_wrb_rings;
	u64 pa_addr_lo;
	unsigned int idx, num, i, ulp_num;
	struct mem_array *pwrb_arr;
	void *wrb_vaddr;
	struct be_dma_mem sgl;
	struct be_mem_descriptor *mem_descr;
	struct hwi_wrb_context *pwrb_context;
	int status;
	uint8_t ulp_count = 0, ulp_base_num = 0;
	uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };

	idx = 0;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_WRB;
	pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl,
				 sizeof(*pwrb_arr),
				 GFP_KERNEL);
	if (!pwrb_arr) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Memory alloc failed in create wrb ring.\n");
		return -ENOMEM;
	}
	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
	num_wrb_rings = mem_descr->mem_array[idx].size /
			(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));

	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
		if (num_wrb_rings) {
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
					     sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo += pwrb_arr[num].size;
			num_wrb_rings--;
		} else {
			idx++;
			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
			pa_addr_lo = mem_descr->mem_array[idx].
				     bus_address.u.a64.address;
			num_wrb_rings = mem_descr->mem_array[idx].size /
					(phba->params.wrbs_per_cxn *
					 sizeof(struct iscsi_wrb));
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
					     sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo += pwrb_arr[num].size;
			num_wrb_rings--;
		}
	}

	/* Get the ULP Count */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			ulp_count++;
			ulp_base_num = ulp_num;
			cid_count_ulp[ulp_num] =
				BEISCSI_GET_CID_COUNT(phba, ulp_num);
		}

	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		if (ulp_count > 1) {
			ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;

			if (!cid_count_ulp[ulp_base_num])
				ulp_base_num = (ulp_base_num + 1) %
					       BEISCSI_ULP_COUNT;

			cid_count_ulp[ulp_base_num]--;
		}

		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
					    &phwi_context->be_wrbq[i],
					    &phwi_ctrlr->wrb_context[i],
					    ulp_base_num);
		if (status != 0) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : wrbq create failed.\n");
			kfree(pwrb_arr);
			return status;
		}
		pwrb_context = &phwi_ctrlr->wrb_context[i];
		BE_SET_CID_TO_CRI(i, pwrb_context->cid);
	}
	kfree(pwrb_arr);
	return 0;
}

static void free_wrb_handles(struct beiscsi_hba *phba)
{
	unsigned int index;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;

	phwi_ctrlr = phba->phwi_ctrlr;
	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		kfree(pwrb_context->pwrb_handle_base);
		kfree(pwrb_context->pwrb_handle_basestd);
	}
}

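/*
 * be_mcc_queues_destroy()- Tear down the MCC queue pair. Outstanding
 * tags are dealt with first: timed-out commands have their DMA memory
 * released here, sleeping waiters are failed and woken (the port is
 * going offline, so they will see BEISCSI_HBA_ONLINE cleared and
 * return -EIO), and only then are the MCC Q and CQ destroyed.
 */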
static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *ptag_mem;
	struct be_queue_info *q;
	int i, tag;

	q = &phba->ctrl.mcc_obj.q;
	for (i = 0; i < MAX_MCC_CMD; i++) {
		tag = i + 1;
		if (!test_bit(MCC_TAG_STATE_RUNNING,
			      &ctrl->ptag_state[tag].tag_state))
			continue;

		if (test_bit(MCC_TAG_STATE_TIMEOUT,
			     &ctrl->ptag_state[tag].tag_state)) {
			ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
			if (ptag_mem->size) {
				dma_free_coherent(&ctrl->pdev->dev,
						  ptag_mem->size,
						  ptag_mem->va,
						  ptag_mem->dma);
				ptag_mem->size = 0;
			}
			continue;
		}
		/**
		 * If MCC is still active and waiting then wake up the process.
		 * We are here only because the port is going offline. The
		 * process sees that (BEISCSI_HBA_ONLINE is cleared), an EIO
		 * error is returned for the operation and the allocated
		 * memory is cleaned up.
		 */
		if (waitqueue_active(&ctrl->mcc_wait[tag])) {
			ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
			ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
			wake_up_interruptible(&ctrl->mcc_wait[tag]);
			/*
			 * Control tag info gets reinitialized in enable
			 * so wait for the process to clear running state.
			 */
			while (test_bit(MCC_TAG_STATE_RUNNING,
					&ctrl->ptag_state[tag].tag_state))
				schedule_timeout_uninterruptible(HZ);
		}
		/**
		 * For MCC with tag_states MCC_TAG_STATE_ASYNC and
		 * MCC_TAG_STATE_IGNORE nothing needs to be done.
		 */
	}
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
		be_queue_free(phba, q);
	}

	q = &phba->ctrl.mcc_obj.cq;
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
		be_queue_free(phba, q);
	}
}

static int be_mcc_queues_create(struct beiscsi_hba *phba,
				struct hwi_context_memory *phwi_context)
{
	struct be_queue_info *q, *cq;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	/* Alloc MCC compl queue */
	cq = &phba->ctrl.mcc_obj.cq;
	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;
	/* Ask BE to create MCC compl queue; */
	if (phba->pcidev->msix_enabled) {
		if (beiscsi_cmd_cq_create(ctrl, cq,
					&phwi_context->be_eq[phba->num_cpus].q,
					false, true, 0))
			goto mcc_cq_free;
	} else {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
					  false, true, 0))
			goto mcc_cq_free;
	}

	/* Alloc MCC queue */
	q = &phba->ctrl.mcc_obj.q;
	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (beiscsi_cmd_mccq_create(phba, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(phba, q);
mcc_cq_destroy:
	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(phba, cq);
err:
	return -ENOMEM;
}

static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
{
	int nvec = 1;

	switch (phba->generation) {
	case BE_GEN2:
	case BE_GEN3:
		nvec = BEISCSI_MAX_NUM_CPUS + 1;
		break;
	case BE_GEN4:
		nvec = phba->fw_config.eqid_count;
		break;
	default:
		nvec = 2;
		break;
	}

	/* if eqid_count == 1 fall back to INTX */
	if (enable_msix && nvec > 1) {
		struct irq_affinity desc = { .post_vectors = 1 };

		if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) >= 0) {
			phba->num_cpus = nvec - 1;
			return;
		}
	}

	phba->num_cpus = 1;
}

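/*
 * hwi_purge_eq()- Walk every EQ (including the MCC EQ under MSI-X),
 * invalidate any entries still marked valid and acknowledge them via
 * the EQ doorbell; used while quiescing the port.
 */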
static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	if (beiscsi_hba_in_error(phba))
		return;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->pcidev->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
	}
}

static void hwi_cleanup_port(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int i, eq_for_mcc, ulp_num;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			beiscsi_cmd_iscsi_cleanup(phba, ulp_num);

	/**
	 * Purge all EQ entries that may have been left out. This is to
	 * work around a problem we've seen occasionally where the driver
	 * gets an interrupt with an EQ entry bit set after stopping the
	 * controller.
	 */
	hwi_purge_eq(phba);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	be_cmd_iscsi_remove_template_hdr(ctrl);

	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}
	kfree(phwi_context->be_wrbq);
	free_wrb_handles(phba);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {

			q = &phwi_context->be_def_hdrq[ulp_num];
			if (q->created)
				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

			q = &phwi_context->be_def_dataq[ulp_num];
			if (q->created)
				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
		}
	}

	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	for (i = 0; i < (phba->num_cpus); i++) {
		q = &phwi_context->be_cq[i];
		if (q->created) {
			be_queue_free(phba, q);
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
		}
	}

	be_mcc_queues_destroy(phba);
	if (phba->pcidev->msix_enabled)
		eq_for_mcc = 1;
	else
		eq_for_mcc = 0;
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		q = &phwi_context->be_eq[i].q;
		if (q->created) {
			be_queue_free(phba, q);
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
		}
	}
	/* this ensures complete FW cleanup */
	beiscsi_cmd_function_reset(phba);
	/* last communication, indicate driver is unloading */
	beiscsi_cmd_special_wrb(&phba->ctrl, 0);
}

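/*
 * hwi_init_port()- Bring up the hardware queues in dependency order:
 * EQs, MCC queues, CQs, default PDU header/data rings (then hand their
 * buffers to the adapter), ICD SGL pages, template headers and WRB
 * rings, finishing with the CID-to-async-CRI map. Any failure unwinds
 * through hwi_cleanup_port().
 */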
phwi_context, 3729 phwi_ctrlr, 3730 def_pdu_ring_sz, 3731 ulp_num); 3732 if (status != 0) { 3733 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3734 "BM_%d : Default Header not created for ULP : %d\n", 3735 ulp_num); 3736 goto error; 3737 } 3738 3739 status = beiscsi_create_def_data(phba, phwi_context, 3740 phwi_ctrlr, 3741 def_pdu_ring_sz, 3742 ulp_num); 3743 if (status != 0) { 3744 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3745 "BM_%d : Default Data not created for ULP : %d\n", 3746 ulp_num); 3747 goto error; 3748 } 3749 /** 3750 * Now that the default PDU rings have been created, 3751 * let EP know about it. 3752 */ 3753 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, 3754 ulp_num, nbufs); 3755 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA, 3756 ulp_num, nbufs); 3757 } 3758 } 3759 3760 status = beiscsi_post_pages(phba); 3761 if (status != 0) { 3762 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3763 "BM_%d : Post SGL Pages Failed\n"); 3764 goto error; 3765 } 3766 3767 status = beiscsi_post_template_hdr(phba); 3768 if (status != 0) { 3769 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3770 "BM_%d : Template HDR Posting for CXN Failed\n"); 3771 } 3772 3773 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3774 if (status != 0) { 3775 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3776 "BM_%d : WRB Rings not created\n"); 3777 goto error; 3778 } 3779 3780 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3781 uint16_t async_arr_idx = 0; 3782 3783 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3784 uint16_t cri = 0; 3785 struct hd_async_context *pasync_ctx; 3786 3787 pasync_ctx = HWI_GET_ASYNC_PDU_CTX( 3788 phwi_ctrlr, ulp_num); 3789 for (cri = 0; cri < 3790 phba->params.cxns_per_ctrl; cri++) { 3791 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI 3792 (phwi_ctrlr, cri)) 3793 pasync_ctx->cid_to_async_cri_map[ 3794 phwi_ctrlr->wrb_context[cri].cid] = 3795 async_arr_idx++; 3796 } 3797 } 3798 } 3799 3800 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3801 "BM_%d : hwi_init_port success\n"); 3802 return 0; 3803 3804 error: 3805 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3806 "BM_%d : hwi_init_port failed\n"); 3807 hwi_cleanup_port(phba); 3808 return status; 3809 } 3810 3811 static int hwi_init_controller(struct beiscsi_hba *phba) 3812 { 3813 struct hwi_controller *phwi_ctrlr; 3814 3815 phwi_ctrlr = phba->phwi_ctrlr; 3816 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3817 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3818 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3819 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3820 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", 3821 phwi_ctrlr->phwi_ctxt); 3822 } else { 3823 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3824 "BM_%d : HWI_MEM_ADDN_CONTEXT is more " 3825 "than one element. Failing to load\n"); 3826 return -ENOMEM; 3827 } 3828 3829 iscsi_init_global_templates(phba); 3830 if (beiscsi_init_wrb_handle(phba)) 3831 return -ENOMEM; 3832 3833 if (hwi_init_async_pdu_ctx(phba)) { 3834 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3835 "BM_%d : hwi_init_async_pdu_ctx failed\n"); 3836 return -ENOMEM; 3837 } 3838 3839 if (hwi_init_port(phba) != 0) { 3840 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3841 "BM_%d : hwi_init_controller failed\n"); 3842 3843 return -ENOMEM; 3844 } 3845 return 0; 3846 } 3847 3848 static void beiscsi_free_mem(struct beiscsi_hba *phba) 3849 { 3850 struct be_mem_descriptor *mem_descr; 3851 int i, j; 3852 3853 mem_descr = phba->init_mem;
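/* Walk every init_mem descriptor, freeing each DMA chunk in reverse allocation order before releasing the descriptor array itself. */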
3854 for (i = 0; i < SE_MEM_MAX; i++) { 3855 for (j = mem_descr->num_elements; j > 0; j--) { 3856 dma_free_coherent(&phba->pcidev->dev, 3857 mem_descr->mem_array[j - 1].size, 3858 mem_descr->mem_array[j - 1].virtual_address, 3859 (unsigned long)mem_descr->mem_array[j - 1]. 3860 bus_address.u.a64.address); 3861 } 3862 3863 kfree(mem_descr->mem_array); 3864 mem_descr++; 3865 } 3866 kfree(phba->init_mem); 3867 kfree(phba->phwi_ctrlr->wrb_context); 3868 kfree(phba->phwi_ctrlr); 3869 } 3870 3871 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3872 { 3873 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3874 struct sgl_handle *psgl_handle; 3875 struct iscsi_sge *pfrag; 3876 unsigned int arr_index, i, idx; 3877 unsigned int ulp_icd_start, ulp_num = 0; 3878 3879 phba->io_sgl_hndl_avbl = 0; 3880 phba->eh_sgl_hndl_avbl = 0; 3881 3882 mem_descr_sglh = phba->init_mem; 3883 mem_descr_sglh += HWI_MEM_SGLH; 3884 if (1 == mem_descr_sglh->num_elements) { 3885 phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl, 3886 sizeof(struct sgl_handle *), 3887 GFP_KERNEL); 3888 if (!phba->io_sgl_hndl_base) { 3889 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3890 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3891 return -ENOMEM; 3892 } 3893 phba->eh_sgl_hndl_base = 3894 kcalloc(phba->params.icds_per_ctrl - 3895 phba->params.ios_per_ctrl, 3896 sizeof(struct sgl_handle *), GFP_KERNEL); 3897 if (!phba->eh_sgl_hndl_base) { 3898 kfree(phba->io_sgl_hndl_base); 3899 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3900 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3901 return -ENOMEM; 3902 } 3903 } else { 3904 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3905 "BM_%d : HWI_MEM_SGLH is more than one element. " 3906 "Failing to load\n"); 3907 return -ENOMEM; 3908 } 3909 3910 arr_index = 0; 3911 idx = 0; 3912 while (idx < mem_descr_sglh->num_elements) { 3913 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 3914 3915 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 3916 sizeof(struct sgl_handle)); i++) { 3917 if (arr_index < phba->params.ios_per_ctrl) { 3918 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 3919 phba->io_sgl_hndl_avbl++; 3920 arr_index++; 3921 } else { 3922 phba->eh_sgl_hndl_base[arr_index - 3923 phba->params.ios_per_ctrl] = 3924 psgl_handle; 3925 arr_index++; 3926 phba->eh_sgl_hndl_avbl++; 3927 } 3928 psgl_handle++; 3929 } 3930 idx++; 3931 } 3932 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3933 "BM_%d : phba->io_sgl_hndl_avbl=%d " 3934 "phba->eh_sgl_hndl_avbl=%d\n", 3935 phba->io_sgl_hndl_avbl, 3936 phba->eh_sgl_hndl_avbl); 3937 3938 mem_descr_sg = phba->init_mem; 3939 mem_descr_sg += HWI_MEM_SGE; 3940 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3941 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 3942 mem_descr_sg->num_elements); 3943 3944 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3945 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3946 break; 3947 3948 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 3949 3950 arr_index = 0; 3951 idx = 0; 3952 while (idx < mem_descr_sg->num_elements) { 3953 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 3954 3955 for (i = 0; 3956 i < (mem_descr_sg->mem_array[idx].size) / 3957 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 3958 i++) { 3959 if (arr_index < phba->params.ios_per_ctrl) 3960 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 3961 else 3962 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 3963 phba->params.ios_per_ctrl]; 3964 psgl_handle->pfrag = pfrag; 3965 AMAP_SET_BITS(struct
amap_iscsi_sge, addr_hi, pfrag, 0); 3966 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 3967 pfrag += phba->params.num_sge_per_io; 3968 psgl_handle->sgl_index = ulp_icd_start + arr_index++; 3969 } 3970 idx++; 3971 } 3972 phba->io_sgl_free_index = 0; 3973 phba->io_sgl_alloc_index = 0; 3974 phba->eh_sgl_free_index = 0; 3975 phba->eh_sgl_alloc_index = 0; 3976 return 0; 3977 } 3978 3979 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 3980 { 3981 int ret; 3982 uint16_t i, ulp_num; 3983 struct ulp_cid_info *ptr_cid_info = NULL; 3984 3985 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3986 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 3987 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), 3988 GFP_KERNEL); 3989 3990 if (!ptr_cid_info) { 3991 ret = -ENOMEM; 3992 goto free_memory; 3993 } 3994 3995 /* Allocate memory for CID array */ 3996 ptr_cid_info->cid_array = 3997 kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num), 3998 sizeof(*ptr_cid_info->cid_array), 3999 GFP_KERNEL); 4000 if (!ptr_cid_info->cid_array) { 4001 kfree(ptr_cid_info); 4002 ptr_cid_info = NULL; 4003 ret = -ENOMEM; 4004 4005 goto free_memory; 4006 } 4007 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( 4008 phba, ulp_num); 4009 4010 /* Save the cid_info_array ptr */ 4011 phba->cid_array_info[ulp_num] = ptr_cid_info; 4012 } 4013 } 4014 phba->ep_array = kcalloc(phba->params.cxns_per_ctrl, 4015 sizeof(struct iscsi_endpoint *), 4016 GFP_KERNEL); 4017 if (!phba->ep_array) { 4018 ret = -ENOMEM; 4019 4020 goto free_memory; 4021 } 4022 4023 phba->conn_table = kcalloc(phba->params.cxns_per_ctrl, 4024 sizeof(struct beiscsi_conn *), 4025 GFP_KERNEL); 4026 if (!phba->conn_table) { 4027 kfree(phba->ep_array); 4028 phba->ep_array = NULL; 4029 ret = -ENOMEM; 4030 4031 goto free_memory; 4032 } 4033 4034 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4035 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; 4036 4037 ptr_cid_info = phba->cid_array_info[ulp_num]; 4038 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = 4039 phba->phwi_ctrlr->wrb_context[i].cid; 4040 4041 } 4042 4043 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4044 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4045 ptr_cid_info = phba->cid_array_info[ulp_num]; 4046 4047 ptr_cid_info->cid_alloc = 0; 4048 ptr_cid_info->cid_free = 0; 4049 } 4050 } 4051 return 0; 4052 4053 free_memory: 4054 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4055 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4056 ptr_cid_info = phba->cid_array_info[ulp_num]; 4057 4058 if (ptr_cid_info) { 4059 kfree(ptr_cid_info->cid_array); 4060 kfree(ptr_cid_info); 4061 phba->cid_array_info[ulp_num] = NULL; 4062 } 4063 } 4064 } 4065 4066 return ret; 4067 } 4068 4069 static void hwi_enable_intr(struct beiscsi_hba *phba) 4070 { 4071 struct be_ctrl_info *ctrl = &phba->ctrl; 4072 struct hwi_controller *phwi_ctrlr; 4073 struct hwi_context_memory *phwi_context; 4074 struct be_queue_info *eq; 4075 u8 __iomem *addr; 4076 u32 reg, i; 4077 u32 enabled; 4078 4079 phwi_ctrlr = phba->phwi_ctrlr; 4080 phwi_context = phwi_ctrlr->phwi_ctxt; 4081 4082 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 4083 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 4084 reg = ioread32(addr); 4085 4086 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4087 if (!enabled) { 4088 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4089 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4090 "BM_%d : reg =0x%08x addr=%p\n", reg, addr); 4091 iowrite32(reg, addr);
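/* Host interrupts are now ungated; the EQ doorbells are re-armed below so that any pending entries raise an event. */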
4092 } 4093 4094 if (!phba->pcidev->msix_enabled) { 4095 eq = &phwi_context->be_eq[0].q; 4096 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4097 "BM_%d : eq->id=%d\n", eq->id); 4098 4099 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4100 } else { 4101 for (i = 0; i <= phba->num_cpus; i++) { 4102 eq = &phwi_context->be_eq[i].q; 4103 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4104 "BM_%d : eq->id=%d\n", eq->id); 4105 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4106 } 4107 } 4108 } 4109 4110 static void hwi_disable_intr(struct beiscsi_hba *phba) 4111 { 4112 struct be_ctrl_info *ctrl = &phba->ctrl; 4113 4114 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 4115 u32 reg = ioread32(addr); 4116 4117 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4118 if (enabled) { 4119 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4120 iowrite32(reg, addr); 4121 } else 4122 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4123 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 4124 } 4125 4126 static int beiscsi_init_port(struct beiscsi_hba *phba) 4127 { 4128 int ret; 4129 4130 ret = hwi_init_controller(phba); 4131 if (ret < 0) { 4132 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4133 "BM_%d : init controller failed\n"); 4134 return ret; 4135 } 4136 ret = beiscsi_init_sgl_handle(phba); 4137 if (ret < 0) { 4138 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4139 "BM_%d : init sgl handles failed\n"); 4140 goto cleanup_port; 4141 } 4142 4143 ret = hba_setup_cid_tbls(phba); 4144 if (ret < 0) { 4145 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4146 "BM_%d : setup CID table failed\n"); 4147 kfree(phba->io_sgl_hndl_base); 4148 kfree(phba->eh_sgl_hndl_base); 4149 goto cleanup_port; 4150 } 4151 return ret; 4152 4153 cleanup_port: 4154 hwi_cleanup_port(phba); 4155 return ret; 4156 } 4157 4158 static void beiscsi_cleanup_port(struct beiscsi_hba *phba) 4159 { 4160 struct ulp_cid_info *ptr_cid_info = NULL; 4161 int ulp_num; 4162 4163 kfree(phba->io_sgl_hndl_base); 4164 kfree(phba->eh_sgl_hndl_base); 4165 kfree(phba->ep_array); 4166 kfree(phba->conn_table); 4167 4168 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4169 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4170 ptr_cid_info = phba->cid_array_info[ulp_num]; 4171 4172 if (ptr_cid_info) { 4173 kfree(ptr_cid_info->cid_array); 4174 kfree(ptr_cid_info); 4175 phba->cid_array_info[ulp_num] = NULL; 4176 } 4177 } 4178 } 4179 } 4180 4181 /** 4182 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources 4183 * @beiscsi_conn: ptr to the conn to be cleaned up 4184 * @task: ptr to iscsi_task resource to be freed. 4185 * 4186 * Free driver mgmt resources bound to the CXN.
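 * Releases the WRB handle, the mgmt SGL handle and the DMA mapping (mtask_addr) set up earlier in the task's life, in that order.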
4187 **/ 4188 void 4189 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 4190 struct iscsi_task *task) 4191 { 4192 struct beiscsi_io_task *io_task; 4193 struct beiscsi_hba *phba = beiscsi_conn->phba; 4194 struct hwi_wrb_context *pwrb_context; 4195 struct hwi_controller *phwi_ctrlr; 4196 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4197 beiscsi_conn->beiscsi_conn_cid); 4198 4199 phwi_ctrlr = phba->phwi_ctrlr; 4200 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4201 4202 io_task = task->dd_data; 4203 4204 if (io_task->pwrb_handle) { 4205 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4206 io_task->pwrb_handle = NULL; 4207 } 4208 4209 if (io_task->psgl_handle) { 4210 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4211 io_task->psgl_handle = NULL; 4212 } 4213 4214 if (io_task->mtask_addr) { 4215 dma_unmap_single(&phba->pcidev->dev, 4216 io_task->mtask_addr, 4217 io_task->mtask_data_count, 4218 DMA_TO_DEVICE); 4219 io_task->mtask_addr = 0; 4220 } 4221 } 4222 4223 /** 4224 * beiscsi_cleanup_task()- Free driver resources of the task 4225 * @task: ptr to the iscsi task 4226 * 4227 **/ 4228 static void beiscsi_cleanup_task(struct iscsi_task *task) 4229 { 4230 struct beiscsi_io_task *io_task = task->dd_data; 4231 struct iscsi_conn *conn = task->conn; 4232 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4233 struct beiscsi_hba *phba = beiscsi_conn->phba; 4234 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4235 struct hwi_wrb_context *pwrb_context; 4236 struct hwi_controller *phwi_ctrlr; 4237 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4238 beiscsi_conn->beiscsi_conn_cid); 4239 4240 phwi_ctrlr = phba->phwi_ctrlr; 4241 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4242 4243 if (io_task->cmd_bhs) { 4244 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4245 io_task->bhs_pa.u.a64.address); 4246 io_task->cmd_bhs = NULL; 4247 task->hdr = NULL; 4248 } 4249 4250 if (task->sc) { 4251 if (io_task->pwrb_handle) { 4252 free_wrb_handle(phba, pwrb_context, 4253 io_task->pwrb_handle); 4254 io_task->pwrb_handle = NULL; 4255 } 4256 4257 if (io_task->psgl_handle) { 4258 free_io_sgl_handle(phba, io_task->psgl_handle); 4259 io_task->psgl_handle = NULL; 4260 } 4261 4262 if (io_task->scsi_cmnd) { 4263 if (io_task->num_sg) 4264 scsi_dma_unmap(io_task->scsi_cmnd); 4265 io_task->scsi_cmnd = NULL; 4266 } 4267 } else { 4268 if (!beiscsi_conn->login_in_progress) 4269 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4270 } 4271 } 4272 4273 void 4274 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4275 struct beiscsi_offload_params *params) 4276 { 4277 struct wrb_handle *pwrb_handle; 4278 struct hwi_wrb_context *pwrb_context = NULL; 4279 struct beiscsi_hba *phba = beiscsi_conn->phba; 4280 struct iscsi_task *task = beiscsi_conn->task; 4281 struct iscsi_session *session = task->conn->session; 4282 u32 doorbell = 0; 4283 4284 /* 4285 * We can always use 0 here because it is reserved by libiscsi for 4286 * login/startup related tasks. 
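 * beiscsi_conn->task therefore still points at the login task, whose handles are released below before the connection is offloaded.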
4287 */ 4288 beiscsi_conn->login_in_progress = 0; 4289 spin_lock_bh(&session->back_lock); 4290 beiscsi_cleanup_task(task); 4291 spin_unlock_bh(&session->back_lock); 4292 4293 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 4294 &pwrb_context); 4295 4296 /* Check for the adapter family */ 4297 if (is_chip_be2_be3r(phba)) 4298 beiscsi_offload_cxn_v0(params, pwrb_handle, 4299 phba->init_mem, 4300 pwrb_context); 4301 else 4302 beiscsi_offload_cxn_v2(params, pwrb_handle, 4303 pwrb_context); 4304 4305 be_dws_le_to_cpu(pwrb_handle->pwrb, 4306 sizeof(struct iscsi_target_context_update_wrb)); 4307 4308 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4309 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4310 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4311 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4312 iowrite32(doorbell, phba->db_va + 4313 beiscsi_conn->doorbell_offset); 4314 4315 /* 4316 * There is no completion for CONTEXT_UPDATE. Completion of the next 4317 * WRB posted guarantees that the FW has processed and DMA'd it. 4318 * Use beiscsi_put_wrb_handle to put it back in the pool, which makes 4319 * sure the WRB is zeroed or reused only after wrbs_per_cxn posts. 4320 */ 4321 beiscsi_put_wrb_handle(pwrb_context, pwrb_handle, 4322 phba->params.wrbs_per_cxn); 4323 beiscsi_log(phba, KERN_INFO, 4324 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4325 "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n", 4326 pwrb_handle, pwrb_context->free_index, 4327 pwrb_context->wrb_handles_available); 4328 } 4329 4330 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4331 int *index, int *age) 4332 { 4333 *index = (int)itt; 4334 if (age) 4335 *age = conn->session->age; 4336 } 4337 4338 /** 4339 * beiscsi_alloc_pdu - allocates pdu and related resources 4340 * @task: libiscsi task 4341 * @opcode: opcode of pdu for task 4342 * 4343 * This is called with the session lock held. It allocates the WRB 4344 * and SGL handles if needed for the command and preps the PDU's itt. 4345 * beiscsi_parse_pdu() later translates 4346 * the PDU itt to the libiscsi task itt.
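 * Example (illustrative): a WRB handle with wrb_index 0x12 paired with an SGL handle with sgl_index 0x34 produces itt = cpu_to_be32((0x12 << 16) | 0x34); the completion path recovers the task from pwrb_handle->pio_handle.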
4347 */ 4348 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4349 { 4350 struct beiscsi_io_task *io_task = task->dd_data; 4351 struct iscsi_conn *conn = task->conn; 4352 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4353 struct beiscsi_hba *phba = beiscsi_conn->phba; 4354 struct hwi_wrb_context *pwrb_context; 4355 struct hwi_controller *phwi_ctrlr; 4356 itt_t itt; 4357 uint16_t cri_index = 0; 4358 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4359 dma_addr_t paddr; 4360 4361 io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool, 4362 GFP_ATOMIC, &paddr); 4363 if (!io_task->cmd_bhs) 4364 return -ENOMEM; 4365 io_task->bhs_pa.u.a64.address = paddr; 4366 io_task->libiscsi_itt = (itt_t)task->itt; 4367 io_task->conn = beiscsi_conn; 4368 4369 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4370 task->hdr_max = sizeof(struct be_cmd_bhs); 4371 io_task->psgl_handle = NULL; 4372 io_task->pwrb_handle = NULL; 4373 4374 if (task->sc) { 4375 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4376 if (!io_task->psgl_handle) { 4377 beiscsi_log(phba, KERN_ERR, 4378 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4379 "BM_%d : Alloc of IO_SGL_ICD Failed " 4380 "for the CID : %d\n", 4381 beiscsi_conn->beiscsi_conn_cid); 4382 goto free_hndls; 4383 } 4384 io_task->pwrb_handle = alloc_wrb_handle(phba, 4385 beiscsi_conn->beiscsi_conn_cid, 4386 &io_task->pwrb_context); 4387 if (!io_task->pwrb_handle) { 4388 beiscsi_log(phba, KERN_ERR, 4389 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4390 "BM_%d : Alloc of WRB_HANDLE Failed " 4391 "for the CID : %d\n", 4392 beiscsi_conn->beiscsi_conn_cid); 4393 goto free_io_hndls; 4394 } 4395 } else { 4396 io_task->scsi_cmnd = NULL; 4397 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4398 beiscsi_conn->task = task; 4399 if (!beiscsi_conn->login_in_progress) { 4400 io_task->psgl_handle = (struct sgl_handle *) 4401 alloc_mgmt_sgl_handle(phba); 4402 if (!io_task->psgl_handle) { 4403 beiscsi_log(phba, KERN_ERR, 4404 BEISCSI_LOG_IO | 4405 BEISCSI_LOG_CONFIG, 4406 "BM_%d : Alloc of MGMT_SGL_ICD Failed " 4407 "for the CID : %d\n", 4408 beiscsi_conn->beiscsi_conn_cid); 4409 goto free_hndls; 4410 } 4411 4412 beiscsi_conn->login_in_progress = 1; 4413 beiscsi_conn->plogin_sgl_handle = 4414 io_task->psgl_handle; 4415 io_task->pwrb_handle = 4416 alloc_wrb_handle(phba, 4417 beiscsi_conn->beiscsi_conn_cid, 4418 &io_task->pwrb_context); 4419 if (!io_task->pwrb_handle) { 4420 beiscsi_log(phba, KERN_ERR, 4421 BEISCSI_LOG_IO | 4422 BEISCSI_LOG_CONFIG, 4423 "BM_%d : Alloc of WRB_HANDLE Failed " 4424 "for the CID : %d\n", 4425 beiscsi_conn->beiscsi_conn_cid); 4426 goto free_mgmt_hndls; 4427 } 4428 beiscsi_conn->plogin_wrb_handle = 4429 io_task->pwrb_handle; 4430 4431 } else { 4432 io_task->psgl_handle = 4433 beiscsi_conn->plogin_sgl_handle; 4434 io_task->pwrb_handle = 4435 beiscsi_conn->plogin_wrb_handle; 4436 } 4437 } else { 4438 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4439 if (!io_task->psgl_handle) { 4440 beiscsi_log(phba, KERN_ERR, 4441 BEISCSI_LOG_IO | 4442 BEISCSI_LOG_CONFIG, 4443 "BM_%d : Alloc of MGMT_SGL_ICD Failed " 4444 "for the CID : %d\n", 4445 beiscsi_conn->beiscsi_conn_cid); 4446 goto free_hndls; 4447 } 4448 io_task->pwrb_handle = 4449 alloc_wrb_handle(phba, 4450 beiscsi_conn->beiscsi_conn_cid, 4451 &io_task->pwrb_context); 4452 if (!io_task->pwrb_handle) { 4453 beiscsi_log(phba, KERN_ERR, 4454 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4455 "BM_%d : Alloc of WRB_HANDLE Failed " 4456 "for the CID : %d\n", 4457 
beiscsi_conn->beiscsi_conn_cid); 4458 goto free_mgmt_hndls; 4459 } 4460 4461 } 4462 } 4463 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4464 wrb_index << 16) | (unsigned int) 4465 (io_task->psgl_handle->sgl_index)); 4466 io_task->pwrb_handle->pio_handle = task; 4467 4468 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4469 return 0; 4470 4471 free_io_hndls: 4472 free_io_sgl_handle(phba, io_task->psgl_handle); 4473 goto free_hndls; 4474 free_mgmt_hndls: 4475 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4476 io_task->psgl_handle = NULL; 4477 free_hndls: 4478 phwi_ctrlr = phba->phwi_ctrlr; 4479 cri_index = BE_GET_CRI_FROM_CID( 4480 beiscsi_conn->beiscsi_conn_cid); 4481 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4482 if (io_task->pwrb_handle) 4483 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4484 io_task->pwrb_handle = NULL; 4485 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4486 io_task->bhs_pa.u.a64.address); 4487 io_task->cmd_bhs = NULL; 4488 return -ENOMEM; 4489 } 4490 static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4491 unsigned int num_sg, unsigned int xferlen, 4492 unsigned int writedir) 4493 { 4494 4495 struct beiscsi_io_task *io_task = task->dd_data; 4496 struct iscsi_conn *conn = task->conn; 4497 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4498 struct beiscsi_hba *phba = beiscsi_conn->phba; 4499 struct iscsi_wrb *pwrb = NULL; 4500 unsigned int doorbell = 0; 4501 4502 pwrb = io_task->pwrb_handle->pwrb; 4503 4504 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4505 4506 if (writedir) { 4507 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4508 INI_WR_CMD); 4509 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4510 } else { 4511 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4512 INI_RD_CMD); 4513 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4514 } 4515 4516 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4517 type, pwrb); 4518 4519 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4520 cpu_to_be16(*(unsigned short *) 4521 &io_task->cmd_bhs->iscsi_hdr.lun)); 4522 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4523 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4524 io_task->pwrb_handle->wrb_index); 4525 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4526 be32_to_cpu(task->cmdsn)); 4527 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4528 io_task->psgl_handle->sgl_index); 4529 4530 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4531 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4532 io_task->pwrb_handle->wrb_index); 4533 if (io_task->pwrb_context->plast_wrb) 4534 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4535 io_task->pwrb_context->plast_wrb, 4536 io_task->pwrb_handle->wrb_index); 4537 io_task->pwrb_context->plast_wrb = pwrb; 4538 4539 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4540 4541 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4542 doorbell |= (io_task->pwrb_handle->wrb_index & 4543 DB_DEF_PDU_WRB_INDEX_MASK) << 4544 DB_DEF_PDU_WRB_INDEX_SHIFT; 4545 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4546 iowrite32(doorbell, phba->db_va + 4547 beiscsi_conn->doorbell_offset); 4548 return 0; 4549 } 4550 4551 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4552 unsigned int num_sg, unsigned int xferlen, 4553 unsigned int writedir) 4554 { 4555 4556 struct beiscsi_io_task *io_task = task->dd_data; 4557 struct iscsi_conn *conn = task->conn; 4558 struct 
beiscsi_conn *beiscsi_conn = conn->dd_data; 4559 struct beiscsi_hba *phba = beiscsi_conn->phba; 4560 struct iscsi_wrb *pwrb = NULL; 4561 unsigned int doorbell = 0; 4562 4563 pwrb = io_task->pwrb_handle->pwrb; 4564 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4565 4566 if (writedir) { 4567 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4568 INI_WR_CMD); 4569 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4570 } else { 4571 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4572 INI_RD_CMD); 4573 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4574 } 4575 4576 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4577 type, pwrb); 4578 4579 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4580 cpu_to_be16(*(unsigned short *) 4581 &io_task->cmd_bhs->iscsi_hdr.lun)); 4582 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4583 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4584 io_task->pwrb_handle->wrb_index); 4585 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4586 be32_to_cpu(task->cmdsn)); 4587 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4588 io_task->psgl_handle->sgl_index); 4589 4590 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4591 4592 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4593 io_task->pwrb_handle->wrb_index); 4594 if (io_task->pwrb_context->plast_wrb) 4595 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4596 io_task->pwrb_context->plast_wrb, 4597 io_task->pwrb_handle->wrb_index); 4598 io_task->pwrb_context->plast_wrb = pwrb; 4599 4600 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4601 4602 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4603 doorbell |= (io_task->pwrb_handle->wrb_index & 4604 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4605 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4606 4607 iowrite32(doorbell, phba->db_va + 4608 beiscsi_conn->doorbell_offset); 4609 return 0; 4610 } 4611 4612 static int beiscsi_mtask(struct iscsi_task *task) 4613 { 4614 struct beiscsi_io_task *io_task = task->dd_data; 4615 struct iscsi_conn *conn = task->conn; 4616 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4617 struct beiscsi_hba *phba = beiscsi_conn->phba; 4618 struct iscsi_wrb *pwrb = NULL; 4619 unsigned int doorbell = 0; 4620 unsigned int cid; 4621 unsigned int pwrb_typeoffset = 0; 4622 int ret = 0; 4623 4624 cid = beiscsi_conn->beiscsi_conn_cid; 4625 pwrb = io_task->pwrb_handle->pwrb; 4626 4627 if (is_chip_be2_be3r(phba)) { 4628 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4629 be32_to_cpu(task->cmdsn)); 4630 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4631 io_task->pwrb_handle->wrb_index); 4632 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4633 io_task->psgl_handle->sgl_index); 4634 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 4635 task->data_count); 4636 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4637 io_task->pwrb_handle->wrb_index); 4638 if (io_task->pwrb_context->plast_wrb) 4639 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4640 io_task->pwrb_context->plast_wrb, 4641 io_task->pwrb_handle->wrb_index); 4642 io_task->pwrb_context->plast_wrb = pwrb; 4643 4644 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4645 } else { 4646 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4647 be32_to_cpu(task->cmdsn)); 4648 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4649 io_task->pwrb_handle->wrb_index); 4650 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4651 io_task->psgl_handle->sgl_index); 4652 AMAP_SET_BITS(struct 
amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 4653 task->data_count); 4654 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4655 io_task->pwrb_handle->wrb_index); 4656 if (io_task->pwrb_context->plast_wrb) 4657 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4658 io_task->pwrb_context->plast_wrb, 4659 io_task->pwrb_handle->wrb_index); 4660 io_task->pwrb_context->plast_wrb = pwrb; 4661 4662 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 4663 } 4664 4665 4666 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 4667 case ISCSI_OP_LOGIN: 4668 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 4669 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4670 ret = hwi_write_buffer(pwrb, task); 4671 break; 4672 case ISCSI_OP_NOOP_OUT: 4673 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4674 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4675 if (is_chip_be2_be3r(phba)) 4676 AMAP_SET_BITS(struct amap_iscsi_wrb, 4677 dmsg, pwrb, 1); 4678 else 4679 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4680 dmsg, pwrb, 1); 4681 } else { 4682 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 4683 if (is_chip_be2_be3r(phba)) 4684 AMAP_SET_BITS(struct amap_iscsi_wrb, 4685 dmsg, pwrb, 0); 4686 else 4687 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4688 dmsg, pwrb, 0); 4689 } 4690 ret = hwi_write_buffer(pwrb, task); 4691 break; 4692 case ISCSI_OP_TEXT: 4693 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4694 ret = hwi_write_buffer(pwrb, task); 4695 break; 4696 case ISCSI_OP_SCSI_TMFUNC: 4697 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 4698 ret = hwi_write_buffer(pwrb, task); 4699 break; 4700 case ISCSI_OP_LOGOUT: 4701 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 4702 ret = hwi_write_buffer(pwrb, task); 4703 break; 4704 4705 default: 4706 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4707 "BM_%d : opcode =%d Not supported\n", 4708 task->hdr->opcode & ISCSI_OPCODE_MASK); 4709 4710 return -EINVAL; 4711 } 4712 4713 if (ret) 4714 return ret; 4715 4716 /* Set the task type */ 4717 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 4718 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 4719 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 4720 4721 doorbell |= cid & DB_WRB_POST_CID_MASK; 4722 doorbell |= (io_task->pwrb_handle->wrb_index & 4723 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4724 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4725 iowrite32(doorbell, phba->db_va + 4726 beiscsi_conn->doorbell_offset); 4727 return 0; 4728 } 4729 4730 static int beiscsi_task_xmit(struct iscsi_task *task) 4731 { 4732 struct beiscsi_io_task *io_task = task->dd_data; 4733 struct scsi_cmnd *sc = task->sc; 4734 struct beiscsi_hba *phba; 4735 struct scatterlist *sg; 4736 int num_sg; 4737 unsigned int writedir = 0, xferlen = 0; 4738 4739 phba = io_task->conn->phba; 4740 /** 4741 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be 4742 * operational if FW still gets heartbeat from EP FW. Is management 4743 * path really needed to continue further? 
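 * For now both paths fail fast with -EIO whenever the port is not marked online.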
4744 */ 4745 if (!beiscsi_hba_is_online(phba)) 4746 return -EIO; 4747 4748 if (!io_task->conn->login_in_progress) 4749 task->hdr->exp_statsn = 0; 4750 4751 if (!sc) 4752 return beiscsi_mtask(task); 4753 4754 io_task->scsi_cmnd = sc; 4755 io_task->num_sg = 0; 4756 num_sg = scsi_dma_map(sc); 4757 if (num_sg < 0) { 4758 beiscsi_log(phba, KERN_ERR, 4759 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 4760 "BM_%d : scsi_dma_map Failed " 4761 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 4762 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 4763 io_task->libiscsi_itt, scsi_bufflen(sc)); 4764 4765 return num_sg; 4766 } 4767 /** 4768 * For scsi cmd task, check num_sg before unmapping in cleanup_task. 4769 * For management task, cleanup_task checks mtask_addr before unmapping. 4770 */ 4771 io_task->num_sg = num_sg; 4772 xferlen = scsi_bufflen(sc); 4773 sg = scsi_sglist(sc); 4774 if (sc->sc_data_direction == DMA_TO_DEVICE) 4775 writedir = 1; 4776 else 4777 writedir = 0; 4778 4779 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 4780 } 4781 4782 /** 4783 * beiscsi_bsg_request - handle bsg request from ISCSI transport 4784 * @job: job to handle 4785 */ 4786 static int beiscsi_bsg_request(struct bsg_job *job) 4787 { 4788 struct Scsi_Host *shost; 4789 struct beiscsi_hba *phba; 4790 struct iscsi_bsg_request *bsg_req = job->request; 4791 int rc = -EINVAL; 4792 unsigned int tag; 4793 struct be_dma_mem nonemb_cmd; 4794 struct be_cmd_resp_hdr *resp; 4795 struct iscsi_bsg_reply *bsg_reply = job->reply; 4796 unsigned short status, extd_status; 4797 4798 shost = iscsi_job_to_shost(job); 4799 phba = iscsi_host_priv(shost); 4800 4801 if (!beiscsi_hba_is_online(phba)) { 4802 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 4803 "BM_%d : HBA in error 0x%lx\n", phba->state); 4804 return -ENXIO; 4805 } 4806 4807 switch (bsg_req->msgcode) { 4808 case ISCSI_BSG_HST_VENDOR: 4809 nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, 4810 job->request_payload.payload_len, 4811 &nonemb_cmd.dma, GFP_KERNEL); 4812 if (nonemb_cmd.va == NULL) { 4813 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4814 "BM_%d : Failed to allocate memory for " 4815 "beiscsi_bsg_request\n"); 4816 return -ENOMEM; 4817 } 4818 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 4819 &nonemb_cmd); 4820 if (!tag) { 4821 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4822 "BM_%d : MBX Tag Allocation Failed\n"); 4823 4824 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, 4825 nonemb_cmd.va, nonemb_cmd.dma); 4826 return -EAGAIN; 4827 } 4828 4829 rc = wait_event_interruptible_timeout( 4830 phba->ctrl.mcc_wait[tag], 4831 phba->ctrl.mcc_tag_status[tag], 4832 msecs_to_jiffies( 4833 BEISCSI_HOST_MBX_TIMEOUT)); 4834 4835 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 4836 clear_bit(MCC_TAG_STATE_RUNNING, 4837 &phba->ctrl.ptag_state[tag].tag_state); 4838 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, 4839 nonemb_cmd.va, nonemb_cmd.dma); 4840 return -EIO; 4841 } 4842 extd_status = (phba->ctrl.mcc_tag_status[tag] & 4843 CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; 4844 status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; 4845 free_mcc_wrb(&phba->ctrl, tag); 4846 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 4847 sg_copy_from_buffer(job->reply_payload.sg_list, 4848 job->reply_payload.sg_cnt, 4849 nonemb_cmd.va, (resp->response_length 4850 + sizeof(*resp))); 4851 bsg_reply->reply_payload_rcv_len = resp->response_length; 4852 bsg_reply->result = status; 4853 bsg_job_done(job, bsg_reply->result, 4854 
bsg_reply->reply_payload_rcv_len); 4855 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, 4856 nonemb_cmd.va, nonemb_cmd.dma); 4857 if (status || extd_status) { 4858 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4859 "BM_%d : MBX Cmd Failed" 4860 " status = %d extd_status = %d\n", 4861 status, extd_status); 4862 4863 return -EIO; 4864 } else { 4865 rc = 0; 4866 } 4867 break; 4868 4869 default: 4870 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4871 "BM_%d : Unsupported bsg command: 0x%x\n", 4872 bsg_req->msgcode); 4873 break; 4874 } 4875 4876 return rc; 4877 } 4878 4879 static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 4880 { 4881 /* Set the logging parameter */ 4882 beiscsi_log_enable_init(phba, beiscsi_log_enable); 4883 } 4884 4885 void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle) 4886 { 4887 if (phba->boot_struct.boot_kset) 4888 return; 4889 4890 /* skip if boot work is already in progress */ 4891 if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) 4892 return; 4893 4894 phba->boot_struct.retry = 3; 4895 phba->boot_struct.tag = 0; 4896 phba->boot_struct.s_handle = s_handle; 4897 phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE; 4898 schedule_work(&phba->boot_work); 4899 } 4900 4901 #define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3 4902 /* 4903 * beiscsi_show_boot_tgt_info() 4904 * Boot flag info for iscsi-utilities 4905 * Bit 0 Block valid flag 4906 * Bit 1 Firmware booting selected 4907 */ 4908 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 4909 { 4910 struct beiscsi_hba *phba = data; 4911 struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess; 4912 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 4913 char *str = buf; 4914 int rc = -EPERM; 4915 4916 switch (type) { 4917 case ISCSI_BOOT_TGT_NAME: 4918 rc = sprintf(buf, "%.*s\n", 4919 (int)strlen(boot_sess->target_name), 4920 (char *)&boot_sess->target_name); 4921 break; 4922 case ISCSI_BOOT_TGT_IP_ADDR: 4923 if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4) 4924 rc = sprintf(buf, "%pI4\n", 4925 (char *)&boot_conn->dest_ipaddr.addr); 4926 else 4927 rc = sprintf(str, "%pI6\n", 4928 (char *)&boot_conn->dest_ipaddr.addr); 4929 break; 4930 case ISCSI_BOOT_TGT_PORT: 4931 rc = sprintf(str, "%d\n", boot_conn->dest_port); 4932 break; 4933 4934 case ISCSI_BOOT_TGT_CHAP_NAME: 4935 rc = sprintf(str, "%.*s\n", 4936 boot_conn->negotiated_login_options.auth_data.chap. 4937 target_chap_name_length, 4938 (char *)&boot_conn->negotiated_login_options. 4939 auth_data.chap.target_chap_name); 4940 break; 4941 case ISCSI_BOOT_TGT_CHAP_SECRET: 4942 rc = sprintf(str, "%.*s\n", 4943 boot_conn->negotiated_login_options.auth_data.chap. 4944 target_secret_length, 4945 (char *)&boot_conn->negotiated_login_options. 4946 auth_data.chap.target_secret); 4947 break; 4948 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 4949 rc = sprintf(str, "%.*s\n", 4950 boot_conn->negotiated_login_options.auth_data.chap. 4951 intr_chap_name_length, 4952 (char *)&boot_conn->negotiated_login_options. 4953 auth_data.chap.intr_chap_name); 4954 break; 4955 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 4956 rc = sprintf(str, "%.*s\n", 4957 boot_conn->negotiated_login_options.auth_data.chap. 4958 intr_secret_length, 4959 (char *)&boot_conn->negotiated_login_options. 
4960 auth_data.chap.intr_secret); 4961 break; 4962 case ISCSI_BOOT_TGT_FLAGS: 4963 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 4964 break; 4965 case ISCSI_BOOT_TGT_NIC_ASSOC: 4966 rc = sprintf(str, "0\n"); 4967 break; 4968 } 4969 return rc; 4970 } 4971 4972 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 4973 { 4974 struct beiscsi_hba *phba = data; 4975 char *str = buf; 4976 int rc = -EPERM; 4977 4978 switch (type) { 4979 case ISCSI_BOOT_INI_INITIATOR_NAME: 4980 rc = sprintf(str, "%s\n", 4981 phba->boot_struct.boot_sess.initiator_iscsiname); 4982 break; 4983 } 4984 return rc; 4985 } 4986 4987 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 4988 { 4989 struct beiscsi_hba *phba = data; 4990 char *str = buf; 4991 int rc = -EPERM; 4992 4993 switch (type) { 4994 case ISCSI_BOOT_ETH_FLAGS: 4995 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 4996 break; 4997 case ISCSI_BOOT_ETH_INDEX: 4998 rc = sprintf(str, "0\n"); 4999 break; 5000 case ISCSI_BOOT_ETH_MAC: 5001 rc = beiscsi_get_macaddr(str, phba); 5002 break; 5003 } 5004 return rc; 5005 } 5006 5007 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 5008 { 5009 umode_t rc = 0; 5010 5011 switch (type) { 5012 case ISCSI_BOOT_TGT_NAME: 5013 case ISCSI_BOOT_TGT_IP_ADDR: 5014 case ISCSI_BOOT_TGT_PORT: 5015 case ISCSI_BOOT_TGT_CHAP_NAME: 5016 case ISCSI_BOOT_TGT_CHAP_SECRET: 5017 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5018 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5019 case ISCSI_BOOT_TGT_NIC_ASSOC: 5020 case ISCSI_BOOT_TGT_FLAGS: 5021 rc = S_IRUGO; 5022 break; 5023 } 5024 return rc; 5025 } 5026 5027 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 5028 { 5029 umode_t rc = 0; 5030 5031 switch (type) { 5032 case ISCSI_BOOT_INI_INITIATOR_NAME: 5033 rc = S_IRUGO; 5034 break; 5035 } 5036 return rc; 5037 } 5038 5039 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 5040 { 5041 umode_t rc = 0; 5042 5043 switch (type) { 5044 case ISCSI_BOOT_ETH_FLAGS: 5045 case ISCSI_BOOT_ETH_MAC: 5046 case ISCSI_BOOT_ETH_INDEX: 5047 rc = S_IRUGO; 5048 break; 5049 } 5050 return rc; 5051 } 5052 5053 static void beiscsi_boot_kobj_release(void *data) 5054 { 5055 struct beiscsi_hba *phba = data; 5056 5057 scsi_host_put(phba->shost); 5058 } 5059 5060 static int beiscsi_boot_create_kset(struct beiscsi_hba *phba) 5061 { 5062 struct boot_struct *bs = &phba->boot_struct; 5063 struct iscsi_boot_kobj *boot_kobj; 5064 5065 if (bs->boot_kset) { 5066 __beiscsi_log(phba, KERN_ERR, 5067 "BM_%d: boot_kset already created\n"); 5068 return 0; 5069 } 5070 5071 bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 5072 if (!bs->boot_kset) { 5073 __beiscsi_log(phba, KERN_ERR, 5074 "BM_%d: boot_kset alloc failed\n"); 5075 return -ENOMEM; 5076 } 5077 5078 /* get shost ref because the show function will refer phba */ 5079 if (!scsi_host_get(phba->shost)) 5080 goto free_kset; 5081 5082 boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba, 5083 beiscsi_show_boot_tgt_info, 5084 beiscsi_tgt_get_attr_visibility, 5085 beiscsi_boot_kobj_release); 5086 if (!boot_kobj) 5087 goto put_shost; 5088 5089 if (!scsi_host_get(phba->shost)) 5090 goto free_kset; 5091 5092 boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba, 5093 beiscsi_show_boot_ini_info, 5094 beiscsi_ini_get_attr_visibility, 5095 beiscsi_boot_kobj_release); 5096 if (!boot_kobj) 5097 goto put_shost; 5098 5099 if (!scsi_host_get(phba->shost)) 5100 goto free_kset; 5101 5102 boot_kobj = 
iscsi_boot_create_ethernet(bs->boot_kset, 0, phba, 5103 beiscsi_show_boot_eth_info, 5104 beiscsi_eth_get_attr_visibility, 5105 beiscsi_boot_kobj_release); 5106 if (!boot_kobj) 5107 goto put_shost; 5108 5109 return 0; 5110 5111 put_shost: 5112 scsi_host_put(phba->shost); 5113 free_kset: 5114 iscsi_boot_destroy_kset(bs->boot_kset); 5115 bs->boot_kset = NULL; 5116 return -ENOMEM; 5117 } 5118 5119 static void beiscsi_boot_work(struct work_struct *work) 5120 { 5121 struct beiscsi_hba *phba = 5122 container_of(work, struct beiscsi_hba, boot_work); 5123 struct boot_struct *bs = &phba->boot_struct; 5124 unsigned int tag = 0; 5125 5126 if (!beiscsi_hba_is_online(phba)) 5127 return; 5128 5129 beiscsi_log(phba, KERN_INFO, 5130 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 5131 "BM_%d : %s action %d\n", 5132 __func__, phba->boot_struct.action); 5133 5134 switch (phba->boot_struct.action) { 5135 case BEISCSI_BOOT_REOPEN_SESS: 5136 tag = beiscsi_boot_reopen_sess(phba); 5137 break; 5138 case BEISCSI_BOOT_GET_SHANDLE: 5139 tag = __beiscsi_boot_get_shandle(phba, 1); 5140 break; 5141 case BEISCSI_BOOT_GET_SINFO: 5142 tag = beiscsi_boot_get_sinfo(phba); 5143 break; 5144 case BEISCSI_BOOT_LOGOUT_SESS: 5145 tag = beiscsi_boot_logout_sess(phba); 5146 break; 5147 case BEISCSI_BOOT_CREATE_KSET: 5148 beiscsi_boot_create_kset(phba); 5149 /** 5150 * updated boot_kset is made visible to all before 5151 * ending the boot work. 5152 */ 5153 mb(); 5154 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5155 return; 5156 } 5157 if (!tag) { 5158 if (bs->retry--) 5159 schedule_work(&phba->boot_work); 5160 else 5161 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5162 } 5163 } 5164 5165 static void beiscsi_eqd_update_work(struct work_struct *work) 5166 { 5167 struct hwi_context_memory *phwi_context; 5168 struct be_set_eqd set_eqd[MAX_CPUS]; 5169 struct hwi_controller *phwi_ctrlr; 5170 struct be_eq_obj *pbe_eq; 5171 struct beiscsi_hba *phba; 5172 unsigned int pps, delta; 5173 struct be_aic_obj *aic; 5174 int eqd, i, num = 0; 5175 unsigned long now; 5176 5177 phba = container_of(work, struct beiscsi_hba, eqd_update.work); 5178 if (!beiscsi_hba_is_online(phba)) 5179 return; 5180 5181 phwi_ctrlr = phba->phwi_ctrlr; 5182 phwi_context = phwi_ctrlr->phwi_ctxt; 5183 5184 for (i = 0; i <= phba->num_cpus; i++) { 5185 aic = &phba->aic_obj[i]; 5186 pbe_eq = &phwi_context->be_eq[i]; 5187 now = jiffies; 5188 if (!aic->jiffies || time_before(now, aic->jiffies) || 5189 pbe_eq->cq_count < aic->eq_prev) { 5190 aic->jiffies = now; 5191 aic->eq_prev = pbe_eq->cq_count; 5192 continue; 5193 } 5194 delta = jiffies_to_msecs(now - aic->jiffies); 5195 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); 5196 eqd = (pps / 1500) << 2; 5197 5198 if (eqd < 8) 5199 eqd = 0; 5200 eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX); 5201 eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN); 5202 5203 aic->jiffies = now; 5204 aic->eq_prev = pbe_eq->cq_count; 5205 5206 if (eqd != aic->prev_eqd) { 5207 set_eqd[num].delay_multiplier = (eqd * 65)/100; 5208 set_eqd[num].eq_id = pbe_eq->q.id; 5209 aic->prev_eqd = eqd; 5210 num++; 5211 } 5212 } 5213 if (num) 5214 /* completion of this is ignored */ 5215 beiscsi_modify_eq_delay(phba, set_eqd, num); 5216 5217 schedule_delayed_work(&phba->eqd_update, 5218 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5219 } 5220 5221 static void beiscsi_hw_tpe_check(struct timer_list *t) 5222 { 5223 struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5224 u32 wait; 5225 5226 /* if not TPE, do nothing */ 5227 if (!beiscsi_detect_tpe(phba)) 
5228 return; 5229 5230 /* wait default 4000ms before recovering */ 5231 wait = 4000; 5232 if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL) 5233 wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL; 5234 queue_delayed_work(phba->wq, &phba->recover_port, 5235 msecs_to_jiffies(wait)); 5236 } 5237 5238 static void beiscsi_hw_health_check(struct timer_list *t) 5239 { 5240 struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5241 5242 5243 if (beiscsi_detect_ue(phba)) { 5244 __beiscsi_log(phba, KERN_ERR, 5245 "BM_%d : port in error: %lx\n", phba->state); 5246 /* sessions are no longer valid, so first fail the sessions */ 5247 queue_work(phba->wq, &phba->sess_work); 5248 5249 /* detect UER supported */ 5250 if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state)) 5251 return; 5252 /* modify this timer to check TPE */ 5253 phba->hw_check.function = beiscsi_hw_tpe_check; 5254 } 5255 5256 mod_timer(&phba->hw_check, 5257 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5258 } 5259 5260 /* 5261 * beiscsi_enable_port()- Enables the disabled port. 5262 * Only port resources freed in disable function are reallocated. 5263 * This is called in HBA error handling path. 5264 * 5265 * @phba: Instance of driver private structure 5266 * 5267 **/ 5268 static int beiscsi_enable_port(struct beiscsi_hba *phba) 5269 { 5270 struct hwi_context_memory *phwi_context; 5271 struct hwi_controller *phwi_ctrlr; 5272 struct be_eq_obj *pbe_eq; 5273 int ret, i; 5274 5275 if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 5276 __beiscsi_log(phba, KERN_ERR, 5277 "BM_%d : %s : port is online %lx\n", 5278 __func__, phba->state); 5279 return 0; 5280 } 5281 5282 ret = beiscsi_init_sliport(phba); 5283 if (ret) 5284 return ret; 5285 5286 be2iscsi_enable_msix(phba); 5287 5288 beiscsi_get_params(phba); 5289 beiscsi_set_host_data(phba); 5290 /* Re-enable UER. If different TPE occurs then it is recoverable. */ 5291 beiscsi_set_uer_feature(phba); 5292 5293 phba->shost->max_id = phba->params.cxns_per_ctrl - 1; 5294 phba->shost->can_queue = phba->params.ios_per_ctrl; 5295 ret = beiscsi_init_port(phba); 5296 if (ret < 0) { 5297 __beiscsi_log(phba, KERN_ERR, 5298 "BM_%d : init port failed\n"); 5299 goto disable_msix; 5300 } 5301 5302 for (i = 0; i < MAX_MCC_CMD; i++) { 5303 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5304 phba->ctrl.mcc_tag[i] = i + 1; 5305 phba->ctrl.mcc_tag_status[i + 1] = 0; 5306 phba->ctrl.mcc_tag_available++; 5307 } 5308 5309 phwi_ctrlr = phba->phwi_ctrlr; 5310 phwi_context = phwi_ctrlr->phwi_ctxt; 5311 for (i = 0; i < phba->num_cpus; i++) { 5312 pbe_eq = &phwi_context->be_eq[i]; 5313 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); 5314 } 5315 5316 i = (phba->pcidev->msix_enabled) ? i : 0; 5317 /* Work item for MCC handling */ 5318 pbe_eq = &phwi_context->be_eq[i]; 5319 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); 5320 5321 ret = beiscsi_init_irqs(phba); 5322 if (ret < 0) { 5323 __beiscsi_log(phba, KERN_ERR, 5324 "BM_%d : setup IRQs failed %d\n", ret); 5325 goto cleanup_port; 5326 } 5327 hwi_enable_intr(phba); 5328 /* port operational: clear all error bits */ 5329 set_bit(BEISCSI_HBA_ONLINE, &phba->state); 5330 __beiscsi_log(phba, KERN_INFO, 5331 "BM_%d : port online: 0x%lx\n", phba->state); 5332 5333 /* start hw_check timer and eqd_update work */ 5334 schedule_delayed_work(&phba->eqd_update, 5335 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5336 5337 /** 5338 * Timer function gets modified for TPE detection. 5339 * Always reinit to do health check first.
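 * (On TPE, beiscsi_hw_tpe_check() queues recover_port, which disables and re-enables the port and restores this health-check function.)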
5340 */ 5341 phba->hw_check.function = beiscsi_hw_health_check; 5342 mod_timer(&phba->hw_check, 5343 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5344 return 0; 5345 5346 cleanup_port: 5347 for (i = 0; i < phba->num_cpus; i++) { 5348 pbe_eq = &phwi_context->be_eq[i]; 5349 irq_poll_disable(&pbe_eq->iopoll); 5350 } 5351 hwi_cleanup_port(phba); 5352 5353 disable_msix: 5354 pci_free_irq_vectors(phba->pcidev); 5355 return ret; 5356 } 5357 5358 /* 5359 * beiscsi_disable_port()- Disable port and cleanup driver resources. 5360 * This is called in HBA error handling and driver removal. 5361 * @phba: Instance Priv structure 5362 * @unload: indicate driver is unloading 5363 * 5364 * Free the OS and HW resources held by the driver 5365 **/ 5366 static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload) 5367 { 5368 struct hwi_context_memory *phwi_context; 5369 struct hwi_controller *phwi_ctrlr; 5370 struct be_eq_obj *pbe_eq; 5371 unsigned int i; 5372 5373 if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state)) 5374 return; 5375 5376 phwi_ctrlr = phba->phwi_ctrlr; 5377 phwi_context = phwi_ctrlr->phwi_ctxt; 5378 hwi_disable_intr(phba); 5379 beiscsi_free_irqs(phba); 5380 pci_free_irq_vectors(phba->pcidev); 5381 5382 for (i = 0; i < phba->num_cpus; i++) { 5383 pbe_eq = &phwi_context->be_eq[i]; 5384 irq_poll_disable(&pbe_eq->iopoll); 5385 } 5386 cancel_delayed_work_sync(&phba->eqd_update); 5387 cancel_work_sync(&phba->boot_work); 5388 /* WQ might be running; cancel queued mcc_work if we are not exiting */ 5389 if (!unload && beiscsi_hba_in_error(phba)) { 5390 pbe_eq = &phwi_context->be_eq[i]; 5391 cancel_work_sync(&pbe_eq->mcc_work); 5392 } 5393 hwi_cleanup_port(phba); 5394 beiscsi_cleanup_port(phba); 5395 } 5396 5397 static void beiscsi_sess_work(struct work_struct *work) 5398 { 5399 struct beiscsi_hba *phba; 5400 5401 phba = container_of(work, struct beiscsi_hba, sess_work); 5402 /* 5403 * This work gets scheduled only in case of HBA error. 5404 * Old sessions are gone and need to be re-established. 5405 * iscsi_session_failure needs process context hence this work. 5406 */ 5407 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5408 } 5409 5410 static void beiscsi_recover_port(struct work_struct *work) 5411 { 5412 struct beiscsi_hba *phba; 5413 5414 phba = container_of(work, struct beiscsi_hba, recover_port.work); 5415 beiscsi_disable_port(phba, 0); 5416 beiscsi_enable_port(phba); 5417 } 5418 5419 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, 5420 pci_channel_state_t state) 5421 { 5422 struct beiscsi_hba *phba = NULL; 5423 5424 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5425 set_bit(BEISCSI_HBA_PCI_ERR, &phba->state); 5426 5427 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5428 "BM_%d : EEH error detected\n"); 5429 5430 /* first stop UE detection when PCI error detected */ 5431 del_timer_sync(&phba->hw_check); 5432 cancel_delayed_work_sync(&phba->recover_port); 5433 5434 /* sessions are no longer valid, so first fail the sessions */ 5435 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5436 beiscsi_disable_port(phba, 0); 5437 5438 if (state == pci_channel_io_perm_failure) { 5439 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5440 "BM_%d : EEH : State PERM Failure\n"); 5441 return PCI_ERS_RESULT_DISCONNECT; 5442 } 5443 5444 pci_disable_device(pdev); 5445 5446 /* The error could cause the FW to trigger a flash debug dump.
5447 * Resetting the card while flash dump is in progress 5448 * can cause it not to recover; wait for it to finish. 5449 * Wait only for the first function, as it is needed only once per 5450 * adapter. 5451 **/ 5452 if (pdev->devfn == 0) 5453 ssleep(30); 5454 5455 return PCI_ERS_RESULT_NEED_RESET; 5456 } 5457 5458 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) 5459 { 5460 struct beiscsi_hba *phba = NULL; 5461 int status = 0; 5462 5463 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5464 5465 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5466 "BM_%d : EEH Reset\n"); 5467 5468 status = pci_enable_device(pdev); 5469 if (status) 5470 return PCI_ERS_RESULT_DISCONNECT; 5471 5472 pci_set_master(pdev); 5473 pci_set_power_state(pdev, PCI_D0); 5474 pci_restore_state(pdev); 5475 5476 status = beiscsi_check_fw_rdy(phba); 5477 if (status) { 5478 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5479 "BM_%d : EEH Reset Completed\n"); 5480 } else { 5481 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5482 "BM_%d : EEH Reset Completion Failure\n"); 5483 return PCI_ERS_RESULT_DISCONNECT; 5484 } 5485 5486 return PCI_ERS_RESULT_RECOVERED; 5487 } 5488 5489 static void beiscsi_eeh_resume(struct pci_dev *pdev) 5490 { 5491 struct beiscsi_hba *phba; 5492 int ret; 5493 5494 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5495 pci_save_state(pdev); 5496 5497 ret = beiscsi_enable_port(phba); 5498 if (ret) 5499 __beiscsi_log(phba, KERN_ERR, 5500 "BM_%d : AER EEH resume failed\n"); 5501 } 5502 5503 static int beiscsi_dev_probe(struct pci_dev *pcidev, 5504 const struct pci_device_id *id) 5505 { 5506 struct hwi_context_memory *phwi_context; 5507 struct hwi_controller *phwi_ctrlr; 5508 struct beiscsi_hba *phba = NULL; 5509 struct be_eq_obj *pbe_eq; 5510 unsigned int s_handle; 5511 char wq_name[20]; 5512 int ret, i; 5513 5514 ret = beiscsi_enable_pci(pcidev); 5515 if (ret < 0) { 5516 dev_err(&pcidev->dev, 5517 "beiscsi_dev_probe - Failed to enable pci device\n"); 5518 return ret; 5519 } 5520 5521 phba = beiscsi_hba_alloc(pcidev); 5522 if (!phba) { 5523 dev_err(&pcidev->dev, 5524 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 5525 ret = -ENOMEM; 5526 goto disable_pci; 5527 } 5528 5529 /* Enable EEH reporting */ 5530 ret = pci_enable_pcie_error_reporting(pcidev); 5531 if (ret) 5532 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5533 "BM_%d : PCIe Error Reporting " 5534 "Enabling Failed\n"); 5535 5536 pci_save_state(pcidev); 5537 5538 /* Initialize Driver configuration Parameters */ 5539 beiscsi_hba_attrs_init(phba); 5540 5541 phba->mac_addr_set = false; 5542 5543 switch (pcidev->device) { 5544 case BE_DEVICE_ID1: 5545 case OC_DEVICE_ID1: 5546 case OC_DEVICE_ID2: 5547 phba->generation = BE_GEN2; 5548 phba->iotask_fn = beiscsi_iotask; 5549 dev_warn(&pcidev->dev, 5550 "Obsolete/Unsupported BE2 Adapter Family\n"); 5551 break; 5552 case BE_DEVICE_ID2: 5553 case OC_DEVICE_ID3: 5554 phba->generation = BE_GEN3; 5555 phba->iotask_fn = beiscsi_iotask; 5556 break; 5557 case OC_SKH_ID1: 5558 phba->generation = BE_GEN4; 5559 phba->iotask_fn = beiscsi_iotask_v2; 5560 break; 5561 default: 5562 phba->generation = 0; 5563 } 5564 5565 ret = be_ctrl_init(phba, pcidev); 5566 if (ret) { 5567 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5568 "BM_%d : be_ctrl_init failed\n"); 5569 goto free_hba; 5570 } 5571 5572 ret = beiscsi_init_sliport(phba); 5573 if (ret) 5574 goto free_hba; 5575 5576 spin_lock_init(&phba->io_sgl_lock); 5577 spin_lock_init(&phba->mgmt_sgl_lock); 5578 spin_lock_init(&phba->async_pdu_lock);
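/* Query FW config first: be2iscsi_enable_msix() keys off fw_config.eqid_count and the ulp_supported bits drive the rest of port setup. */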
5579 ret = beiscsi_get_fw_config(&phba->ctrl, phba); 5580 if (ret != 0) { 5581 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5582 "BM_%d : Error getting fw config\n"); 5583 goto free_port; 5584 } 5585 beiscsi_get_port_name(&phba->ctrl, phba); 5586 beiscsi_get_params(phba); 5587 beiscsi_set_host_data(phba); 5588 beiscsi_set_uer_feature(phba); 5589 5590 be2iscsi_enable_msix(phba); 5591 5592 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5593 "BM_%d : num_cpus = %d\n", 5594 phba->num_cpus); 5595 5596 phba->shost->max_id = phba->params.cxns_per_ctrl; 5597 phba->shost->can_queue = phba->params.ios_per_ctrl; 5598 ret = beiscsi_get_memory(phba); 5599 if (ret < 0) { 5600 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5601 "BM_%d : alloc host mem failed\n"); 5602 goto free_port; 5603 } 5604 5605 ret = beiscsi_init_port(phba); 5606 if (ret < 0) { 5607 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5608 "BM_%d : init port failed\n"); 5609 beiscsi_free_mem(phba); 5610 goto free_port; 5611 } 5612 5613 for (i = 0; i < MAX_MCC_CMD; i++) { 5614 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5615 phba->ctrl.mcc_tag[i] = i + 1; 5616 phba->ctrl.mcc_tag_status[i + 1] = 0; 5617 phba->ctrl.mcc_tag_available++; 5618 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, 5619 sizeof(struct be_dma_mem)); 5620 } 5621 5622 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 5623 5624 snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq", 5625 phba->shost->host_no); 5626 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name); 5627 if (!phba->wq) { 5628 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5629 "BM_%d : beiscsi_dev_probe-" 5630 "Failed to allocate work queue\n"); 5631 ret = -ENOMEM; 5632 goto free_twq; 5633 } 5634 5635 INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work); 5636 5637 phwi_ctrlr = phba->phwi_ctrlr; 5638 phwi_context = phwi_ctrlr->phwi_ctxt; 5639 5640 for (i = 0; i < phba->num_cpus; i++) { 5641 pbe_eq = &phwi_context->be_eq[i]; 5642 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); 5643 } 5644 5645 i = (phba->pcidev->msix_enabled) ? i : 0; 5646 /* Work item for MCC handling */ 5647 pbe_eq = &phwi_context->be_eq[i]; 5648 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); 5649 5650 ret = beiscsi_init_irqs(phba); 5651 if (ret < 0) { 5652 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5653 "BM_%d : beiscsi_dev_probe-" 5654 "Failed to beiscsi_init_irqs\n"); 5655 goto disable_iopoll; 5656 } 5657 hwi_enable_intr(phba); 5658 5659 ret = iscsi_host_add(phba->shost, &phba->pcidev->dev); 5660 if (ret) 5661 goto free_irqs; 5662 5663 /* set online bit after port is operational */ 5664 set_bit(BEISCSI_HBA_ONLINE, &phba->state); 5665 __beiscsi_log(phba, KERN_INFO, 5666 "BM_%d : port online: 0x%lx\n", phba->state); 5667 5668 INIT_WORK(&phba->boot_work, beiscsi_boot_work); 5669 ret = beiscsi_boot_get_shandle(phba, &s_handle); 5670 if (ret > 0) { 5671 beiscsi_start_boot_work(phba, s_handle); 5672 /** 5673 * Set this bit after starting the work to let 5674 * probe handle it first. 5675 * ASYNC event can too schedule this work. 5676 */ 5677 set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state); 5678 } 5679 5680 beiscsi_iface_create_default(phba); 5681 schedule_delayed_work(&phba->eqd_update, 5682 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5683 5684 INIT_WORK(&phba->sess_work, beiscsi_sess_work); 5685 INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port); 5686 /** 5687 * Start UE detection here. UE before this will cause stall in probe 5688 * and eventually fail the probe. 
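 * The timer is armed only after IRQs and the iSCSI host are registered, mirroring the beiscsi_enable_port() recovery path.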
	/*
	 * Start UE detection here. A UE before this point would stall
	 * the probe and eventually fail it.
	 */
	timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0);
	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_irqs:
	hwi_disable_intr(phba);
	beiscsi_free_irqs(phba);
disable_iopoll:
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
	destroy_workqueue(phba->wq);
free_twq:
	hwi_cleanup_port(phba);
	beiscsi_cleanup_port(phba);
	beiscsi_free_mem(phba);
free_port:
	dma_free_coherent(&phba->pcidev->dev,
			  phba->ctrl.mbox_mem_alloced.size,
			  phba->ctrl.mbox_mem_alloced.va,
			  phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
free_hba:
	pci_disable_msix(phba->pcidev);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_disable_pcie_error_reporting(pcidev);
	pci_set_drvdata(pcidev, NULL);
disable_pci:
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
	return ret;
}

static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;

	phba = pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	/* first stop UE detection before unloading */
	del_timer_sync(&phba->hw_check);
	cancel_delayed_work_sync(&phba->recover_port);
	cancel_work_sync(&phba->sess_work);

	beiscsi_iface_destroy_default(phba);
	iscsi_host_remove(phba->shost);
	beiscsi_disable_port(phba, 1);

	/* after cancelling boot_work */
	iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);

	/* free all resources */
	destroy_workqueue(phba->wq);
	beiscsi_free_mem(phba);

	/* ctrl uninit */
	beiscsi_unmap_pci_function(phba);
	dma_free_coherent(&phba->pcidev->dev,
			  phba->ctrl.mbox_mem_alloced.size,
			  phba->ctrl.mbox_mem_alloced.va,
			  phba->ctrl.mbox_mem_alloced.dma);

	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_disable_pcie_error_reporting(pcidev);
	pci_set_drvdata(pcidev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
}
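/*
 * EEH/AER recovery as driven by the PCI core: beiscsi_eeh_err_detected()
 * waits out any in-progress flash dump and requests a slot reset,
 * beiscsi_eeh_reset() re-enables the function and waits for firmware
 * readiness, and beiscsi_eeh_resume() brings the iSCSI port back online.
 */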
static struct pci_error_handlers beiscsi_eeh_handlers = {
	.error_detected = beiscsi_eeh_err_detected,
	.slot_reset = beiscsi_eeh_reset,
	.resume = beiscsi_eeh_resume,
};

struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.unbind_conn = iscsi_conn_unbind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = beiscsi_attr_is_visible,
	.set_iface_param = beiscsi_iface_set_param,
	.get_iface_param = beiscsi_iface_get_param,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.bsg_request = beiscsi_bsg_request,
};

static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table,
	.err_handler = &beiscsi_eeh_handlers
};

/*
 * Register the iSCSI transport before the PCI driver: probe can run as
 * soon as pci_register_driver() returns, and session creation relies on
 * beiscsi_scsi_transport being set. Module exit unwinds in reverse order.
 */
static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
		iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi transport.\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
	       &beiscsi_iscsi_transport);

	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}

static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);