/*
 * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
 * Host Bus Adapters. Refer to the README file included with this package
 * for driver version and adapter compatibility.
 *
 * Copyright (c) 2018 Broadcom. All Rights Reserved.
 * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful. ALL EXPRESS
 * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
 * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
 * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
 * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>
#include <linux/irq_poll.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");

#define beiscsi_disp_param(_name)\
static ssize_t \
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
static int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d : beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
static ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			struct device_attribute *attr, const char *buf,\
			size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
static int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d : beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
static DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
		   beiscsi_##_name##_disp, beiscsi_##_name##_store)

/*
 * When a new log level is added, update the MAX allowed value for log_enable
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events : 0x01\n"
		"\t\t\t\tMailbox Events : 0x02\n"
		"\t\t\t\tMiscellaneous Events : 0x04\n"
		"\t\t\t\tError Handling : 0x08\n"
		"\t\t\t\tIO Path Events : 0x10\n"
		"\t\t\t\tConfiguration Path : 0x20\n"
		"\t\t\t\tiSCSI Protocol : 0x40\n");

static DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
static DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO,
		   beiscsi_adap_family_disp, NULL);
static DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
static DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
static DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
		   beiscsi_active_session_disp, NULL);
static DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
		   beiscsi_free_session_disp, NULL);
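/*
 * The host attributes defined above are collected into beiscsi_groups
 * (via ATTRIBUTE_GROUPS) and hooked into beiscsi_sht.shost_groups below,
 * so they appear in sysfs under each Scsi_Host.
 */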
static struct attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable.attr,
	&dev_attr_beiscsi_drvr_ver.attr,
	&dev_attr_beiscsi_adapter_family.attr,
	&dev_attr_beiscsi_fw_ver.attr,
	&dev_attr_beiscsi_active_session_count.attr,
	&dev_attr_beiscsi_free_session_count.attr,
	&dev_attr_beiscsi_phys_port.attr,
	NULL,
};

ATTRIBUTE_GROUPS(beiscsi);

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task = iscsi_cmd(sc)->task;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_io_task *abrt_io_task;
	struct beiscsi_conn *beiscsi_conn;
	struct iscsi_session *session;
	struct invldt_cmd_tbl inv_tbl;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	int rc;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

completion_check:
	/* check if we raced, task just got cleaned up under us */
	spin_lock_bh(&session->back_lock);
	if (!abrt_task || !abrt_task->sc) {
		spin_unlock_bh(&session->back_lock);
		return SUCCESS;
	}
	/* get a task ref till FW processes the req for the ICD used */
	if (!iscsi_get_task(abrt_task)) {
		spin_unlock_bh(&session->back_lock);
		/* We are just about to call iscsi_free_task so wait for it. */
		udelay(5);
		goto completion_check;
	}

	abrt_io_task = abrt_task->dd_data;
	conn = abrt_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	/* mark WRB invalid which has not been processed by FW yet */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	}
	inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
	spin_unlock_bh(&session->back_lock);

	rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
	iscsi_put_task(abrt_task);
	if (rc) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : sc %p invalidation failed %d\n",
			    sc, rc);
		return FAILED;
	}

	return iscsi_eh_abort(sc);
}

static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct beiscsi_invldt_cmd_tbl {
		struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
		struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
	} *inv_tbl;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_session *session;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	struct iscsi_task *task;
	unsigned int i, nents;
	int rc, more = 0;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}

	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
	if (!inv_tbl) {
		spin_unlock_bh(&session->frwd_lock);
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : invldt_cmd_tbl alloc failed\n");
		return FAILED;
	}
	nents = 0;
	/* take back_lock to prevent task from getting cleaned up under us */
	spin_lock(&session->back_lock);
	for (i = 0; i < conn->session->cmds_max; i++) {
		task = conn->session->cmds[i];
		if (!task->sc)
			continue;

		if (sc->device->lun != task->sc->device->lun)
			continue;
		/*
		 * Can't fit in more cmds? Normally this won't happen because
		 * BEISCSI_CMD_PER_LUN is same as BE_INVLDT_CMD_TBL_SZ.
		 */
		if (nents == BE_INVLDT_CMD_TBL_SZ) {
			more = 1;
			break;
		}

		/* get a task ref till FW processes the req for the ICD used */
		if (!iscsi_get_task(task)) {
			/*
			 * The task has completed in the driver and is
			 * completing in libiscsi. Just ignore it here. When we
			 * call iscsi_eh_device_reset, it will wait for us.
			 */
			continue;
		}

		io_task = task->dd_data;
		/* mark WRB invalid which has not been processed by FW yet */
		if (is_chip_be2_be3r(phba)) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
				      io_task->pwrb_handle->pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
				      io_task->pwrb_handle->pwrb, 1);
		}

		inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
		inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
		inv_tbl->task[nents] = task;
		nents++;
	}
	spin_unlock(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);

	rc = SUCCESS;
	if (!nents)
		goto end_reset;

	if (more) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : number of cmds exceeds size of invalidation table\n");
		rc = FAILED;
		goto end_reset;
	}

	if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : cid %u scmds invalidation failed\n",
			    beiscsi_conn->beiscsi_conn_cid);
		rc = FAILED;
	}

end_reset:
	for (i = 0; i < nents; i++)
		iscsi_put_task(inv_tbl->task[i]);
	kfree(inv_tbl);

	if (rc == SUCCESS)
		rc = iscsi_eh_device_reset(sc);
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);


static const struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.target_alloc = iscsi_target_alloc,
	.eh_timed_out = iscsi_eh_cmd_timed_out,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_groups = beiscsi_groups,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.max_segment_size = 65536,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct iscsi_cmd),
};

static struct scsi_transport_template *beiscsi_scsi_transport;

static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->max_id = BE2_MAX_SESSIONS - 1;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	return phba;
}
static void
beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap(pci_resource_start(pcidev, 2),
		       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;

	addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap(pci_resource_start(pcidev, pcicfg_reg),
		       pci_resource_len(pcidev, pcicfg_reg));
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - request region failed\n");
		goto pci_dev_disable;
	}

	pci_set_master(pcidev);
	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		}
	}
	return 0;

pci_region_release:
	pci_release_regions(pcidev);
pci_dev_disable:
	pci_disable_device(pcidev);

	return ret;
}

static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
			mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	mutex_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);

	return status;
}

/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr device priv structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned to the per-page ICD posting */
			if (icd_start % icd_post_per_page) {
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						   ~(align_mask));
				phba->fw_config.iscsi_icd_start[ulp_num] =
					icd_start_align;
			}

			icd_count_align = (icd_count & ~align_mask);

			/* ICDs discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
					icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : Aligned ICD values\n"
				    "\t ICD Start : %d\n"
				    "\t ICD Count : %d\n"
				    "\t ICD Discarded : %d\n",
				    phba->fw_config.iscsi_icd_start[ulp_num],
				    phba->fw_config.iscsi_icd_count[ulp_num],
				    icd_count_unavailable);
			break;
		}
	}

	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				     (total_cid_count +
				      BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}

static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		 DB_EQ_RING_ID_HIGH_MASK)
		<< DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - ISR for the MSI-X vector that services only MCC events
 * @irq: Not used
 * @dev_id: Pointer to the EQ object for this vector
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
	       & EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
				       resource_id) / 32] &
		      EQE_RESID_MASK) >> 16) == mcc->id) {
			mcc_events++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}

	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
	}
	return IRQ_HANDLED;
}

/**
 * be_isr_msix - ISR for an MSI-X vector servicing I/O completions
 * @irq: Not used
 * @dev_id: Pointer to the EQ object for this vector
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_queue_info *eq;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;

	phba = pbe_eq->phba;
	/* disable interrupt till iopoll completes */
	hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
	irq_poll_sched(&pbe_eq->iopoll);

	return IRQ_HANDLED;
}

/**
 * be_isr - legacy INTx ISR; demultiplexes MCC and I/O events
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events, io_events;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr, rearm;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	io_events = 0;
	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
	       & EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
				       resource_id) / 32] &
		      EQE_RESID_MASK) >> 16) == mcc->id)
			mcc_events++;
		else
			io_events++;
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (!io_events && !mcc_events)
		return IRQ_NONE;

	/* no need to rearm if interrupt is only for IOs */
	rearm = 0;
	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		/* rearm for MCCQ */
		rearm = 1;
	}
	if (io_events)
		irq_poll_sched(&pbe_eq->iopoll);
	hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
	return IRQ_HANDLED;
}

static void beiscsi_free_irqs(struct beiscsi_hba *phba)
{
	struct hwi_context_memory *phwi_context;
	int i;

	if (!phba->pcidev->msix_enabled) {
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
		return;
	}

	phwi_context = phba->phwi_ctrlr->phwi_ctxt;
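	/* with MSI-X, one vector per I/O EQ plus a final vector for the MCC EQ */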
	for (i = 0; i <= phba->num_cpus; i++) {
		free_irq(pci_irq_vector(phba->pcidev, i),
			 &phwi_context->be_eq[i]);
		kfree(phba->msi_name[i]);
	}
}

static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (pcidev->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kasprintf(GFP_KERNEL,
						      "beiscsi_%02x_%02x",
						      phba->shost->host_no, i);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			ret = request_irq(pci_irq_vector(pcidev, i),
					  be_isr_msix, 0, phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : %s-Failed to register msix for i = %d\n",
					    __func__, i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
					      phba->shost->host_no);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
				  phba->msi_name[i], &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : %s-Failed to register beiscsi_msix_mcc\n",
				    __func__);
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : %s-Failed to register irq\n",
				    __func__);
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		/* free the vector registered for EQ j, not the failing one */
		free_irq(pci_irq_vector(pcidev, j), &phwi_context->be_eq[j]);
		kfree(phba->msi_name[j]);
	}
	return ret;
}

void hwi_ring_cq_db(struct beiscsi_hba *phba,
		    unsigned int id, unsigned int num_processed,
		    unsigned char rearm)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;

	/* Setting lower order CQ_ID Bits */
	val |= (id & DB_CQ_RING_ID_LOW_MASK);

	/* Setting Higher order CQ_ID Bits */
	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
		 DB_CQ_RING_ID_HIGH_MASK)
		<< DB_CQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle, io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index ==
		    (phba->params.ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
			    phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
		spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}

static inline struct wrb_handle *
beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       unsigned int wrbs_per_cxn)
{
	struct wrb_handle *pwrb_handle;
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	if (!pwrb_context->wrb_handles_available) {
		spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
		return NULL;
	}
	pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
	pwrb_context->wrb_handles_available--;
	if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
		pwrb_context->alloc_index = 0;
	else
		pwrb_context->alloc_index++;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);

	if (pwrb_handle)
		memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));

	return pwrb_handle;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @pcontext: ptr to ptr to wrb context
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    struct hwi_wrb_context **pcontext)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	/* return the context address */
	*pcontext = pwrb_context;
	return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
}

static inline void
beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       struct wrb_handle *pwrb_handle,
		       unsigned int wrbs_per_cxn)
{
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;
	pwrb_handle->pio_handle = NULL;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	beiscsi_put_wrb_handle(pwrb_context,
			       pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d (0x%x)\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL, eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}

static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task,
	       struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
	    (struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	if (io_task->scsi_cmnd) {
		scsi_dma_unmap(io_task->scsi_cmnd);
		io_task->scsi_cmnd = NULL;
	}
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task,
		   struct common_sol_cqe *csol_cqe)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
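/* Build the iSCSI TMF response PDU from the solicited CQE and complete it. */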
1212 be_complete_tmf(struct beiscsi_conn *beiscsi_conn, 1213 struct iscsi_task *task, 1214 struct common_sol_cqe *csol_cqe) 1215 { 1216 struct iscsi_tm_rsp *hdr; 1217 struct iscsi_conn *conn = beiscsi_conn->conn; 1218 struct beiscsi_io_task *io_task = task->dd_data; 1219 1220 hdr = (struct iscsi_tm_rsp *)task->hdr; 1221 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 1222 hdr->flags = csol_cqe->i_flags; 1223 hdr->response = csol_cqe->i_resp; 1224 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1225 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1226 csol_cqe->cmd_wnd - 1); 1227 1228 hdr->itt = io_task->libiscsi_itt; 1229 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1230 } 1231 1232 static void 1233 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, 1234 struct beiscsi_hba *phba, struct sol_cqe *psol) 1235 { 1236 struct hwi_wrb_context *pwrb_context; 1237 uint16_t wrb_index, cid, cri_index; 1238 struct hwi_controller *phwi_ctrlr; 1239 struct wrb_handle *pwrb_handle; 1240 struct iscsi_session *session; 1241 struct iscsi_task *task; 1242 1243 phwi_ctrlr = phba->phwi_ctrlr; 1244 if (is_chip_be2_be3r(phba)) { 1245 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1246 wrb_idx, psol); 1247 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1248 cid, psol); 1249 } else { 1250 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1251 wrb_idx, psol); 1252 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1253 cid, psol); 1254 } 1255 1256 cri_index = BE_GET_CRI_FROM_CID(cid); 1257 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1258 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; 1259 session = beiscsi_conn->conn->session; 1260 spin_lock_bh(&session->back_lock); 1261 task = pwrb_handle->pio_handle; 1262 if (task) 1263 __iscsi_put_task(task); 1264 spin_unlock_bh(&session->back_lock); 1265 } 1266 1267 static void 1268 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, 1269 struct iscsi_task *task, 1270 struct common_sol_cqe *csol_cqe) 1271 { 1272 struct iscsi_nopin *hdr; 1273 struct iscsi_conn *conn = beiscsi_conn->conn; 1274 struct beiscsi_io_task *io_task = task->dd_data; 1275 1276 hdr = (struct iscsi_nopin *)task->hdr; 1277 hdr->flags = csol_cqe->i_flags; 1278 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1279 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1280 csol_cqe->cmd_wnd - 1); 1281 1282 hdr->opcode = ISCSI_OP_NOOP_IN; 1283 hdr->itt = io_task->libiscsi_itt; 1284 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1285 } 1286 1287 static void adapter_get_sol_cqe(struct beiscsi_hba *phba, 1288 struct sol_cqe *psol, 1289 struct common_sol_cqe *csol_cqe) 1290 { 1291 if (is_chip_be2_be3r(phba)) { 1292 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, 1293 i_exp_cmd_sn, psol); 1294 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, 1295 i_res_cnt, psol); 1296 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, 1297 i_cmd_wnd, psol); 1298 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, 1299 wrb_index, psol); 1300 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, 1301 cid, psol); 1302 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1303 hw_sts, psol); 1304 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, 1305 i_resp, psol); 1306 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1307 i_sts, psol); 1308 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, 1309 i_flags, psol); 1310 } else { 1311 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1312 i_exp_cmd_sn, psol); 1313 csol_cqe->res_cnt = 
AMAP_GET_BITS(struct amap_sol_cqe_v2, 1314 i_res_cnt, psol); 1315 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1316 wrb_index, psol); 1317 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1318 cid, psol); 1319 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1320 hw_sts, psol); 1321 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1322 i_cmd_wnd, psol); 1323 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1324 cmd_cmpl, psol)) 1325 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1326 i_sts, psol); 1327 else 1328 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1329 i_sts, psol); 1330 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1331 u, psol)) 1332 csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW; 1333 1334 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1335 o, psol)) 1336 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; 1337 } 1338 } 1339 1340 1341 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, 1342 struct beiscsi_hba *phba, struct sol_cqe *psol) 1343 { 1344 struct iscsi_conn *conn = beiscsi_conn->conn; 1345 struct iscsi_session *session = conn->session; 1346 struct common_sol_cqe csol_cqe = {0}; 1347 struct hwi_wrb_context *pwrb_context; 1348 struct hwi_controller *phwi_ctrlr; 1349 struct wrb_handle *pwrb_handle; 1350 struct iscsi_task *task; 1351 uint16_t cri_index = 0; 1352 uint8_t type; 1353 1354 phwi_ctrlr = phba->phwi_ctrlr; 1355 1356 /* Copy the elements to a common structure */ 1357 adapter_get_sol_cqe(phba, psol, &csol_cqe); 1358 1359 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); 1360 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1361 1362 pwrb_handle = pwrb_context->pwrb_handle_basestd[ 1363 csol_cqe.wrb_index]; 1364 1365 spin_lock_bh(&session->back_lock); 1366 task = pwrb_handle->pio_handle; 1367 if (!task) { 1368 spin_unlock_bh(&session->back_lock); 1369 return; 1370 } 1371 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; 1372 1373 switch (type) { 1374 case HWH_TYPE_IO: 1375 case HWH_TYPE_IO_RD: 1376 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == 1377 ISCSI_OP_NOOP_OUT) 1378 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1379 else 1380 be_complete_io(beiscsi_conn, task, &csol_cqe); 1381 break; 1382 1383 case HWH_TYPE_LOGOUT: 1384 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) 1385 be_complete_logout(beiscsi_conn, task, &csol_cqe); 1386 else 1387 be_complete_tmf(beiscsi_conn, task, &csol_cqe); 1388 break; 1389 1390 case HWH_TYPE_LOGIN: 1391 beiscsi_log(phba, KERN_ERR, 1392 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1393 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in" 1394 " %s- Solicited path\n", __func__); 1395 break; 1396 1397 case HWH_TYPE_NOP: 1398 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1399 break; 1400 1401 default: 1402 beiscsi_log(phba, KERN_WARNING, 1403 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1404 "BM_%d : In %s, unknown type = %d " 1405 "wrb_index 0x%x CID 0x%x\n", __func__, type, 1406 csol_cqe.wrb_index, 1407 csol_cqe.cid); 1408 break; 1409 } 1410 1411 spin_unlock_bh(&session->back_lock); 1412 } 1413 1414 /* 1415 * ASYNC PDUs include 1416 * a. Unsolicited NOP-In (target initiated NOP-In) 1417 * b. ASYNC Messages 1418 * c. Reject PDU 1419 * d. Login response 1420 * These headers arrive unprocessed by the EP firmware. 1421 * iSCSI layer processes them. 
1422 */ 1423 static unsigned int 1424 beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn, 1425 struct pdu_base *phdr, void *pdata, unsigned int dlen) 1426 { 1427 struct beiscsi_hba *phba = beiscsi_conn->phba; 1428 struct iscsi_conn *conn = beiscsi_conn->conn; 1429 struct beiscsi_io_task *io_task; 1430 struct iscsi_hdr *login_hdr; 1431 struct iscsi_task *task; 1432 u8 code; 1433 1434 code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr); 1435 switch (code) { 1436 case ISCSI_OP_NOOP_IN: 1437 pdata = NULL; 1438 dlen = 0; 1439 break; 1440 case ISCSI_OP_ASYNC_EVENT: 1441 break; 1442 case ISCSI_OP_REJECT: 1443 WARN_ON(!pdata); 1444 WARN_ON(!(dlen == 48)); 1445 beiscsi_log(phba, KERN_ERR, 1446 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1447 "BM_%d : In ISCSI_OP_REJECT\n"); 1448 break; 1449 case ISCSI_OP_LOGIN_RSP: 1450 case ISCSI_OP_TEXT_RSP: 1451 task = conn->login_task; 1452 io_task = task->dd_data; 1453 login_hdr = (struct iscsi_hdr *)phdr; 1454 login_hdr->itt = io_task->libiscsi_itt; 1455 break; 1456 default: 1457 beiscsi_log(phba, KERN_WARNING, 1458 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1459 "BM_%d : unrecognized async PDU opcode 0x%x\n", 1460 code); 1461 return 1; 1462 } 1463 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen); 1464 return 0; 1465 } 1466 1467 static inline void 1468 beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx, 1469 struct hd_async_handle *pasync_handle) 1470 { 1471 pasync_handle->is_final = 0; 1472 pasync_handle->buffer_len = 0; 1473 pasync_handle->in_use = 0; 1474 list_del_init(&pasync_handle->link); 1475 } 1476 1477 static void 1478 beiscsi_hdl_purge_handles(struct beiscsi_hba *phba, 1479 struct hd_async_context *pasync_ctx, 1480 u16 cri) 1481 { 1482 struct hd_async_handle *pasync_handle, *tmp_handle; 1483 struct list_head *plist; 1484 1485 plist = &pasync_ctx->async_entry[cri].wq.list; 1486 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) 1487 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); 1488 1489 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list); 1490 pasync_ctx->async_entry[cri].wq.hdr_len = 0; 1491 pasync_ctx->async_entry[cri].wq.bytes_received = 0; 1492 pasync_ctx->async_entry[cri].wq.bytes_needed = 0; 1493 } 1494 1495 static struct hd_async_handle * 1496 beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn, 1497 struct hd_async_context *pasync_ctx, 1498 struct i_t_dpdu_cqe *pdpdu_cqe, 1499 u8 *header) 1500 { 1501 struct beiscsi_hba *phba = beiscsi_conn->phba; 1502 struct hd_async_handle *pasync_handle; 1503 struct be_bus_address phys_addr; 1504 u16 cid, code, ci, cri; 1505 u8 final, error = 0; 1506 u32 dpl; 1507 1508 cid = beiscsi_conn->beiscsi_conn_cid; 1509 cri = BE_GET_ASYNC_CRI_FROM_CID(cid); 1510 /** 1511 * This function is invoked to get the right async_handle structure 1512 * from a given DEF PDU CQ entry. 
1513 * 1514 * - index in CQ entry gives the vertical index 1515 * - address in CQ entry is the offset where the DMA last ended 1516 * - final - no more notifications for this PDU 1517 */ 1518 if (is_chip_be2_be3r(phba)) { 1519 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1520 dpl, pdpdu_cqe); 1521 ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1522 index, pdpdu_cqe); 1523 final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1524 final, pdpdu_cqe); 1525 } else { 1526 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1527 dpl, pdpdu_cqe); 1528 ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1529 index, pdpdu_cqe); 1530 final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1531 final, pdpdu_cqe); 1532 } 1533 1534 /** 1535 * DB addr Hi/Lo is same for BE and SKH. 1536 * Subtract the dataplacementlength to get to the base. 1537 */ 1538 phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1539 db_addr_lo, pdpdu_cqe); 1540 phys_addr.u.a32.address_lo -= dpl; 1541 phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1542 db_addr_hi, pdpdu_cqe); 1543 1544 code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe); 1545 switch (code) { 1546 case UNSOL_HDR_NOTIFY: 1547 pasync_handle = pasync_ctx->async_entry[ci].header; 1548 *header = 1; 1549 break; 1550 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 1551 error = 1; 1552 fallthrough; 1553 case UNSOL_DATA_NOTIFY: 1554 pasync_handle = pasync_ctx->async_entry[ci].data; 1555 break; 1556 /* called only for above codes */ 1557 default: 1558 return NULL; 1559 } 1560 1561 if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address || 1562 pasync_handle->index != ci) { 1563 /* driver bug - if ci does not match async handle index */ 1564 error = 1; 1565 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1566 "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n", 1567 cid, pasync_handle->is_header ? 'H' : 'D', 1568 pasync_handle->pa.u.a64.address, 1569 pasync_handle->index, 1570 phys_addr.u.a64.address, ci); 1571 /* FW has stale address - attempt continuing by dropping */ 1572 } 1573 1574 /** 1575 * DEF PDU header and data buffers with errors should be simply 1576 * dropped as there are no consumers for it. 1577 */ 1578 if (error) { 1579 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); 1580 return NULL; 1581 } 1582 1583 if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) { 1584 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1585 "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n", 1586 cid, code, ci, phys_addr.u.a64.address); 1587 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); 1588 } 1589 1590 list_del_init(&pasync_handle->link); 1591 /** 1592 * Each CID is associated with unique CRI. 1593 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different. 
1594 **/ 1595 pasync_handle->cri = cri; 1596 pasync_handle->is_final = final; 1597 pasync_handle->buffer_len = dpl; 1598 pasync_handle->in_use = 1; 1599 1600 return pasync_handle; 1601 } 1602 1603 static unsigned int 1604 beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn, 1605 struct hd_async_context *pasync_ctx, 1606 u16 cri) 1607 { 1608 struct iscsi_session *session = beiscsi_conn->conn->session; 1609 struct hd_async_handle *pasync_handle, *plast_handle; 1610 struct beiscsi_hba *phba = beiscsi_conn->phba; 1611 void *phdr = NULL, *pdata = NULL; 1612 u32 dlen = 0, status = 0; 1613 struct list_head *plist; 1614 1615 plist = &pasync_ctx->async_entry[cri].wq.list; 1616 plast_handle = NULL; 1617 list_for_each_entry(pasync_handle, plist, link) { 1618 plast_handle = pasync_handle; 1619 /* get the header, the first entry */ 1620 if (!phdr) { 1621 phdr = pasync_handle->pbuffer; 1622 continue; 1623 } 1624 /* use first buffer to collect all the data */ 1625 if (!pdata) { 1626 pdata = pasync_handle->pbuffer; 1627 dlen = pasync_handle->buffer_len; 1628 continue; 1629 } 1630 if (!pasync_handle->buffer_len || 1631 (dlen + pasync_handle->buffer_len) > 1632 pasync_ctx->async_data.buffer_size) 1633 break; 1634 memcpy(pdata + dlen, pasync_handle->pbuffer, 1635 pasync_handle->buffer_len); 1636 dlen += pasync_handle->buffer_len; 1637 } 1638 1639 if (!plast_handle->is_final) { 1640 /* last handle should have final PDU notification from FW */ 1641 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1642 "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n", 1643 beiscsi_conn->beiscsi_conn_cid, plast_handle, 1644 AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr), 1645 pasync_ctx->async_entry[cri].wq.hdr_len, 1646 pasync_ctx->async_entry[cri].wq.bytes_needed, 1647 pasync_ctx->async_entry[cri].wq.bytes_received); 1648 } 1649 spin_lock_bh(&session->back_lock); 1650 status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen); 1651 spin_unlock_bh(&session->back_lock); 1652 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); 1653 return status; 1654 } 1655 1656 static unsigned int 1657 beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn, 1658 struct hd_async_context *pasync_ctx, 1659 struct hd_async_handle *pasync_handle) 1660 { 1661 unsigned int bytes_needed = 0, status = 0; 1662 u16 cri = pasync_handle->cri; 1663 struct cri_wait_queue *wq; 1664 struct beiscsi_hba *phba; 1665 struct pdu_base *ppdu; 1666 char *err = ""; 1667 1668 phba = beiscsi_conn->phba; 1669 wq = &pasync_ctx->async_entry[cri].wq; 1670 if (pasync_handle->is_header) { 1671 /* check if PDU hdr is rcv'd when old hdr not completed */ 1672 if (wq->hdr_len) { 1673 err = "incomplete"; 1674 goto drop_pdu; 1675 } 1676 ppdu = pasync_handle->pbuffer; 1677 bytes_needed = AMAP_GET_BITS(struct amap_pdu_base, 1678 data_len_hi, ppdu); 1679 bytes_needed <<= 16; 1680 bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base, 1681 data_len_lo, ppdu)); 1682 wq->hdr_len = pasync_handle->buffer_len; 1683 wq->bytes_received = 0; 1684 wq->bytes_needed = bytes_needed; 1685 list_add_tail(&pasync_handle->link, &wq->list); 1686 if (!bytes_needed) 1687 status = beiscsi_hdl_fwd_pdu(beiscsi_conn, 1688 pasync_ctx, cri); 1689 } else { 1690 /* check if data received has header and is needed */ 1691 if (!wq->hdr_len || !wq->bytes_needed) { 1692 err = "header less"; 1693 goto drop_pdu; 1694 } 1695 wq->bytes_received += pasync_handle->buffer_len; 1696 /* Something got overwritten? Better catch it here. 
*/ 1697 if (wq->bytes_received > wq->bytes_needed) { 1698 err = "overflow"; 1699 goto drop_pdu; 1700 } 1701 list_add_tail(&pasync_handle->link, &wq->list); 1702 if (wq->bytes_received == wq->bytes_needed) 1703 status = beiscsi_hdl_fwd_pdu(beiscsi_conn, 1704 pasync_ctx, cri); 1705 } 1706 return status; 1707 1708 drop_pdu: 1709 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1710 "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n", 1711 beiscsi_conn->beiscsi_conn_cid, err, 1712 pasync_handle->is_header ? 'H' : 'D', 1713 wq->hdr_len, wq->bytes_needed, 1714 pasync_handle->buffer_len); 1715 /* discard this handle */ 1716 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); 1717 /* free all the other handles in cri_wait_queue */ 1718 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); 1719 /* try continuing */ 1720 return status; 1721 } 1722 1723 static void 1724 beiscsi_hdq_post_handles(struct beiscsi_hba *phba, 1725 u8 header, u8 ulp_num, u16 nbuf) 1726 { 1727 struct hd_async_handle *pasync_handle; 1728 struct hd_async_context *pasync_ctx; 1729 struct hwi_controller *phwi_ctrlr; 1730 struct phys_addr *pasync_sge; 1731 u32 ring_id, doorbell = 0; 1732 u32 doorbell_offset; 1733 u16 prod, pi; 1734 1735 phwi_ctrlr = phba->phwi_ctrlr; 1736 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1737 if (header) { 1738 pasync_sge = pasync_ctx->async_header.ring_base; 1739 pi = pasync_ctx->async_header.pi; 1740 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; 1741 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. 1742 doorbell_offset; 1743 } else { 1744 pasync_sge = pasync_ctx->async_data.ring_base; 1745 pi = pasync_ctx->async_data.pi; 1746 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; 1747 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. 
1748 doorbell_offset; 1749 } 1750 1751 for (prod = 0; prod < nbuf; prod++) { 1752 if (header) 1753 pasync_handle = pasync_ctx->async_entry[pi].header; 1754 else 1755 pasync_handle = pasync_ctx->async_entry[pi].data; 1756 WARN_ON(pasync_handle->is_header != header); 1757 WARN_ON(pasync_handle->index != pi); 1758 /* setup the ring only once */ 1759 if (nbuf == pasync_ctx->num_entries) { 1760 /* note hi is lo */ 1761 pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo; 1762 pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi; 1763 } 1764 if (++pi == pasync_ctx->num_entries) 1765 pi = 0; 1766 } 1767 1768 if (header) 1769 pasync_ctx->async_header.pi = pi; 1770 else 1771 pasync_ctx->async_data.pi = pi; 1772 1773 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK; 1774 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT; 1775 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT; 1776 doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT; 1777 iowrite32(doorbell, phba->db_va + doorbell_offset); 1778 } 1779 1780 static void 1781 beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn, 1782 struct i_t_dpdu_cqe *pdpdu_cqe) 1783 { 1784 struct beiscsi_hba *phba = beiscsi_conn->phba; 1785 struct hd_async_handle *pasync_handle = NULL; 1786 struct hd_async_context *pasync_ctx; 1787 struct hwi_controller *phwi_ctrlr; 1788 u8 ulp_num, consumed, header = 0; 1789 u16 cid_cri; 1790 1791 phwi_ctrlr = phba->phwi_ctrlr; 1792 cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); 1793 ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri); 1794 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1795 pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx, 1796 pdpdu_cqe, &header); 1797 if (is_chip_be2_be3r(phba)) 1798 consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1799 num_cons, pdpdu_cqe); 1800 else 1801 consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1802 num_cons, pdpdu_cqe); 1803 if (pasync_handle) 1804 beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle); 1805 /* num_cons indicates number of 8 RQEs consumed */ 1806 if (consumed) 1807 beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed); 1808 } 1809 1810 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba) 1811 { 1812 struct be_queue_info *mcc_cq; 1813 struct be_mcc_compl *mcc_compl; 1814 unsigned int num_processed = 0; 1815 1816 mcc_cq = &phba->ctrl.mcc_obj.cq; 1817 mcc_compl = queue_tail_node(mcc_cq); 1818 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1819 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { 1820 if (beiscsi_hba_in_error(phba)) 1821 return; 1822 1823 if (num_processed >= 32) { 1824 hwi_ring_cq_db(phba, mcc_cq->id, 1825 num_processed, 0); 1826 num_processed = 0; 1827 } 1828 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { 1829 beiscsi_process_async_event(phba, mcc_compl); 1830 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { 1831 beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl); 1832 } 1833 1834 mcc_compl->flags = 0; 1835 queue_tail_inc(mcc_cq); 1836 mcc_compl = queue_tail_node(mcc_cq); 1837 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1838 num_processed++; 1839 } 1840 1841 if (num_processed > 0) 1842 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1); 1843 } 1844 1845 static void beiscsi_mcc_work(struct work_struct *work) 1846 { 1847 struct be_eq_obj *pbe_eq; 1848 struct beiscsi_hba *phba; 1849 1850 pbe_eq = container_of(work, struct be_eq_obj, mcc_work); 1851 phba = pbe_eq->phba; 1852 beiscsi_process_mcc_cq(phba); 1853 /* rearm EQ for further interrupts */ 1854 if 
(!beiscsi_hba_in_error(phba))
		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}

/**
 * beiscsi_process_cq()- Process the Completion Queue
 * @pbe_eq: Event Q on which the Completion has come
 * @budget: Max number of events to be processed
 *
 * return
 *     Number of Completion Entries processed.
 **/
unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	unsigned int total = 0;
	unsigned int num_processed = 0;
	unsigned short code = 0, cid = 0;
	uint16_t cri_index = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return 0;

		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
			CQE_CODE_MASK);

		/* Get the CID */
		if (is_chip_be2_be3r(phba)) {
			cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
		} else {
			if ((code == DRIVERMSG_NOTIFY) ||
			    (code == UNSOL_HDR_NOTIFY) ||
			    (code == UNSOL_DATA_NOTIFY))
				cid = AMAP_GET_BITS(
						struct amap_i_t_dpdu_cqe_v2,
						cid, sol);
			else
				cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    cid, sol);
		}

		cri_index = BE_GET_CRI_FROM_CID(cid);
		ep = phba->ep_array[cri_index];

		if (ep == NULL) {
			/* connection has already been freed
			 * just move on to next one
			 */
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT,
				    "BM_%d : proc cqe of disconn ep: cid %d\n",
				    cid);
			goto proc_next_cqe;
		}

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* replenish cq */
		if (num_processed == 32) {
			hwi_ring_cq_db(phba, cq->id, 32, 0);
			num_processed = 0;
		}
		total++;

		switch (code) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case UNSOL_DATA_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Ignoring %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case CXN_KILLED_HDR_DIGEST_ERR:
		case
SOL_CMD_KILLED_DATA_DIGEST_ERR: 1976 beiscsi_log(phba, KERN_ERR, 1977 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1978 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 1979 cqe_desc[code], code, cid); 1980 break; 1981 case CMD_KILLED_INVALID_STATSN_RCVD: 1982 case CMD_KILLED_INVALID_R2T_RCVD: 1983 case CMD_CXN_KILLED_LUN_INVALID: 1984 case CMD_CXN_KILLED_ICD_INVALID: 1985 case CMD_CXN_KILLED_ITT_INVALID: 1986 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 1987 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 1988 beiscsi_log(phba, KERN_ERR, 1989 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1990 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 1991 cqe_desc[code], code, cid); 1992 break; 1993 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 1994 beiscsi_log(phba, KERN_ERR, 1995 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1996 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 1997 cqe_desc[code], code, cid); 1998 spin_lock_bh(&phba->async_pdu_lock); 1999 /* driver consumes the entry and drops the contents */ 2000 beiscsi_hdq_process_compl(beiscsi_conn, 2001 (struct i_t_dpdu_cqe *)sol); 2002 spin_unlock_bh(&phba->async_pdu_lock); 2003 break; 2004 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2005 case CXN_KILLED_BURST_LEN_MISMATCH: 2006 case CXN_KILLED_AHS_RCVD: 2007 case CXN_KILLED_UNKNOWN_HDR: 2008 case CXN_KILLED_STALE_ITT_TTT_RCVD: 2009 case CXN_KILLED_INVALID_ITT_TTT_RCVD: 2010 case CXN_KILLED_TIMED_OUT: 2011 case CXN_KILLED_FIN_RCVD: 2012 case CXN_KILLED_RST_SENT: 2013 case CXN_KILLED_RST_RCVD: 2014 case CXN_KILLED_BAD_UNSOL_PDU_RCVD: 2015 case CXN_KILLED_BAD_WRB_INDEX_ERROR: 2016 case CXN_KILLED_OVER_RUN_RESIDUAL: 2017 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2018 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2019 beiscsi_log(phba, KERN_ERR, 2020 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2021 "BM_%d : Event %s[%d] received on CID : %d\n", 2022 cqe_desc[code], code, cid); 2023 if (beiscsi_conn) 2024 iscsi_conn_failure(beiscsi_conn->conn, 2025 ISCSI_ERR_CONN_FAILED); 2026 break; 2027 default: 2028 beiscsi_log(phba, KERN_ERR, 2029 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2030 "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n", 2031 code, cid); 2032 break; 2033 } 2034 2035 proc_next_cqe: 2036 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2037 queue_tail_inc(cq); 2038 sol = queue_tail_node(cq); 2039 num_processed++; 2040 if (total == budget) 2041 break; 2042 } 2043 2044 hwi_ring_cq_db(phba, cq->id, num_processed, 1); 2045 return total; 2046 } 2047 2048 static int be_iopoll(struct irq_poll *iop, int budget) 2049 { 2050 unsigned int ret, io_events; 2051 struct beiscsi_hba *phba; 2052 struct be_eq_obj *pbe_eq; 2053 struct be_eq_entry *eqe = NULL; 2054 struct be_queue_info *eq; 2055 2056 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2057 phba = pbe_eq->phba; 2058 if (beiscsi_hba_in_error(phba)) { 2059 irq_poll_complete(iop); 2060 return 0; 2061 } 2062 2063 io_events = 0; 2064 eq = &pbe_eq->q; 2065 eqe = queue_tail_node(eq); 2066 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & 2067 EQE_VALID_MASK) { 2068 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 2069 queue_tail_inc(eq); 2070 eqe = queue_tail_node(eq); 2071 io_events++; 2072 } 2073 hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1); 2074 2075 ret = beiscsi_process_cq(pbe_eq, budget); 2076 pbe_eq->cq_count += ret; 2077 if (ret < budget) { 2078 irq_poll_complete(iop); 2079 beiscsi_log(phba, KERN_INFO, 2080 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2081 "BM_%d : rearm pbe_eq->q.id =%d ret %d\n", 2082 pbe_eq->q.id, ret); 2083 if (!beiscsi_hba_in_error(phba)) 2084 hwi_ring_eq_db(phba, 
pbe_eq->q.id, 0, 0, 1, 1); 2085 } 2086 return ret; 2087 } 2088 2089 static void 2090 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2091 unsigned int num_sg, struct beiscsi_io_task *io_task) 2092 { 2093 struct iscsi_sge *psgl; 2094 unsigned int sg_len, index; 2095 unsigned int sge_len = 0; 2096 unsigned long long addr; 2097 struct scatterlist *l_sg; 2098 unsigned int offset; 2099 2100 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, 2101 io_task->bhs_pa.u.a32.address_lo); 2102 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, 2103 io_task->bhs_pa.u.a32.address_hi); 2104 2105 l_sg = sg; 2106 for (index = 0; (index < num_sg) && (index < 2); index++, 2107 sg = sg_next(sg)) { 2108 if (index == 0) { 2109 sg_len = sg_dma_len(sg); 2110 addr = (u64) sg_dma_address(sg); 2111 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2112 sge0_addr_lo, pwrb, 2113 lower_32_bits(addr)); 2114 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2115 sge0_addr_hi, pwrb, 2116 upper_32_bits(addr)); 2117 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2118 sge0_len, pwrb, 2119 sg_len); 2120 sge_len = sg_len; 2121 } else { 2122 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, 2123 pwrb, sge_len); 2124 sg_len = sg_dma_len(sg); 2125 addr = (u64) sg_dma_address(sg); 2126 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2127 sge1_addr_lo, pwrb, 2128 lower_32_bits(addr)); 2129 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2130 sge1_addr_hi, pwrb, 2131 upper_32_bits(addr)); 2132 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2133 sge1_len, pwrb, 2134 sg_len); 2135 } 2136 } 2137 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2138 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2139 2140 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2141 2142 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2143 io_task->bhs_pa.u.a32.address_hi); 2144 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2145 io_task->bhs_pa.u.a32.address_lo); 2146 2147 if (num_sg == 1) { 2148 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2149 1); 2150 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2151 0); 2152 } else if (num_sg == 2) { 2153 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2154 0); 2155 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2156 1); 2157 } else { 2158 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2159 0); 2160 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2161 0); 2162 } 2163 2164 sg = l_sg; 2165 psgl++; 2166 psgl++; 2167 offset = 0; 2168 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2169 sg_len = sg_dma_len(sg); 2170 addr = (u64) sg_dma_address(sg); 2171 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2172 lower_32_bits(addr)); 2173 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2174 upper_32_bits(addr)); 2175 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2176 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2177 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2178 offset += sg_len; 2179 } 2180 psgl--; 2181 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2182 } 2183 2184 static void 2185 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2186 unsigned int num_sg, struct beiscsi_io_task *io_task) 2187 { 2188 struct iscsi_sge *psgl; 2189 unsigned int sg_len, index; 2190 unsigned int sge_len = 0; 2191 unsigned long long addr; 2192 struct scatterlist *l_sg; 2193 unsigned int offset; 2194 2195 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 
2196 io_task->bhs_pa.u.a32.address_lo); 2197 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2198 io_task->bhs_pa.u.a32.address_hi); 2199 2200 l_sg = sg; 2201 for (index = 0; (index < num_sg) && (index < 2); index++, 2202 sg = sg_next(sg)) { 2203 if (index == 0) { 2204 sg_len = sg_dma_len(sg); 2205 addr = (u64) sg_dma_address(sg); 2206 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2207 ((u32)(addr & 0xFFFFFFFF))); 2208 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2209 ((u32)(addr >> 32))); 2210 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2211 sg_len); 2212 sge_len = sg_len; 2213 } else { 2214 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2215 pwrb, sge_len); 2216 sg_len = sg_dma_len(sg); 2217 addr = (u64) sg_dma_address(sg); 2218 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2219 ((u32)(addr & 0xFFFFFFFF))); 2220 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2221 ((u32)(addr >> 32))); 2222 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2223 sg_len); 2224 } 2225 } 2226 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2227 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2228 2229 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2230 2231 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2232 io_task->bhs_pa.u.a32.address_hi); 2233 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2234 io_task->bhs_pa.u.a32.address_lo); 2235 2236 if (num_sg == 1) { 2237 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2238 1); 2239 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2240 0); 2241 } else if (num_sg == 2) { 2242 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2243 0); 2244 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2245 1); 2246 } else { 2247 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2248 0); 2249 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2250 0); 2251 } 2252 sg = l_sg; 2253 psgl++; 2254 psgl++; 2255 offset = 0; 2256 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2257 sg_len = sg_dma_len(sg); 2258 addr = (u64) sg_dma_address(sg); 2259 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2260 (addr & 0xFFFFFFFF)); 2261 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2262 (addr >> 32)); 2263 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2264 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2265 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2266 offset += sg_len; 2267 } 2268 psgl--; 2269 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2270 } 2271 2272 /** 2273 * hwi_write_buffer()- Populate the WRB with task info 2274 * @pwrb: ptr to the WRB entry 2275 * @task: iscsi task which is to be executed 2276 **/ 2277 static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2278 { 2279 struct iscsi_sge *psgl; 2280 struct beiscsi_io_task *io_task = task->dd_data; 2281 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2282 struct beiscsi_hba *phba = beiscsi_conn->phba; 2283 uint8_t dsp_value = 0; 2284 2285 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2286 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2287 io_task->bhs_pa.u.a32.address_lo); 2288 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2289 io_task->bhs_pa.u.a32.address_hi); 2290 2291 if (task->data) { 2292 2293 /* Check for the data_count */ 2294 dsp_value = (task->data_count) ? 
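			/* dsp is set only when there is a data segment
			 * (task->data_count != 0) for FW to send
			 */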
1 : 0; 2295 2296 if (is_chip_be2_be3r(phba)) 2297 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2298 pwrb, dsp_value); 2299 else 2300 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2301 pwrb, dsp_value); 2302 2303 /* Map addr only if there is data_count */ 2304 if (dsp_value) { 2305 io_task->mtask_addr = dma_map_single(&phba->pcidev->dev, 2306 task->data, 2307 task->data_count, 2308 DMA_TO_DEVICE); 2309 if (dma_mapping_error(&phba->pcidev->dev, 2310 io_task->mtask_addr)) 2311 return -ENOMEM; 2312 io_task->mtask_data_count = task->data_count; 2313 } else 2314 io_task->mtask_addr = 0; 2315 2316 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2317 lower_32_bits(io_task->mtask_addr)); 2318 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2319 upper_32_bits(io_task->mtask_addr)); 2320 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2321 task->data_count); 2322 2323 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2324 } else { 2325 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2326 io_task->mtask_addr = 0; 2327 } 2328 2329 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2330 2331 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); 2332 2333 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2334 io_task->bhs_pa.u.a32.address_hi); 2335 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2336 io_task->bhs_pa.u.a32.address_lo); 2337 if (task->data) { 2338 psgl++; 2339 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2340 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2341 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2342 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2343 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2344 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2345 2346 psgl++; 2347 if (task->data) { 2348 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2349 lower_32_bits(io_task->mtask_addr)); 2350 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2351 upper_32_bits(io_task->mtask_addr)); 2352 } 2353 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2354 } 2355 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2356 return 0; 2357 } 2358 2359 /** 2360 * beiscsi_find_mem_req()- Find mem needed 2361 * @phba: ptr to HBA struct 2362 **/ 2363 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2364 { 2365 uint8_t mem_descr_index, ulp_num; 2366 unsigned int num_async_pdu_buf_pages; 2367 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2368 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2369 2370 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2371 2372 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2373 BE_ISCSI_PDU_HEADER_SIZE; 2374 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2375 sizeof(struct hwi_context_memory); 2376 2377 2378 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2379 * (phba->params.wrbs_per_cxn) 2380 * phba->params.cxns_per_ctrl; 2381 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2382 (phba->params.wrbs_per_cxn); 2383 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2384 phba->params.cxns_per_ctrl); 2385 2386 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2387 phba->params.icds_per_ctrl; 2388 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2389 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2390 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2391 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2392 2393 num_async_pdu_buf_sgl_pages = 2394 
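				/* one struct phys_addr ring entry per async PDU handle */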
PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2395 phba, ulp_num) * 2396 sizeof(struct phys_addr)); 2397 2398 num_async_pdu_buf_pages = 2399 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2400 phba, ulp_num) * 2401 phba->params.defpdu_hdr_sz); 2402 2403 num_async_pdu_data_pages = 2404 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2405 phba, ulp_num) * 2406 phba->params.defpdu_data_sz); 2407 2408 num_async_pdu_data_sgl_pages = 2409 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2410 phba, ulp_num) * 2411 sizeof(struct phys_addr)); 2412 2413 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + 2414 (ulp_num * MEM_DESCR_OFFSET)); 2415 phba->mem_req[mem_descr_index] = 2416 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2417 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; 2418 2419 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2420 (ulp_num * MEM_DESCR_OFFSET)); 2421 phba->mem_req[mem_descr_index] = 2422 num_async_pdu_buf_pages * 2423 PAGE_SIZE; 2424 2425 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2426 (ulp_num * MEM_DESCR_OFFSET)); 2427 phba->mem_req[mem_descr_index] = 2428 num_async_pdu_data_pages * 2429 PAGE_SIZE; 2430 2431 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2432 (ulp_num * MEM_DESCR_OFFSET)); 2433 phba->mem_req[mem_descr_index] = 2434 num_async_pdu_buf_sgl_pages * 2435 PAGE_SIZE; 2436 2437 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + 2438 (ulp_num * MEM_DESCR_OFFSET)); 2439 phba->mem_req[mem_descr_index] = 2440 num_async_pdu_data_sgl_pages * 2441 PAGE_SIZE; 2442 2443 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2444 (ulp_num * MEM_DESCR_OFFSET)); 2445 phba->mem_req[mem_descr_index] = 2446 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2447 sizeof(struct hd_async_handle); 2448 2449 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2450 (ulp_num * MEM_DESCR_OFFSET)); 2451 phba->mem_req[mem_descr_index] = 2452 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2453 sizeof(struct hd_async_handle); 2454 2455 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2456 (ulp_num * MEM_DESCR_OFFSET)); 2457 phba->mem_req[mem_descr_index] = 2458 sizeof(struct hd_async_context) + 2459 (BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2460 sizeof(struct hd_async_entry)); 2461 } 2462 } 2463 } 2464 2465 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2466 { 2467 dma_addr_t bus_add; 2468 struct hwi_controller *phwi_ctrlr; 2469 struct be_mem_descriptor *mem_descr; 2470 struct mem_array *mem_arr, *mem_arr_orig; 2471 unsigned int i, j, alloc_size, curr_alloc_size; 2472 2473 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2474 if (!phba->phwi_ctrlr) 2475 return -ENOMEM; 2476 2477 /* Allocate memory for wrb_context */ 2478 phwi_ctrlr = phba->phwi_ctrlr; 2479 phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl, 2480 sizeof(struct hwi_wrb_context), 2481 GFP_KERNEL); 2482 if (!phwi_ctrlr->wrb_context) { 2483 kfree(phba->phwi_ctrlr); 2484 return -ENOMEM; 2485 } 2486 2487 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2488 GFP_KERNEL); 2489 if (!phba->init_mem) { 2490 kfree(phwi_ctrlr->wrb_context); 2491 kfree(phba->phwi_ctrlr); 2492 return -ENOMEM; 2493 } 2494 2495 mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT, 2496 sizeof(*mem_arr_orig), 2497 GFP_KERNEL); 2498 if (!mem_arr_orig) { 2499 kfree(phba->init_mem); 2500 kfree(phwi_ctrlr->wrb_context); 2501 kfree(phba->phwi_ctrlr); 2502 return -ENOMEM; 2503 } 2504 2505 mem_descr = phba->init_mem; 2506 for (i = 0; i < SE_MEM_MAX; i++) { 2507 if (!phba->mem_req[i]) { 2508 mem_descr->mem_array = NULL; 2509 mem_descr++; 2510 continue; 2511 } 2512 2513 j = 0; 2514 mem_arr 
= mem_arr_orig; 2515 alloc_size = phba->mem_req[i]; 2516 memset(mem_arr, 0, sizeof(struct mem_array) * 2517 BEISCSI_MAX_FRAGS_INIT); 2518 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2519 do { 2520 mem_arr->virtual_address = 2521 dma_alloc_coherent(&phba->pcidev->dev, 2522 curr_alloc_size, &bus_add, GFP_KERNEL); 2523 if (!mem_arr->virtual_address) { 2524 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2525 goto free_mem; 2526 if (curr_alloc_size - 2527 rounddown_pow_of_two(curr_alloc_size)) 2528 curr_alloc_size = rounddown_pow_of_two 2529 (curr_alloc_size); 2530 else 2531 curr_alloc_size = curr_alloc_size / 2; 2532 } else { 2533 mem_arr->bus_address.u. 2534 a64.address = (__u64) bus_add; 2535 mem_arr->size = curr_alloc_size; 2536 alloc_size -= curr_alloc_size; 2537 curr_alloc_size = min(be_max_phys_size * 2538 1024, alloc_size); 2539 j++; 2540 mem_arr++; 2541 } 2542 } while (alloc_size); 2543 mem_descr->num_elements = j; 2544 mem_descr->size_in_bytes = phba->mem_req[i]; 2545 mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr), 2546 GFP_KERNEL); 2547 if (!mem_descr->mem_array) 2548 goto free_mem; 2549 2550 memcpy(mem_descr->mem_array, mem_arr_orig, 2551 sizeof(struct mem_array) * j); 2552 mem_descr++; 2553 } 2554 kfree(mem_arr_orig); 2555 return 0; 2556 free_mem: 2557 mem_descr->num_elements = j; 2558 while ((i) || (j)) { 2559 for (j = mem_descr->num_elements; j > 0; j--) { 2560 dma_free_coherent(&phba->pcidev->dev, 2561 mem_descr->mem_array[j - 1].size, 2562 mem_descr->mem_array[j - 1]. 2563 virtual_address, 2564 (unsigned long)mem_descr-> 2565 mem_array[j - 1]. 2566 bus_address.u.a64.address); 2567 } 2568 if (i) { 2569 i--; 2570 kfree(mem_descr->mem_array); 2571 mem_descr--; 2572 } 2573 } 2574 kfree(mem_arr_orig); 2575 kfree(phba->init_mem); 2576 kfree(phba->phwi_ctrlr->wrb_context); 2577 kfree(phba->phwi_ctrlr); 2578 return -ENOMEM; 2579 } 2580 2581 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2582 { 2583 beiscsi_find_mem_req(phba); 2584 return beiscsi_alloc_mem(phba); 2585 } 2586 2587 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2588 { 2589 struct pdu_data_out *pdata_out; 2590 struct pdu_nop_out *pnop_out; 2591 struct be_mem_descriptor *mem_descr; 2592 2593 mem_descr = phba->init_mem; 2594 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2595 pdata_out = 2596 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2597 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2598 2599 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2600 IIOC_SCSI_DATA); 2601 2602 pnop_out = 2603 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 
2604 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2605 2606 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2607 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2608 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2609 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2610 } 2611 2612 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2613 { 2614 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2615 struct hwi_context_memory *phwi_ctxt; 2616 struct wrb_handle *pwrb_handle = NULL; 2617 struct hwi_controller *phwi_ctrlr; 2618 struct hwi_wrb_context *pwrb_context; 2619 struct iscsi_wrb *pwrb = NULL; 2620 unsigned int num_cxn_wrbh = 0; 2621 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2622 2623 mem_descr_wrbh = phba->init_mem; 2624 mem_descr_wrbh += HWI_MEM_WRBH; 2625 2626 mem_descr_wrb = phba->init_mem; 2627 mem_descr_wrb += HWI_MEM_WRB; 2628 phwi_ctrlr = phba->phwi_ctrlr; 2629 2630 /* Allocate memory for WRBQ */ 2631 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2632 phwi_ctxt->be_wrbq = kcalloc(phba->params.cxns_per_ctrl, 2633 sizeof(struct be_queue_info), 2634 GFP_KERNEL); 2635 if (!phwi_ctxt->be_wrbq) { 2636 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2637 "BM_%d : WRBQ Mem Alloc Failed\n"); 2638 return -ENOMEM; 2639 } 2640 2641 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2642 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2643 pwrb_context->pwrb_handle_base = 2644 kcalloc(phba->params.wrbs_per_cxn, 2645 sizeof(struct wrb_handle *), 2646 GFP_KERNEL); 2647 if (!pwrb_context->pwrb_handle_base) { 2648 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2649 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2650 goto init_wrb_hndl_failed; 2651 } 2652 pwrb_context->pwrb_handle_basestd = 2653 kcalloc(phba->params.wrbs_per_cxn, 2654 sizeof(struct wrb_handle *), 2655 GFP_KERNEL); 2656 if (!pwrb_context->pwrb_handle_basestd) { 2657 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2658 "BM_%d : Mem Alloc Failed. 
Failing to load\n"); 2659 goto init_wrb_hndl_failed; 2660 } 2661 if (!num_cxn_wrbh) { 2662 pwrb_handle = 2663 mem_descr_wrbh->mem_array[idx].virtual_address; 2664 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2665 ((sizeof(struct wrb_handle)) * 2666 phba->params.wrbs_per_cxn)); 2667 idx++; 2668 } 2669 pwrb_context->alloc_index = 0; 2670 pwrb_context->wrb_handles_available = 0; 2671 pwrb_context->free_index = 0; 2672 2673 if (num_cxn_wrbh) { 2674 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2675 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2676 pwrb_context->pwrb_handle_basestd[j] = 2677 pwrb_handle; 2678 pwrb_context->wrb_handles_available++; 2679 pwrb_handle->wrb_index = j; 2680 pwrb_handle++; 2681 } 2682 num_cxn_wrbh--; 2683 } 2684 spin_lock_init(&pwrb_context->wrb_lock); 2685 } 2686 idx = 0; 2687 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2688 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2689 if (!num_cxn_wrb) { 2690 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2691 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2692 ((sizeof(struct iscsi_wrb) * 2693 phba->params.wrbs_per_cxn)); 2694 idx++; 2695 } 2696 2697 if (num_cxn_wrb) { 2698 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2699 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2700 pwrb_handle->pwrb = pwrb; 2701 pwrb++; 2702 } 2703 num_cxn_wrb--; 2704 } 2705 } 2706 return 0; 2707 init_wrb_hndl_failed: 2708 for (j = index; j > 0; j--) { 2709 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2710 kfree(pwrb_context->pwrb_handle_base); 2711 kfree(pwrb_context->pwrb_handle_basestd); 2712 } 2713 kfree(phwi_ctxt->be_wrbq); 2714 return -ENOMEM; 2715 } 2716 2717 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2718 { 2719 uint8_t ulp_num; 2720 struct hwi_controller *phwi_ctrlr; 2721 struct hba_parameters *p = &phba->params; 2722 struct hd_async_context *pasync_ctx; 2723 struct hd_async_handle *pasync_header_h, *pasync_data_h; 2724 unsigned int index, idx, num_per_mem, num_async_data; 2725 struct be_mem_descriptor *mem_descr; 2726 2727 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2728 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2729 /* get async_ctx for each ULP */ 2730 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2731 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2732 (ulp_num * MEM_DESCR_OFFSET)); 2733 2734 phwi_ctrlr = phba->phwi_ctrlr; 2735 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2736 (struct hd_async_context *) 2737 mem_descr->mem_array[0].virtual_address; 2738 2739 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2740 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2741 2742 pasync_ctx->async_entry = 2743 (struct hd_async_entry *) 2744 ((long unsigned int)pasync_ctx + 2745 sizeof(struct hd_async_context)); 2746 2747 pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba, 2748 ulp_num); 2749 /* setup header buffers */ 2750 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2751 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2752 (ulp_num * MEM_DESCR_OFFSET); 2753 if (mem_descr->mem_array[0].virtual_address) { 2754 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2755 "BM_%d : hwi_init_async_pdu_ctx" 2756 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", 2757 ulp_num, 2758 mem_descr->mem_array[0]. 
2759 virtual_address); 2760 } else 2761 beiscsi_log(phba, KERN_WARNING, 2762 BEISCSI_LOG_INIT, 2763 "BM_%d : No Virtual address for ULP : %d\n", 2764 ulp_num); 2765 2766 pasync_ctx->async_header.pi = 0; 2767 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz; 2768 pasync_ctx->async_header.va_base = 2769 mem_descr->mem_array[0].virtual_address; 2770 2771 pasync_ctx->async_header.pa_base.u.a64.address = 2772 mem_descr->mem_array[0]. 2773 bus_address.u.a64.address; 2774 2775 /* setup header buffer sgls */ 2776 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2777 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2778 (ulp_num * MEM_DESCR_OFFSET); 2779 if (mem_descr->mem_array[0].virtual_address) { 2780 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2781 "BM_%d : hwi_init_async_pdu_ctx" 2782 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", 2783 ulp_num, 2784 mem_descr->mem_array[0]. 2785 virtual_address); 2786 } else 2787 beiscsi_log(phba, KERN_WARNING, 2788 BEISCSI_LOG_INIT, 2789 "BM_%d : No Virtual address for ULP : %d\n", 2790 ulp_num); 2791 2792 pasync_ctx->async_header.ring_base = 2793 mem_descr->mem_array[0].virtual_address; 2794 2795 /* setup header buffer handles */ 2796 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2797 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2798 (ulp_num * MEM_DESCR_OFFSET); 2799 if (mem_descr->mem_array[0].virtual_address) { 2800 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2801 "BM_%d : hwi_init_async_pdu_ctx" 2802 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", 2803 ulp_num, 2804 mem_descr->mem_array[0]. 2805 virtual_address); 2806 } else 2807 beiscsi_log(phba, KERN_WARNING, 2808 BEISCSI_LOG_INIT, 2809 "BM_%d : No Virtual address for ULP : %d\n", 2810 ulp_num); 2811 2812 pasync_ctx->async_header.handle_base = 2813 mem_descr->mem_array[0].virtual_address; 2814 2815 /* setup data buffer sgls */ 2816 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2817 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 2818 (ulp_num * MEM_DESCR_OFFSET); 2819 if (mem_descr->mem_array[0].virtual_address) { 2820 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2821 "BM_%d : hwi_init_async_pdu_ctx" 2822 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", 2823 ulp_num, 2824 mem_descr->mem_array[0]. 
2825 virtual_address); 2826 } else 2827 beiscsi_log(phba, KERN_WARNING, 2828 BEISCSI_LOG_INIT, 2829 "BM_%d : No Virtual address for ULP : %d\n", 2830 ulp_num); 2831 2832 pasync_ctx->async_data.ring_base = 2833 mem_descr->mem_array[0].virtual_address; 2834 2835 /* setup data buffer handles */ 2836 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2837 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2838 (ulp_num * MEM_DESCR_OFFSET); 2839 if (!mem_descr->mem_array[0].virtual_address) 2840 beiscsi_log(phba, KERN_WARNING, 2841 BEISCSI_LOG_INIT, 2842 "BM_%d : No Virtual address for ULP : %d\n", 2843 ulp_num); 2844 2845 pasync_ctx->async_data.handle_base = 2846 mem_descr->mem_array[0].virtual_address; 2847 2848 pasync_header_h = 2849 (struct hd_async_handle *) 2850 pasync_ctx->async_header.handle_base; 2851 pasync_data_h = 2852 (struct hd_async_handle *) 2853 pasync_ctx->async_data.handle_base; 2854 2855 /* setup data buffers */ 2856 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2857 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2858 (ulp_num * MEM_DESCR_OFFSET); 2859 if (mem_descr->mem_array[0].virtual_address) { 2860 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2861 "BM_%d : hwi_init_async_pdu_ctx" 2862 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", 2863 ulp_num, 2864 mem_descr->mem_array[0]. 2865 virtual_address); 2866 } else 2867 beiscsi_log(phba, KERN_WARNING, 2868 BEISCSI_LOG_INIT, 2869 "BM_%d : No Virtual address for ULP : %d\n", 2870 ulp_num); 2871 2872 idx = 0; 2873 pasync_ctx->async_data.pi = 0; 2874 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz; 2875 pasync_ctx->async_data.va_base = 2876 mem_descr->mem_array[idx].virtual_address; 2877 pasync_ctx->async_data.pa_base.u.a64.address = 2878 mem_descr->mem_array[idx]. 2879 bus_address.u.a64.address; 2880 2881 num_async_data = ((mem_descr->mem_array[idx].size) / 2882 phba->params.defpdu_data_sz); 2883 num_per_mem = 0; 2884 2885 for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE 2886 (phba, ulp_num); index++) { 2887 pasync_header_h->cri = -1; 2888 pasync_header_h->is_header = 1; 2889 pasync_header_h->index = index; 2890 INIT_LIST_HEAD(&pasync_header_h->link); 2891 pasync_header_h->pbuffer = 2892 (void *)((unsigned long) 2893 (pasync_ctx-> 2894 async_header.va_base) + 2895 (p->defpdu_hdr_sz * index)); 2896 2897 pasync_header_h->pa.u.a64.address = 2898 pasync_ctx->async_header.pa_base.u.a64. 2899 address + (p->defpdu_hdr_sz * index); 2900 2901 pasync_ctx->async_entry[index].header = 2902 pasync_header_h; 2903 pasync_header_h++; 2904 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2905 wq.list); 2906 2907 pasync_data_h->cri = -1; 2908 pasync_data_h->is_header = 0; 2909 pasync_data_h->index = index; 2910 INIT_LIST_HEAD(&pasync_data_h->link); 2911 2912 if (!num_async_data) { 2913 num_per_mem = 0; 2914 idx++; 2915 pasync_ctx->async_data.va_base = 2916 mem_descr->mem_array[idx]. 2917 virtual_address; 2918 pasync_ctx->async_data.pa_base.u. 2919 a64.address = 2920 mem_descr->mem_array[idx]. 2921 bus_address.u.a64.address; 2922 num_async_data = 2923 ((mem_descr->mem_array[idx]. 2924 size) / 2925 phba->params.defpdu_data_sz); 2926 } 2927 pasync_data_h->pbuffer = 2928 (void *)((unsigned long) 2929 (pasync_ctx->async_data.va_base) + 2930 (p->defpdu_data_sz * num_per_mem)); 2931 2932 pasync_data_h->pa.u.a64.address = 2933 pasync_ctx->async_data.pa_base.u.a64. 
2934 address + (p->defpdu_data_sz * 2935 num_per_mem); 2936 num_per_mem++; 2937 num_async_data--; 2938 2939 pasync_ctx->async_entry[index].data = 2940 pasync_data_h; 2941 pasync_data_h++; 2942 } 2943 } 2944 } 2945 2946 return 0; 2947 } 2948 2949 static int 2950 be_sgl_create_contiguous(void *virtual_address, 2951 u64 physical_address, u32 length, 2952 struct be_dma_mem *sgl) 2953 { 2954 WARN_ON(!virtual_address); 2955 WARN_ON(!physical_address); 2956 WARN_ON(!length); 2957 WARN_ON(!sgl); 2958 2959 sgl->va = virtual_address; 2960 sgl->dma = (unsigned long)physical_address; 2961 sgl->size = length; 2962 2963 return 0; 2964 } 2965 2966 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) 2967 { 2968 memset(sgl, 0, sizeof(*sgl)); 2969 } 2970 2971 static void 2972 hwi_build_be_sgl_arr(struct beiscsi_hba *phba, 2973 struct mem_array *pmem, struct be_dma_mem *sgl) 2974 { 2975 if (sgl->va) 2976 be_sgl_destroy_contiguous(sgl); 2977 2978 be_sgl_create_contiguous(pmem->virtual_address, 2979 pmem->bus_address.u.a64.address, 2980 pmem->size, sgl); 2981 } 2982 2983 static void 2984 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, 2985 struct mem_array *pmem, struct be_dma_mem *sgl) 2986 { 2987 if (sgl->va) 2988 be_sgl_destroy_contiguous(sgl); 2989 2990 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, 2991 pmem->bus_address.u.a64.address, 2992 pmem->size, sgl); 2993 } 2994 2995 static int be_fill_queue(struct be_queue_info *q, 2996 u16 len, u16 entry_size, void *vaddress) 2997 { 2998 struct be_dma_mem *mem = &q->dma_mem; 2999 3000 memset(q, 0, sizeof(*q)); 3001 q->len = len; 3002 q->entry_size = entry_size; 3003 mem->size = len * entry_size; 3004 mem->va = vaddress; 3005 if (!mem->va) 3006 return -ENOMEM; 3007 memset(mem->va, 0, mem->size); 3008 return 0; 3009 } 3010 3011 static int beiscsi_create_eqs(struct beiscsi_hba *phba, 3012 struct hwi_context_memory *phwi_context) 3013 { 3014 int ret = -ENOMEM, eq_for_mcc; 3015 unsigned int i, num_eq_pages; 3016 struct be_queue_info *eq; 3017 struct be_dma_mem *mem; 3018 void *eq_vaddress; 3019 dma_addr_t paddr; 3020 3021 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * 3022 sizeof(struct be_eq_entry)); 3023 3024 if (phba->pcidev->msix_enabled) 3025 eq_for_mcc = 1; 3026 else 3027 eq_for_mcc = 0; 3028 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3029 eq = &phwi_context->be_eq[i].q; 3030 mem = &eq->dma_mem; 3031 phwi_context->be_eq[i].phba = phba; 3032 eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev, 3033 num_eq_pages * PAGE_SIZE, 3034 &paddr, GFP_KERNEL); 3035 if (!eq_vaddress) { 3036 ret = -ENOMEM; 3037 goto create_eq_error; 3038 } 3039 3040 mem->va = eq_vaddress; 3041 ret = be_fill_queue(eq, phba->params.num_eq_entries, 3042 sizeof(struct be_eq_entry), eq_vaddress); 3043 if (ret) { 3044 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3045 "BM_%d : be_fill_queue Failed for EQ\n"); 3046 goto create_eq_error; 3047 } 3048 3049 mem->dma = paddr; 3050 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 3051 BEISCSI_EQ_DELAY_DEF); 3052 if (ret) { 3053 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3054 "BM_%d : beiscsi_cmd_eq_create Failed for EQ\n"); 3055 goto create_eq_error; 3056 } 3057 3058 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3059 "BM_%d : eqid = %d\n", 3060 phwi_context->be_eq[i].q.id); 3061 } 3062 return 0; 3063 3064 create_eq_error: 3065 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3066 eq = &phwi_context->be_eq[i].q; 3067 mem = &eq->dma_mem; 3068 if (mem->va) 3069 
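			/* unwind: free only the EQs whose DMA memory was allocated */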
			dma_free_coherent(&phba->pcidev->dev,
					  num_eq_pages * PAGE_SIZE,
					  mem->va, mem->dma);
	}
	return ret;
}

static int beiscsi_create_cqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	unsigned int i, num_cq_pages;
	struct be_queue_info *cq, *eq;
	struct be_dma_mem *mem;
	struct be_eq_obj *pbe_eq;
	void *cq_vaddress;
	int ret = -ENOMEM;
	dma_addr_t paddr;

	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
				      sizeof(struct sol_cqe));

	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		eq = &phwi_context->be_eq[i].q;
		pbe_eq = &phwi_context->be_eq[i];
		pbe_eq->cq = cq;
		pbe_eq->phba = phba;
		mem = &cq->dma_mem;
		cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
						 num_cq_pages * PAGE_SIZE,
						 &paddr, GFP_KERNEL);
		if (!cq_vaddress) {
			ret = -ENOMEM;
			goto create_cq_error;
		}

		ret = be_fill_queue(cq, phba->params.num_cq_entries,
				    sizeof(struct sol_cqe), cq_vaddress);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : be_fill_queue Failed for ISCSI CQ\n");
			goto create_cq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
					    false, 0);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_cmd_cq_create Failed for ISCSI CQ\n");
			goto create_cq_error;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : iscsi cq_id is %d for eq_id %d\n"
			    "iSCSI CQ CREATED\n", cq->id, eq->id);
	}
	return 0;

create_cq_error:
	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		mem = &cq->dma_mem;
		if (mem->va)
			dma_free_coherent(&phba->pcidev->dev,
					  num_cq_pages * PAGE_SIZE,
					  mem->va, mem->dma);
	}
	return ret;
}

static int
beiscsi_create_def_hdr(struct beiscsi_hba *phba,
		       struct hwi_context_memory *phwi_context,
		       struct hwi_controller *phwi_ctrlr,
		       unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dq = &phwi_context->be_def_hdrq[ulp_num];
	cq = &phwi_context->be_cq[0];
	mem = &dq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
		     (ulp_num * MEM_DESCR_OFFSET);
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
			    ulp_num);

		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
3171 bus_address.u.a64.address; 3172 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3173 def_pdu_ring_sz, 3174 phba->params.defpdu_hdr_sz, 3175 BEISCSI_DEFQ_HDR, ulp_num); 3176 if (ret) { 3177 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3178 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", 3179 ulp_num); 3180 3181 return ret; 3182 } 3183 3184 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3185 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3186 ulp_num, 3187 phwi_context->be_def_hdrq[ulp_num].id); 3188 return 0; 3189 } 3190 3191 static int 3192 beiscsi_create_def_data(struct beiscsi_hba *phba, 3193 struct hwi_context_memory *phwi_context, 3194 struct hwi_controller *phwi_ctrlr, 3195 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3196 { 3197 unsigned int idx; 3198 int ret; 3199 struct be_queue_info *dataq, *cq; 3200 struct be_dma_mem *mem; 3201 struct be_mem_descriptor *mem_descr; 3202 void *dq_vaddress; 3203 3204 idx = 0; 3205 dataq = &phwi_context->be_def_dataq[ulp_num]; 3206 cq = &phwi_context->be_cq[0]; 3207 mem = &dataq->dma_mem; 3208 mem_descr = phba->init_mem; 3209 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3210 (ulp_num * MEM_DESCR_OFFSET); 3211 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3212 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3213 sizeof(struct phys_addr), 3214 sizeof(struct phys_addr), dq_vaddress); 3215 if (ret) { 3216 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3217 "BM_%d : be_fill_queue Failed for DEF PDU " 3218 "DATA on ULP : %d\n", 3219 ulp_num); 3220 3221 return ret; 3222 } 3223 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3224 bus_address.u.a64.address; 3225 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3226 def_pdu_ring_sz, 3227 phba->params.defpdu_data_sz, 3228 BEISCSI_DEFQ_DATA, ulp_num); 3229 if (ret) { 3230 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3231 "BM_%d be_cmd_create_default_pdu_queue" 3232 " Failed for DEF PDU DATA on ULP : %d\n", 3233 ulp_num); 3234 return ret; 3235 } 3236 3237 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3238 "BM_%d : iscsi def data id on ULP : %d is %d\n", 3239 ulp_num, 3240 phwi_context->be_def_dataq[ulp_num].id); 3241 3242 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3243 "BM_%d : DEFAULT PDU DATA RING CREATED on ULP : %d\n", 3244 ulp_num); 3245 return 0; 3246 } 3247 3248 3249 static int 3250 beiscsi_post_template_hdr(struct beiscsi_hba *phba) 3251 { 3252 struct be_mem_descriptor *mem_descr; 3253 struct mem_array *pm_arr; 3254 struct be_dma_mem sgl; 3255 int status, ulp_num; 3256 3257 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3258 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3259 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3260 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + 3261 (ulp_num * MEM_DESCR_OFFSET); 3262 pm_arr = mem_descr->mem_array; 3263 3264 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3265 status = be_cmd_iscsi_post_template_hdr( 3266 &phba->ctrl, &sgl); 3267 3268 if (status != 0) { 3269 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3270 "BM_%d : Post Template HDR Failed for " 3271 "ULP_%d\n", ulp_num); 3272 return status; 3273 } 3274 3275 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3276 "BM_%d : Template HDR Pages Posted for " 3277 "ULP_%d\n", ulp_num); 3278 } 3279 } 3280 return 0; 3281 } 3282 3283 static int 3284 beiscsi_post_pages(struct beiscsi_hba *phba) 3285 { 3286 struct be_mem_descriptor *mem_descr; 3287 struct mem_array *pm_arr; 3288 unsigned int page_offset, i; 3289 
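	/* sgl describes each contiguous chunk of SGE memory posted to FW */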
struct be_dma_mem sgl; 3290 int status, ulp_num = 0; 3291 3292 mem_descr = phba->init_mem; 3293 mem_descr += HWI_MEM_SGE; 3294 pm_arr = mem_descr->mem_array; 3295 3296 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3297 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3298 break; 3299 3300 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3301 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; 3302 for (i = 0; i < mem_descr->num_elements; i++) { 3303 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3304 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3305 page_offset, 3306 (pm_arr->size / PAGE_SIZE)); 3307 page_offset += pm_arr->size / PAGE_SIZE; 3308 if (status != 0) { 3309 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3310 "BM_%d : post sgl failed.\n"); 3311 return status; 3312 } 3313 pm_arr++; 3314 } 3315 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3316 "BM_%d : POSTED PAGES\n"); 3317 return 0; 3318 } 3319 3320 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3321 { 3322 struct be_dma_mem *mem = &q->dma_mem; 3323 if (mem->va) { 3324 dma_free_coherent(&phba->pcidev->dev, mem->size, 3325 mem->va, mem->dma); 3326 mem->va = NULL; 3327 } 3328 } 3329 3330 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3331 u16 len, u16 entry_size) 3332 { 3333 struct be_dma_mem *mem = &q->dma_mem; 3334 3335 memset(q, 0, sizeof(*q)); 3336 q->len = len; 3337 q->entry_size = entry_size; 3338 mem->size = len * entry_size; 3339 mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, 3340 GFP_KERNEL); 3341 if (!mem->va) 3342 return -ENOMEM; 3343 return 0; 3344 } 3345 3346 static int 3347 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3348 struct hwi_context_memory *phwi_context, 3349 struct hwi_controller *phwi_ctrlr) 3350 { 3351 unsigned int num_wrb_rings; 3352 u64 pa_addr_lo; 3353 unsigned int idx, num, i, ulp_num; 3354 struct mem_array *pwrb_arr; 3355 void *wrb_vaddr; 3356 struct be_dma_mem sgl; 3357 struct be_mem_descriptor *mem_descr; 3358 struct hwi_wrb_context *pwrb_context; 3359 int status; 3360 uint8_t ulp_count = 0, ulp_base_num = 0; 3361 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; 3362 3363 idx = 0; 3364 mem_descr = phba->init_mem; 3365 mem_descr += HWI_MEM_WRB; 3366 pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl, 3367 sizeof(*pwrb_arr), 3368 GFP_KERNEL); 3369 if (!pwrb_arr) { 3370 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3371 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3372 return -ENOMEM; 3373 } 3374 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3375 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3376 num_wrb_rings = mem_descr->mem_array[idx].size / 3377 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3378 3379 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3380 if (num_wrb_rings) { 3381 pwrb_arr[num].virtual_address = wrb_vaddr; 3382 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3383 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3384 sizeof(struct iscsi_wrb); 3385 wrb_vaddr += pwrb_arr[num].size; 3386 pa_addr_lo += pwrb_arr[num].size; 3387 num_wrb_rings--; 3388 } else { 3389 idx++; 3390 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3391 pa_addr_lo = mem_descr->mem_array[idx]. 
					bus_address.u.a64.address;
			num_wrb_rings = mem_descr->mem_array[idx].size /
					(phba->params.wrbs_per_cxn *
					 sizeof(struct iscsi_wrb));
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
					     sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo += pwrb_arr[num].size;
			num_wrb_rings--;
		}
	}

	/* Get the ULP Count */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			ulp_count++;
			ulp_base_num = ulp_num;
			cid_count_ulp[ulp_num] =
				BEISCSI_GET_CID_COUNT(phba, ulp_num);
		}

	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		if (ulp_count > 1) {
			ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;

			if (!cid_count_ulp[ulp_base_num])
				ulp_base_num = (ulp_base_num + 1) %
					       BEISCSI_ULP_COUNT;

			cid_count_ulp[ulp_base_num]--;
		}

		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
					    &phwi_context->be_wrbq[i],
					    &phwi_ctrlr->wrb_context[i],
					    ulp_base_num);
		if (status != 0) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : wrbq create failed.\n");
			kfree(pwrb_arr);
			return status;
		}
		pwrb_context = &phwi_ctrlr->wrb_context[i];
		BE_SET_CID_TO_CRI(i, pwrb_context->cid);
	}
	kfree(pwrb_arr);
	return 0;
}

static void free_wrb_handles(struct beiscsi_hba *phba)
{
	unsigned int index;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;

	phwi_ctrlr = phba->phwi_ctrlr;
	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		kfree(pwrb_context->pwrb_handle_base);
		kfree(pwrb_context->pwrb_handle_basestd);
	}
}

static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *ptag_mem;
	struct be_queue_info *q;
	int i, tag;

	q = &phba->ctrl.mcc_obj.q;
	for (i = 0; i < MAX_MCC_CMD; i++) {
		tag = i + 1;
		if (!test_bit(MCC_TAG_STATE_RUNNING,
			      &ctrl->ptag_state[tag].tag_state))
			continue;

		if (test_bit(MCC_TAG_STATE_TIMEOUT,
			     &ctrl->ptag_state[tag].tag_state)) {
			ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
			if (ptag_mem->size) {
				dma_free_coherent(&ctrl->pdev->dev,
						  ptag_mem->size,
						  ptag_mem->va,
						  ptag_mem->dma);
				ptag_mem->size = 0;
			}
			continue;
		}
		/**
		 * If MCC is still active and waiting then wake up the process.
		 * We are here only because port is going offline. The process
		 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is
		 * returned for the operation and allocated memory cleaned up.
		 */
		if (waitqueue_active(&ctrl->mcc_wait[tag])) {
			ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
			ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
			wake_up_interruptible(&ctrl->mcc_wait[tag]);
			/*
			 * Control tag info gets reinitialized in enable
			 * so wait for the process to clear running state.
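			 * The wait below re-checks the tag state once per
			 * second (HZ) until the woken process has dropped
			 * MCC_TAG_STATE_RUNNING.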
			 */
			while (test_bit(MCC_TAG_STATE_RUNNING,
					&ctrl->ptag_state[tag].tag_state))
				schedule_timeout_uninterruptible(HZ);
		}
		/**
		 * For MCC with tag_states MCC_TAG_STATE_ASYNC and
		 * MCC_TAG_STATE_IGNORE nothing needs to be done.
		 */
	}
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
		be_queue_free(phba, q);
	}

	q = &phba->ctrl.mcc_obj.cq;
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
		be_queue_free(phba, q);
	}
}

static int be_mcc_queues_create(struct beiscsi_hba *phba,
				struct hwi_context_memory *phwi_context)
{
	struct be_queue_info *q, *cq;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	/* Alloc MCC compl queue */
	cq = &phba->ctrl.mcc_obj.cq;
	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;
	/* Ask BE to create MCC compl queue; */
	if (phba->pcidev->msix_enabled) {
		if (beiscsi_cmd_cq_create(ctrl, cq,
					  &phwi_context->be_eq[phba->num_cpus].q,
					  false, true, 0))
			goto mcc_cq_free;
	} else {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
					  false, true, 0))
			goto mcc_cq_free;
	}

	/* Alloc MCC queue */
	q = &phba->ctrl.mcc_obj.q;
	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (beiscsi_cmd_mccq_create(phba, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(phba, q);
mcc_cq_destroy:
	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(phba, cq);
err:
	return -ENOMEM;
}

static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
{
	int nvec = 1;

	switch (phba->generation) {
	case BE_GEN2:
	case BE_GEN3:
		nvec = BEISCSI_MAX_NUM_CPUS + 1;
		break;
	case BE_GEN4:
		nvec = phba->fw_config.eqid_count;
		break;
	default:
		nvec = 2;
		break;
	}

	/* if eqid_count == 1 fall back to INTX */
	if (enable_msix && nvec > 1) {
		struct irq_affinity desc = { .post_vectors = 1 };

		if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) >= 0) {
			phba->num_cpus = nvec - 1;
			return;
		}
	}

	phba->num_cpus = 1;
}

static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	if (beiscsi_hba_in_error(phba))
		return;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->pcidev->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
		       & EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
	}
}

static void hwi_cleanup_port(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct
be_ctrl_info *ctrl = &phba->ctrl; 3635 struct hwi_controller *phwi_ctrlr; 3636 struct hwi_context_memory *phwi_context; 3637 int i, eq_for_mcc, ulp_num; 3638 3639 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3640 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3641 beiscsi_cmd_iscsi_cleanup(phba, ulp_num); 3642 3643 /** 3644 * Purge all EQ entries that may have been left out. This is to 3645 * workaround a problem we've seen occasionally where driver gets an 3646 * interrupt with EQ entry bit set after stopping the controller. 3647 */ 3648 hwi_purge_eq(phba); 3649 3650 phwi_ctrlr = phba->phwi_ctrlr; 3651 phwi_context = phwi_ctrlr->phwi_ctxt; 3652 3653 be_cmd_iscsi_remove_template_hdr(ctrl); 3654 3655 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3656 q = &phwi_context->be_wrbq[i]; 3657 if (q->created) 3658 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3659 } 3660 kfree(phwi_context->be_wrbq); 3661 free_wrb_handles(phba); 3662 3663 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3664 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3665 3666 q = &phwi_context->be_def_hdrq[ulp_num]; 3667 if (q->created) 3668 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3669 3670 q = &phwi_context->be_def_dataq[ulp_num]; 3671 if (q->created) 3672 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3673 } 3674 } 3675 3676 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3677 3678 for (i = 0; i < (phba->num_cpus); i++) { 3679 q = &phwi_context->be_cq[i]; 3680 if (q->created) { 3681 be_queue_free(phba, q); 3682 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3683 } 3684 } 3685 3686 be_mcc_queues_destroy(phba); 3687 if (phba->pcidev->msix_enabled) 3688 eq_for_mcc = 1; 3689 else 3690 eq_for_mcc = 0; 3691 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3692 q = &phwi_context->be_eq[i].q; 3693 if (q->created) { 3694 be_queue_free(phba, q); 3695 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3696 } 3697 } 3698 /* this ensures complete FW cleanup */ 3699 beiscsi_cmd_function_reset(phba); 3700 /* last communication, indicate driver is unloading */ 3701 beiscsi_cmd_special_wrb(&phba->ctrl, 0); 3702 } 3703 3704 static int hwi_init_port(struct beiscsi_hba *phba) 3705 { 3706 struct hwi_controller *phwi_ctrlr; 3707 struct hwi_context_memory *phwi_context; 3708 unsigned int def_pdu_ring_sz; 3709 struct be_ctrl_info *ctrl = &phba->ctrl; 3710 int status, ulp_num; 3711 u16 nbufs; 3712 3713 phwi_ctrlr = phba->phwi_ctrlr; 3714 phwi_context = phwi_ctrlr->phwi_ctxt; 3715 /* set port optic state to unknown */ 3716 phba->optic_state = 0xff; 3717 3718 status = beiscsi_create_eqs(phba, phwi_context); 3719 if (status != 0) { 3720 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3721 "BM_%d : EQ not created\n"); 3722 goto error; 3723 } 3724 3725 status = be_mcc_queues_create(phba, phwi_context); 3726 if (status != 0) 3727 goto error; 3728 3729 status = beiscsi_check_supported_fw(ctrl, phba); 3730 if (status != 0) { 3731 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3732 "BM_%d : Unsupported fw version\n"); 3733 goto error; 3734 } 3735 3736 status = beiscsi_create_cqs(phba, phwi_context); 3737 if (status != 0) { 3738 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3739 "BM_%d : CQ not created\n"); 3740 goto error; 3741 } 3742 3743 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3744 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3745 nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries; 3746 def_pdu_ring_sz = nbufs * sizeof(struct phys_addr); 3747 3748 status = beiscsi_create_def_hdr(phba, 
phwi_context,
3749 phwi_ctrlr,
3750 def_pdu_ring_sz,
3751 ulp_num);
3752 if (status != 0) {
3753 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3754 "BM_%d : Default Header not created for ULP : %d\n",
3755 ulp_num);
3756 goto error;
3757 }
3758
3759 status = beiscsi_create_def_data(phba, phwi_context,
3760 phwi_ctrlr,
3761 def_pdu_ring_sz,
3762 ulp_num);
3763 if (status != 0) {
3764 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3765 "BM_%d : Default Data not created for ULP : %d\n",
3766 ulp_num);
3767 goto error;
3768 }
3769 /**
3770 * Now that the default PDU rings have been created,
3771 * let EP know about them.
3772 */
3773 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
3774 ulp_num, nbufs);
3775 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
3776 ulp_num, nbufs);
3777 }
3778 }
3779
3780 status = beiscsi_post_pages(phba);
3781 if (status != 0) {
3782 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3783 "BM_%d : Post SGL Pages Failed\n");
3784 goto error;
3785 }
3786
3787 status = beiscsi_post_template_hdr(phba);
3788 if (status != 0) {
3789 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3790 "BM_%d : Template HDR Posting for CXN Failed\n");
3791 }
3792
3793 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3794 if (status != 0) {
3795 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3796 "BM_%d : WRB Rings not created\n");
3797 goto error;
3798 }
3799
3800 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3801 uint16_t async_arr_idx = 0;
3802
3803 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3804 uint16_t cri = 0;
3805 struct hd_async_context *pasync_ctx;
3806
3807 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3808 phwi_ctrlr, ulp_num);
3809 for (cri = 0; cri <
3810 phba->params.cxns_per_ctrl; cri++) {
3811 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
3812 (phwi_ctrlr, cri))
3813 pasync_ctx->cid_to_async_cri_map[
3814 phwi_ctrlr->wrb_context[cri].cid] =
3815 async_arr_idx++;
3816 }
3817 }
3818 }
3819
3820 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3821 "BM_%d : hwi_init_port success\n");
3822 return 0;
3823
3824 error:
3825 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3826 "BM_%d : hwi_init_port failed\n");
3827 hwi_cleanup_port(phba);
3828 return status;
3829 }
3830
3831 static int hwi_init_controller(struct beiscsi_hba *phba)
3832 {
3833 struct hwi_controller *phwi_ctrlr;
3834
3835 phwi_ctrlr = phba->phwi_ctrlr;
3836 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3837 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3838 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3839 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3840 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3841 phwi_ctrlr->phwi_ctxt);
3842 } else {
3843 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3844 "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3845 "than one element. Failing to load\n");
3846 return -ENOMEM;
3847 }
3848
3849 iscsi_init_global_templates(phba);
3850 if (beiscsi_init_wrb_handle(phba))
3851 return -ENOMEM;
3852
3853 if (hwi_init_async_pdu_ctx(phba)) {
3854 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3855 "BM_%d : hwi_init_async_pdu_ctx failed\n");
3856 return -ENOMEM;
3857 }
3858
3859 if (hwi_init_port(phba) != 0) {
3860 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3861 "BM_%d : hwi_init_controller failed\n");
3862
3863 return -ENOMEM;
3864 }
3865 return 0;
3866 }
3867
3868 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3869 {
3870 struct be_mem_descriptor *mem_descr;
3871 int i, j;
3872
3873 mem_descr = phba->init_mem;
3874 for (i = 0; i <
SE_MEM_MAX; i++) {
3875 for (j = mem_descr->num_elements; j > 0; j--) {
3876 dma_free_coherent(&phba->pcidev->dev,
3877 mem_descr->mem_array[j - 1].size,
3878 mem_descr->mem_array[j - 1].virtual_address,
3879 (unsigned long)mem_descr->mem_array[j - 1].
3880 bus_address.u.a64.address);
3881 }
3882
3883 kfree(mem_descr->mem_array);
3884 mem_descr++;
3885 }
3886 kfree(phba->init_mem);
3887 kfree(phba->phwi_ctrlr->wrb_context);
3888 kfree(phba->phwi_ctrlr);
3889 }
3890
3891 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3892 {
3893 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3894 struct sgl_handle *psgl_handle;
3895 struct iscsi_sge *pfrag;
3896 unsigned int arr_index, i, idx;
3897 unsigned int ulp_icd_start, ulp_num = 0;
3898
3899 phba->io_sgl_hndl_avbl = 0;
3900 phba->eh_sgl_hndl_avbl = 0;
3901
3902 mem_descr_sglh = phba->init_mem;
3903 mem_descr_sglh += HWI_MEM_SGLH;
3904 if (1 == mem_descr_sglh->num_elements) {
3905 phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl,
3906 sizeof(struct sgl_handle *),
3907 GFP_KERNEL);
3908 if (!phba->io_sgl_hndl_base) {
3909 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3910 "BM_%d : Mem Alloc Failed. Failing to load\n");
3911 return -ENOMEM;
3912 }
3913 phba->eh_sgl_hndl_base =
3914 kcalloc(phba->params.icds_per_ctrl -
3915 phba->params.ios_per_ctrl,
3916 sizeof(struct sgl_handle *), GFP_KERNEL);
3917 if (!phba->eh_sgl_hndl_base) {
3918 kfree(phba->io_sgl_hndl_base);
3919 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3920 "BM_%d : Mem Alloc Failed. Failing to load\n");
3921 return -ENOMEM;
3922 }
3923 } else {
3924 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3925 "BM_%d : HWI_MEM_SGLH is more than one element. "
3926 "Failing to load\n");
3927 return -ENOMEM;
3928 }
3929
3930 arr_index = 0;
3931 idx = 0;
3932 while (idx < mem_descr_sglh->num_elements) {
3933 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3934
3935 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3936 sizeof(struct sgl_handle)); i++) {
3937 if (arr_index < phba->params.ios_per_ctrl) {
3938 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3939 phba->io_sgl_hndl_avbl++;
3940 arr_index++;
3941 } else {
3942 phba->eh_sgl_hndl_base[arr_index -
3943 phba->params.ios_per_ctrl] =
3944 psgl_handle;
3945 arr_index++;
3946 phba->eh_sgl_hndl_avbl++;
3947 }
3948 psgl_handle++;
3949 }
3950 idx++;
3951 }
3952 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3953 "BM_%d : phba->io_sgl_hndl_avbl=%d "
3954 "phba->eh_sgl_hndl_avbl=%d\n",
3955 phba->io_sgl_hndl_avbl,
3956 phba->eh_sgl_hndl_avbl);
3957
3958 mem_descr_sg = phba->init_mem;
3959 mem_descr_sg += HWI_MEM_SGE;
3960 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3961 "\n BM_%d : mem_descr_sg->num_elements=%d\n",
3962 mem_descr_sg->num_elements);
3963
3964 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3965 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3966 break;
3967
3968 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
3969
3970 arr_index = 0;
3971 idx = 0;
3972 while (idx < mem_descr_sg->num_elements) {
3973 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3974
3975 for (i = 0;
3976 i < (mem_descr_sg->mem_array[idx].size) /
3977 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3978 i++) {
3979 if (arr_index < phba->params.ios_per_ctrl)
3980 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3981 else
3982 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3983 phba->params.ios_per_ctrl];
3984 psgl_handle->pfrag = pfrag;
3985 AMAP_SET_BITS(struct
amap_iscsi_sge, addr_hi, pfrag, 0);
3986 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3987 pfrag += phba->params.num_sge_per_io;
3988 psgl_handle->sgl_index = ulp_icd_start + arr_index++;
3989 }
3990 idx++;
3991 }
3992 phba->io_sgl_free_index = 0;
3993 phba->io_sgl_alloc_index = 0;
3994 phba->eh_sgl_free_index = 0;
3995 phba->eh_sgl_alloc_index = 0;
3996 return 0;
3997 }
3998
3999 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
4000 {
4001 int ret;
4002 uint16_t i, ulp_num;
4003 struct ulp_cid_info *ptr_cid_info = NULL;
4004
4005 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4006 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4007 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
4008 GFP_KERNEL);
4009
4010 if (!ptr_cid_info) {
4011 ret = -ENOMEM;
4012 goto free_memory;
4013 }
4014
4015 /* Allocate memory for CID array */
4016 ptr_cid_info->cid_array =
4017 kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num),
4018 sizeof(*ptr_cid_info->cid_array),
4019 GFP_KERNEL);
4020 if (!ptr_cid_info->cid_array) {
4021 kfree(ptr_cid_info);
4022 ptr_cid_info = NULL;
4023 ret = -ENOMEM;
4024
4025 goto free_memory;
4026 }
4027 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
4028 phba, ulp_num);
4029
4030 /* Save the cid_info_array ptr */
4031 phba->cid_array_info[ulp_num] = ptr_cid_info;
4032 }
4033 }
4034 phba->ep_array = kcalloc(phba->params.cxns_per_ctrl,
4035 sizeof(struct iscsi_endpoint *),
4036 GFP_KERNEL);
4037 if (!phba->ep_array) {
4038 ret = -ENOMEM;
4039
4040 goto free_memory;
4041 }
4042
4043 phba->conn_table = kcalloc(phba->params.cxns_per_ctrl,
4044 sizeof(struct beiscsi_conn *),
4045 GFP_KERNEL);
4046 if (!phba->conn_table) {
4047 kfree(phba->ep_array);
4048 phba->ep_array = NULL;
4049 ret = -ENOMEM;
4050
4051 goto free_memory;
4052 }
4053
4054 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
4055 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
4056
4057 ptr_cid_info = phba->cid_array_info[ulp_num];
4058 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
4059 phba->phwi_ctrlr->wrb_context[i].cid;
4060
4061 }
4062
4063 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4064 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4065 ptr_cid_info = phba->cid_array_info[ulp_num];
4066
4067 ptr_cid_info->cid_alloc = 0;
4068 ptr_cid_info->cid_free = 0;
4069 }
4070 }
4071 return 0;
4072
4073 free_memory:
4074 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4075 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4076 ptr_cid_info = phba->cid_array_info[ulp_num];
4077
4078 if (ptr_cid_info) {
4079 kfree(ptr_cid_info->cid_array);
4080 kfree(ptr_cid_info);
4081 phba->cid_array_info[ulp_num] = NULL;
4082 }
4083 }
4084 }
4085
4086 return ret;
4087 }
4088
4089 static void hwi_enable_intr(struct beiscsi_hba *phba)
4090 {
4091 struct be_ctrl_info *ctrl = &phba->ctrl;
4092 struct hwi_controller *phwi_ctrlr;
4093 struct hwi_context_memory *phwi_context;
4094 struct be_queue_info *eq;
4095 u8 __iomem *addr;
4096 u32 reg, i;
4097 u32 enabled;
4098
4099 phwi_ctrlr = phba->phwi_ctrlr;
4100 phwi_context = phwi_ctrlr->phwi_ctxt;
4101
4102 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
4103 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
4104 reg = ioread32(addr);
4105
4106 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4107 if (!enabled) {
4108 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4109 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4110 "BM_%d : reg = 0x%08x addr=%p\n", reg, addr);
4111 iowrite32(reg, addr);
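/*
 * Host interrupts are now enabled at the function level; each EQ
 * below must still be re-armed via its doorbell before it can
 * deliver events.
 */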
4112 }
4113
4114 if (!phba->pcidev->msix_enabled) {
4115 eq = &phwi_context->be_eq[0].q;
4116 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4117 "BM_%d : eq->id=%d\n", eq->id);
4118
4119 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4120 } else {
4121 for (i = 0; i <= phba->num_cpus; i++) {
4122 eq = &phwi_context->be_eq[i].q;
4123 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4124 "BM_%d : eq->id=%d\n", eq->id);
4125 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4126 }
4127 }
4128 }
4129
4130 static void hwi_disable_intr(struct beiscsi_hba *phba)
4131 {
4132 struct be_ctrl_info *ctrl = &phba->ctrl;
4133
4134 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
4135 u32 reg = ioread32(addr);
4136
4137 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4138 if (enabled) {
4139 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4140 iowrite32(reg, addr);
4141 } else
4142 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4143 "BM_%d : In hwi_disable_intr, Already Disabled\n");
4144 }
4145
4146 static int beiscsi_init_port(struct beiscsi_hba *phba)
4147 {
4148 int ret;
4149
4150 ret = hwi_init_controller(phba);
4151 if (ret < 0) {
4152 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4153 "BM_%d : init controller failed\n");
4154 return ret;
4155 }
4156 ret = beiscsi_init_sgl_handle(phba);
4157 if (ret < 0) {
4158 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4159 "BM_%d : init sgl handles failed\n");
4160 goto cleanup_port;
4161 }
4162
4163 ret = hba_setup_cid_tbls(phba);
4164 if (ret < 0) {
4165 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4166 "BM_%d : setup CID table failed\n");
4167 kfree(phba->io_sgl_hndl_base);
4168 kfree(phba->eh_sgl_hndl_base);
4169 goto cleanup_port;
4170 }
4171 return ret;
4172
4173 cleanup_port:
4174 hwi_cleanup_port(phba);
4175 return ret;
4176 }
4177
4178 static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
4179 {
4180 struct ulp_cid_info *ptr_cid_info = NULL;
4181 int ulp_num;
4182
4183 kfree(phba->io_sgl_hndl_base);
4184 kfree(phba->eh_sgl_hndl_base);
4185 kfree(phba->ep_array);
4186 kfree(phba->conn_table);
4187
4188 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4189 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4190 ptr_cid_info = phba->cid_array_info[ulp_num];
4191
4192 if (ptr_cid_info) {
4193 kfree(ptr_cid_info->cid_array);
4194 kfree(ptr_cid_info);
4195 phba->cid_array_info[ulp_num] = NULL;
4196 }
4197 }
4198 }
4199 }
4200
4201 /**
4202 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
4203 * @beiscsi_conn: ptr to the conn to be cleaned up
4204 * @task: ptr to iscsi_task resource to be freed.
4205 *
4206 * Free driver mgmt resources bound to CXN.
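 * Releases, in order, the WRB handle, the management SGL handle and
 * any DMA mapping that was set up for the task's data buffer.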
4207 **/ 4208 void 4209 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 4210 struct iscsi_task *task) 4211 { 4212 struct beiscsi_io_task *io_task; 4213 struct beiscsi_hba *phba = beiscsi_conn->phba; 4214 struct hwi_wrb_context *pwrb_context; 4215 struct hwi_controller *phwi_ctrlr; 4216 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4217 beiscsi_conn->beiscsi_conn_cid); 4218 4219 phwi_ctrlr = phba->phwi_ctrlr; 4220 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4221 4222 io_task = task->dd_data; 4223 4224 if (io_task->pwrb_handle) { 4225 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4226 io_task->pwrb_handle = NULL; 4227 } 4228 4229 if (io_task->psgl_handle) { 4230 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4231 io_task->psgl_handle = NULL; 4232 } 4233 4234 if (io_task->mtask_addr) { 4235 dma_unmap_single(&phba->pcidev->dev, 4236 io_task->mtask_addr, 4237 io_task->mtask_data_count, 4238 DMA_TO_DEVICE); 4239 io_task->mtask_addr = 0; 4240 } 4241 } 4242 4243 /** 4244 * beiscsi_cleanup_task()- Free driver resources of the task 4245 * @task: ptr to the iscsi task 4246 * 4247 **/ 4248 static void beiscsi_cleanup_task(struct iscsi_task *task) 4249 { 4250 struct beiscsi_io_task *io_task = task->dd_data; 4251 struct iscsi_conn *conn = task->conn; 4252 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4253 struct beiscsi_hba *phba = beiscsi_conn->phba; 4254 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4255 struct hwi_wrb_context *pwrb_context; 4256 struct hwi_controller *phwi_ctrlr; 4257 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4258 beiscsi_conn->beiscsi_conn_cid); 4259 4260 phwi_ctrlr = phba->phwi_ctrlr; 4261 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4262 4263 if (io_task->cmd_bhs) { 4264 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4265 io_task->bhs_pa.u.a64.address); 4266 io_task->cmd_bhs = NULL; 4267 task->hdr = NULL; 4268 } 4269 4270 if (task->sc) { 4271 if (io_task->pwrb_handle) { 4272 free_wrb_handle(phba, pwrb_context, 4273 io_task->pwrb_handle); 4274 io_task->pwrb_handle = NULL; 4275 } 4276 4277 if (io_task->psgl_handle) { 4278 free_io_sgl_handle(phba, io_task->psgl_handle); 4279 io_task->psgl_handle = NULL; 4280 } 4281 4282 if (io_task->scsi_cmnd) { 4283 if (io_task->num_sg) 4284 scsi_dma_unmap(io_task->scsi_cmnd); 4285 io_task->scsi_cmnd = NULL; 4286 } 4287 } else { 4288 if (!beiscsi_conn->login_in_progress) 4289 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4290 } 4291 } 4292 4293 void 4294 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4295 struct beiscsi_offload_params *params) 4296 { 4297 struct wrb_handle *pwrb_handle; 4298 struct hwi_wrb_context *pwrb_context = NULL; 4299 struct beiscsi_hba *phba = beiscsi_conn->phba; 4300 struct iscsi_task *task = beiscsi_conn->task; 4301 struct iscsi_session *session = task->conn->session; 4302 u32 doorbell = 0; 4303 4304 /* 4305 * We can always use 0 here because it is reserved by libiscsi for 4306 * login/startup related tasks. 
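 * The login task's handles were allocated when login started; cleaning
 * up the task below returns them to the pool before the connection
 * context is offloaded to the adapter.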
4307 */
4308 beiscsi_conn->login_in_progress = 0;
4309 spin_lock_bh(&session->back_lock);
4310 beiscsi_cleanup_task(task);
4311 spin_unlock_bh(&session->back_lock);
4312
4313 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid,
4314 &pwrb_context);
4315
4316 /* Check for the adapter family */
4317 if (is_chip_be2_be3r(phba))
4318 beiscsi_offload_cxn_v0(params, pwrb_handle,
4319 phba->init_mem,
4320 pwrb_context);
4321 else
4322 beiscsi_offload_cxn_v2(params, pwrb_handle,
4323 pwrb_context);
4324
4325 be_dws_le_to_cpu(pwrb_handle->pwrb,
4326 sizeof(struct iscsi_target_context_update_wrb));
4327
4328 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4329 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
4330 << DB_DEF_PDU_WRB_INDEX_SHIFT;
4331 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4332 iowrite32(doorbell, phba->db_va +
4333 beiscsi_conn->doorbell_offset);
4334
4335 /*
4336 * There is no completion for CONTEXT_UPDATE. Completion of the next
4337 * WRB posted guarantees that FW has processed and DMA'ed this one.
4338 * Use beiscsi_put_wrb_handle to put it back in the pool, which makes
4339 * sure the WRB is zeroed or reused only after wrbs_per_cxn more posts.
4340 */
4341 beiscsi_put_wrb_handle(pwrb_context, pwrb_handle,
4342 phba->params.wrbs_per_cxn);
4343 beiscsi_log(phba, KERN_INFO,
4344 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4345 "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n",
4346 pwrb_handle, pwrb_context->free_index,
4347 pwrb_context->wrb_handles_available);
4348 }
4349
4350 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4351 int *index, int *age)
4352 {
4353 *index = (int)itt;
4354 if (age)
4355 *age = conn->session->age;
4356 }
4357
4358 /**
4359 * beiscsi_alloc_pdu - allocates pdu and related resources
4360 * @task: libiscsi task
4361 * @opcode: opcode of pdu for task
4362 *
4363 * This is called with the session lock held. It allocates the
4364 * wrb and sgl if needed for the command, and preps
4365 * the pdu's itt. beiscsi_parse_pdu will later translate
4366 * the pdu itt to the libiscsi task itt.
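 * The on-the-wire itt packs the WRB index in the upper 16 bits and
 * the SGL index in the lower 16 bits, so a completion can be mapped
 * straight back to its driver resources.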
4367 */ 4368 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4369 { 4370 struct beiscsi_io_task *io_task = task->dd_data; 4371 struct iscsi_conn *conn = task->conn; 4372 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4373 struct beiscsi_hba *phba = beiscsi_conn->phba; 4374 struct hwi_wrb_context *pwrb_context; 4375 struct hwi_controller *phwi_ctrlr; 4376 itt_t itt; 4377 uint16_t cri_index = 0; 4378 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4379 dma_addr_t paddr; 4380 4381 io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool, 4382 GFP_ATOMIC, &paddr); 4383 if (!io_task->cmd_bhs) 4384 return -ENOMEM; 4385 io_task->bhs_pa.u.a64.address = paddr; 4386 io_task->libiscsi_itt = (itt_t)task->itt; 4387 io_task->conn = beiscsi_conn; 4388 4389 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4390 task->hdr_max = sizeof(struct be_cmd_bhs); 4391 io_task->psgl_handle = NULL; 4392 io_task->pwrb_handle = NULL; 4393 4394 if (task->sc) { 4395 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4396 if (!io_task->psgl_handle) { 4397 beiscsi_log(phba, KERN_ERR, 4398 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4399 "BM_%d : Alloc of IO_SGL_ICD Failed " 4400 "for the CID : %d\n", 4401 beiscsi_conn->beiscsi_conn_cid); 4402 goto free_hndls; 4403 } 4404 io_task->pwrb_handle = alloc_wrb_handle(phba, 4405 beiscsi_conn->beiscsi_conn_cid, 4406 &io_task->pwrb_context); 4407 if (!io_task->pwrb_handle) { 4408 beiscsi_log(phba, KERN_ERR, 4409 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4410 "BM_%d : Alloc of WRB_HANDLE Failed " 4411 "for the CID : %d\n", 4412 beiscsi_conn->beiscsi_conn_cid); 4413 goto free_io_hndls; 4414 } 4415 } else { 4416 io_task->scsi_cmnd = NULL; 4417 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4418 beiscsi_conn->task = task; 4419 if (!beiscsi_conn->login_in_progress) { 4420 io_task->psgl_handle = (struct sgl_handle *) 4421 alloc_mgmt_sgl_handle(phba); 4422 if (!io_task->psgl_handle) { 4423 beiscsi_log(phba, KERN_ERR, 4424 BEISCSI_LOG_IO | 4425 BEISCSI_LOG_CONFIG, 4426 "BM_%d : Alloc of MGMT_SGL_ICD Failed " 4427 "for the CID : %d\n", 4428 beiscsi_conn->beiscsi_conn_cid); 4429 goto free_hndls; 4430 } 4431 4432 beiscsi_conn->login_in_progress = 1; 4433 beiscsi_conn->plogin_sgl_handle = 4434 io_task->psgl_handle; 4435 io_task->pwrb_handle = 4436 alloc_wrb_handle(phba, 4437 beiscsi_conn->beiscsi_conn_cid, 4438 &io_task->pwrb_context); 4439 if (!io_task->pwrb_handle) { 4440 beiscsi_log(phba, KERN_ERR, 4441 BEISCSI_LOG_IO | 4442 BEISCSI_LOG_CONFIG, 4443 "BM_%d : Alloc of WRB_HANDLE Failed " 4444 "for the CID : %d\n", 4445 beiscsi_conn->beiscsi_conn_cid); 4446 goto free_mgmt_hndls; 4447 } 4448 beiscsi_conn->plogin_wrb_handle = 4449 io_task->pwrb_handle; 4450 4451 } else { 4452 io_task->psgl_handle = 4453 beiscsi_conn->plogin_sgl_handle; 4454 io_task->pwrb_handle = 4455 beiscsi_conn->plogin_wrb_handle; 4456 } 4457 } else { 4458 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4459 if (!io_task->psgl_handle) { 4460 beiscsi_log(phba, KERN_ERR, 4461 BEISCSI_LOG_IO | 4462 BEISCSI_LOG_CONFIG, 4463 "BM_%d : Alloc of MGMT_SGL_ICD Failed " 4464 "for the CID : %d\n", 4465 beiscsi_conn->beiscsi_conn_cid); 4466 goto free_hndls; 4467 } 4468 io_task->pwrb_handle = 4469 alloc_wrb_handle(phba, 4470 beiscsi_conn->beiscsi_conn_cid, 4471 &io_task->pwrb_context); 4472 if (!io_task->pwrb_handle) { 4473 beiscsi_log(phba, KERN_ERR, 4474 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4475 "BM_%d : Alloc of WRB_HANDLE Failed " 4476 "for the CID : %d\n", 4477 
beiscsi_conn->beiscsi_conn_cid); 4478 goto free_mgmt_hndls; 4479 } 4480 4481 } 4482 } 4483 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4484 wrb_index << 16) | (unsigned int) 4485 (io_task->psgl_handle->sgl_index)); 4486 io_task->pwrb_handle->pio_handle = task; 4487 4488 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4489 return 0; 4490 4491 free_io_hndls: 4492 free_io_sgl_handle(phba, io_task->psgl_handle); 4493 goto free_hndls; 4494 free_mgmt_hndls: 4495 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4496 io_task->psgl_handle = NULL; 4497 free_hndls: 4498 phwi_ctrlr = phba->phwi_ctrlr; 4499 cri_index = BE_GET_CRI_FROM_CID( 4500 beiscsi_conn->beiscsi_conn_cid); 4501 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4502 if (io_task->pwrb_handle) 4503 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4504 io_task->pwrb_handle = NULL; 4505 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4506 io_task->bhs_pa.u.a64.address); 4507 io_task->cmd_bhs = NULL; 4508 return -ENOMEM; 4509 } 4510 static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4511 unsigned int num_sg, unsigned int xferlen, 4512 unsigned int writedir) 4513 { 4514 4515 struct beiscsi_io_task *io_task = task->dd_data; 4516 struct iscsi_conn *conn = task->conn; 4517 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4518 struct beiscsi_hba *phba = beiscsi_conn->phba; 4519 struct iscsi_wrb *pwrb = NULL; 4520 unsigned int doorbell = 0; 4521 4522 pwrb = io_task->pwrb_handle->pwrb; 4523 4524 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4525 4526 if (writedir) { 4527 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4528 INI_WR_CMD); 4529 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4530 } else { 4531 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4532 INI_RD_CMD); 4533 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4534 } 4535 4536 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4537 type, pwrb); 4538 4539 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4540 cpu_to_be16(*(unsigned short *) 4541 &io_task->cmd_bhs->iscsi_hdr.lun)); 4542 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4543 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4544 io_task->pwrb_handle->wrb_index); 4545 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4546 be32_to_cpu(task->cmdsn)); 4547 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4548 io_task->psgl_handle->sgl_index); 4549 4550 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4551 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4552 io_task->pwrb_handle->wrb_index); 4553 if (io_task->pwrb_context->plast_wrb) 4554 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4555 io_task->pwrb_context->plast_wrb, 4556 io_task->pwrb_handle->wrb_index); 4557 io_task->pwrb_context->plast_wrb = pwrb; 4558 4559 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4560 4561 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4562 doorbell |= (io_task->pwrb_handle->wrb_index & 4563 DB_DEF_PDU_WRB_INDEX_MASK) << 4564 DB_DEF_PDU_WRB_INDEX_SHIFT; 4565 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4566 iowrite32(doorbell, phba->db_va + 4567 beiscsi_conn->doorbell_offset); 4568 return 0; 4569 } 4570 4571 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4572 unsigned int num_sg, unsigned int xferlen, 4573 unsigned int writedir) 4574 { 4575 4576 struct beiscsi_io_task *io_task = task->dd_data; 4577 struct iscsi_conn *conn = task->conn; 4578 struct 
beiscsi_conn *beiscsi_conn = conn->dd_data; 4579 struct beiscsi_hba *phba = beiscsi_conn->phba; 4580 struct iscsi_wrb *pwrb = NULL; 4581 unsigned int doorbell = 0; 4582 4583 pwrb = io_task->pwrb_handle->pwrb; 4584 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4585 4586 if (writedir) { 4587 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4588 INI_WR_CMD); 4589 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4590 } else { 4591 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4592 INI_RD_CMD); 4593 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4594 } 4595 4596 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4597 type, pwrb); 4598 4599 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4600 cpu_to_be16(*(unsigned short *) 4601 &io_task->cmd_bhs->iscsi_hdr.lun)); 4602 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4603 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4604 io_task->pwrb_handle->wrb_index); 4605 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4606 be32_to_cpu(task->cmdsn)); 4607 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4608 io_task->psgl_handle->sgl_index); 4609 4610 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4611 4612 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4613 io_task->pwrb_handle->wrb_index); 4614 if (io_task->pwrb_context->plast_wrb) 4615 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4616 io_task->pwrb_context->plast_wrb, 4617 io_task->pwrb_handle->wrb_index); 4618 io_task->pwrb_context->plast_wrb = pwrb; 4619 4620 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4621 4622 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4623 doorbell |= (io_task->pwrb_handle->wrb_index & 4624 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4625 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4626 4627 iowrite32(doorbell, phba->db_va + 4628 beiscsi_conn->doorbell_offset); 4629 return 0; 4630 } 4631 4632 static int beiscsi_mtask(struct iscsi_task *task) 4633 { 4634 struct beiscsi_io_task *io_task = task->dd_data; 4635 struct iscsi_conn *conn = task->conn; 4636 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4637 struct beiscsi_hba *phba = beiscsi_conn->phba; 4638 struct iscsi_wrb *pwrb = NULL; 4639 unsigned int doorbell = 0; 4640 unsigned int cid; 4641 unsigned int pwrb_typeoffset = 0; 4642 int ret = 0; 4643 4644 cid = beiscsi_conn->beiscsi_conn_cid; 4645 pwrb = io_task->pwrb_handle->pwrb; 4646 4647 if (is_chip_be2_be3r(phba)) { 4648 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4649 be32_to_cpu(task->cmdsn)); 4650 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4651 io_task->pwrb_handle->wrb_index); 4652 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4653 io_task->psgl_handle->sgl_index); 4654 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 4655 task->data_count); 4656 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4657 io_task->pwrb_handle->wrb_index); 4658 if (io_task->pwrb_context->plast_wrb) 4659 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4660 io_task->pwrb_context->plast_wrb, 4661 io_task->pwrb_handle->wrb_index); 4662 io_task->pwrb_context->plast_wrb = pwrb; 4663 4664 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4665 } else { 4666 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4667 be32_to_cpu(task->cmdsn)); 4668 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4669 io_task->pwrb_handle->wrb_index); 4670 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4671 io_task->psgl_handle->sgl_index); 4672 AMAP_SET_BITS(struct 
amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 4673 task->data_count); 4674 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4675 io_task->pwrb_handle->wrb_index); 4676 if (io_task->pwrb_context->plast_wrb) 4677 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4678 io_task->pwrb_context->plast_wrb, 4679 io_task->pwrb_handle->wrb_index); 4680 io_task->pwrb_context->plast_wrb = pwrb; 4681 4682 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 4683 } 4684 4685 4686 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 4687 case ISCSI_OP_LOGIN: 4688 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 4689 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4690 ret = hwi_write_buffer(pwrb, task); 4691 break; 4692 case ISCSI_OP_NOOP_OUT: 4693 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4694 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4695 if (is_chip_be2_be3r(phba)) 4696 AMAP_SET_BITS(struct amap_iscsi_wrb, 4697 dmsg, pwrb, 1); 4698 else 4699 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4700 dmsg, pwrb, 1); 4701 } else { 4702 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 4703 if (is_chip_be2_be3r(phba)) 4704 AMAP_SET_BITS(struct amap_iscsi_wrb, 4705 dmsg, pwrb, 0); 4706 else 4707 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4708 dmsg, pwrb, 0); 4709 } 4710 ret = hwi_write_buffer(pwrb, task); 4711 break; 4712 case ISCSI_OP_TEXT: 4713 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4714 ret = hwi_write_buffer(pwrb, task); 4715 break; 4716 case ISCSI_OP_SCSI_TMFUNC: 4717 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 4718 ret = hwi_write_buffer(pwrb, task); 4719 break; 4720 case ISCSI_OP_LOGOUT: 4721 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 4722 ret = hwi_write_buffer(pwrb, task); 4723 break; 4724 4725 default: 4726 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4727 "BM_%d : opcode =%d Not supported\n", 4728 task->hdr->opcode & ISCSI_OPCODE_MASK); 4729 4730 return -EINVAL; 4731 } 4732 4733 if (ret) 4734 return ret; 4735 4736 /* Set the task type */ 4737 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 4738 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 4739 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 4740 4741 doorbell |= cid & DB_WRB_POST_CID_MASK; 4742 doorbell |= (io_task->pwrb_handle->wrb_index & 4743 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4744 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4745 iowrite32(doorbell, phba->db_va + 4746 beiscsi_conn->doorbell_offset); 4747 return 0; 4748 } 4749 4750 static int beiscsi_task_xmit(struct iscsi_task *task) 4751 { 4752 struct beiscsi_io_task *io_task = task->dd_data; 4753 struct scsi_cmnd *sc = task->sc; 4754 struct beiscsi_hba *phba; 4755 struct scatterlist *sg; 4756 int num_sg; 4757 unsigned int writedir = 0, xferlen = 0; 4758 4759 phba = io_task->conn->phba; 4760 /** 4761 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be 4762 * operational if FW still gets heartbeat from EP FW. Is management 4763 * path really needed to continue further? 
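 * Until that question is settled, fail the submission with -EIO
 * whenever the HBA is not online.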
4764 */ 4765 if (!beiscsi_hba_is_online(phba)) 4766 return -EIO; 4767 4768 if (!io_task->conn->login_in_progress) 4769 task->hdr->exp_statsn = 0; 4770 4771 if (!sc) 4772 return beiscsi_mtask(task); 4773 4774 io_task->scsi_cmnd = sc; 4775 io_task->num_sg = 0; 4776 num_sg = scsi_dma_map(sc); 4777 if (num_sg < 0) { 4778 beiscsi_log(phba, KERN_ERR, 4779 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 4780 "BM_%d : scsi_dma_map Failed " 4781 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 4782 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 4783 io_task->libiscsi_itt, scsi_bufflen(sc)); 4784 4785 return num_sg; 4786 } 4787 /** 4788 * For scsi cmd task, check num_sg before unmapping in cleanup_task. 4789 * For management task, cleanup_task checks mtask_addr before unmapping. 4790 */ 4791 io_task->num_sg = num_sg; 4792 xferlen = scsi_bufflen(sc); 4793 sg = scsi_sglist(sc); 4794 if (sc->sc_data_direction == DMA_TO_DEVICE) 4795 writedir = 1; 4796 else 4797 writedir = 0; 4798 4799 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 4800 } 4801 4802 /** 4803 * beiscsi_bsg_request - handle bsg request from ISCSI transport 4804 * @job: job to handle 4805 */ 4806 static int beiscsi_bsg_request(struct bsg_job *job) 4807 { 4808 struct Scsi_Host *shost; 4809 struct beiscsi_hba *phba; 4810 struct iscsi_bsg_request *bsg_req = job->request; 4811 int rc = -EINVAL; 4812 unsigned int tag; 4813 struct be_dma_mem nonemb_cmd; 4814 struct be_cmd_resp_hdr *resp; 4815 struct iscsi_bsg_reply *bsg_reply = job->reply; 4816 unsigned short status, extd_status; 4817 4818 shost = iscsi_job_to_shost(job); 4819 phba = iscsi_host_priv(shost); 4820 4821 if (!beiscsi_hba_is_online(phba)) { 4822 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 4823 "BM_%d : HBA in error 0x%lx\n", phba->state); 4824 return -ENXIO; 4825 } 4826 4827 switch (bsg_req->msgcode) { 4828 case ISCSI_BSG_HST_VENDOR: 4829 nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, 4830 job->request_payload.payload_len, 4831 &nonemb_cmd.dma, GFP_KERNEL); 4832 if (nonemb_cmd.va == NULL) { 4833 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4834 "BM_%d : Failed to allocate memory for " 4835 "beiscsi_bsg_request\n"); 4836 return -ENOMEM; 4837 } 4838 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 4839 &nonemb_cmd); 4840 if (!tag) { 4841 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4842 "BM_%d : MBX Tag Allocation Failed\n"); 4843 4844 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, 4845 nonemb_cmd.va, nonemb_cmd.dma); 4846 return -EAGAIN; 4847 } 4848 4849 rc = wait_event_interruptible_timeout( 4850 phba->ctrl.mcc_wait[tag], 4851 phba->ctrl.mcc_tag_status[tag], 4852 msecs_to_jiffies( 4853 BEISCSI_HOST_MBX_TIMEOUT)); 4854 4855 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 4856 clear_bit(MCC_TAG_STATE_RUNNING, 4857 &phba->ctrl.ptag_state[tag].tag_state); 4858 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, 4859 nonemb_cmd.va, nonemb_cmd.dma); 4860 return -EIO; 4861 } 4862 extd_status = (phba->ctrl.mcc_tag_status[tag] & 4863 CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; 4864 status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; 4865 free_mcc_wrb(&phba->ctrl, tag); 4866 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 4867 sg_copy_from_buffer(job->reply_payload.sg_list, 4868 job->reply_payload.sg_cnt, 4869 nonemb_cmd.va, (resp->response_length 4870 + sizeof(*resp))); 4871 bsg_reply->reply_payload_rcv_len = resp->response_length; 4872 bsg_reply->result = status; 4873 bsg_job_done(job, bsg_reply->result, 4874 
bsg_reply->reply_payload_rcv_len); 4875 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, 4876 nonemb_cmd.va, nonemb_cmd.dma); 4877 if (status || extd_status) { 4878 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4879 "BM_%d : MBX Cmd Failed" 4880 " status = %d extd_status = %d\n", 4881 status, extd_status); 4882 4883 return -EIO; 4884 } else { 4885 rc = 0; 4886 } 4887 break; 4888 4889 default: 4890 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4891 "BM_%d : Unsupported bsg command: 0x%x\n", 4892 bsg_req->msgcode); 4893 break; 4894 } 4895 4896 return rc; 4897 } 4898 4899 static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 4900 { 4901 /* Set the logging parameter */ 4902 beiscsi_log_enable_init(phba, beiscsi_log_enable); 4903 } 4904 4905 void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle) 4906 { 4907 if (phba->boot_struct.boot_kset) 4908 return; 4909 4910 /* skip if boot work is already in progress */ 4911 if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) 4912 return; 4913 4914 phba->boot_struct.retry = 3; 4915 phba->boot_struct.tag = 0; 4916 phba->boot_struct.s_handle = s_handle; 4917 phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE; 4918 schedule_work(&phba->boot_work); 4919 } 4920 4921 #define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3 4922 /* 4923 * beiscsi_show_boot_tgt_info() 4924 * Boot flag info for iscsi-utilities 4925 * Bit 0 Block valid flag 4926 * Bit 1 Firmware booting selected 4927 */ 4928 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 4929 { 4930 struct beiscsi_hba *phba = data; 4931 struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess; 4932 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 4933 char *str = buf; 4934 int rc = -EPERM; 4935 4936 switch (type) { 4937 case ISCSI_BOOT_TGT_NAME: 4938 rc = sprintf(buf, "%.*s\n", 4939 (int)strlen(boot_sess->target_name), 4940 (char *)&boot_sess->target_name); 4941 break; 4942 case ISCSI_BOOT_TGT_IP_ADDR: 4943 if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4) 4944 rc = sprintf(buf, "%pI4\n", 4945 (char *)&boot_conn->dest_ipaddr.addr); 4946 else 4947 rc = sprintf(str, "%pI6\n", 4948 (char *)&boot_conn->dest_ipaddr.addr); 4949 break; 4950 case ISCSI_BOOT_TGT_PORT: 4951 rc = sprintf(str, "%d\n", boot_conn->dest_port); 4952 break; 4953 4954 case ISCSI_BOOT_TGT_CHAP_NAME: 4955 rc = sprintf(str, "%.*s\n", 4956 boot_conn->negotiated_login_options.auth_data.chap. 4957 target_chap_name_length, 4958 (char *)&boot_conn->negotiated_login_options. 4959 auth_data.chap.target_chap_name); 4960 break; 4961 case ISCSI_BOOT_TGT_CHAP_SECRET: 4962 rc = sprintf(str, "%.*s\n", 4963 boot_conn->negotiated_login_options.auth_data.chap. 4964 target_secret_length, 4965 (char *)&boot_conn->negotiated_login_options. 4966 auth_data.chap.target_secret); 4967 break; 4968 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 4969 rc = sprintf(str, "%.*s\n", 4970 boot_conn->negotiated_login_options.auth_data.chap. 4971 intr_chap_name_length, 4972 (char *)&boot_conn->negotiated_login_options. 4973 auth_data.chap.intr_chap_name); 4974 break; 4975 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 4976 rc = sprintf(str, "%.*s\n", 4977 boot_conn->negotiated_login_options.auth_data.chap. 4978 intr_secret_length, 4979 (char *)&boot_conn->negotiated_login_options. 
4980 auth_data.chap.intr_secret); 4981 break; 4982 case ISCSI_BOOT_TGT_FLAGS: 4983 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 4984 break; 4985 case ISCSI_BOOT_TGT_NIC_ASSOC: 4986 rc = sprintf(str, "0\n"); 4987 break; 4988 } 4989 return rc; 4990 } 4991 4992 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 4993 { 4994 struct beiscsi_hba *phba = data; 4995 char *str = buf; 4996 int rc = -EPERM; 4997 4998 switch (type) { 4999 case ISCSI_BOOT_INI_INITIATOR_NAME: 5000 rc = sprintf(str, "%s\n", 5001 phba->boot_struct.boot_sess.initiator_iscsiname); 5002 break; 5003 } 5004 return rc; 5005 } 5006 5007 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 5008 { 5009 struct beiscsi_hba *phba = data; 5010 char *str = buf; 5011 int rc = -EPERM; 5012 5013 switch (type) { 5014 case ISCSI_BOOT_ETH_FLAGS: 5015 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 5016 break; 5017 case ISCSI_BOOT_ETH_INDEX: 5018 rc = sprintf(str, "0\n"); 5019 break; 5020 case ISCSI_BOOT_ETH_MAC: 5021 rc = beiscsi_get_macaddr(str, phba); 5022 break; 5023 } 5024 return rc; 5025 } 5026 5027 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 5028 { 5029 umode_t rc = 0; 5030 5031 switch (type) { 5032 case ISCSI_BOOT_TGT_NAME: 5033 case ISCSI_BOOT_TGT_IP_ADDR: 5034 case ISCSI_BOOT_TGT_PORT: 5035 case ISCSI_BOOT_TGT_CHAP_NAME: 5036 case ISCSI_BOOT_TGT_CHAP_SECRET: 5037 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5038 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5039 case ISCSI_BOOT_TGT_NIC_ASSOC: 5040 case ISCSI_BOOT_TGT_FLAGS: 5041 rc = S_IRUGO; 5042 break; 5043 } 5044 return rc; 5045 } 5046 5047 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 5048 { 5049 umode_t rc = 0; 5050 5051 switch (type) { 5052 case ISCSI_BOOT_INI_INITIATOR_NAME: 5053 rc = S_IRUGO; 5054 break; 5055 } 5056 return rc; 5057 } 5058 5059 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 5060 { 5061 umode_t rc = 0; 5062 5063 switch (type) { 5064 case ISCSI_BOOT_ETH_FLAGS: 5065 case ISCSI_BOOT_ETH_MAC: 5066 case ISCSI_BOOT_ETH_INDEX: 5067 rc = S_IRUGO; 5068 break; 5069 } 5070 return rc; 5071 } 5072 5073 static void beiscsi_boot_kobj_release(void *data) 5074 { 5075 struct beiscsi_hba *phba = data; 5076 5077 scsi_host_put(phba->shost); 5078 } 5079 5080 static int beiscsi_boot_create_kset(struct beiscsi_hba *phba) 5081 { 5082 struct boot_struct *bs = &phba->boot_struct; 5083 struct iscsi_boot_kobj *boot_kobj; 5084 5085 if (bs->boot_kset) { 5086 __beiscsi_log(phba, KERN_ERR, 5087 "BM_%d: boot_kset already created\n"); 5088 return 0; 5089 } 5090 5091 bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 5092 if (!bs->boot_kset) { 5093 __beiscsi_log(phba, KERN_ERR, 5094 "BM_%d: boot_kset alloc failed\n"); 5095 return -ENOMEM; 5096 } 5097 5098 /* get shost ref because the show function will refer phba */ 5099 if (!scsi_host_get(phba->shost)) 5100 goto free_kset; 5101 5102 boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba, 5103 beiscsi_show_boot_tgt_info, 5104 beiscsi_tgt_get_attr_visibility, 5105 beiscsi_boot_kobj_release); 5106 if (!boot_kobj) 5107 goto put_shost; 5108 5109 if (!scsi_host_get(phba->shost)) 5110 goto free_kset; 5111 5112 boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba, 5113 beiscsi_show_boot_ini_info, 5114 beiscsi_ini_get_attr_visibility, 5115 beiscsi_boot_kobj_release); 5116 if (!boot_kobj) 5117 goto put_shost; 5118 5119 if (!scsi_host_get(phba->shost)) 5120 goto free_kset; 5121 5122 boot_kobj = 
iscsi_boot_create_ethernet(bs->boot_kset, 0, phba, 5123 beiscsi_show_boot_eth_info, 5124 beiscsi_eth_get_attr_visibility, 5125 beiscsi_boot_kobj_release); 5126 if (!boot_kobj) 5127 goto put_shost; 5128 5129 return 0; 5130 5131 put_shost: 5132 scsi_host_put(phba->shost); 5133 free_kset: 5134 iscsi_boot_destroy_kset(bs->boot_kset); 5135 bs->boot_kset = NULL; 5136 return -ENOMEM; 5137 } 5138 5139 static void beiscsi_boot_work(struct work_struct *work) 5140 { 5141 struct beiscsi_hba *phba = 5142 container_of(work, struct beiscsi_hba, boot_work); 5143 struct boot_struct *bs = &phba->boot_struct; 5144 unsigned int tag = 0; 5145 5146 if (!beiscsi_hba_is_online(phba)) 5147 return; 5148 5149 beiscsi_log(phba, KERN_INFO, 5150 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 5151 "BM_%d : %s action %d\n", 5152 __func__, phba->boot_struct.action); 5153 5154 switch (phba->boot_struct.action) { 5155 case BEISCSI_BOOT_REOPEN_SESS: 5156 tag = beiscsi_boot_reopen_sess(phba); 5157 break; 5158 case BEISCSI_BOOT_GET_SHANDLE: 5159 tag = __beiscsi_boot_get_shandle(phba, 1); 5160 break; 5161 case BEISCSI_BOOT_GET_SINFO: 5162 tag = beiscsi_boot_get_sinfo(phba); 5163 break; 5164 case BEISCSI_BOOT_LOGOUT_SESS: 5165 tag = beiscsi_boot_logout_sess(phba); 5166 break; 5167 case BEISCSI_BOOT_CREATE_KSET: 5168 beiscsi_boot_create_kset(phba); 5169 /** 5170 * updated boot_kset is made visible to all before 5171 * ending the boot work. 5172 */ 5173 mb(); 5174 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5175 return; 5176 } 5177 if (!tag) { 5178 if (bs->retry--) 5179 schedule_work(&phba->boot_work); 5180 else 5181 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5182 } 5183 } 5184 5185 static void beiscsi_eqd_update_work(struct work_struct *work) 5186 { 5187 struct hwi_context_memory *phwi_context; 5188 struct be_set_eqd set_eqd[MAX_CPUS]; 5189 struct hwi_controller *phwi_ctrlr; 5190 struct be_eq_obj *pbe_eq; 5191 struct beiscsi_hba *phba; 5192 unsigned int pps, delta; 5193 struct be_aic_obj *aic; 5194 int eqd, i, num = 0; 5195 unsigned long now; 5196 5197 phba = container_of(work, struct beiscsi_hba, eqd_update.work); 5198 if (!beiscsi_hba_is_online(phba)) 5199 return; 5200 5201 phwi_ctrlr = phba->phwi_ctrlr; 5202 phwi_context = phwi_ctrlr->phwi_ctxt; 5203 5204 for (i = 0; i <= phba->num_cpus; i++) { 5205 aic = &phba->aic_obj[i]; 5206 pbe_eq = &phwi_context->be_eq[i]; 5207 now = jiffies; 5208 if (!aic->jiffies || time_before(now, aic->jiffies) || 5209 pbe_eq->cq_count < aic->eq_prev) { 5210 aic->jiffies = now; 5211 aic->eq_prev = pbe_eq->cq_count; 5212 continue; 5213 } 5214 delta = jiffies_to_msecs(now - aic->jiffies); 5215 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); 5216 eqd = (pps / 1500) << 2; 5217 5218 if (eqd < 8) 5219 eqd = 0; 5220 eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX); 5221 eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN); 5222 5223 aic->jiffies = now; 5224 aic->eq_prev = pbe_eq->cq_count; 5225 5226 if (eqd != aic->prev_eqd) { 5227 set_eqd[num].delay_multiplier = (eqd * 65)/100; 5228 set_eqd[num].eq_id = pbe_eq->q.id; 5229 aic->prev_eqd = eqd; 5230 num++; 5231 } 5232 } 5233 if (num) 5234 /* completion of this is ignored */ 5235 beiscsi_modify_eq_delay(phba, set_eqd, num); 5236 5237 schedule_delayed_work(&phba->eqd_update, 5238 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5239 } 5240 5241 static void beiscsi_hw_tpe_check(struct timer_list *t) 5242 { 5243 struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5244 u32 wait; 5245 5246 /* if not TPE, do nothing */ 5247 if (!beiscsi_detect_tpe(phba)) 
5248 return;
5249
5250 /* wait default 4000ms before recovering */
5251 wait = 4000;
5252 if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
5253 wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
5254 queue_delayed_work(phba->wq, &phba->recover_port,
5255 msecs_to_jiffies(wait));
5256 }
5257
5258 static void beiscsi_hw_health_check(struct timer_list *t)
5259 {
5260 struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
5261
5262 if (beiscsi_detect_ue(phba)) {
5264 __beiscsi_log(phba, KERN_ERR,
5265 "BM_%d : port in error: %lx\n", phba->state);
5266 /* sessions are no longer valid, so first fail the sessions */
5267 queue_work(phba->wq, &phba->sess_work);
5268
5269 /* detect UER supported */
5270 if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
5271 return;
5272 /* modify this timer to check TPE */
5273 phba->hw_check.function = beiscsi_hw_tpe_check;
5274 }
5275
5276 mod_timer(&phba->hw_check,
5277 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5278 }
5279
5280 /*
5281 * beiscsi_enable_port()- Enables the disabled port.
5282 * Only port resources freed in disable function are reallocated.
5283 * This is called in HBA error handling path.
5284 *
5285 * @phba: Instance of driver private structure
5286 *
5287 **/
5288 static int beiscsi_enable_port(struct beiscsi_hba *phba)
5289 {
5290 struct hwi_context_memory *phwi_context;
5291 struct hwi_controller *phwi_ctrlr;
5292 struct be_eq_obj *pbe_eq;
5293 int ret, i;
5294
5295 if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
5296 __beiscsi_log(phba, KERN_ERR,
5297 "BM_%d : %s : port is online %lx\n",
5298 __func__, phba->state);
5299 return 0;
5300 }
5301
5302 ret = beiscsi_init_sliport(phba);
5303 if (ret)
5304 return ret;
5305
5306 be2iscsi_enable_msix(phba);
5307
5308 beiscsi_get_params(phba);
5309 beiscsi_set_host_data(phba);
5310 /* Re-enable UER. If different TPE occurs then it is recoverable. */
5311 beiscsi_set_uer_feature(phba);
5312
5313 phba->shost->max_id = phba->params.cxns_per_ctrl - 1;
5314 phba->shost->can_queue = phba->params.ios_per_ctrl;
5315 ret = beiscsi_init_port(phba);
5316 if (ret < 0) {
5317 __beiscsi_log(phba, KERN_ERR,
5318 "BM_%d : init port failed\n");
5319 goto disable_msix;
5320 }
5321
5322 for (i = 0; i < MAX_MCC_CMD; i++) {
5323 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5324 phba->ctrl.mcc_tag[i] = i + 1;
5325 phba->ctrl.mcc_tag_status[i + 1] = 0;
5326 phba->ctrl.mcc_tag_available++;
5327 }
5328
5329 phwi_ctrlr = phba->phwi_ctrlr;
5330 phwi_context = phwi_ctrlr->phwi_ctxt;
5331 for (i = 0; i < phba->num_cpus; i++) {
5332 pbe_eq = &phwi_context->be_eq[i];
5333 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
5334 }
5335
5336 i = (phba->pcidev->msix_enabled) ? i : 0;
5337 /* Work item for MCC handling */
5338 pbe_eq = &phwi_context->be_eq[i];
5339 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
5340
5341 ret = beiscsi_init_irqs(phba);
5342 if (ret < 0) {
5343 __beiscsi_log(phba, KERN_ERR,
5344 "BM_%d : setup IRQs failed %d\n", ret);
5345 goto cleanup_port;
5346 }
5347 hwi_enable_intr(phba);
5348 /* port operational: clear all error bits */
5349 set_bit(BEISCSI_HBA_ONLINE, &phba->state);
5350 __beiscsi_log(phba, KERN_INFO,
5351 "BM_%d : port online: 0x%lx\n", phba->state);
5352
5353 /* start hw_check timer and eqd_update work */
5354 schedule_delayed_work(&phba->eqd_update,
5355 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5356
5357 /**
5358 * Timer function gets modified for TPE detection.
5359 * Always reinit to do health check first.
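 * beiscsi_hw_health_check() switches hw_check.function to
 * beiscsi_hw_tpe_check() when a UE is seen on a UER-capable port,
 * so the handler is reset here before the timer is re-armed.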
5360 */
5361 phba->hw_check.function = beiscsi_hw_health_check;
5362 mod_timer(&phba->hw_check,
5363 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5364 return 0;
5365
5366 cleanup_port:
5367 for (i = 0; i < phba->num_cpus; i++) {
5368 pbe_eq = &phwi_context->be_eq[i];
5369 irq_poll_disable(&pbe_eq->iopoll);
5370 }
5371 hwi_cleanup_port(phba);
5372
5373 disable_msix:
5374 pci_free_irq_vectors(phba->pcidev);
5375 return ret;
5376 }
5377
5378 /*
5379 * beiscsi_disable_port()- Disable port and cleanup driver resources.
5380 * This is called in HBA error handling and driver removal.
5381 * @phba: Instance Priv structure
5382 * @unload: indicate driver is unloading
5383 *
5384 * Free the OS and HW resources held by the driver
5385 **/
5386 static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
5387 {
5388 struct hwi_context_memory *phwi_context;
5389 struct hwi_controller *phwi_ctrlr;
5390 struct be_eq_obj *pbe_eq;
5391 unsigned int i;
5392
5393 if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
5394 return;
5395
5396 phwi_ctrlr = phba->phwi_ctrlr;
5397 phwi_context = phwi_ctrlr->phwi_ctxt;
5398 hwi_disable_intr(phba);
5399 beiscsi_free_irqs(phba);
5400 pci_free_irq_vectors(phba->pcidev);
5401
5402 for (i = 0; i < phba->num_cpus; i++) {
5403 pbe_eq = &phwi_context->be_eq[i];
5404 irq_poll_disable(&pbe_eq->iopoll);
5405 }
5406 cancel_delayed_work_sync(&phba->eqd_update);
5407 cancel_work_sync(&phba->boot_work);
5408 /* WQ might still be running; cancel queued mcc_work if we are not unloading */
5409 if (!unload && beiscsi_hba_in_error(phba)) {
5410 pbe_eq = &phwi_context->be_eq[i];
5411 cancel_work_sync(&pbe_eq->mcc_work);
5412 }
5413 hwi_cleanup_port(phba);
5414 beiscsi_cleanup_port(phba);
5415 }
5416
5417 static void beiscsi_sess_work(struct work_struct *work)
5418 {
5419 struct beiscsi_hba *phba;
5420
5421 phba = container_of(work, struct beiscsi_hba, sess_work);
5422 /*
5423 * This work gets scheduled only in case of HBA error.
5424 * Old sessions are gone, so they need to be re-established.
5425 * iscsi_session_failure needs process context, hence this work.
5426 */
5427 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
5428 }
5429
5430 static void beiscsi_recover_port(struct work_struct *work)
5431 {
5432 struct beiscsi_hba *phba;
5433
5434 phba = container_of(work, struct beiscsi_hba, recover_port.work);
5435 beiscsi_disable_port(phba, 0);
5436 beiscsi_enable_port(phba);
5437 }
5438
5439 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
5440 pci_channel_state_t state)
5441 {
5442 struct beiscsi_hba *phba = NULL;
5443
5444 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5445 set_bit(BEISCSI_HBA_PCI_ERR, &phba->state);
5446
5447 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5448 "BM_%d : EEH error detected\n");
5449
5450 /* first stop UE detection when PCI error detected */
5451 del_timer_sync(&phba->hw_check);
5452 cancel_delayed_work_sync(&phba->recover_port);
5453
5454 /* sessions are no longer valid, so first fail the sessions */
5455 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
5456 beiscsi_disable_port(phba, 0);
5457
5458 if (state == pci_channel_io_perm_failure) {
5459 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5460 "BM_%d : EEH : State PERM Failure\n");
5461 return PCI_ERS_RESULT_DISCONNECT;
5462 }
5463
5464 pci_disable_device(pdev);
5465
5466 /* The error could cause the FW to trigger a flash debug dump.
5467 * Resetting the card while flash dump is in progress
5468 * can cause it not to recover; wait for it to finish.
5469 * Wait only for the first function as it is needed only once per
5470 * adapter.
5471 **/
5472 if (pdev->devfn == 0)
5473 ssleep(30);
5474
5475 return PCI_ERS_RESULT_NEED_RESET;
5476 }
5477
5478 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
5479 {
5480 struct beiscsi_hba *phba = NULL;
5481 int status = 0;
5482
5483 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5484
5485 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5486 "BM_%d : EEH Reset\n");
5487
5488 status = pci_enable_device(pdev);
5489 if (status)
5490 return PCI_ERS_RESULT_DISCONNECT;
5491
5492 pci_set_master(pdev);
5493 pci_set_power_state(pdev, PCI_D0);
5494 pci_restore_state(pdev);
5495
5496 status = beiscsi_check_fw_rdy(phba);
5497 if (status) {
5498 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5499 "BM_%d : EEH Reset Completed\n");
5500 } else {
5501 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5502 "BM_%d : EEH Reset Completion Failure\n");
5503 return PCI_ERS_RESULT_DISCONNECT;
5504 }
5505
5506 return PCI_ERS_RESULT_RECOVERED;
5507 }
5508
5509 static void beiscsi_eeh_resume(struct pci_dev *pdev)
5510 {
5511 struct beiscsi_hba *phba;
5512 int ret;
5513
5514 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5515 pci_save_state(pdev);
5516
5517 ret = beiscsi_enable_port(phba);
5518 if (ret)
5519 __beiscsi_log(phba, KERN_ERR,
5520 "BM_%d : AER EEH resume failed\n");
5521 }
5522
5523 static int beiscsi_dev_probe(struct pci_dev *pcidev,
5524 const struct pci_device_id *id)
5525 {
5526 struct hwi_context_memory *phwi_context;
5527 struct hwi_controller *phwi_ctrlr;
5528 struct beiscsi_hba *phba = NULL;
5529 struct be_eq_obj *pbe_eq;
5530 unsigned int s_handle;
5531 int ret, i;
5532
5533 ret = beiscsi_enable_pci(pcidev);
5534 if (ret < 0) {
5535 dev_err(&pcidev->dev,
5536 "beiscsi_dev_probe - Failed to enable pci device\n");
5537 return ret;
5538 }
5539
5540 phba = beiscsi_hba_alloc(pcidev);
5541 if (!phba) {
5542 dev_err(&pcidev->dev,
5543 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
5544 ret = -ENOMEM;
5545 goto disable_pci;
5546 }
5547
5548 pci_save_state(pcidev);
5549
5550 /* Initialize driver configuration parameters */
5551 beiscsi_hba_attrs_init(phba);
5552
5553 phba->mac_addr_set = false;
5554
5555 switch (pcidev->device) {
5556 case BE_DEVICE_ID1:
5557 case OC_DEVICE_ID1:
5558 case OC_DEVICE_ID2:
5559 phba->generation = BE_GEN2;
5560 phba->iotask_fn = beiscsi_iotask;
5561 dev_warn(&pcidev->dev,
5562 "Obsolete/Unsupported BE2 Adapter Family\n");
5563 break;
5564 case BE_DEVICE_ID2:
5565 case OC_DEVICE_ID3:
5566 phba->generation = BE_GEN3;
5567 phba->iotask_fn = beiscsi_iotask;
5568 break;
5569 case OC_SKH_ID1:
5570 phba->generation = BE_GEN4;
5571 phba->iotask_fn = beiscsi_iotask_v2;
5572 break;
5573 default:
5574 phba->generation = 0;
5575 }
5576
5577 ret = be_ctrl_init(phba, pcidev);
5578 if (ret) {
5579 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5580 "BM_%d : be_ctrl_init failed\n");
5581 goto free_hba;
5582 }
5583
5584 ret = beiscsi_init_sliport(phba);
5585 if (ret)
5586 goto free_hba;
5587
5588 spin_lock_init(&phba->io_sgl_lock);
5589 spin_lock_init(&phba->mgmt_sgl_lock);
5590 spin_lock_init(&phba->async_pdu_lock);
5591 ret = beiscsi_get_fw_config(&phba->ctrl, phba);
5592 if (ret != 0) {
5593 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5594 "BM_%d : Error getting fw config\n");
5595 goto free_port;
5596 }
5597 beiscsi_get_port_name(&phba->ctrl, phba);
5598
beiscsi_get_params(phba); 5599 beiscsi_set_host_data(phba); 5600 beiscsi_set_uer_feature(phba); 5601 5602 be2iscsi_enable_msix(phba); 5603 5604 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5605 "BM_%d : num_cpus = %d\n", 5606 phba->num_cpus); 5607 5608 phba->shost->max_id = phba->params.cxns_per_ctrl; 5609 phba->shost->can_queue = phba->params.ios_per_ctrl; 5610 ret = beiscsi_get_memory(phba); 5611 if (ret < 0) { 5612 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5613 "BM_%d : alloc host mem failed\n"); 5614 goto free_port; 5615 } 5616 5617 ret = beiscsi_init_port(phba); 5618 if (ret < 0) { 5619 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5620 "BM_%d : init port failed\n"); 5621 beiscsi_free_mem(phba); 5622 goto free_port; 5623 } 5624 5625 for (i = 0; i < MAX_MCC_CMD; i++) { 5626 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5627 phba->ctrl.mcc_tag[i] = i + 1; 5628 phba->ctrl.mcc_tag_status[i + 1] = 0; 5629 phba->ctrl.mcc_tag_available++; 5630 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, 5631 sizeof(struct be_dma_mem)); 5632 } 5633 5634 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 5635 5636 phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1, 5637 phba->shost->host_no); 5638 if (!phba->wq) { 5639 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5640 "BM_%d : beiscsi_dev_probe-" 5641 "Failed to allocate work queue\n"); 5642 ret = -ENOMEM; 5643 goto free_twq; 5644 } 5645 5646 INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work); 5647 5648 phwi_ctrlr = phba->phwi_ctrlr; 5649 phwi_context = phwi_ctrlr->phwi_ctxt; 5650 5651 for (i = 0; i < phba->num_cpus; i++) { 5652 pbe_eq = &phwi_context->be_eq[i]; 5653 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); 5654 } 5655 5656 i = (phba->pcidev->msix_enabled) ? i : 0; 5657 /* Work item for MCC handling */ 5658 pbe_eq = &phwi_context->be_eq[i]; 5659 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); 5660 5661 ret = beiscsi_init_irqs(phba); 5662 if (ret < 0) { 5663 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5664 "BM_%d : beiscsi_dev_probe-" 5665 "Failed to beiscsi_init_irqs\n"); 5666 goto disable_iopoll; 5667 } 5668 hwi_enable_intr(phba); 5669 5670 ret = iscsi_host_add(phba->shost, &phba->pcidev->dev); 5671 if (ret) 5672 goto free_irqs; 5673 5674 /* set online bit after port is operational */ 5675 set_bit(BEISCSI_HBA_ONLINE, &phba->state); 5676 __beiscsi_log(phba, KERN_INFO, 5677 "BM_%d : port online: 0x%lx\n", phba->state); 5678 5679 INIT_WORK(&phba->boot_work, beiscsi_boot_work); 5680 ret = beiscsi_boot_get_shandle(phba, &s_handle); 5681 if (ret > 0) { 5682 beiscsi_start_boot_work(phba, s_handle); 5683 /** 5684 * Set this bit after starting the work to let 5685 * probe handle it first. 5686 * ASYNC event can too schedule this work. 5687 */ 5688 set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state); 5689 } 5690 5691 beiscsi_iface_create_default(phba); 5692 schedule_delayed_work(&phba->eqd_update, 5693 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5694 5695 INIT_WORK(&phba->sess_work, beiscsi_sess_work); 5696 INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port); 5697 /** 5698 * Start UE detection here. UE before this will cause stall in probe 5699 * and eventually fail the probe. 
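 * beiscsi_hw_health_check() retargets this same timer to TPE checking
 * once a UE is detected, so probe always arms it with the plain
 * health-check handler first.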
	timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0);
	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_irqs:
	hwi_disable_intr(phba);
	beiscsi_free_irqs(phba);
disable_iopoll:
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
	destroy_workqueue(phba->wq);
free_twq:
	hwi_cleanup_port(phba);
	beiscsi_cleanup_port(phba);
	beiscsi_free_mem(phba);
free_port:
	dma_free_coherent(&phba->pcidev->dev,
			  phba->ctrl.mbox_mem_alloced.size,
			  phba->ctrl.mbox_mem_alloced.va,
			  phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
free_hba:
	pci_disable_msix(phba->pcidev);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_set_drvdata(pcidev, NULL);
disable_pci:
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
	return ret;
}

static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;

	phba = pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	/* first stop UE detection before unloading */
	del_timer_sync(&phba->hw_check);
	cancel_delayed_work_sync(&phba->recover_port);
	cancel_work_sync(&phba->sess_work);

	beiscsi_iface_destroy_default(phba);
	iscsi_host_remove(phba->shost, false);
	beiscsi_disable_port(phba, 1);

	/* after cancelling boot_work */
	iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);

	/* free all resources */
	destroy_workqueue(phba->wq);
	beiscsi_free_mem(phba);

	/* ctrl uninit */
	beiscsi_unmap_pci_function(phba);
	dma_free_coherent(&phba->pcidev->dev,
			  phba->ctrl.mbox_mem_alloced.size,
			  phba->ctrl.mbox_mem_alloced.va,
			  phba->ctrl.mbox_mem_alloced.dma);

	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_set_drvdata(pcidev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
}

static struct pci_error_handlers beiscsi_eeh_handlers = {
	.error_detected = beiscsi_eeh_err_detected,
	.slot_reset = beiscsi_eeh_reset,
	.resume = beiscsi_eeh_resume,
};

struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.unbind_conn = iscsi_conn_unbind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = beiscsi_attr_is_visible,
	.set_iface_param = beiscsi_iface_set_param,
	.get_iface_param = beiscsi_iface_get_param,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.bsg_request = beiscsi_bsg_request,
};

static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table,
	.err_handler = &beiscsi_eeh_handlers,
};

static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
		iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi transport.\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
	       &beiscsi_iscsi_transport);

	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}

static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);
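
/*
 * Load ordering note: the iSCSI transport is registered before the PCI
 * driver so the transport is in place by the time beiscsi_dev_probe()
 * runs for adapters already present; beiscsi_module_exit() unwinds in
 * the reverse order.
 */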