/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2


static inline void
lpfc_block_requests(struct lpfc_hba * phba)
{
	down(&phba->hba_can_block);
	scsi_block_requests(phba->host);
}

static inline void
lpfc_unblock_requests(struct lpfc_hba * phba)
{
	scsi_unblock_requests(phba->host);
	up(&phba->hba_can_block);
}

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
	psb->scsi_hba = phba;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
		       sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	unsigned long iflag = 0;
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
			     psb->seg_cnt, psb->pCmd->sc_data_direction);
	} else {
		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
					 psb->pCmd->request_bufflen,
					 psb->pCmd->sc_data_direction);
		}
	}

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int datadir = scsi_cmnd->sc_data_direction;
	int dma_error;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_cmnd->use_sg) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
					       scsi_cmnd->use_sg, datadir);
		if (lpfc_cmd->seg_cnt == 0)
			return 1;

		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			dma_unmap_sg(&phba->pcidev->dev, sgel,
				     lpfc_cmd->seg_cnt, datadir);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 */
		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			sgel++;
			num_bde++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		physaddr = dma_map_single(&phba->pcidev->dev,
					  scsi_cmnd->request_buffer,
					  scsi_cmnd->request_bufflen,
					  datadir);
		dma_error = dma_mapping_error(physaddr);
		if (dma_error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0718 Unable to dma_map_single "
					"request_buffer: x%x\n",
					phba->brd_no, dma_error);
			return 1;
		}

		lpfc_cmd->nonsg_phys = physaddr;
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = 0;
		else
			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		num_bde = 1;
		bpl++;
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
	return 0;
}

static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
	uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0730 FCP command failed: RSP "
			"Data: x%x x%x x%x x%x x%x x%x\n",
			phba->brd_no, resp_info, scsi_status,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}

	cmnd->resid = 0;
	if (resp_info & RESID_UNDER) {
		cmnd->resid = be32_to_cpu(fcprsp->rspResId);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n", phba->brd_no,
					cmnd->cmnd[0], cmnd->request_bufflen,
					cmnd->resid, cmnd->underflow);

			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x \n",
				phba->brd_no, cmnd->cmnd[0],
				cmnd->request_bufflen, cmnd->resid);
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		cmnd->resid = cmnd->request_bufflen;
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
				"x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, cmd->cmnd[0], cmd->device->id,
				cmd->device->lun, lpfc_cmd->status,
				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(lpfc_cmd);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
				"SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries, cmd->resid);
	}

	result = cmd->result;
	sdev = cmd->device;
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result && pnode != NULL &&
	    ((jiffies - pnode->last_ramp_up_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
							MSG_ORDERED_TAG,
							tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
							MSG_SIMPLE_TAG,
							tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0711 detected queue full - lun queue depth "
				" adjusted to %d.\n", phba->brd_no, depth);
		}
	}

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_cmnd->use_sg) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
	struct lpfc_rport_data *rdata = scsi_dev->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	psli = &phba->sli;
	piocbq = &(lpfc_cmd->cur_iocbq);
	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	lpfc_cmd->rdata = rdata;

	switch (task_mgmt_cmd) {
	case FCP_LUN_RESET:
		/* Issue LUN Reset to TGT <num> LUN <num> */
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0703 Issue LUN Reset to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);

		break;
	case FCP_ABORT_TASK_SET:
		/* Issue Abort Task Set to TGT <num> LUN <num> */
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0701 Issue Abort Task Set to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);

		break;
	case FCP_TARGET_RESET:
		/* Issue Target Reset to TGT <num> */
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0702 Issue Target Reset to TGT %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, ndlp->nlp_rpi,
				ndlp->nlp_flag);
		break;
	}

	return (1);
}

static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	lpfc_cmd->scsi_hba = phba;
	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		ret = FAILED;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long iflag;

	spin_lock_irqsave(phba->host->host_lock, iflag);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *) cmnd->device->host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0707 driver's buffer pool is empty, "
				"IO busied\n", phba->brd_no);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}


static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	unsigned int loop_count = 0;
	int ret = SUCCESS;

	lpfc_block_requests(phba);
	spin_lock_irq(shost->host_lock);

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort.  There
	 * is no need to search the txcmplq.  Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (phba->hba_state >= LPFC_LINK_UP)
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd) {
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring(phba);

		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);
		if (++loop_count
		    > (2 * phba->cfg_nodev_tmo) / LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0748 abort handler timed out waiting for "
				"abort to complete: ret %#x, ID %d, LUN %d, "
				"snum %#lx\n",
				phba->brd_no, ret, cmnd->device->id,
				cmnd->device->lun, cmnd->serial_number);
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI Layer I/O Abort Request "
			"Status x%x ID %d LUN %d snum %#lx\n",
			phba->brd_no, ret, cmnd->device->id,
			cmnd->device->lun, cmnd->serial_number);

	spin_unlock_irq(shost->host_lock);
	lpfc_unblock_requests(phba);

	return ret;
}

static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int cnt, loopcnt;

	lpfc_block_requests(phba);
	spin_lock_irq(shost->host_lock);
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or nodev timeout expires.
	 */
	while (1) {
		if (!pnode)
			break;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			spin_lock_irq(phba->host->host_lock);
		}
		if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret == IOCB_SUCCESS)
		ret = SUCCESS;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	lpfc_release_scsi_buf(phba, lpfc_cmd);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(phba,
				    &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    0, LPFC_CTX_LUN);
	loopcnt = 0;
	while (cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					cmnd->device->id, cmnd->device->lun,
					LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
		ret = FAILED;
	}

 out_free_scsi_buf:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
			"Data: x%x x%x x%x\n",
			phba->brd_no, cmnd->device->id, cmnd->device->lun,
			ret, cmd_status, cmd_result);

 out:
	spin_unlock_irq(shost->host_lock);
	lpfc_unblock_requests(phba);
	return ret;
}

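/*
 * Bus reset handler.  The driver presents a single FC bus, so issue an FCP
 * target reset to every mapped target and then wait for the outstanding I/O
 * on the FCP ring to be flushed before reporting the result to the midlayer.
 */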
static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	unsigned int midlayer_id = 0;
	struct lpfc_scsi_buf * lpfc_cmd;

	lpfc_block_requests(phba);
	spin_lock_irq(shost->host_lock);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	midlayer_id = cmnd->device->id;
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		/* Search the mapped list for this target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
			if ((i == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		lpfc_cmd->pCmd->device->id = i;
		lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0713 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
		}
	}

	if (err_count == 0)
		ret = SUCCESS;

	lpfc_release_scsi_buf(phba, lpfc_cmd);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cmnd->device->id = midlayer_id;
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while (cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
			phba->brd_no, cnt, i);
		ret = FAILED;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);
 out:
	spin_unlock_irq(shost->host_lock);
	lpfc_unblock_requests(phba);
	return ret;
}

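/*
 * slave_alloc entry point.  Called when the midlayer creates a scsi_device;
 * bind the device to its remote port data and grow the global pool of
 * pre-allocated lpfc_scsi_buf structures without exceeding the HBA queue
 * depth.
 */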
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the
	 * driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = phba->cfg_lun_queue_depth + 2;
	if (total >= phba->cfg_hba_queue_depth) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0704 At limitation of %d preallocated "
				"command buffers\n", phba->brd_no, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0705 Allocation request of %d command "
				"buffers will exceed max of %d.  Reducing "
				"allocation request to %d.\n", phba->brd_no,
				num_to_alloc, phba->cfg_hba_queue_depth,
				(phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(phba);
		if (!scsi_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0706 Failed to allocate command "
					"buffer\n", phba->brd_no);
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_reset_lun_handler,
	.eh_bus_reset_handler	= lpfc_reset_bus_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
	.max_sectors		= 0xFFFF,
};