/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/*
 * This function is called with no lock held when there is a resource
 * error in driver or in firmware.
 */
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
	     WORKER_RAMP_DOWN_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return;
}
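
/*
 * Note on the queue depth ramping scheme used in this file: this routine
 * and lpfc_rampup_queue_depth() only record the event (num_rsrc_err /
 * num_cmd_success) and set a WORKER_RAMP_{DOWN,UP}_QUEUE flag for the
 * worker thread; the actual scsi_adjust_queue_depth() calls happen later
 * in lpfc_ramp_down_queue_handler() / lpfc_ramp_up_queue_handler().
 * The ramp-down handler scales each queue depth by the observed error
 * fraction.  As an illustration (numbers made up): with queue_depth 32,
 * num_rsrc_err 1 and num_cmd_success 7, the cut is 32 * 1 / 8 = 4, so the
 * new depth becomes 28.
 */
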
/*
 * This function is called with no lock held when there is a successful
 * SCSI command completion.
 */
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
	     WORKER_RAMP_UP_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
							  new_queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
			}
		}
	lpfc_destroy_vport_work_array(vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
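
/*
 * Rough sketch of the per-command DMA area set up by lpfc_new_scsi_buf()
 * below (one pci_pool allocation of cfg_sg_dma_buf_size bytes):
 *
 *   +-----------------+----------------+-------------------------------+
 *   | struct fcp_cmnd | struct fcp_rsp | BPL: 2 + cfg_sg_seg_cnt BDEs  |
 *   +-----------------+----------------+-------------------------------+
 *
 * The first two BPL entries point back at the fcp_cmnd and fcp_rsp
 * regions; the remaining entries are filled in per I/O by
 * lpfc_scsi_prep_dma_buf().
 */
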
/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
		       sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}
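
/*
 * The buffers built above are pooled on phba->lpfc_scsi_buf_list (the
 * list is populated from lpfc_slave_alloc()).  lpfc_get_scsi_buf() and
 * lpfc_release_scsi_buf() below simply pop and push entries under
 * scsi_buf_list_lock; clearing pCmd on release is what marks a buffer as
 * no longer owning a midlayer command (the abort handler checks pCmd to
 * detect an already-completed command).
 */
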
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			num_bde++;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
	return 0;
}
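
/*
 * For illustration of the BDE accounting above: the BPL always carries
 * two fixed entries (FCP_CMND and FCP_RSP) plus one entry per mapped
 * data segment, so a 4-segment dma_map_sg() result yields
 * bdl.bdeSize = (2 + 4) * sizeof(struct ulp_bde64), while a command with
 * no data keeps just the two fixed entries.  (Example segment count is
 * made up.)
 */
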
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}

static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0716 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm,
				 cmnd->cmnd[0], cmnd->underflow);

		/*
		 * If there is an underrun, check whether the underrun
		 * reported by the storage array matches the underrun
		 * reported by the HBA.  If they differ, a frame was dropped.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "0735 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "0717 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd),
					 cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0720 FCP command x%x residual overrun error. "
				 "Data: x%x x%x \n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "0734 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0729 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	result = cmd->result;
	sdev = cmd->device;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, sdev);

	if (!result && pnode != NULL &&
	   ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	   ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	   (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
		}
	}

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
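
/*
 * lpfc_scsi_prep_cmnd() below fills in the wire-level FCP_CMND and the
 * IOCB for a midlayer command: the LUN is encoded with int_to_scsilun(),
 * 16 bytes of CDB are copied, a task attribute is chosen from the tag
 * type, and the IOCB command (IWRITE64 / IREAD64 / ICMND64) is picked
 * from the data direction.  For reads the expected transfer length is
 * also placed in fcpi_parm, which is what the read-check logic in
 * lpfc_handle_fcp_err() compares against.
 */
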
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}
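
/*
 * Task management requests prepared above carry their code (e.g.
 * FCP_TARGET_RESET) in fcpCntl2 and move no SCSI data.  When such a
 * request times out at the SLI layer, the callers below install
 * lpfc_tskmgmt_def_cmpl() as the completion handler so the scsi buffer
 * is still released whenever the IOCB eventually completes.
 */
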
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	if (!rdata->pnode)
		return FAILED;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		if (ret == IOCB_TIMEDOUT)
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring (phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
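
/*
 * A note on polled completion mode: when cfg_poll has
 * ENABLE_FCP_RING_POLLING set, FCP completions are harvested by
 * lpfc_sli_poll_fcp_ring(), either inline from lpfc_queuecommand() or
 * from the fcp_poll_timer above; with DISABLE_FCP_RING_INT the timer is
 * rearmed only while the FCP ring's txcmplq still has outstanding
 * commands.
 */
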
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_adjust_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}
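
/*
 * Each eh_* entry point below first calls lpfc_block_error_handler(),
 * which sleeps while the fc_rport is FC_PORTSTATE_BLOCKED so that error
 * handling does not race with the transport's devloss processing.
 */
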
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	unsigned int loop_count = 0;
	int ret = SUCCESS;

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in the txq, and it is in flight because
	 * the pCmd is still pointing at the SCSI command we have to abort.
	 * There is no need to search the txcmplq.  Just send an abort to the
	 * FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring (phba);

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd)
	{
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring (phba);

		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
		if (++loop_count
		    > (2 * vport->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}

static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int iocb_status = IOCB_SUCCESS;
	int cnt, loopcnt;

	lpfc_block_error_handler(cmnd);
	loopcnt = 0;
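	/*
	 * In outline, the wait loop below polls the node state every 500 ms
	 * and gives up after (2 * cfg_devloss_tmo + 1) iterations, i.e.
	 * roughly the devloss window.
	 */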
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	while (1) {
		if (!pnode)
			goto out;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			loopcnt++;
			rdata = cmnd->device->hostdata;
			if (!rdata ||
			    (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))) {
				lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
						 "0721 LUN Reset rport "
						 "failure: cnt x%x rdata x%p\n",
						 loopcnt, rdata);
				goto out;
			}
			pnode = rdata->pnode;
			if (!pnode)
				goto out;
		}
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
					   FCP_TARGET_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	iocb_status = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);

	if (iocb_status == IOCB_TIMEDOUT)
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	if (iocb_status == IOCB_SUCCESS)
		ret = SUCCESS;
	else
		ret = iocb_status;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_LUN);
	loopcnt = 0;
	while(cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}

 out_free_scsi_buf:
	if (iocb_status != IOCB_TIMEDOUT) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 cmd_status, cmd_result);
 out:
	return ret;
}

static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf * lpfc_cmd;

	lpfc_block_error_handler(cmnd);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    i == ndlp->nlp_sid &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
					  cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			err_count++;
			break;
		}
	}

	if (ret != IOCB_TIMEDOUT)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	if (err_count == 0)
		ret = SUCCESS;
	else
		ret = FAILED;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while(cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
 out:
	return ret;
}

static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
" 1368 "Reducing allocation request to %d.\n", 1369 num_to_alloc, phba->cfg_hba_queue_depth, 1370 (phba->cfg_hba_queue_depth - total)); 1371 num_to_alloc = phba->cfg_hba_queue_depth - total; 1372 } 1373 1374 for (i = 0; i < num_to_alloc; i++) { 1375 scsi_buf = lpfc_new_scsi_buf(vport); 1376 if (!scsi_buf) { 1377 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1378 "0706 Failed to allocate " 1379 "command buffer\n"); 1380 break; 1381 } 1382 1383 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags); 1384 phba->total_scsi_bufs++; 1385 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list); 1386 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags); 1387 } 1388 return 0; 1389 } 1390 1391 static int 1392 lpfc_slave_configure(struct scsi_device *sdev) 1393 { 1394 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 1395 struct lpfc_hba *phba = vport->phba; 1396 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1397 1398 if (sdev->tagged_supported) 1399 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth); 1400 else 1401 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth); 1402 1403 /* 1404 * Initialize the fc transport attributes for the target 1405 * containing this scsi device. Also note that the driver's 1406 * target pointer is stored in the starget_data for the 1407 * driver's sysfs entry point functions. 1408 */ 1409 rport->dev_loss_tmo = vport->cfg_devloss_tmo; 1410 1411 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 1412 lpfc_sli_poll_fcp_ring(phba); 1413 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 1414 lpfc_poll_rearm_timer(phba); 1415 } 1416 1417 return 0; 1418 } 1419 1420 static void 1421 lpfc_slave_destroy(struct scsi_device *sdev) 1422 { 1423 sdev->hostdata = NULL; 1424 return; 1425 } 1426 1427 1428 struct scsi_host_template lpfc_template = { 1429 .module = THIS_MODULE, 1430 .name = LPFC_DRIVER_NAME, 1431 .info = lpfc_info, 1432 .queuecommand = lpfc_queuecommand, 1433 .eh_abort_handler = lpfc_abort_handler, 1434 .eh_device_reset_handler= lpfc_device_reset_handler, 1435 .eh_bus_reset_handler = lpfc_bus_reset_handler, 1436 .slave_alloc = lpfc_slave_alloc, 1437 .slave_configure = lpfc_slave_configure, 1438 .slave_destroy = lpfc_slave_destroy, 1439 .scan_finished = lpfc_scan_finished, 1440 .this_id = -1, 1441 .sg_tablesize = LPFC_SG_SEG_CNT, 1442 .cmd_per_lun = LPFC_CMD_PER_LUN, 1443 .use_clustering = ENABLE_CLUSTERING, 1444 .shost_attrs = lpfc_hba_attrs, 1445 .max_sectors = 0xFFFF, 1446 }; 1447 1448 struct scsi_host_template lpfc_vport_template = { 1449 .module = THIS_MODULE, 1450 .name = LPFC_DRIVER_NAME, 1451 .info = lpfc_info, 1452 .queuecommand = lpfc_queuecommand, 1453 .eh_abort_handler = lpfc_abort_handler, 1454 .eh_device_reset_handler= lpfc_device_reset_handler, 1455 .eh_bus_reset_handler = lpfc_bus_reset_handler, 1456 .slave_alloc = lpfc_slave_alloc, 1457 .slave_configure = lpfc_slave_configure, 1458 .slave_destroy = lpfc_slave_destroy, 1459 .scan_finished = lpfc_scan_finished, 1460 .this_id = -1, 1461 .sg_tablesize = LPFC_SG_SEG_CNT, 1462 .cmd_per_lun = LPFC_CMD_PER_LUN, 1463 .use_clustering = ENABLE_CLUSTERING, 1464 .shost_attrs = lpfc_vport_attrs, 1465 .max_sectors = 0xFFFF, 1466 }; 1467