/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2


/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
	psb->scsi_hba = phba;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
		       sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}
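/*
 * Illustrative sketch (not driver code): the single pci_pool allocation
 * made above is carved into three contiguous regions.  Assuming the pool
 * was created with cfg_sg_dma_buf_size bytes per buffer, the layout is:
 *
 *	psb->data
 *	+-----------------+----------------+-----------------------------+
 *	| struct fcp_cmnd | struct fcp_rsp | BPL: (2 + cfg_sg_seg_cnt)   |
 *	|                 |                | struct ulp_bde64 entries    |
 *	+-----------------+----------------+-----------------------------+
 *
 * A hypothetical sanity check mirroring that assumption would be:
 *
 *	size_t needed = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
 *			(2 + phba->cfg_sg_seg_cnt) *
 *			sizeof(struct ulp_bde64);
 *	BUG_ON(needed > phba->cfg_sg_dma_buf_size);
 */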
static void
lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
{
	struct lpfc_hba *phba = psb->scsi_hba;

	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
			     psb->seg_cnt, psb->pCmd->sc_data_direction);
	} else {
		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
					 psb->pCmd->request_bufflen,
					 psb->pCmd->sc_data_direction);
		}
	}

	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int datadir = scsi_cmnd->sc_data_direction;
	int dma_error;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_cmnd->use_sg) {
		/*
		 * The driver stores the segment count returned from
		 * dma_map_sg because this is a count of dma-mappings used to
		 * map the use_sg pages.  They are not guaranteed to be the
		 * same for those architectures that implement an IOMMU.
		 */
		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
					       scsi_cmnd->use_sg, datadir);
		if (lpfc_cmd->seg_cnt == 0)
			return 1;

		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			dma_unmap_sg(&phba->pcidev->dev, sgel,
				     lpfc_cmd->seg_cnt, datadir);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 */
		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			sgel++;
			num_bde++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		physaddr = dma_map_single(&phba->pcidev->dev,
					  scsi_cmnd->request_buffer,
					  scsi_cmnd->request_bufflen,
					  datadir);
		dma_error = dma_mapping_error(physaddr);
		if (dma_error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0718 Unable to dma_map_single "
					"request_buffer: x%x\n",
					phba->brd_no, dma_error);
			return 1;
		}

		lpfc_cmd->nonsg_phys = physaddr;
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = 0;
		else
			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		num_bde = 1;
		bpl++;
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
	return 0;
}
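/*
 * Worked example (illustrative only): a request that dma_map_sg() collapses
 * into two data segments leaves num_bde == 2 above, so the IOCB advertises
 *
 *	bdl.bdeSize = (2 + 2) * sizeof(struct ulp_bde64);
 *
 * i.e. four BPL entries: FCP_CMND, FCP_RSP, and two data BDEs.  With the
 * 12-byte ulp_bde64 (two 32-bit address words plus the 32-bit tus word),
 * that comes to 48 bytes.
 */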
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
	uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;

	/*
	 * If this is a task management command, there is no scsi packet
	 * associated with this lpfc_cmd.  The driver consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0730 FCP command failed: RSP "
			"Data: x%x x%x x%x x%x x%x x%x\n",
			phba->brd_no, resp_info, scsi_status,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}

	cmnd->resid = 0;
	if (resp_info & RESID_UNDER) {
		cmnd->resid = be32_to_cpu(fcprsp->rspResId);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n",
				phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

		/*
		 * The cmnd->underflow is the minimum number of bytes that
		 * must be transferred for this command.  Provided a sense
		 * condition is not present, make sure the actual amount
		 * transferred is at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n", phba->brd_no,
					cmnd->cmnd[0], cmnd->request_bufflen,
					cmnd->resid, cmnd->underflow);

			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0720 FCP command x%x residual "
				"overrun error.  Data: x%x x%x\n",
				phba->brd_no, cmnd->cmnd[0],
				cmnd->request_bufflen, cmnd->resid);
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		cmnd->resid = cmnd->request_bufflen;
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}
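/*
 * Worked example (illustrative): a READ with request_bufflen = 8192 and
 * cmnd->underflow = 8192 that completes with RESID_UNDER and rspResId = 512
 * transferred only 7680 bytes.  Since 8192 - 512 < 8192, scsi_status is
 * GOOD, and no sense data accompanies the response, the underrun above is
 * converted to DID_ERROR; had the target also returned sense data, the
 * midlayer would instead judge the shortfall from the sense information.
 */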
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long iflag;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
				"x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, cmd->cmnd[0], cmd->device->id,
				cmd->device->lun, lpfc_cmd->status,
				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(lpfc_cmd);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (pnode) {
			if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
				cmd->result = ScsiResult(DID_BUS_BUSY,
							 SAM_STAT_BUSY);
		} else {
			cmd->result = ScsiResult(DID_NO_CONNECT, 0);
		}
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
				"SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries, cmd->resid);
	}

	spin_lock_irqsave(phba->host->host_lock, iflag);
	lpfc_free_scsi_buf(lpfc_cmd);
	cmd->host_scribble = NULL;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);

	cmd->scsi_done(cmd);
}
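/*
 * A minimal sketch of what the ScsiResult() values above expand to,
 * assuming the usual lpfc definition of the macro as
 * ((host_code << 16) | scsi_code):
 *
 *	ScsiResult(DID_OK, 0)                   == 0x00000000
 *	ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY) == (DID_BUS_BUSY << 16) | 0x08
 *
 * The midlayer later pulls the host portion back out with host_byte(),
 * which is why the host and SCSI statuses are packed into one word.
 */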
static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment,
	 * use the single mapping, or neither.  Pick the IOCB command, the
	 * read-check parameter, and the FCP direction bits accordingly.
	 */
	if (scsi_cmnd->use_sg) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer.
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
	struct lpfc_rport_data *rdata = scsi_dev->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == 0) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	psli = &phba->sli;
	piocbq = &(lpfc_cmd->cur_iocbq);
	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	lpfc_cmd->rdata = rdata;

	switch (task_mgmt_cmd) {
	case FCP_LUN_RESET:
		/* Issue LUN Reset to TGT <num> LUN <num> */
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0703 Issue LUN Reset to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no, scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);
		break;
	case FCP_ABORT_TASK_SET:
		/* Issue Abort Task Set to TGT <num> LUN <num> */
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0701 Issue Abort Task Set to TGT %d "
				"LUN %d Data: x%x x%x\n",
				phba->brd_no, scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);
		break;
	case FCP_TARGET_RESET:
		/* Issue Target Reset to TGT <num> */
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0702 Issue Target Reset to TGT %d "
				"Data: x%x x%x\n",
				phba->brd_no, scsi_dev->id, ndlp->nlp_rpi,
				ndlp->nlp_flag);
		break;
	}

	return (1);
}
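/*
 * Worked example (illustrative): ulpTimeout is a single byte, so a task
 * management timeout of, say, 300 seconds (0x12c) cannot be expressed to
 * the firmware.  The clamp above sets ulpTimeout to 0 (firmware timer
 * disabled) and leaves enforcement to the driver; a 60-second timeout
 * (0x3c) fits in the byte and is passed through unchanged.
 */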
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp = NULL;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	int ret;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	lpfc_cmd->scsi_hba = phba;
	iocbq = &lpfc_cmd->cur_iocbq;
	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
	if (!iocbqrsp)
		return FAILED;
	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

	iocbq->iocb_flag |= LPFC_IO_POLL;
	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
			&phba->sli.ring[phba->sli.fcp_ring],
			iocbq, SLI_IOCB_HIGH_PRIORITY,
			iocbqrsp,
			lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		ret = FAILED;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the
	 * target.  Unfortunately, some targets do not abide by this,
	 * forcing the driver to double-check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    lpfc_cmd->pCmd->device->id,
			    lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);

	/* Return response IOCB to free list. */
	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
	return ret;
}

static void
lpfc_scsi_cmd_iocb_cleanup(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			   struct lpfc_iocbq *pIocbOut)
{
	unsigned long iflag;
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	lpfc_free_scsi_buf(lpfc_cmd);
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static void
lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
				struct lpfc_iocbq *pIocbIn,
				struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *ml_cmd =
		((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;

	lpfc_scsi_cmd_iocb_cleanup(phba, pIocbIn, pIocbOut);
	ml_cmd->host_scribble = NULL;
}
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}
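/*
 * Hypothetical output (illustrative; the exact text depends on the
 * adapter's ModelDesc and PCI placement):
 *
 *	"Emulex LP10000 2Gb PCI-X Fibre Channel Adapter on PCI bus 02
 *	 device 08 irq 17 port 1"
 *
 * The SCSI midlayer presents the .info() string to userspace (e.g. via
 * /proc/scsi), so it is kept in a static buffer that remains valid after
 * lpfc_info() returns.
 */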
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *) cmnd->device->host->hostdata[0];
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	int err = 0;

	/*
	 * The target pointer is guaranteed not to be NULL because the driver
	 * only clears the device->hostdata field in lpfc_slave_destroy.  This
	 * approach guarantees no further IO calls on this target.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
		goto out_fail_command;
	}

	/*
	 * A Fibre Channel target is present and functioning only when the
	 * node state is MAPPED.  Any other state is a failure.
	 */
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
			cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
			goto out_fail_command;
		} else if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
			cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
			goto out_fail_command;
		}
		/*
		 * The device is most likely recovered and the driver
		 * needs a bit more time to finish.  Ask the midlayer
		 * to retry.
		 */
		goto out_host_busy;
	}

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL) {
		printk(KERN_WARNING "%s: No buffer available - list empty, "
		       "total count %d\n", __FUNCTION__,
		       phba->total_scsi_bufs);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;
	return 0;

 out_host_busy_free_buf:
	lpfc_free_scsi_buf(lpfc_cmd);
	cmnd->host_scribble = NULL;
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}
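/*
 * Sketch of the three queuecommand outcomes (illustrative summary, not
 * driver code):
 *
 *	return 0 after done(cmnd)      - command failed immediately
 *	                                 (DID_NO_CONNECT / DID_BUS_BUSY)
 *	return 0 after issue_iocb      - command accepted; completion
 *	                                 arrives later in
 *	                                 lpfc_scsi_cmd_iocb_cmpl
 *	return SCSI_MLQUEUE_HOST_BUSY  - no resources or node in flux;
 *	                                 the midlayer requeues and retries
 */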
static int
__lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *)cmnd->device->host->hostdata[0];
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_iocbq *abtsiocb = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	IOCB_t *cmd, *icmd;
	unsigned long snum;
	unsigned int id, lun;
	unsigned int loop_count = 0;
	int ret = IOCB_SUCCESS;

	/*
	 * If the host_scribble data area is NULL, then the driver has already
	 * completed this command, but the midlayer did not see the completion
	 * before the eh fired.  Just return SUCCESS.
	 */
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd)
		return SUCCESS;

	/* save these now since lpfc_cmd can be freed */
	id = lpfc_cmd->pCmd->device->id;
	lun = lpfc_cmd->pCmd->device->lun;
	snum = lpfc_cmd->pCmd->serial_number;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		cmd = &iocb->iocb;
		if (iocb->context1 != lpfc_cmd)
			continue;

		list_del_init(&iocb->list);
		pring->txq_cnt--;
		if (!iocb->iocb_cmpl) {
			list_add_tail(&iocb->list, lpfc_iocb_list);
		} else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
		}

		goto out;
	}

	list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
	if (abtsiocb == NULL)
		return FAILED;

	memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));

	/*
	 * The scsi command was not in the txq.  Check the txcmplq and if it
	 * is found, send an abort to the FW.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != lpfc_cmd)
			continue;

		iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
		cmd = &iocb->iocb;
		icmd = &abtsiocb->iocb;
		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
		icmd->un.acxri.abortContextTag = cmd->ulpContext;
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

		icmd->ulpLe = 1;
		icmd->ulpClass = cmd->ulpClass;
		if (phba->hba_state >= LPFC_LINK_UP)
			icmd->ulpCommand = CMD_ABORT_XRI_CN;
		else
			icmd->ulpCommand = CMD_CLOSE_XRI_CN;

		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
								IOCB_ERROR) {
			list_add_tail(&abtsiocb->list, lpfc_iocb_list);
			ret = IOCB_ERROR;
			break;
		}

		/* Wait for abort to complete */
		while (cmnd->host_scribble) {
			spin_unlock_irq(phba->host->host_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(LPFC_ABORT_WAIT * HZ);
			spin_lock_irq(phba->host->host_lock);
			if (++loop_count
			    > (2 * phba->cfg_nodev_tmo) / LPFC_ABORT_WAIT)
				break;
		}

		if (cmnd->host_scribble) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0748 abort handler timed "
					"out waiting for abort to "
					"complete.  Data: "
					"x%x x%x x%x x%lx\n",
					phba->brd_no, ret, id, lun, snum);
			cmnd->host_scribble = NULL;
			iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
			ret = IOCB_ERROR;
		}

		break;
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI layer issued abort device "
			"Data: x%x x%x x%x x%lx\n",
			phba->brd_no, ret, id, lun, snum);

	return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	int rc;

	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_abort_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}
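/*
 * Worked timing example (illustrative): with LPFC_ABORT_WAIT == 2 and a
 * hypothetical cfg_nodev_tmo of 30 seconds, the wait loop above sleeps in
 * 2-second slices and gives up after (2 * 30) / 2 == 30 iterations, i.e.
 * roughly 60 seconds (twice the nodev timeout) before declaring that the
 * abort timed out.
 */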
static int
__lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret = FAILED;
	int cnt, loopcnt;

	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or nodev timeout expires.
	 */
	while (1) {
		if (!pnode)
			break;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ / 2);
			spin_lock_irq(phba->host->host_lock);
		}
		if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
			break;
	}

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

	iocbq->iocb_flag |= LPFC_IO_POLL;
	iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;

	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
			&phba->sli.ring[psli->fcp_ring],
			iocbq, 0, iocbqrsp, 60);
	if (ret == IOCB_SUCCESS)
		ret = SUCCESS;

	lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
	lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
	if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
		if (lpfc_cmd->result & IOERR_DRVR_MASK)
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the
	 * target.  Unfortunately, some targets do not abide by this,
	 * forcing the driver to double-check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    cmnd->device->id, cmnd->device->lun, 0,
			    LPFC_CTX_LUN);

	loopcnt = 0;
	while ((cnt = lpfc_sli_sum_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN))) {
		spin_unlock_irq(phba->host->host_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(LPFC_RESET_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
	}

	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);

 out_free_scsi_buf:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
			"Data: x%x x%x x%x\n",
			phba->brd_no, lpfc_cmd->pCmd->device->id,
			lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
			lpfc_cmd->result);
	lpfc_free_scsi_buf(lpfc_cmd);
 out:
	return ret;
}

static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	int rc;

	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_reset_lun_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}
/*
 * Note: this function is entered with the host_lock held, taken by the
 * lpfc_reset_bus_handler wrapper below.
 */
static int
__lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	unsigned int midlayer_id = 0;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	midlayer_id = cmnd->device->id;
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		/* Search the mapped list for this target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
			if ((i == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		lpfc_cmd->pCmd->device->id = i;
		lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0713 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
		}
	}

	cmnd->device->id = midlayer_id;
	loopcnt = 0;
	while ((cnt = lpfc_sli_sum_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST))) {
		spin_unlock_irq(phba->host->host_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(LPFC_RESET_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		/* flush all outstanding commands on the host */
		i = lpfc_sli_abort_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
				LPFC_CTX_HOST);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0715 Bus Reset I/O flush failure: "
			"cnt x%x left x%x\n",
			phba->brd_no, cnt, i);
	}

	if (!err_count)
		ret = SUCCESS;

	lpfc_free_scsi_buf(lpfc_cmd);
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);
 out:
	return ret;
}

static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	int rc;

	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_reset_bus_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match = 0;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;
	struct list_head *listp;
	struct list_head *node_list[6];

	/*
	 * Store the target pointer in the scsi_device hostdata pointer
	 * provided the driver has already discovered the target id.
	 */

	/* Search the nlp lists other than unmap_list for this target ID */
	node_list[0] = &phba->fc_npr_list;
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_prli_list;
	node_list[3] = &phba->fc_reglogin_list;
	node_list[4] = &phba->fc_adisc_list;
	node_list[5] = &phba->fc_plogi_list;

	for (i = 0; i < 6 && !match; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;
		list_for_each_entry(ndlp, listp, nlp_listp) {
			if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
	}

	if (!match)
		return -ENXIO;

	sdev->hostdata = ndlp->rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  Note
	 * that this list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = LPFC_CMD_PER_LUN;
	if (total >= phba->cfg_hba_queue_depth) {
		printk(KERN_WARNING "%s, At config limitation of "
		       "%d allocated scsi_bufs\n", __FUNCTION__, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_get_scsi_buf(phba);
		if (!scsi_buf) {
			printk(KERN_ERR "%s, failed to allocate "
			       "scsi_buf\n", __FUNCTION__);
			break;
		}

		spin_lock_irqsave(phba->host->host_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(phba->host->host_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_reset_lun_handler,
	.eh_bus_reset_handler	= lpfc_reset_bus_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
	.max_sectors		= 0xFFFF,
};
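/*
 * Illustrative sizing example for lpfc_slave_alloc() above, assuming a
 * hypothetical LPFC_CMD_PER_LUN of 3 and cfg_hba_queue_depth of 512: each
 * newly discovered scsi_device adds up to 3 scsi_bufs to the shared pool
 * until the 512-buffer HBA ceiling is reached; a device added when
 * total_scsi_bufs == 510 gets only 2, and later devices get none.
 */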