1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2005 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/pci.h> 23 #include <linux/interrupt.h> 24 25 #include <scsi/scsi.h> 26 #include <scsi/scsi_device.h> 27 #include <scsi/scsi_host.h> 28 #include <scsi/scsi_tcq.h> 29 #include <scsi/scsi_transport_fc.h> 30 31 #include "lpfc_version.h" 32 #include "lpfc_hw.h" 33 #include "lpfc_sli.h" 34 #include "lpfc_disc.h" 35 #include "lpfc_scsi.h" 36 #include "lpfc.h" 37 #include "lpfc_logmsg.h" 38 #include "lpfc_crtn.h" 39 40 #define LPFC_RESET_WAIT 2 41 #define LPFC_ABORT_WAIT 2 42 43 44 /* 45 * This routine allocates a scsi buffer, which contains all the necessary 46 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 47 * contains information to build the IOCB. The DMAable region contains 48 * memory for the FCP CMND, FCP RSP, and the inital BPL. In addition to 49 * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL 50 * and the BPL BDE is setup in the IOCB. 51 */ 52 static struct lpfc_scsi_buf * 53 lpfc_new_scsi_buf(struct lpfc_hba * phba) 54 { 55 struct lpfc_scsi_buf *psb; 56 struct ulp_bde64 *bpl; 57 IOCB_t *iocb; 58 dma_addr_t pdma_phys; 59 uint16_t iotag; 60 61 psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 62 if (!psb) 63 return NULL; 64 memset(psb, 0, sizeof (struct lpfc_scsi_buf)); 65 psb->scsi_hba = phba; 66 67 /* 68 * Get memory from the pci pool to map the virt space to pci bus space 69 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd, 70 * struct fcp_rsp and the number of bde's necessary to support the 71 * sg_tablesize. 72 */ 73 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL, 74 &psb->dma_handle); 75 if (!psb->data) { 76 kfree(psb); 77 return NULL; 78 } 79 80 /* Initialize virtual ptrs to dma_buf region. */ 81 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 82 83 /* Allocate iotag for psb->cur_iocbq. */ 84 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 85 if (iotag == 0) { 86 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 87 psb->data, psb->dma_handle); 88 kfree (psb); 89 return NULL; 90 } 91 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 92 93 psb->fcp_cmnd = psb->data; 94 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 95 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 96 sizeof(struct fcp_rsp); 97 98 /* Initialize local short-hand pointers. */ 99 bpl = psb->fcp_bpl; 100 pdma_phys = psb->dma_handle; 101 102 /* 103 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg 104 * list bdes. Initialize the first two and leave the rest for 105 * queuecommand. 106 */ 107 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys)); 108 bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys)); 109 bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd); 110 bpl->tus.f.bdeFlags = BUFF_USE_CMND; 111 bpl->tus.w = le32_to_cpu(bpl->tus.w); 112 bpl++; 113 114 /* Setup the physical region for the FCP RSP */ 115 pdma_phys += sizeof (struct fcp_cmnd); 116 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys)); 117 bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys)); 118 bpl->tus.f.bdeSize = sizeof (struct fcp_rsp); 119 bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV); 120 bpl->tus.w = le32_to_cpu(bpl->tus.w); 121 122 /* 123 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 124 * initialize it with all known data now. 125 */ 126 pdma_phys += (sizeof (struct fcp_rsp)); 127 iocb = &psb->cur_iocbq.iocb; 128 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 129 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys); 130 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys); 131 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); 132 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL; 133 iocb->ulpBdeCount = 1; 134 iocb->ulpClass = CLASS3; 135 136 return psb; 137 } 138 139 struct lpfc_scsi_buf* 140 lpfc_sli_get_scsi_buf(struct lpfc_hba * phba) 141 { 142 struct lpfc_scsi_buf * lpfc_cmd = NULL; 143 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; 144 145 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); 146 return lpfc_cmd; 147 } 148 149 static void 150 lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 151 { 152 /* 153 * There are only two special cases to consider. (1) the scsi command 154 * requested scatter-gather usage or (2) the scsi command allocated 155 * a request buffer, but did not request use_sg. There is a third 156 * case, but it does not require resource deallocation. 157 */ 158 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { 159 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, 160 psb->seg_cnt, psb->pCmd->sc_data_direction); 161 } else { 162 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { 163 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, 164 psb->pCmd->request_bufflen, 165 psb->pCmd->sc_data_direction); 166 } 167 } 168 169 psb->pCmd = NULL; 170 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); 171 } 172 173 static int 174 lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) 175 { 176 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 177 struct scatterlist *sgel = NULL; 178 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 179 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 180 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 181 dma_addr_t physaddr; 182 uint32_t i, num_bde = 0; 183 int datadir = scsi_cmnd->sc_data_direction; 184 int dma_error; 185 186 /* 187 * There are three possibilities here - use scatter-gather segment, use 188 * the single mapping, or neither. Start the lpfc command prep by 189 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 190 * data bde entry. 191 */ 192 bpl += 2; 193 if (scsi_cmnd->use_sg) { 194 /* 195 * The driver stores the segment count returned from pci_map_sg 196 * because this a count of dma-mappings used to map the use_sg 197 * pages. They are not guaranteed to be the same for those 198 * architectures that implement an IOMMU. 199 */ 200 sgel = (struct scatterlist *)scsi_cmnd->request_buffer; 201 lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel, 202 scsi_cmnd->use_sg, datadir); 203 if (lpfc_cmd->seg_cnt == 0) 204 return 1; 205 206 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 207 printk(KERN_ERR "%s: Too many sg segments from " 208 "dma_map_sg. Config %d, seg_cnt %d", 209 __FUNCTION__, phba->cfg_sg_seg_cnt, 210 lpfc_cmd->seg_cnt); 211 dma_unmap_sg(&phba->pcidev->dev, sgel, 212 lpfc_cmd->seg_cnt, datadir); 213 return 1; 214 } 215 216 /* 217 * The driver established a maximum scatter-gather segment count 218 * during probe that limits the number of sg elements in any 219 * single scsi command. Just run through the seg_cnt and format 220 * the bde's. 221 */ 222 for (i = 0; i < lpfc_cmd->seg_cnt; i++) { 223 physaddr = sg_dma_address(sgel); 224 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 225 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 226 bpl->tus.f.bdeSize = sg_dma_len(sgel); 227 if (datadir == DMA_TO_DEVICE) 228 bpl->tus.f.bdeFlags = 0; 229 else 230 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 231 bpl->tus.w = le32_to_cpu(bpl->tus.w); 232 bpl++; 233 sgel++; 234 num_bde++; 235 } 236 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) { 237 physaddr = dma_map_single(&phba->pcidev->dev, 238 scsi_cmnd->request_buffer, 239 scsi_cmnd->request_bufflen, 240 datadir); 241 dma_error = dma_mapping_error(physaddr); 242 if (dma_error) { 243 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 244 "%d:0718 Unable to dma_map_single " 245 "request_buffer: x%x\n", 246 phba->brd_no, dma_error); 247 return 1; 248 } 249 250 lpfc_cmd->nonsg_phys = physaddr; 251 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 252 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 253 bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen; 254 if (datadir == DMA_TO_DEVICE) 255 bpl->tus.f.bdeFlags = 0; 256 else 257 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 258 bpl->tus.w = le32_to_cpu(bpl->tus.w); 259 num_bde = 1; 260 bpl++; 261 } 262 263 /* 264 * Finish initializing those IOCB fields that are dependent on the 265 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly 266 * reinitialized since all iocb memory resources are used many times 267 * for transmit, receive, and continuation bpl's. 268 */ 269 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); 270 iocb_cmd->un.fcpi64.bdl.bdeSize += 271 (num_bde * sizeof (struct ulp_bde64)); 272 iocb_cmd->ulpBdeCount = 1; 273 iocb_cmd->ulpLe = 1; 274 fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen); 275 return 0; 276 } 277 278 static void 279 lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) 280 { 281 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 282 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 283 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 284 struct lpfc_hba *phba = lpfc_cmd->scsi_hba; 285 uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm; 286 uint32_t resp_info = fcprsp->rspStatus2; 287 uint32_t scsi_status = fcprsp->rspStatus3; 288 uint32_t host_status = DID_OK; 289 uint32_t rsplen = 0; 290 291 /* 292 * If this is a task management command, there is no 293 * scsi packet associated with this lpfc_cmd. The driver 294 * consumes it. 295 */ 296 if (fcpcmd->fcpCntl2) { 297 scsi_status = 0; 298 goto out; 299 } 300 301 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 302 "%d:0730 FCP command failed: RSP " 303 "Data: x%x x%x x%x x%x x%x x%x\n", 304 phba->brd_no, resp_info, scsi_status, 305 be32_to_cpu(fcprsp->rspResId), 306 be32_to_cpu(fcprsp->rspSnsLen), 307 be32_to_cpu(fcprsp->rspRspLen), 308 fcprsp->rspInfo3); 309 310 if (resp_info & RSP_LEN_VALID) { 311 rsplen = be32_to_cpu(fcprsp->rspRspLen); 312 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || 313 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { 314 host_status = DID_ERROR; 315 goto out; 316 } 317 } 318 319 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 320 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 321 if (snslen > SCSI_SENSE_BUFFERSIZE) 322 snslen = SCSI_SENSE_BUFFERSIZE; 323 324 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); 325 } 326 327 cmnd->resid = 0; 328 if (resp_info & RESID_UNDER) { 329 cmnd->resid = be32_to_cpu(fcprsp->rspResId); 330 331 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 332 "%d:0716 FCP Read Underrun, expected %d, " 333 "residual %d Data: x%x x%x x%x\n", phba->brd_no, 334 be32_to_cpu(fcpcmd->fcpDl), cmnd->resid, 335 fcpi_parm, cmnd->cmnd[0], cmnd->underflow); 336 337 /* 338 * The cmnd->underflow is the minimum number of bytes that must 339 * be transfered for this command. Provided a sense condition 340 * is not present, make sure the actual amount transferred is at 341 * least the underflow value or fail. 342 */ 343 if (!(resp_info & SNS_LEN_VALID) && 344 (scsi_status == SAM_STAT_GOOD) && 345 (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) { 346 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 347 "%d:0717 FCP command x%x residual " 348 "underrun converted to error " 349 "Data: x%x x%x x%x\n", phba->brd_no, 350 cmnd->cmnd[0], cmnd->request_bufflen, 351 cmnd->resid, cmnd->underflow); 352 353 host_status = DID_ERROR; 354 } 355 } else if (resp_info & RESID_OVER) { 356 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 357 "%d:0720 FCP command x%x residual " 358 "overrun error. Data: x%x x%x \n", 359 phba->brd_no, cmnd->cmnd[0], 360 cmnd->request_bufflen, cmnd->resid); 361 host_status = DID_ERROR; 362 363 /* 364 * Check SLI validation that all the transfer was actually done 365 * (fcpi_parm should be zero). Apply check only to reads. 366 */ 367 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && 368 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { 369 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 370 "%d:0734 FCP Read Check Error Data: " 371 "x%x x%x x%x x%x\n", phba->brd_no, 372 be32_to_cpu(fcpcmd->fcpDl), 373 be32_to_cpu(fcprsp->rspResId), 374 fcpi_parm, cmnd->cmnd[0]); 375 host_status = DID_ERROR; 376 cmnd->resid = cmnd->request_bufflen; 377 } 378 379 out: 380 cmnd->result = ScsiResult(host_status, scsi_status); 381 } 382 383 static void 384 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 385 struct lpfc_iocbq *pIocbOut) 386 { 387 struct lpfc_scsi_buf *lpfc_cmd = 388 (struct lpfc_scsi_buf *) pIocbIn->context1; 389 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 390 struct lpfc_nodelist *pnode = rdata->pnode; 391 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 392 unsigned long iflag; 393 394 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 395 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 396 397 if (lpfc_cmd->status) { 398 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 399 (lpfc_cmd->result & IOERR_DRVR_MASK)) 400 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 401 else if (lpfc_cmd->status >= IOSTAT_CNT) 402 lpfc_cmd->status = IOSTAT_DEFAULT; 403 404 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 405 "%d:0729 FCP cmd x%x failed <%d/%d> status: " 406 "x%x result: x%x Data: x%x x%x\n", 407 phba->brd_no, cmd->cmnd[0], cmd->device->id, 408 cmd->device->lun, lpfc_cmd->status, 409 lpfc_cmd->result, pIocbOut->iocb.ulpContext, 410 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 411 412 switch (lpfc_cmd->status) { 413 case IOSTAT_FCP_RSP_ERROR: 414 /* Call FCP RSP handler to determine result */ 415 lpfc_handle_fcp_err(lpfc_cmd); 416 break; 417 case IOSTAT_NPORT_BSY: 418 case IOSTAT_FABRIC_BSY: 419 cmd->result = ScsiResult(DID_BUS_BUSY, 0); 420 break; 421 default: 422 cmd->result = ScsiResult(DID_ERROR, 0); 423 break; 424 } 425 426 if ((pnode == NULL ) 427 || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 428 cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY); 429 } else { 430 cmd->result = ScsiResult(DID_OK, 0); 431 } 432 433 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 434 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 435 436 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 437 "%d:0710 Iodone <%d/%d> cmd %p, error x%x " 438 "SNS x%x x%x Data: x%x x%x\n", 439 phba->brd_no, cmd->device->id, 440 cmd->device->lun, cmd, cmd->result, 441 *lp, *(lp + 3), cmd->retries, cmd->resid); 442 } 443 444 cmd->scsi_done(cmd); 445 446 spin_lock_irqsave(phba->host->host_lock, iflag); 447 lpfc_release_scsi_buf(phba, lpfc_cmd); 448 spin_unlock_irqrestore(phba->host->host_lock, iflag); 449 } 450 451 static void 452 lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, 453 struct lpfc_nodelist *pnode) 454 { 455 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 456 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 457 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 458 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); 459 int datadir = scsi_cmnd->sc_data_direction; 460 461 lpfc_cmd->fcp_rsp->rspSnsLen = 0; 462 /* clear task management bits */ 463 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; 464 465 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 466 &lpfc_cmd->fcp_cmnd->fcp_lun); 467 468 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16); 469 470 if (scsi_cmnd->device->tagged_supported) { 471 switch (scsi_cmnd->tag) { 472 case HEAD_OF_QUEUE_TAG: 473 fcp_cmnd->fcpCntl1 = HEAD_OF_Q; 474 break; 475 case ORDERED_QUEUE_TAG: 476 fcp_cmnd->fcpCntl1 = ORDERED_Q; 477 break; 478 default: 479 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 480 break; 481 } 482 } else 483 fcp_cmnd->fcpCntl1 = 0; 484 485 /* 486 * There are three possibilities here - use scatter-gather segment, use 487 * the single mapping, or neither. Start the lpfc command prep by 488 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 489 * data bde entry. 490 */ 491 if (scsi_cmnd->use_sg) { 492 if (datadir == DMA_TO_DEVICE) { 493 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 494 iocb_cmd->un.fcpi.fcpi_parm = 0; 495 iocb_cmd->ulpPU = 0; 496 fcp_cmnd->fcpCntl3 = WRITE_DATA; 497 phba->fc4OutputRequests++; 498 } else { 499 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 500 iocb_cmd->ulpPU = PARM_READ_CHECK; 501 iocb_cmd->un.fcpi.fcpi_parm = 502 scsi_cmnd->request_bufflen; 503 fcp_cmnd->fcpCntl3 = READ_DATA; 504 phba->fc4InputRequests++; 505 } 506 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) { 507 if (datadir == DMA_TO_DEVICE) { 508 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 509 iocb_cmd->un.fcpi.fcpi_parm = 0; 510 iocb_cmd->ulpPU = 0; 511 fcp_cmnd->fcpCntl3 = WRITE_DATA; 512 phba->fc4OutputRequests++; 513 } else { 514 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 515 iocb_cmd->ulpPU = PARM_READ_CHECK; 516 iocb_cmd->un.fcpi.fcpi_parm = 517 scsi_cmnd->request_bufflen; 518 fcp_cmnd->fcpCntl3 = READ_DATA; 519 phba->fc4InputRequests++; 520 } 521 } else { 522 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; 523 iocb_cmd->un.fcpi.fcpi_parm = 0; 524 iocb_cmd->ulpPU = 0; 525 fcp_cmnd->fcpCntl3 = 0; 526 phba->fc4ControlRequests++; 527 } 528 529 /* 530 * Finish initializing those IOCB fields that are independent 531 * of the scsi_cmnd request_buffer 532 */ 533 piocbq->iocb.ulpContext = pnode->nlp_rpi; 534 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 535 piocbq->iocb.ulpFCP2Rcvy = 1; 536 537 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 538 piocbq->context1 = lpfc_cmd; 539 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 540 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout; 541 } 542 543 static int 544 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, 545 struct lpfc_scsi_buf *lpfc_cmd, 546 uint8_t task_mgmt_cmd) 547 { 548 struct lpfc_sli *psli; 549 struct lpfc_iocbq *piocbq; 550 IOCB_t *piocb; 551 struct fcp_cmnd *fcp_cmnd; 552 struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device; 553 struct lpfc_rport_data *rdata = scsi_dev->hostdata; 554 struct lpfc_nodelist *ndlp = rdata->pnode; 555 556 if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 557 return 0; 558 } 559 560 psli = &phba->sli; 561 piocbq = &(lpfc_cmd->cur_iocbq); 562 piocb = &piocbq->iocb; 563 564 fcp_cmnd = lpfc_cmd->fcp_cmnd; 565 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 566 &lpfc_cmd->fcp_cmnd->fcp_lun); 567 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 568 569 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 570 571 piocb->ulpContext = ndlp->nlp_rpi; 572 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 573 piocb->ulpFCP2Rcvy = 1; 574 } 575 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 576 577 /* ulpTimeout is only one byte */ 578 if (lpfc_cmd->timeout > 0xff) { 579 /* 580 * Do not timeout the command at the firmware level. 581 * The driver will provide the timeout mechanism. 582 */ 583 piocb->ulpTimeout = 0; 584 } else { 585 piocb->ulpTimeout = lpfc_cmd->timeout; 586 } 587 588 lpfc_cmd->rdata = rdata; 589 590 switch (task_mgmt_cmd) { 591 case FCP_LUN_RESET: 592 /* Issue LUN Reset to TGT <num> LUN <num> */ 593 lpfc_printf_log(phba, 594 KERN_INFO, 595 LOG_FCP, 596 "%d:0703 Issue LUN Reset to TGT %d LUN %d " 597 "Data: x%x x%x\n", 598 phba->brd_no, 599 scsi_dev->id, scsi_dev->lun, 600 ndlp->nlp_rpi, ndlp->nlp_flag); 601 602 break; 603 case FCP_ABORT_TASK_SET: 604 /* Issue Abort Task Set to TGT <num> LUN <num> */ 605 lpfc_printf_log(phba, 606 KERN_INFO, 607 LOG_FCP, 608 "%d:0701 Issue Abort Task Set to TGT %d LUN %d " 609 "Data: x%x x%x\n", 610 phba->brd_no, 611 scsi_dev->id, scsi_dev->lun, 612 ndlp->nlp_rpi, ndlp->nlp_flag); 613 614 break; 615 case FCP_TARGET_RESET: 616 /* Issue Target Reset to TGT <num> */ 617 lpfc_printf_log(phba, 618 KERN_INFO, 619 LOG_FCP, 620 "%d:0702 Issue Target Reset to TGT %d " 621 "Data: x%x x%x\n", 622 phba->brd_no, 623 scsi_dev->id, ndlp->nlp_rpi, 624 ndlp->nlp_flag); 625 break; 626 } 627 628 return (1); 629 } 630 631 static int 632 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba) 633 { 634 struct lpfc_iocbq *iocbq; 635 struct lpfc_iocbq *iocbqrsp; 636 int ret; 637 638 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); 639 if (!ret) 640 return FAILED; 641 642 lpfc_cmd->scsi_hba = phba; 643 iocbq = &lpfc_cmd->cur_iocbq; 644 iocbqrsp = lpfc_sli_get_iocbq(phba); 645 646 if (!iocbqrsp) 647 return FAILED; 648 649 ret = lpfc_sli_issue_iocb_wait(phba, 650 &phba->sli.ring[phba->sli.fcp_ring], 651 iocbq, iocbqrsp, lpfc_cmd->timeout); 652 if (ret != IOCB_SUCCESS) { 653 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 654 ret = FAILED; 655 } else { 656 ret = SUCCESS; 657 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4]; 658 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus; 659 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 660 (lpfc_cmd->result & IOERR_DRVR_MASK)) 661 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 662 } 663 664 lpfc_sli_release_iocbq(phba, iocbqrsp); 665 return ret; 666 } 667 668 const char * 669 lpfc_info(struct Scsi_Host *host) 670 { 671 struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0]; 672 int len; 673 static char lpfcinfobuf[384]; 674 675 memset(lpfcinfobuf,0,384); 676 if (phba && phba->pcidev){ 677 strncpy(lpfcinfobuf, phba->ModelDesc, 256); 678 len = strlen(lpfcinfobuf); 679 snprintf(lpfcinfobuf + len, 680 384-len, 681 " on PCI bus %02x device %02x irq %d", 682 phba->pcidev->bus->number, 683 phba->pcidev->devfn, 684 phba->pcidev->irq); 685 len = strlen(lpfcinfobuf); 686 if (phba->Port[0]) { 687 snprintf(lpfcinfobuf + len, 688 384-len, 689 " port %s", 690 phba->Port); 691 } 692 } 693 return lpfcinfobuf; 694 } 695 696 static int 697 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) 698 { 699 struct lpfc_hba *phba = 700 (struct lpfc_hba *) cmnd->device->host->hostdata[0]; 701 struct lpfc_sli *psli = &phba->sli; 702 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 703 struct lpfc_nodelist *ndlp = rdata->pnode; 704 struct lpfc_scsi_buf *lpfc_cmd; 705 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 706 int err; 707 708 err = fc_remote_port_chkready(rport); 709 if (err) { 710 cmnd->result = err; 711 goto out_fail_command; 712 } 713 714 /* 715 * Catch race where our node has transitioned, but the 716 * transport is still transitioning. 717 */ 718 if (!ndlp) { 719 cmnd->result = ScsiResult(DID_BUS_BUSY, 0); 720 goto out_fail_command; 721 } 722 lpfc_cmd = lpfc_sli_get_scsi_buf (phba); 723 if (lpfc_cmd == NULL) { 724 printk(KERN_WARNING "%s: No buffer available - list empty, " 725 "total count %d\n", __FUNCTION__, phba->total_scsi_bufs); 726 goto out_host_busy; 727 } 728 729 /* 730 * Store the midlayer's command structure for the completion phase 731 * and complete the command initialization. 732 */ 733 lpfc_cmd->pCmd = cmnd; 734 lpfc_cmd->rdata = rdata; 735 lpfc_cmd->timeout = 0; 736 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 737 cmnd->scsi_done = done; 738 739 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 740 if (err) 741 goto out_host_busy_free_buf; 742 743 lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp); 744 745 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 746 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 747 if (err) 748 goto out_host_busy_free_buf; 749 return 0; 750 751 out_host_busy_free_buf: 752 lpfc_release_scsi_buf(phba, lpfc_cmd); 753 cmnd->host_scribble = NULL; 754 out_host_busy: 755 return SCSI_MLQUEUE_HOST_BUSY; 756 757 out_fail_command: 758 done(cmnd); 759 return 0; 760 } 761 762 static int 763 __lpfc_abort_handler(struct scsi_cmnd *cmnd) 764 { 765 struct lpfc_hba *phba = 766 (struct lpfc_hba *)cmnd->device->host->hostdata[0]; 767 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; 768 struct lpfc_iocbq *iocb; 769 struct lpfc_iocbq *abtsiocb; 770 struct lpfc_scsi_buf *lpfc_cmd; 771 IOCB_t *cmd, *icmd; 772 unsigned int loop_count = 0; 773 int ret = SUCCESS; 774 775 776 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 777 BUG_ON(!lpfc_cmd); 778 779 /* 780 * If pCmd field of the corresponding lpfc_scsi_buf structure 781 * points to a different SCSI command, then the driver has 782 * already completed this command, but the midlayer did not 783 * see the completion before the eh fired. Just return 784 * SUCCESS. 785 */ 786 iocb = &lpfc_cmd->cur_iocbq; 787 if (lpfc_cmd->pCmd != cmnd) 788 goto out; 789 790 BUG_ON(iocb->context1 != lpfc_cmd); 791 792 abtsiocb = lpfc_sli_get_iocbq(phba); 793 if (abtsiocb == NULL) { 794 ret = FAILED; 795 goto out; 796 } 797 798 /* 799 * The scsi command can not be in txq and it is in flight because the 800 * pCmd is still pointig at the SCSI command we have to abort. There 801 * is no need to search the txcmplq. Just send an abort to the FW. 802 */ 803 804 cmd = &iocb->iocb; 805 icmd = &abtsiocb->iocb; 806 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 807 icmd->un.acxri.abortContextTag = cmd->ulpContext; 808 icmd->un.acxri.abortIoTag = cmd->ulpIoTag; 809 810 icmd->ulpLe = 1; 811 icmd->ulpClass = cmd->ulpClass; 812 if (phba->hba_state >= LPFC_LINK_UP) 813 icmd->ulpCommand = CMD_ABORT_XRI_CN; 814 else 815 icmd->ulpCommand = CMD_CLOSE_XRI_CN; 816 817 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 818 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { 819 lpfc_sli_release_iocbq(phba, abtsiocb); 820 ret = FAILED; 821 goto out; 822 } 823 824 /* Wait for abort to complete */ 825 while (lpfc_cmd->pCmd == cmnd) 826 { 827 spin_unlock_irq(phba->host->host_lock); 828 set_current_state(TASK_UNINTERRUPTIBLE); 829 schedule_timeout(LPFC_ABORT_WAIT*HZ); 830 spin_lock_irq(phba->host->host_lock); 831 if (++loop_count 832 > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT) 833 break; 834 } 835 836 if (lpfc_cmd->pCmd == cmnd) { 837 ret = FAILED; 838 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 839 "%d:0748 abort handler timed out waiting for " 840 "abort to complete: ret %#x, ID %d, LUN %d, " 841 "snum %#lx\n", 842 phba->brd_no, ret, cmnd->device->id, 843 cmnd->device->lun, cmnd->serial_number); 844 } 845 846 out: 847 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 848 "%d:0749 SCSI layer issued abort device: ret %#x, " 849 "ID %d, LUN %d, snum %#lx\n", 850 phba->brd_no, ret, cmnd->device->id, 851 cmnd->device->lun, cmnd->serial_number); 852 853 return ret; 854 } 855 856 static int 857 lpfc_abort_handler(struct scsi_cmnd *cmnd) 858 { 859 int rc; 860 spin_lock_irq(cmnd->device->host->host_lock); 861 rc = __lpfc_abort_handler(cmnd); 862 spin_unlock_irq(cmnd->device->host->host_lock); 863 return rc; 864 } 865 866 static int 867 __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) 868 { 869 struct Scsi_Host *shost = cmnd->device->host; 870 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0]; 871 struct lpfc_scsi_buf *lpfc_cmd; 872 struct lpfc_iocbq *iocbq, *iocbqrsp; 873 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 874 struct lpfc_nodelist *pnode = rdata->pnode; 875 int ret = FAILED; 876 int cnt, loopcnt; 877 878 /* 879 * If target is not in a MAPPED state, delay the reset until 880 * target is rediscovered or nodev timeout expires. 881 */ 882 while ( 1 ) { 883 if (!pnode) 884 break; 885 886 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { 887 spin_unlock_irq(phba->host->host_lock); 888 set_current_state(TASK_UNINTERRUPTIBLE); 889 schedule_timeout( HZ/2); 890 spin_lock_irq(phba->host->host_lock); 891 } 892 if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE)) 893 break; 894 } 895 896 lpfc_cmd = lpfc_sli_get_scsi_buf (phba); 897 if (lpfc_cmd == NULL) 898 goto out; 899 900 lpfc_cmd->pCmd = cmnd; 901 lpfc_cmd->timeout = 60; 902 lpfc_cmd->scsi_hba = phba; 903 904 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); 905 if (!ret) 906 goto out_free_scsi_buf; 907 908 iocbq = &lpfc_cmd->cur_iocbq; 909 910 /* get a buffer for this IOCB command response */ 911 iocbqrsp = lpfc_sli_get_iocbq(phba); 912 if (iocbqrsp == NULL) 913 goto out_free_scsi_buf; 914 915 ret = lpfc_sli_issue_iocb_wait(phba, 916 &phba->sli.ring[phba->sli.fcp_ring], 917 iocbq, iocbqrsp, lpfc_cmd->timeout); 918 if (ret == IOCB_SUCCESS) 919 ret = SUCCESS; 920 921 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4]; 922 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus; 923 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT) 924 if (lpfc_cmd->result & IOERR_DRVR_MASK) 925 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 926 927 /* 928 * All outstanding txcmplq I/Os should have been aborted by the target. 929 * Unfortunately, some targets do not abide by this forcing the driver 930 * to double check. 931 */ 932 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 933 cmnd->device->id, cmnd->device->lun, 0, 934 LPFC_CTX_LUN); 935 936 loopcnt = 0; 937 while((cnt = lpfc_sli_sum_iocb(phba, 938 &phba->sli.ring[phba->sli.fcp_ring], 939 cmnd->device->id, cmnd->device->lun, 940 LPFC_CTX_LUN))) { 941 spin_unlock_irq(phba->host->host_lock); 942 set_current_state(TASK_UNINTERRUPTIBLE); 943 schedule_timeout(LPFC_RESET_WAIT*HZ); 944 spin_lock_irq(phba->host->host_lock); 945 946 if (++loopcnt 947 > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT) 948 break; 949 } 950 951 if (cnt) { 952 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 953 "%d:0719 LUN Reset I/O flush failure: cnt x%x\n", 954 phba->brd_no, cnt); 955 ret = FAILED; 956 } 957 958 lpfc_sli_release_iocbq(phba, iocbqrsp); 959 960 out_free_scsi_buf: 961 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 962 "%d:0713 SCSI layer issued LUN reset (%d, %d) " 963 "Data: x%x x%x x%x\n", 964 phba->brd_no, lpfc_cmd->pCmd->device->id, 965 lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status, 966 lpfc_cmd->result); 967 lpfc_release_scsi_buf(phba, lpfc_cmd); 968 out: 969 return ret; 970 } 971 972 static int 973 lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) 974 { 975 int rc; 976 spin_lock_irq(cmnd->device->host->host_lock); 977 rc = __lpfc_reset_lun_handler(cmnd); 978 spin_unlock_irq(cmnd->device->host->host_lock); 979 return rc; 980 } 981 982 /* 983 * Note: midlayer calls this function with the host_lock held 984 */ 985 static int 986 __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) 987 { 988 struct Scsi_Host *shost = cmnd->device->host; 989 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0]; 990 struct lpfc_nodelist *ndlp = NULL; 991 int match; 992 int ret = FAILED, i, err_count = 0; 993 int cnt, loopcnt; 994 unsigned int midlayer_id = 0; 995 struct lpfc_scsi_buf * lpfc_cmd; 996 997 lpfc_cmd = lpfc_sli_get_scsi_buf (phba); 998 if (lpfc_cmd == NULL) 999 goto out; 1000 1001 /* The lpfc_cmd storage is reused. Set all loop invariants. */ 1002 lpfc_cmd->timeout = 60; 1003 lpfc_cmd->pCmd = cmnd; 1004 lpfc_cmd->scsi_hba = phba; 1005 1006 /* 1007 * Since the driver manages a single bus device, reset all 1008 * targets known to the driver. Should any target reset 1009 * fail, this routine returns failure to the midlayer. 1010 */ 1011 midlayer_id = cmnd->device->id; 1012 for (i = 0; i < MAX_FCP_TARGET; i++) { 1013 /* Search the mapped list for this target ID */ 1014 match = 0; 1015 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1016 if ((i == ndlp->nlp_sid) && ndlp->rport) { 1017 match = 1; 1018 break; 1019 } 1020 } 1021 if (!match) 1022 continue; 1023 1024 lpfc_cmd->pCmd->device->id = i; 1025 lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data; 1026 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba); 1027 if (ret != SUCCESS) { 1028 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 1029 "%d:0713 Bus Reset on target %d failed\n", 1030 phba->brd_no, i); 1031 err_count++; 1032 } 1033 } 1034 1035 cmnd->device->id = midlayer_id; 1036 loopcnt = 0; 1037 while((cnt = lpfc_sli_sum_iocb(phba, 1038 &phba->sli.ring[phba->sli.fcp_ring], 1039 0, 0, LPFC_CTX_HOST))) { 1040 spin_unlock_irq(phba->host->host_lock); 1041 set_current_state(TASK_UNINTERRUPTIBLE); 1042 schedule_timeout(LPFC_RESET_WAIT*HZ); 1043 spin_lock_irq(phba->host->host_lock); 1044 1045 if (++loopcnt 1046 > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT) 1047 break; 1048 } 1049 1050 if (cnt) { 1051 /* flush all outstanding commands on the host */ 1052 i = lpfc_sli_abort_iocb(phba, 1053 &phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0, 1054 LPFC_CTX_HOST); 1055 1056 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 1057 "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n", 1058 phba->brd_no, cnt, i); 1059 } 1060 1061 if (cnt == 0) 1062 ret = SUCCESS; 1063 else 1064 ret = FAILED; 1065 1066 lpfc_release_scsi_buf(phba, lpfc_cmd); 1067 lpfc_printf_log(phba, 1068 KERN_ERR, 1069 LOG_FCP, 1070 "%d:0714 SCSI layer issued Bus Reset Data: x%x\n", 1071 phba->brd_no, ret); 1072 out: 1073 return ret; 1074 } 1075 1076 static int 1077 lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) 1078 { 1079 int rc; 1080 spin_lock_irq(cmnd->device->host->host_lock); 1081 rc = __lpfc_reset_bus_handler(cmnd); 1082 spin_unlock_irq(cmnd->device->host->host_lock); 1083 return rc; 1084 } 1085 1086 static int 1087 lpfc_slave_alloc(struct scsi_device *sdev) 1088 { 1089 struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0]; 1090 struct lpfc_scsi_buf *scsi_buf = NULL; 1091 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1092 uint32_t total = 0, i; 1093 uint32_t num_to_alloc = 0; 1094 unsigned long flags; 1095 1096 if (!rport || fc_remote_port_chkready(rport)) 1097 return -ENXIO; 1098 1099 sdev->hostdata = rport->dd_data; 1100 1101 /* 1102 * Populate the cmds_per_lun count scsi_bufs into this host's globally 1103 * available list of scsi buffers. Don't allocate more than the 1104 * HBA limit conveyed to the midlayer via the host structure. The 1105 * formula accounts for the lun_queue_depth + error handlers + 1 1106 * extra. This list of scsi bufs exists for the lifetime of the driver. 1107 */ 1108 total = phba->total_scsi_bufs; 1109 num_to_alloc = phba->cfg_lun_queue_depth + 2; 1110 if (total >= phba->cfg_hba_queue_depth) { 1111 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1112 "%d:0704 At limitation of %d preallocated " 1113 "command buffers\n", phba->brd_no, total); 1114 return 0; 1115 } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) { 1116 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1117 "%d:0705 Allocation request of %d command " 1118 "buffers will exceed max of %d. Reducing " 1119 "allocation request to %d.\n", phba->brd_no, 1120 num_to_alloc, phba->cfg_hba_queue_depth, 1121 (phba->cfg_hba_queue_depth - total)); 1122 num_to_alloc = phba->cfg_hba_queue_depth - total; 1123 } 1124 1125 for (i = 0; i < num_to_alloc; i++) { 1126 scsi_buf = lpfc_new_scsi_buf(phba); 1127 if (!scsi_buf) { 1128 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1129 "%d:0706 Failed to allocate command " 1130 "buffer\n", phba->brd_no); 1131 break; 1132 } 1133 1134 spin_lock_irqsave(phba->host->host_lock, flags); 1135 phba->total_scsi_bufs++; 1136 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list); 1137 spin_unlock_irqrestore(phba->host->host_lock, flags); 1138 } 1139 return 0; 1140 } 1141 1142 static int 1143 lpfc_slave_configure(struct scsi_device *sdev) 1144 { 1145 struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0]; 1146 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1147 1148 if (sdev->tagged_supported) 1149 scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth); 1150 else 1151 scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth); 1152 1153 /* 1154 * Initialize the fc transport attributes for the target 1155 * containing this scsi device. Also note that the driver's 1156 * target pointer is stored in the starget_data for the 1157 * driver's sysfs entry point functions. 1158 */ 1159 rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5; 1160 1161 return 0; 1162 } 1163 1164 static void 1165 lpfc_slave_destroy(struct scsi_device *sdev) 1166 { 1167 sdev->hostdata = NULL; 1168 return; 1169 } 1170 1171 struct scsi_host_template lpfc_template = { 1172 .module = THIS_MODULE, 1173 .name = LPFC_DRIVER_NAME, 1174 .info = lpfc_info, 1175 .queuecommand = lpfc_queuecommand, 1176 .eh_abort_handler = lpfc_abort_handler, 1177 .eh_device_reset_handler= lpfc_reset_lun_handler, 1178 .eh_bus_reset_handler = lpfc_reset_bus_handler, 1179 .slave_alloc = lpfc_slave_alloc, 1180 .slave_configure = lpfc_slave_configure, 1181 .slave_destroy = lpfc_slave_destroy, 1182 .this_id = -1, 1183 .sg_tablesize = LPFC_SG_SEG_CNT, 1184 .cmd_per_lun = LPFC_CMD_PER_LUN, 1185 .use_clustering = ENABLE_CLUSTERING, 1186 .shost_attrs = lpfc_host_attrs, 1187 .max_sectors = 0xFFFF, 1188 }; 1189