1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 #include <linux/pci.h> 24 #include <linux/slab.h> 25 #include <linux/interrupt.h> 26 #include <linux/export.h> 27 #include <linux/delay.h> 28 #include <asm/unaligned.h> 29 #include <linux/t10-pi.h> 30 #include <linux/crc-t10dif.h> 31 #include <net/checksum.h> 32 33 #include <scsi/scsi.h> 34 #include <scsi/scsi_device.h> 35 #include <scsi/scsi_eh.h> 36 #include <scsi/scsi_host.h> 37 #include <scsi/scsi_tcq.h> 38 #include <scsi/scsi_transport_fc.h> 39 40 #include "lpfc_version.h" 41 #include "lpfc_hw4.h" 42 #include "lpfc_hw.h" 43 #include "lpfc_sli.h" 44 #include "lpfc_sli4.h" 45 #include "lpfc_nl.h" 46 #include "lpfc_disc.h" 47 #include "lpfc.h" 48 #include "lpfc_scsi.h" 49 #include "lpfc_logmsg.h" 50 #include "lpfc_crtn.h" 51 #include "lpfc_vport.h" 52 53 #define LPFC_RESET_WAIT 2 54 #define LPFC_ABORT_WAIT 2 55 56 int _dump_buf_done = 1; 57 58 static char *dif_op_str[] = { 59 "PROT_NORMAL", 60 "PROT_READ_INSERT", 61 "PROT_WRITE_STRIP", 62 "PROT_READ_STRIP", 63 "PROT_WRITE_INSERT", 64 "PROT_READ_PASS", 65 "PROT_WRITE_PASS", 66 }; 67 68 struct scsi_dif_tuple { 69 __be16 guard_tag; /* Checksum */ 70 __be16 app_tag; /* Opaque storage */ 71 __be32 ref_tag; /* Target LBA or indirect LBA */ 72 }; 73 74 static struct lpfc_rport_data * 75 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) 76 { 77 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; 78 79 if (vport->phba->cfg_fof) 80 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; 81 else 82 return (struct lpfc_rport_data *)sdev->hostdata; 83 } 84 85 static void 86 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 87 static void 88 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 89 static int 90 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); 91 92 static void 93 lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) 94 { 95 void *src, *dst; 96 struct scatterlist *sgde = scsi_sglist(cmnd); 97 98 if (!_dump_buf_data) { 99 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 100 "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n", 101 __func__); 102 return; 103 } 104 105 106 if (!sgde) { 107 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 108 "9051 BLKGRD: ERROR: data scatterlist is null\n"); 109 return; 110 } 111 112 dst = (void *) _dump_buf_data; 113 while (sgde) { 114 src = sg_virt(sgde); 115 memcpy(dst, src, sgde->length); 116 dst += sgde->length; 117 sgde = sg_next(sgde); 118 } 119 } 120 121 static void 122 lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) 123 { 124 void *src, *dst; 125 struct scatterlist *sgde = scsi_prot_sglist(cmnd); 126 127 if (!_dump_buf_dif) { 128 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 129 "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n", 130 __func__); 131 return; 132 } 133 134 if (!sgde) { 135 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 136 "9053 BLKGRD: ERROR: prot scatterlist is null\n"); 137 return; 138 } 139 140 dst = _dump_buf_dif; 141 while (sgde) { 142 src = sg_virt(sgde); 143 memcpy(dst, src, sgde->length); 144 dst += sgde->length; 145 sgde = sg_next(sgde); 146 } 147 } 148 149 static inline unsigned 150 lpfc_cmd_blksize(struct scsi_cmnd *sc) 151 { 152 return sc->device->sector_size; 153 } 154 155 #define LPFC_CHECK_PROTECT_GUARD 1 156 #define LPFC_CHECK_PROTECT_REF 2 157 static inline unsigned 158 lpfc_cmd_protect(struct scsi_cmnd *sc, int flag) 159 { 160 return 1; 161 } 162 163 static inline unsigned 164 lpfc_cmd_guard_csum(struct scsi_cmnd *sc) 165 { 166 if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF) 167 return 0; 168 if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP) 169 return 1; 170 return 0; 171 } 172 173 /** 174 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. 175 * @phba: Pointer to HBA object. 176 * @lpfc_cmd: lpfc scsi command object pointer. 177 * 178 * This function is called from the lpfc_prep_task_mgmt_cmd function to 179 * set the last bit in the response sge entry. 180 **/ 181 static void 182 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, 183 struct lpfc_scsi_buf *lpfc_cmd) 184 { 185 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 186 if (sgl) { 187 sgl += 1; 188 sgl->word2 = le32_to_cpu(sgl->word2); 189 bf_set(lpfc_sli4_sge_last, sgl, 1); 190 sgl->word2 = cpu_to_le32(sgl->word2); 191 } 192 } 193 194 /** 195 * lpfc_update_stats - Update statistical data for the command completion 196 * @phba: Pointer to HBA object. 197 * @lpfc_cmd: lpfc scsi command object pointer. 198 * 199 * This function is called when there is a command completion and this 200 * function updates the statistical data for the command completion. 201 **/ 202 static void 203 lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 204 { 205 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 206 struct lpfc_nodelist *pnode = rdata->pnode; 207 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 208 unsigned long flags; 209 struct Scsi_Host *shost = cmd->device->host; 210 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 211 unsigned long latency; 212 int i; 213 214 if (cmd->result) 215 return; 216 217 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time); 218 219 spin_lock_irqsave(shost->host_lock, flags); 220 if (!vport->stat_data_enabled || 221 vport->stat_data_blocked || 222 !pnode || 223 !pnode->lat_data || 224 (phba->bucket_type == LPFC_NO_BUCKET)) { 225 spin_unlock_irqrestore(shost->host_lock, flags); 226 return; 227 } 228 229 if (phba->bucket_type == LPFC_LINEAR_BUCKET) { 230 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/ 231 phba->bucket_step; 232 /* check array subscript bounds */ 233 if (i < 0) 234 i = 0; 235 else if (i >= LPFC_MAX_BUCKET_COUNT) 236 i = LPFC_MAX_BUCKET_COUNT - 1; 237 } else { 238 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++) 239 if (latency <= (phba->bucket_base + 240 ((1<<i)*phba->bucket_step))) 241 break; 242 } 243 244 pnode->lat_data[i].cmd_count++; 245 spin_unlock_irqrestore(shost->host_lock, flags); 246 } 247 248 /** 249 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 250 * @phba: The Hba for which this call is being executed. 251 * 252 * This routine is called when there is resource error in driver or firmware. 253 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine 254 * posts at most 1 event each second. This routine wakes up worker thread of 255 * @phba to process WORKER_RAM_DOWN_EVENT event. 256 * 257 * This routine should be called with no lock held. 258 **/ 259 void 260 lpfc_rampdown_queue_depth(struct lpfc_hba *phba) 261 { 262 unsigned long flags; 263 uint32_t evt_posted; 264 unsigned long expires; 265 266 spin_lock_irqsave(&phba->hbalock, flags); 267 atomic_inc(&phba->num_rsrc_err); 268 phba->last_rsrc_error_time = jiffies; 269 270 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL; 271 if (time_after(expires, jiffies)) { 272 spin_unlock_irqrestore(&phba->hbalock, flags); 273 return; 274 } 275 276 phba->last_ramp_down_time = jiffies; 277 278 spin_unlock_irqrestore(&phba->hbalock, flags); 279 280 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 281 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE; 282 if (!evt_posted) 283 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; 284 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 285 286 if (!evt_posted) 287 lpfc_worker_wake_up(phba); 288 return; 289 } 290 291 /** 292 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler 293 * @phba: The Hba for which this call is being executed. 294 * 295 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker 296 * thread.This routine reduces queue depth for all scsi device on each vport 297 * associated with @phba. 298 **/ 299 void 300 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) 301 { 302 struct lpfc_vport **vports; 303 struct Scsi_Host *shost; 304 struct scsi_device *sdev; 305 unsigned long new_queue_depth; 306 unsigned long num_rsrc_err, num_cmd_success; 307 int i; 308 309 num_rsrc_err = atomic_read(&phba->num_rsrc_err); 310 num_cmd_success = atomic_read(&phba->num_cmd_success); 311 312 /* 313 * The error and success command counters are global per 314 * driver instance. If another handler has already 315 * operated on this error event, just exit. 316 */ 317 if (num_rsrc_err == 0) 318 return; 319 320 vports = lpfc_create_vport_work_array(phba); 321 if (vports != NULL) 322 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 323 shost = lpfc_shost_from_vport(vports[i]); 324 shost_for_each_device(sdev, shost) { 325 new_queue_depth = 326 sdev->queue_depth * num_rsrc_err / 327 (num_rsrc_err + num_cmd_success); 328 if (!new_queue_depth) 329 new_queue_depth = sdev->queue_depth - 1; 330 else 331 new_queue_depth = sdev->queue_depth - 332 new_queue_depth; 333 scsi_change_queue_depth(sdev, new_queue_depth); 334 } 335 } 336 lpfc_destroy_vport_work_array(phba, vports); 337 atomic_set(&phba->num_rsrc_err, 0); 338 atomic_set(&phba->num_cmd_success, 0); 339 } 340 341 /** 342 * lpfc_scsi_dev_block - set all scsi hosts to block state 343 * @phba: Pointer to HBA context object. 344 * 345 * This function walks vport list and set each SCSI host to block state 346 * by invoking fc_remote_port_delete() routine. This function is invoked 347 * with EEH when device's PCI slot has been permanently disabled. 348 **/ 349 void 350 lpfc_scsi_dev_block(struct lpfc_hba *phba) 351 { 352 struct lpfc_vport **vports; 353 struct Scsi_Host *shost; 354 struct scsi_device *sdev; 355 struct fc_rport *rport; 356 int i; 357 358 vports = lpfc_create_vport_work_array(phba); 359 if (vports != NULL) 360 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 361 shost = lpfc_shost_from_vport(vports[i]); 362 shost_for_each_device(sdev, shost) { 363 rport = starget_to_rport(scsi_target(sdev)); 364 fc_remote_port_delete(rport); 365 } 366 } 367 lpfc_destroy_vport_work_array(phba, vports); 368 } 369 370 /** 371 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec 372 * @vport: The virtual port for which this call being executed. 373 * @num_to_allocate: The requested number of buffers to allocate. 374 * 375 * This routine allocates a scsi buffer for device with SLI-3 interface spec, 376 * the scsi buffer contains all the necessary information needed to initiate 377 * a SCSI I/O. The non-DMAable buffer region contains information to build 378 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, 379 * and the initial BPL. In addition to allocating memory, the FCP CMND and 380 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. 381 * 382 * Return codes: 383 * int - number of scsi buffers that were allocated. 384 * 0 = failure, less than num_to_alloc is a partial failure. 385 **/ 386 static int 387 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) 388 { 389 struct lpfc_hba *phba = vport->phba; 390 struct lpfc_scsi_buf *psb; 391 struct ulp_bde64 *bpl; 392 IOCB_t *iocb; 393 dma_addr_t pdma_phys_fcp_cmd; 394 dma_addr_t pdma_phys_fcp_rsp; 395 dma_addr_t pdma_phys_bpl; 396 uint16_t iotag; 397 int bcnt, bpl_size; 398 399 bpl_size = phba->cfg_sg_dma_buf_size - 400 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 401 402 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 403 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", 404 num_to_alloc, phba->cfg_sg_dma_buf_size, 405 (int)sizeof(struct fcp_cmnd), 406 (int)sizeof(struct fcp_rsp), bpl_size); 407 408 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 409 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 410 if (!psb) 411 break; 412 413 /* 414 * Get memory from the pci pool to map the virt space to pci 415 * bus space for an I/O. The DMA buffer includes space for the 416 * struct fcp_cmnd, struct fcp_rsp and the number of bde's 417 * necessary to support the sg_tablesize. 418 */ 419 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 420 GFP_KERNEL, &psb->dma_handle); 421 if (!psb->data) { 422 kfree(psb); 423 break; 424 } 425 426 427 /* Allocate iotag for psb->cur_iocbq. */ 428 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 429 if (iotag == 0) { 430 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 431 psb->data, psb->dma_handle); 432 kfree(psb); 433 break; 434 } 435 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 436 437 psb->fcp_cmnd = psb->data; 438 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 439 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 440 sizeof(struct fcp_rsp); 441 442 /* Initialize local short-hand pointers. */ 443 bpl = psb->fcp_bpl; 444 pdma_phys_fcp_cmd = psb->dma_handle; 445 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); 446 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + 447 sizeof(struct fcp_rsp); 448 449 /* 450 * The first two bdes are the FCP_CMD and FCP_RSP. The balance 451 * are sg list bdes. Initialize the first two and leave the 452 * rest for queuecommand. 453 */ 454 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); 455 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); 456 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); 457 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 458 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); 459 460 /* Setup the physical region for the FCP RSP */ 461 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); 462 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); 463 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); 464 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 465 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); 466 467 /* 468 * Since the IOCB for the FCP I/O is built into this 469 * lpfc_scsi_buf, initialize it with all known data now. 470 */ 471 iocb = &psb->cur_iocbq.iocb; 472 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 473 if ((phba->sli_rev == 3) && 474 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 475 /* fill in immediate fcp command BDE */ 476 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 477 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 478 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 479 unsli3.fcp_ext.icd); 480 iocb->un.fcpi64.bdl.addrHigh = 0; 481 iocb->ulpBdeCount = 0; 482 iocb->ulpLe = 0; 483 /* fill in response BDE */ 484 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = 485 BUFF_TYPE_BDE_64; 486 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = 487 sizeof(struct fcp_rsp); 488 iocb->unsli3.fcp_ext.rbde.addrLow = 489 putPaddrLow(pdma_phys_fcp_rsp); 490 iocb->unsli3.fcp_ext.rbde.addrHigh = 491 putPaddrHigh(pdma_phys_fcp_rsp); 492 } else { 493 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 494 iocb->un.fcpi64.bdl.bdeSize = 495 (2 * sizeof(struct ulp_bde64)); 496 iocb->un.fcpi64.bdl.addrLow = 497 putPaddrLow(pdma_phys_bpl); 498 iocb->un.fcpi64.bdl.addrHigh = 499 putPaddrHigh(pdma_phys_bpl); 500 iocb->ulpBdeCount = 1; 501 iocb->ulpLe = 1; 502 } 503 iocb->ulpClass = CLASS3; 504 psb->status = IOSTAT_SUCCESS; 505 /* Put it back into the SCSI buffer list */ 506 psb->cur_iocbq.context1 = psb; 507 lpfc_release_scsi_buf_s3(phba, psb); 508 509 } 510 511 return bcnt; 512 } 513 514 /** 515 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport 516 * @vport: pointer to lpfc vport data structure. 517 * 518 * This routine is invoked by the vport cleanup for deletions and the cleanup 519 * for an ndlp on removal. 520 **/ 521 void 522 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) 523 { 524 struct lpfc_hba *phba = vport->phba; 525 struct lpfc_scsi_buf *psb, *next_psb; 526 unsigned long iflag = 0; 527 528 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 529 return; 530 spin_lock_irqsave(&phba->hbalock, iflag); 531 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 532 list_for_each_entry_safe(psb, next_psb, 533 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { 534 if (psb->rdata && psb->rdata->pnode 535 && psb->rdata->pnode->vport == vport) 536 psb->rdata = NULL; 537 } 538 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 539 spin_unlock_irqrestore(&phba->hbalock, iflag); 540 } 541 542 /** 543 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort 544 * @phba: pointer to lpfc hba data structure. 545 * @axri: pointer to the fcp xri abort wcqe structure. 546 * 547 * This routine is invoked by the worker thread to process a SLI4 fast-path 548 * FCP aborted xri. 549 **/ 550 void 551 lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, 552 struct sli4_wcqe_xri_aborted *axri) 553 { 554 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 555 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 556 struct lpfc_scsi_buf *psb, *next_psb; 557 unsigned long iflag = 0; 558 struct lpfc_iocbq *iocbq; 559 int i; 560 struct lpfc_nodelist *ndlp; 561 int rrq_empty = 0; 562 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; 563 564 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 565 return; 566 spin_lock_irqsave(&phba->hbalock, iflag); 567 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 568 list_for_each_entry_safe(psb, next_psb, 569 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { 570 if (psb->cur_iocbq.sli4_xritag == xri) { 571 list_del(&psb->list); 572 psb->exch_busy = 0; 573 psb->status = IOSTAT_SUCCESS; 574 spin_unlock( 575 &phba->sli4_hba.abts_scsi_buf_list_lock); 576 if (psb->rdata && psb->rdata->pnode) 577 ndlp = psb->rdata->pnode; 578 else 579 ndlp = NULL; 580 581 rrq_empty = list_empty(&phba->active_rrq_list); 582 spin_unlock_irqrestore(&phba->hbalock, iflag); 583 if (ndlp) { 584 lpfc_set_rrq_active(phba, ndlp, 585 psb->cur_iocbq.sli4_lxritag, rxid, 1); 586 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 587 } 588 lpfc_release_scsi_buf_s4(phba, psb); 589 if (rrq_empty) 590 lpfc_worker_wake_up(phba); 591 return; 592 } 593 } 594 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 595 for (i = 1; i <= phba->sli.last_iotag; i++) { 596 iocbq = phba->sli.iocbq_lookup[i]; 597 598 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 599 (iocbq->iocb_flag & LPFC_IO_LIBDFC)) 600 continue; 601 if (iocbq->sli4_xritag != xri) 602 continue; 603 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 604 psb->exch_busy = 0; 605 spin_unlock_irqrestore(&phba->hbalock, iflag); 606 if (!list_empty(&pring->txq)) 607 lpfc_worker_wake_up(phba); 608 return; 609 610 } 611 spin_unlock_irqrestore(&phba->hbalock, iflag); 612 } 613 614 /** 615 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list 616 * @phba: pointer to lpfc hba data structure. 617 * @post_sblist: pointer to the scsi buffer list. 618 * 619 * This routine walks a list of scsi buffers that was passed in. It attempts 620 * to construct blocks of scsi buffer sgls which contains contiguous xris and 621 * uses the non-embedded SGL block post mailbox commands to post to the port. 622 * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use 623 * embedded SGL post mailbox command for posting. The @post_sblist passed in 624 * must be local list, thus no lock is needed when manipulate the list. 625 * 626 * Returns: 0 = failure, non-zero number of successfully posted buffers. 627 **/ 628 static int 629 lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, 630 struct list_head *post_sblist, int sb_count) 631 { 632 struct lpfc_scsi_buf *psb, *psb_next; 633 int status, sgl_size; 634 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 635 dma_addr_t pdma_phys_bpl1; 636 int last_xritag = NO_XRI; 637 LIST_HEAD(prep_sblist); 638 LIST_HEAD(blck_sblist); 639 LIST_HEAD(scsi_sblist); 640 641 /* sanity check */ 642 if (sb_count <= 0) 643 return -EINVAL; 644 645 sgl_size = phba->cfg_sg_dma_buf_size - 646 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 647 648 list_for_each_entry_safe(psb, psb_next, post_sblist, list) { 649 list_del_init(&psb->list); 650 block_cnt++; 651 if ((last_xritag != NO_XRI) && 652 (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) { 653 /* a hole in xri block, form a sgl posting block */ 654 list_splice_init(&prep_sblist, &blck_sblist); 655 post_cnt = block_cnt - 1; 656 /* prepare list for next posting block */ 657 list_add_tail(&psb->list, &prep_sblist); 658 block_cnt = 1; 659 } else { 660 /* prepare list for next posting block */ 661 list_add_tail(&psb->list, &prep_sblist); 662 /* enough sgls for non-embed sgl mbox command */ 663 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 664 list_splice_init(&prep_sblist, &blck_sblist); 665 post_cnt = block_cnt; 666 block_cnt = 0; 667 } 668 } 669 num_posting++; 670 last_xritag = psb->cur_iocbq.sli4_xritag; 671 672 /* end of repost sgl list condition for SCSI buffers */ 673 if (num_posting == sb_count) { 674 if (post_cnt == 0) { 675 /* last sgl posting block */ 676 list_splice_init(&prep_sblist, &blck_sblist); 677 post_cnt = block_cnt; 678 } else if (block_cnt == 1) { 679 /* last single sgl with non-contiguous xri */ 680 if (sgl_size > SGL_PAGE_SIZE) 681 pdma_phys_bpl1 = psb->dma_phys_bpl + 682 SGL_PAGE_SIZE; 683 else 684 pdma_phys_bpl1 = 0; 685 status = lpfc_sli4_post_sgl(phba, 686 psb->dma_phys_bpl, 687 pdma_phys_bpl1, 688 psb->cur_iocbq.sli4_xritag); 689 if (status) { 690 /* failure, put on abort scsi list */ 691 psb->exch_busy = 1; 692 } else { 693 /* success, put on SCSI buffer list */ 694 psb->exch_busy = 0; 695 psb->status = IOSTAT_SUCCESS; 696 num_posted++; 697 } 698 /* success, put on SCSI buffer sgl list */ 699 list_add_tail(&psb->list, &scsi_sblist); 700 } 701 } 702 703 /* continue until a nembed page worth of sgls */ 704 if (post_cnt == 0) 705 continue; 706 707 /* post block of SCSI buffer list sgls */ 708 status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist, 709 post_cnt); 710 711 /* don't reset xirtag due to hole in xri block */ 712 if (block_cnt == 0) 713 last_xritag = NO_XRI; 714 715 /* reset SCSI buffer post count for next round of posting */ 716 post_cnt = 0; 717 718 /* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */ 719 while (!list_empty(&blck_sblist)) { 720 list_remove_head(&blck_sblist, psb, 721 struct lpfc_scsi_buf, list); 722 if (status) { 723 /* failure, put on abort scsi list */ 724 psb->exch_busy = 1; 725 } else { 726 /* success, put on SCSI buffer list */ 727 psb->exch_busy = 0; 728 psb->status = IOSTAT_SUCCESS; 729 num_posted++; 730 } 731 list_add_tail(&psb->list, &scsi_sblist); 732 } 733 } 734 /* Push SCSI buffers with sgl posted to the availble list */ 735 while (!list_empty(&scsi_sblist)) { 736 list_remove_head(&scsi_sblist, psb, 737 struct lpfc_scsi_buf, list); 738 lpfc_release_scsi_buf_s4(phba, psb); 739 } 740 return num_posted; 741 } 742 743 /** 744 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls 745 * @phba: pointer to lpfc hba data structure. 746 * 747 * This routine walks the list of scsi buffers that have been allocated and 748 * repost them to the port by using SGL block post. This is needed after a 749 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 750 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list 751 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. 752 * 753 * Returns: 0 = success, non-zero failure. 754 **/ 755 int 756 lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) 757 { 758 LIST_HEAD(post_sblist); 759 int num_posted, rc = 0; 760 761 /* get all SCSI buffers need to repost to a local list */ 762 spin_lock_irq(&phba->scsi_buf_list_get_lock); 763 spin_lock(&phba->scsi_buf_list_put_lock); 764 list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); 765 list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); 766 spin_unlock(&phba->scsi_buf_list_put_lock); 767 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 768 769 /* post the list of scsi buffer sgls to port if available */ 770 if (!list_empty(&post_sblist)) { 771 num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist, 772 phba->sli4_hba.scsi_xri_cnt); 773 /* failed to post any scsi buffer, return error */ 774 if (num_posted == 0) 775 rc = -EIO; 776 } 777 return rc; 778 } 779 780 /** 781 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec 782 * @vport: The virtual port for which this call being executed. 783 * @num_to_allocate: The requested number of buffers to allocate. 784 * 785 * This routine allocates scsi buffers for device with SLI-4 interface spec, 786 * the scsi buffer contains all the necessary information needed to initiate 787 * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and put 788 * them on a list, it post them to the port by using SGL block post. 789 * 790 * Return codes: 791 * int - number of scsi buffers that were allocated and posted. 792 * 0 = failure, less than num_to_alloc is a partial failure. 793 **/ 794 static int 795 lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) 796 { 797 struct lpfc_hba *phba = vport->phba; 798 struct lpfc_scsi_buf *psb; 799 struct sli4_sge *sgl; 800 IOCB_t *iocb; 801 dma_addr_t pdma_phys_fcp_cmd; 802 dma_addr_t pdma_phys_fcp_rsp; 803 dma_addr_t pdma_phys_bpl; 804 uint16_t iotag, lxri = 0; 805 int bcnt, num_posted, sgl_size; 806 LIST_HEAD(prep_sblist); 807 LIST_HEAD(post_sblist); 808 LIST_HEAD(scsi_sblist); 809 810 sgl_size = phba->cfg_sg_dma_buf_size - 811 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 812 813 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 814 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", 815 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size, 816 (int)sizeof(struct fcp_cmnd), 817 (int)sizeof(struct fcp_rsp)); 818 819 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 820 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 821 if (!psb) 822 break; 823 /* 824 * Get memory from the pci pool to map the virt space to 825 * pci bus space for an I/O. The DMA buffer includes space 826 * for the struct fcp_cmnd, struct fcp_rsp and the number 827 * of bde's necessary to support the sg_tablesize. 828 */ 829 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 830 GFP_KERNEL, &psb->dma_handle); 831 if (!psb->data) { 832 kfree(psb); 833 break; 834 } 835 836 /* 837 * 4K Page alignment is CRITICAL to BlockGuard, double check 838 * to be sure. 839 */ 840 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 841 (((unsigned long)(psb->data) & 842 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 843 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 844 "3369 Memory alignment error " 845 "addr=%lx\n", 846 (unsigned long)psb->data); 847 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 848 psb->data, psb->dma_handle); 849 kfree(psb); 850 break; 851 } 852 853 854 lxri = lpfc_sli4_next_xritag(phba); 855 if (lxri == NO_XRI) { 856 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 857 psb->data, psb->dma_handle); 858 kfree(psb); 859 break; 860 } 861 862 /* Allocate iotag for psb->cur_iocbq. */ 863 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 864 if (iotag == 0) { 865 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 866 psb->data, psb->dma_handle); 867 kfree(psb); 868 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 869 "3368 Failed to allocate IOTAG for" 870 " XRI:0x%x\n", lxri); 871 lpfc_sli4_free_xri(phba, lxri); 872 break; 873 } 874 psb->cur_iocbq.sli4_lxritag = lxri; 875 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 876 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 877 psb->fcp_bpl = psb->data; 878 psb->fcp_cmnd = (psb->data + sgl_size); 879 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + 880 sizeof(struct fcp_cmnd)); 881 882 /* Initialize local short-hand pointers. */ 883 sgl = (struct sli4_sge *)psb->fcp_bpl; 884 pdma_phys_bpl = psb->dma_handle; 885 pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size); 886 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); 887 888 /* 889 * The first two bdes are the FCP_CMD and FCP_RSP. 890 * The balance are sg list bdes. Initialize the 891 * first two and leave the rest for queuecommand. 892 */ 893 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 894 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 895 sgl->word2 = le32_to_cpu(sgl->word2); 896 bf_set(lpfc_sli4_sge_last, sgl, 0); 897 sgl->word2 = cpu_to_le32(sgl->word2); 898 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); 899 sgl++; 900 901 /* Setup the physical region for the FCP RSP */ 902 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); 903 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); 904 sgl->word2 = le32_to_cpu(sgl->word2); 905 bf_set(lpfc_sli4_sge_last, sgl, 1); 906 sgl->word2 = cpu_to_le32(sgl->word2); 907 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); 908 909 /* 910 * Since the IOCB for the FCP I/O is built into this 911 * lpfc_scsi_buf, initialize it with all known data now. 912 */ 913 iocb = &psb->cur_iocbq.iocb; 914 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 915 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 916 /* setting the BLP size to 2 * sizeof BDE may not be correct. 917 * We are setting the bpl to point to out sgl. An sgl's 918 * entries are 16 bytes, a bpl entries are 12 bytes. 919 */ 920 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 921 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd); 922 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd); 923 iocb->ulpBdeCount = 1; 924 iocb->ulpLe = 1; 925 iocb->ulpClass = CLASS3; 926 psb->cur_iocbq.context1 = psb; 927 psb->dma_phys_bpl = pdma_phys_bpl; 928 929 /* add the scsi buffer to a post list */ 930 list_add_tail(&psb->list, &post_sblist); 931 spin_lock_irq(&phba->scsi_buf_list_get_lock); 932 phba->sli4_hba.scsi_xri_cnt++; 933 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 934 } 935 lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP, 936 "3021 Allocate %d out of %d requested new SCSI " 937 "buffers\n", bcnt, num_to_alloc); 938 939 /* post the list of scsi buffer sgls to port if available */ 940 if (!list_empty(&post_sblist)) 941 num_posted = lpfc_sli4_post_scsi_sgl_list(phba, 942 &post_sblist, bcnt); 943 else 944 num_posted = 0; 945 946 return num_posted; 947 } 948 949 /** 950 * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator 951 * @vport: The virtual port for which this call being executed. 952 * @num_to_allocate: The requested number of buffers to allocate. 953 * 954 * This routine wraps the actual SCSI buffer allocator function pointer from 955 * the lpfc_hba struct. 956 * 957 * Return codes: 958 * int - number of scsi buffers that were allocated. 959 * 0 = failure, less than num_to_alloc is a partial failure. 960 **/ 961 static inline int 962 lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc) 963 { 964 return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc); 965 } 966 967 /** 968 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 969 * @phba: The HBA for which this call is being executed. 970 * 971 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 972 * and returns to caller. 973 * 974 * Return codes: 975 * NULL - Error 976 * Pointer to lpfc_scsi_buf - Success 977 **/ 978 static struct lpfc_scsi_buf* 979 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 980 { 981 struct lpfc_scsi_buf * lpfc_cmd = NULL; 982 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; 983 unsigned long iflag = 0; 984 985 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); 986 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, 987 list); 988 if (!lpfc_cmd) { 989 spin_lock(&phba->scsi_buf_list_put_lock); 990 list_splice(&phba->lpfc_scsi_buf_list_put, 991 &phba->lpfc_scsi_buf_list_get); 992 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 993 list_remove_head(scsi_buf_list_get, lpfc_cmd, 994 struct lpfc_scsi_buf, list); 995 spin_unlock(&phba->scsi_buf_list_put_lock); 996 } 997 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); 998 999 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { 1000 atomic_inc(&ndlp->cmd_pending); 1001 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 1002 } 1003 return lpfc_cmd; 1004 } 1005 /** 1006 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 1007 * @phba: The HBA for which this call is being executed. 1008 * 1009 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 1010 * and returns to caller. 1011 * 1012 * Return codes: 1013 * NULL - Error 1014 * Pointer to lpfc_scsi_buf - Success 1015 **/ 1016 static struct lpfc_scsi_buf* 1017 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1018 { 1019 struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; 1020 unsigned long iflag = 0; 1021 int found = 0; 1022 1023 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); 1024 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, 1025 &phba->lpfc_scsi_buf_list_get, list) { 1026 if (lpfc_test_rrq_active(phba, ndlp, 1027 lpfc_cmd->cur_iocbq.sli4_lxritag)) 1028 continue; 1029 list_del_init(&lpfc_cmd->list); 1030 found = 1; 1031 break; 1032 } 1033 if (!found) { 1034 spin_lock(&phba->scsi_buf_list_put_lock); 1035 list_splice(&phba->lpfc_scsi_buf_list_put, 1036 &phba->lpfc_scsi_buf_list_get); 1037 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 1038 spin_unlock(&phba->scsi_buf_list_put_lock); 1039 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, 1040 &phba->lpfc_scsi_buf_list_get, list) { 1041 if (lpfc_test_rrq_active( 1042 phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) 1043 continue; 1044 list_del_init(&lpfc_cmd->list); 1045 found = 1; 1046 break; 1047 } 1048 } 1049 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); 1050 if (!found) 1051 return NULL; 1052 1053 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { 1054 atomic_inc(&ndlp->cmd_pending); 1055 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 1056 } 1057 return lpfc_cmd; 1058 } 1059 /** 1060 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 1061 * @phba: The HBA for which this call is being executed. 1062 * 1063 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 1064 * and returns to caller. 1065 * 1066 * Return codes: 1067 * NULL - Error 1068 * Pointer to lpfc_scsi_buf - Success 1069 **/ 1070 static struct lpfc_scsi_buf* 1071 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1072 { 1073 return phba->lpfc_get_scsi_buf(phba, ndlp); 1074 } 1075 1076 /** 1077 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list 1078 * @phba: The Hba for which this call is being executed. 1079 * @psb: The scsi buffer which is being released. 1080 * 1081 * This routine releases @psb scsi buffer by adding it to tail of @phba 1082 * lpfc_scsi_buf_list list. 1083 **/ 1084 static void 1085 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 1086 { 1087 unsigned long iflag = 0; 1088 1089 psb->seg_cnt = 0; 1090 psb->nonsg_phys = 0; 1091 psb->prot_seg_cnt = 0; 1092 1093 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 1094 psb->pCmd = NULL; 1095 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; 1096 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); 1097 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 1098 } 1099 1100 /** 1101 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list. 1102 * @phba: The Hba for which this call is being executed. 1103 * @psb: The scsi buffer which is being released. 1104 * 1105 * This routine releases @psb scsi buffer by adding it to tail of @phba 1106 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer 1107 * and cannot be reused for at least RA_TOV amount of time if it was 1108 * aborted. 1109 **/ 1110 static void 1111 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 1112 { 1113 unsigned long iflag = 0; 1114 1115 psb->seg_cnt = 0; 1116 psb->nonsg_phys = 0; 1117 psb->prot_seg_cnt = 0; 1118 1119 if (psb->exch_busy) { 1120 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, 1121 iflag); 1122 psb->pCmd = NULL; 1123 list_add_tail(&psb->list, 1124 &phba->sli4_hba.lpfc_abts_scsi_buf_list); 1125 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 1126 iflag); 1127 } else { 1128 psb->pCmd = NULL; 1129 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; 1130 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 1131 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); 1132 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 1133 } 1134 } 1135 1136 /** 1137 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. 1138 * @phba: The Hba for which this call is being executed. 1139 * @psb: The scsi buffer which is being released. 1140 * 1141 * This routine releases @psb scsi buffer by adding it to tail of @phba 1142 * lpfc_scsi_buf_list list. 1143 **/ 1144 static void 1145 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 1146 { 1147 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) 1148 atomic_dec(&psb->ndlp->cmd_pending); 1149 1150 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH; 1151 phba->lpfc_release_scsi_buf(phba, psb); 1152 } 1153 1154 /** 1155 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 1156 * @phba: The Hba for which this call is being executed. 1157 * @lpfc_cmd: The scsi buffer which is going to be mapped. 1158 * 1159 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 1160 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans 1161 * through sg elements and format the bde. This routine also initializes all 1162 * IOCB fields which are dependent on scsi command request buffer. 1163 * 1164 * Return codes: 1165 * 1 - Error 1166 * 0 - Success 1167 **/ 1168 static int 1169 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 1170 { 1171 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 1172 struct scatterlist *sgel = NULL; 1173 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1174 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 1175 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; 1176 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1177 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 1178 dma_addr_t physaddr; 1179 uint32_t num_bde = 0; 1180 int nseg, datadir = scsi_cmnd->sc_data_direction; 1181 1182 /* 1183 * There are three possibilities here - use scatter-gather segment, use 1184 * the single mapping, or neither. Start the lpfc command prep by 1185 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 1186 * data bde entry. 1187 */ 1188 bpl += 2; 1189 if (scsi_sg_count(scsi_cmnd)) { 1190 /* 1191 * The driver stores the segment count returned from pci_map_sg 1192 * because this a count of dma-mappings used to map the use_sg 1193 * pages. They are not guaranteed to be the same for those 1194 * architectures that implement an IOMMU. 1195 */ 1196 1197 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), 1198 scsi_sg_count(scsi_cmnd), datadir); 1199 if (unlikely(!nseg)) 1200 return 1; 1201 1202 lpfc_cmd->seg_cnt = nseg; 1203 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1204 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1205 "9064 BLKGRD: %s: Too many sg segments from " 1206 "dma_map_sg. Config %d, seg_cnt %d\n", 1207 __func__, phba->cfg_sg_seg_cnt, 1208 lpfc_cmd->seg_cnt); 1209 lpfc_cmd->seg_cnt = 0; 1210 scsi_dma_unmap(scsi_cmnd); 1211 return 1; 1212 } 1213 1214 /* 1215 * The driver established a maximum scatter-gather segment count 1216 * during probe that limits the number of sg elements in any 1217 * single scsi command. Just run through the seg_cnt and format 1218 * the bde's. 1219 * When using SLI-3 the driver will try to fit all the BDEs into 1220 * the IOCB. If it can't then the BDEs get added to a BPL as it 1221 * does for SLI-2 mode. 1222 */ 1223 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 1224 physaddr = sg_dma_address(sgel); 1225 if (phba->sli_rev == 3 && 1226 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1227 !(iocbq->iocb_flag & DSS_SECURITY_OP) && 1228 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 1229 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1230 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 1231 data_bde->addrLow = putPaddrLow(physaddr); 1232 data_bde->addrHigh = putPaddrHigh(physaddr); 1233 data_bde++; 1234 } else { 1235 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1236 bpl->tus.f.bdeSize = sg_dma_len(sgel); 1237 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1238 bpl->addrLow = 1239 le32_to_cpu(putPaddrLow(physaddr)); 1240 bpl->addrHigh = 1241 le32_to_cpu(putPaddrHigh(physaddr)); 1242 bpl++; 1243 } 1244 } 1245 } 1246 1247 /* 1248 * Finish initializing those IOCB fields that are dependent on the 1249 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 1250 * explicitly reinitialized and for SLI-3 the extended bde count is 1251 * explicitly reinitialized since all iocb memory resources are reused. 1252 */ 1253 if (phba->sli_rev == 3 && 1254 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1255 !(iocbq->iocb_flag & DSS_SECURITY_OP)) { 1256 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 1257 /* 1258 * The extended IOCB format can only fit 3 BDE or a BPL. 1259 * This I/O has more than 3 BDE so the 1st data bde will 1260 * be a BPL that is filled in here. 1261 */ 1262 physaddr = lpfc_cmd->dma_handle; 1263 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64; 1264 data_bde->tus.f.bdeSize = (num_bde * 1265 sizeof(struct ulp_bde64)); 1266 physaddr += (sizeof(struct fcp_cmnd) + 1267 sizeof(struct fcp_rsp) + 1268 (2 * sizeof(struct ulp_bde64))); 1269 data_bde->addrHigh = putPaddrHigh(physaddr); 1270 data_bde->addrLow = putPaddrLow(physaddr); 1271 /* ebde count includes the response bde and data bpl */ 1272 iocb_cmd->unsli3.fcp_ext.ebde_count = 2; 1273 } else { 1274 /* ebde count includes the response bde and data bdes */ 1275 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 1276 } 1277 } else { 1278 iocb_cmd->un.fcpi64.bdl.bdeSize = 1279 ((num_bde + 2) * sizeof(struct ulp_bde64)); 1280 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 1281 } 1282 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 1283 1284 /* 1285 * Due to difference in data length between DIF/non-DIF paths, 1286 * we need to set word 4 of IOCB here 1287 */ 1288 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 1289 return 0; 1290 } 1291 1292 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1293 1294 /* Return BG_ERR_INIT if error injection is detected by Initiator */ 1295 #define BG_ERR_INIT 0x1 1296 /* Return BG_ERR_TGT if error injection is detected by Target */ 1297 #define BG_ERR_TGT 0x2 1298 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */ 1299 #define BG_ERR_SWAP 0x10 1300 /** 1301 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for 1302 * error injection 1303 **/ 1304 #define BG_ERR_CHECK 0x20 1305 1306 /** 1307 * lpfc_bg_err_inject - Determine if we should inject an error 1308 * @phba: The Hba for which this call is being executed. 1309 * @sc: The SCSI command to examine 1310 * @reftag: (out) BlockGuard reference tag for transmitted data 1311 * @apptag: (out) BlockGuard application tag for transmitted data 1312 * @new_guard (in) Value to replace CRC with if needed 1313 * 1314 * Returns BG_ERR_* bit mask or 0 if request ignored 1315 **/ 1316 static int 1317 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1318 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard) 1319 { 1320 struct scatterlist *sgpe; /* s/g prot entry */ 1321 struct lpfc_scsi_buf *lpfc_cmd = NULL; 1322 struct scsi_dif_tuple *src = NULL; 1323 struct lpfc_nodelist *ndlp; 1324 struct lpfc_rport_data *rdata; 1325 uint32_t op = scsi_get_prot_op(sc); 1326 uint32_t blksize; 1327 uint32_t numblks; 1328 sector_t lba; 1329 int rc = 0; 1330 int blockoff = 0; 1331 1332 if (op == SCSI_PROT_NORMAL) 1333 return 0; 1334 1335 sgpe = scsi_prot_sglist(sc); 1336 lba = scsi_get_lba(sc); 1337 1338 /* First check if we need to match the LBA */ 1339 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { 1340 blksize = lpfc_cmd_blksize(sc); 1341 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; 1342 1343 /* Make sure we have the right LBA if one is specified */ 1344 if ((phba->lpfc_injerr_lba < lba) || 1345 (phba->lpfc_injerr_lba >= (lba + numblks))) 1346 return 0; 1347 if (sgpe) { 1348 blockoff = phba->lpfc_injerr_lba - lba; 1349 numblks = sg_dma_len(sgpe) / 1350 sizeof(struct scsi_dif_tuple); 1351 if (numblks < blockoff) 1352 blockoff = numblks; 1353 } 1354 } 1355 1356 /* Next check if we need to match the remote NPortID or WWPN */ 1357 rdata = lpfc_rport_data_from_scsi_device(sc->device); 1358 if (rdata && rdata->pnode) { 1359 ndlp = rdata->pnode; 1360 1361 /* Make sure we have the right NPortID if one is specified */ 1362 if (phba->lpfc_injerr_nportid && 1363 (phba->lpfc_injerr_nportid != ndlp->nlp_DID)) 1364 return 0; 1365 1366 /* 1367 * Make sure we have the right WWPN if one is specified. 1368 * wwn[0] should be a non-zero NAA in a good WWPN. 1369 */ 1370 if (phba->lpfc_injerr_wwpn.u.wwn[0] && 1371 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn, 1372 sizeof(struct lpfc_name)) != 0)) 1373 return 0; 1374 } 1375 1376 /* Setup a ptr to the protection data if the SCSI host provides it */ 1377 if (sgpe) { 1378 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 1379 src += blockoff; 1380 lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble; 1381 } 1382 1383 /* Should we change the Reference Tag */ 1384 if (reftag) { 1385 if (phba->lpfc_injerr_wref_cnt) { 1386 switch (op) { 1387 case SCSI_PROT_WRITE_PASS: 1388 if (src) { 1389 /* 1390 * For WRITE_PASS, force the error 1391 * to be sent on the wire. It should 1392 * be detected by the Target. 1393 * If blockoff != 0 error will be 1394 * inserted in middle of the IO. 1395 */ 1396 1397 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1398 "9076 BLKGRD: Injecting reftag error: " 1399 "write lba x%lx + x%x oldrefTag x%x\n", 1400 (unsigned long)lba, blockoff, 1401 be32_to_cpu(src->ref_tag)); 1402 1403 /* 1404 * Save the old ref_tag so we can 1405 * restore it on completion. 1406 */ 1407 if (lpfc_cmd) { 1408 lpfc_cmd->prot_data_type = 1409 LPFC_INJERR_REFTAG; 1410 lpfc_cmd->prot_data_segment = 1411 src; 1412 lpfc_cmd->prot_data = 1413 src->ref_tag; 1414 } 1415 src->ref_tag = cpu_to_be32(0xDEADBEEF); 1416 phba->lpfc_injerr_wref_cnt--; 1417 if (phba->lpfc_injerr_wref_cnt == 0) { 1418 phba->lpfc_injerr_nportid = 0; 1419 phba->lpfc_injerr_lba = 1420 LPFC_INJERR_LBA_OFF; 1421 memset(&phba->lpfc_injerr_wwpn, 1422 0, sizeof(struct lpfc_name)); 1423 } 1424 rc = BG_ERR_TGT | BG_ERR_CHECK; 1425 1426 break; 1427 } 1428 /* Drop thru */ 1429 case SCSI_PROT_WRITE_INSERT: 1430 /* 1431 * For WRITE_INSERT, force the error 1432 * to be sent on the wire. It should be 1433 * detected by the Target. 1434 */ 1435 /* DEADBEEF will be the reftag on the wire */ 1436 *reftag = 0xDEADBEEF; 1437 phba->lpfc_injerr_wref_cnt--; 1438 if (phba->lpfc_injerr_wref_cnt == 0) { 1439 phba->lpfc_injerr_nportid = 0; 1440 phba->lpfc_injerr_lba = 1441 LPFC_INJERR_LBA_OFF; 1442 memset(&phba->lpfc_injerr_wwpn, 1443 0, sizeof(struct lpfc_name)); 1444 } 1445 rc = BG_ERR_TGT | BG_ERR_CHECK; 1446 1447 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1448 "9078 BLKGRD: Injecting reftag error: " 1449 "write lba x%lx\n", (unsigned long)lba); 1450 break; 1451 case SCSI_PROT_WRITE_STRIP: 1452 /* 1453 * For WRITE_STRIP and WRITE_PASS, 1454 * force the error on data 1455 * being copied from SLI-Host to SLI-Port. 1456 */ 1457 *reftag = 0xDEADBEEF; 1458 phba->lpfc_injerr_wref_cnt--; 1459 if (phba->lpfc_injerr_wref_cnt == 0) { 1460 phba->lpfc_injerr_nportid = 0; 1461 phba->lpfc_injerr_lba = 1462 LPFC_INJERR_LBA_OFF; 1463 memset(&phba->lpfc_injerr_wwpn, 1464 0, sizeof(struct lpfc_name)); 1465 } 1466 rc = BG_ERR_INIT; 1467 1468 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1469 "9077 BLKGRD: Injecting reftag error: " 1470 "write lba x%lx\n", (unsigned long)lba); 1471 break; 1472 } 1473 } 1474 if (phba->lpfc_injerr_rref_cnt) { 1475 switch (op) { 1476 case SCSI_PROT_READ_INSERT: 1477 case SCSI_PROT_READ_STRIP: 1478 case SCSI_PROT_READ_PASS: 1479 /* 1480 * For READ_STRIP and READ_PASS, force the 1481 * error on data being read off the wire. It 1482 * should force an IO error to the driver. 1483 */ 1484 *reftag = 0xDEADBEEF; 1485 phba->lpfc_injerr_rref_cnt--; 1486 if (phba->lpfc_injerr_rref_cnt == 0) { 1487 phba->lpfc_injerr_nportid = 0; 1488 phba->lpfc_injerr_lba = 1489 LPFC_INJERR_LBA_OFF; 1490 memset(&phba->lpfc_injerr_wwpn, 1491 0, sizeof(struct lpfc_name)); 1492 } 1493 rc = BG_ERR_INIT; 1494 1495 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1496 "9079 BLKGRD: Injecting reftag error: " 1497 "read lba x%lx\n", (unsigned long)lba); 1498 break; 1499 } 1500 } 1501 } 1502 1503 /* Should we change the Application Tag */ 1504 if (apptag) { 1505 if (phba->lpfc_injerr_wapp_cnt) { 1506 switch (op) { 1507 case SCSI_PROT_WRITE_PASS: 1508 if (src) { 1509 /* 1510 * For WRITE_PASS, force the error 1511 * to be sent on the wire. It should 1512 * be detected by the Target. 1513 * If blockoff != 0 error will be 1514 * inserted in middle of the IO. 1515 */ 1516 1517 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1518 "9080 BLKGRD: Injecting apptag error: " 1519 "write lba x%lx + x%x oldappTag x%x\n", 1520 (unsigned long)lba, blockoff, 1521 be16_to_cpu(src->app_tag)); 1522 1523 /* 1524 * Save the old app_tag so we can 1525 * restore it on completion. 1526 */ 1527 if (lpfc_cmd) { 1528 lpfc_cmd->prot_data_type = 1529 LPFC_INJERR_APPTAG; 1530 lpfc_cmd->prot_data_segment = 1531 src; 1532 lpfc_cmd->prot_data = 1533 src->app_tag; 1534 } 1535 src->app_tag = cpu_to_be16(0xDEAD); 1536 phba->lpfc_injerr_wapp_cnt--; 1537 if (phba->lpfc_injerr_wapp_cnt == 0) { 1538 phba->lpfc_injerr_nportid = 0; 1539 phba->lpfc_injerr_lba = 1540 LPFC_INJERR_LBA_OFF; 1541 memset(&phba->lpfc_injerr_wwpn, 1542 0, sizeof(struct lpfc_name)); 1543 } 1544 rc = BG_ERR_TGT | BG_ERR_CHECK; 1545 break; 1546 } 1547 /* Drop thru */ 1548 case SCSI_PROT_WRITE_INSERT: 1549 /* 1550 * For WRITE_INSERT, force the 1551 * error to be sent on the wire. It should be 1552 * detected by the Target. 1553 */ 1554 /* DEAD will be the apptag on the wire */ 1555 *apptag = 0xDEAD; 1556 phba->lpfc_injerr_wapp_cnt--; 1557 if (phba->lpfc_injerr_wapp_cnt == 0) { 1558 phba->lpfc_injerr_nportid = 0; 1559 phba->lpfc_injerr_lba = 1560 LPFC_INJERR_LBA_OFF; 1561 memset(&phba->lpfc_injerr_wwpn, 1562 0, sizeof(struct lpfc_name)); 1563 } 1564 rc = BG_ERR_TGT | BG_ERR_CHECK; 1565 1566 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1567 "0813 BLKGRD: Injecting apptag error: " 1568 "write lba x%lx\n", (unsigned long)lba); 1569 break; 1570 case SCSI_PROT_WRITE_STRIP: 1571 /* 1572 * For WRITE_STRIP and WRITE_PASS, 1573 * force the error on data 1574 * being copied from SLI-Host to SLI-Port. 1575 */ 1576 *apptag = 0xDEAD; 1577 phba->lpfc_injerr_wapp_cnt--; 1578 if (phba->lpfc_injerr_wapp_cnt == 0) { 1579 phba->lpfc_injerr_nportid = 0; 1580 phba->lpfc_injerr_lba = 1581 LPFC_INJERR_LBA_OFF; 1582 memset(&phba->lpfc_injerr_wwpn, 1583 0, sizeof(struct lpfc_name)); 1584 } 1585 rc = BG_ERR_INIT; 1586 1587 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1588 "0812 BLKGRD: Injecting apptag error: " 1589 "write lba x%lx\n", (unsigned long)lba); 1590 break; 1591 } 1592 } 1593 if (phba->lpfc_injerr_rapp_cnt) { 1594 switch (op) { 1595 case SCSI_PROT_READ_INSERT: 1596 case SCSI_PROT_READ_STRIP: 1597 case SCSI_PROT_READ_PASS: 1598 /* 1599 * For READ_STRIP and READ_PASS, force the 1600 * error on data being read off the wire. It 1601 * should force an IO error to the driver. 1602 */ 1603 *apptag = 0xDEAD; 1604 phba->lpfc_injerr_rapp_cnt--; 1605 if (phba->lpfc_injerr_rapp_cnt == 0) { 1606 phba->lpfc_injerr_nportid = 0; 1607 phba->lpfc_injerr_lba = 1608 LPFC_INJERR_LBA_OFF; 1609 memset(&phba->lpfc_injerr_wwpn, 1610 0, sizeof(struct lpfc_name)); 1611 } 1612 rc = BG_ERR_INIT; 1613 1614 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1615 "0814 BLKGRD: Injecting apptag error: " 1616 "read lba x%lx\n", (unsigned long)lba); 1617 break; 1618 } 1619 } 1620 } 1621 1622 1623 /* Should we change the Guard Tag */ 1624 if (new_guard) { 1625 if (phba->lpfc_injerr_wgrd_cnt) { 1626 switch (op) { 1627 case SCSI_PROT_WRITE_PASS: 1628 rc = BG_ERR_CHECK; 1629 /* Drop thru */ 1630 1631 case SCSI_PROT_WRITE_INSERT: 1632 /* 1633 * For WRITE_INSERT, force the 1634 * error to be sent on the wire. It should be 1635 * detected by the Target. 1636 */ 1637 phba->lpfc_injerr_wgrd_cnt--; 1638 if (phba->lpfc_injerr_wgrd_cnt == 0) { 1639 phba->lpfc_injerr_nportid = 0; 1640 phba->lpfc_injerr_lba = 1641 LPFC_INJERR_LBA_OFF; 1642 memset(&phba->lpfc_injerr_wwpn, 1643 0, sizeof(struct lpfc_name)); 1644 } 1645 1646 rc |= BG_ERR_TGT | BG_ERR_SWAP; 1647 /* Signals the caller to swap CRC->CSUM */ 1648 1649 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1650 "0817 BLKGRD: Injecting guard error: " 1651 "write lba x%lx\n", (unsigned long)lba); 1652 break; 1653 case SCSI_PROT_WRITE_STRIP: 1654 /* 1655 * For WRITE_STRIP and WRITE_PASS, 1656 * force the error on data 1657 * being copied from SLI-Host to SLI-Port. 1658 */ 1659 phba->lpfc_injerr_wgrd_cnt--; 1660 if (phba->lpfc_injerr_wgrd_cnt == 0) { 1661 phba->lpfc_injerr_nportid = 0; 1662 phba->lpfc_injerr_lba = 1663 LPFC_INJERR_LBA_OFF; 1664 memset(&phba->lpfc_injerr_wwpn, 1665 0, sizeof(struct lpfc_name)); 1666 } 1667 1668 rc = BG_ERR_INIT | BG_ERR_SWAP; 1669 /* Signals the caller to swap CRC->CSUM */ 1670 1671 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1672 "0816 BLKGRD: Injecting guard error: " 1673 "write lba x%lx\n", (unsigned long)lba); 1674 break; 1675 } 1676 } 1677 if (phba->lpfc_injerr_rgrd_cnt) { 1678 switch (op) { 1679 case SCSI_PROT_READ_INSERT: 1680 case SCSI_PROT_READ_STRIP: 1681 case SCSI_PROT_READ_PASS: 1682 /* 1683 * For READ_STRIP and READ_PASS, force the 1684 * error on data being read off the wire. It 1685 * should force an IO error to the driver. 1686 */ 1687 phba->lpfc_injerr_rgrd_cnt--; 1688 if (phba->lpfc_injerr_rgrd_cnt == 0) { 1689 phba->lpfc_injerr_nportid = 0; 1690 phba->lpfc_injerr_lba = 1691 LPFC_INJERR_LBA_OFF; 1692 memset(&phba->lpfc_injerr_wwpn, 1693 0, sizeof(struct lpfc_name)); 1694 } 1695 1696 rc = BG_ERR_INIT | BG_ERR_SWAP; 1697 /* Signals the caller to swap CRC->CSUM */ 1698 1699 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1700 "0818 BLKGRD: Injecting guard error: " 1701 "read lba x%lx\n", (unsigned long)lba); 1702 } 1703 } 1704 } 1705 1706 return rc; 1707 } 1708 #endif 1709 1710 /** 1711 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with 1712 * the specified SCSI command. 1713 * @phba: The Hba for which this call is being executed. 1714 * @sc: The SCSI command to examine 1715 * @txopt: (out) BlockGuard operation for transmitted data 1716 * @rxopt: (out) BlockGuard operation for received data 1717 * 1718 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined 1719 * 1720 **/ 1721 static int 1722 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1723 uint8_t *txop, uint8_t *rxop) 1724 { 1725 uint8_t ret = 0; 1726 1727 if (lpfc_cmd_guard_csum(sc)) { 1728 switch (scsi_get_prot_op(sc)) { 1729 case SCSI_PROT_READ_INSERT: 1730 case SCSI_PROT_WRITE_STRIP: 1731 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1732 *txop = BG_OP_IN_CSUM_OUT_NODIF; 1733 break; 1734 1735 case SCSI_PROT_READ_STRIP: 1736 case SCSI_PROT_WRITE_INSERT: 1737 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1738 *txop = BG_OP_IN_NODIF_OUT_CRC; 1739 break; 1740 1741 case SCSI_PROT_READ_PASS: 1742 case SCSI_PROT_WRITE_PASS: 1743 *rxop = BG_OP_IN_CRC_OUT_CSUM; 1744 *txop = BG_OP_IN_CSUM_OUT_CRC; 1745 break; 1746 1747 case SCSI_PROT_NORMAL: 1748 default: 1749 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1750 "9063 BLKGRD: Bad op/guard:%d/IP combination\n", 1751 scsi_get_prot_op(sc)); 1752 ret = 1; 1753 break; 1754 1755 } 1756 } else { 1757 switch (scsi_get_prot_op(sc)) { 1758 case SCSI_PROT_READ_STRIP: 1759 case SCSI_PROT_WRITE_INSERT: 1760 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1761 *txop = BG_OP_IN_NODIF_OUT_CRC; 1762 break; 1763 1764 case SCSI_PROT_READ_PASS: 1765 case SCSI_PROT_WRITE_PASS: 1766 *rxop = BG_OP_IN_CRC_OUT_CRC; 1767 *txop = BG_OP_IN_CRC_OUT_CRC; 1768 break; 1769 1770 case SCSI_PROT_READ_INSERT: 1771 case SCSI_PROT_WRITE_STRIP: 1772 *rxop = BG_OP_IN_NODIF_OUT_CRC; 1773 *txop = BG_OP_IN_CRC_OUT_NODIF; 1774 break; 1775 1776 case SCSI_PROT_NORMAL: 1777 default: 1778 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1779 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n", 1780 scsi_get_prot_op(sc)); 1781 ret = 1; 1782 break; 1783 } 1784 } 1785 1786 return ret; 1787 } 1788 1789 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1790 /** 1791 * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with 1792 * the specified SCSI command in order to force a guard tag error. 1793 * @phba: The Hba for which this call is being executed. 1794 * @sc: The SCSI command to examine 1795 * @txopt: (out) BlockGuard operation for transmitted data 1796 * @rxopt: (out) BlockGuard operation for received data 1797 * 1798 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined 1799 * 1800 **/ 1801 static int 1802 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1803 uint8_t *txop, uint8_t *rxop) 1804 { 1805 uint8_t ret = 0; 1806 1807 if (lpfc_cmd_guard_csum(sc)) { 1808 switch (scsi_get_prot_op(sc)) { 1809 case SCSI_PROT_READ_INSERT: 1810 case SCSI_PROT_WRITE_STRIP: 1811 *rxop = BG_OP_IN_NODIF_OUT_CRC; 1812 *txop = BG_OP_IN_CRC_OUT_NODIF; 1813 break; 1814 1815 case SCSI_PROT_READ_STRIP: 1816 case SCSI_PROT_WRITE_INSERT: 1817 *rxop = BG_OP_IN_CSUM_OUT_NODIF; 1818 *txop = BG_OP_IN_NODIF_OUT_CSUM; 1819 break; 1820 1821 case SCSI_PROT_READ_PASS: 1822 case SCSI_PROT_WRITE_PASS: 1823 *rxop = BG_OP_IN_CSUM_OUT_CRC; 1824 *txop = BG_OP_IN_CRC_OUT_CSUM; 1825 break; 1826 1827 case SCSI_PROT_NORMAL: 1828 default: 1829 break; 1830 1831 } 1832 } else { 1833 switch (scsi_get_prot_op(sc)) { 1834 case SCSI_PROT_READ_STRIP: 1835 case SCSI_PROT_WRITE_INSERT: 1836 *rxop = BG_OP_IN_CSUM_OUT_NODIF; 1837 *txop = BG_OP_IN_NODIF_OUT_CSUM; 1838 break; 1839 1840 case SCSI_PROT_READ_PASS: 1841 case SCSI_PROT_WRITE_PASS: 1842 *rxop = BG_OP_IN_CSUM_OUT_CSUM; 1843 *txop = BG_OP_IN_CSUM_OUT_CSUM; 1844 break; 1845 1846 case SCSI_PROT_READ_INSERT: 1847 case SCSI_PROT_WRITE_STRIP: 1848 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1849 *txop = BG_OP_IN_CSUM_OUT_NODIF; 1850 break; 1851 1852 case SCSI_PROT_NORMAL: 1853 default: 1854 break; 1855 } 1856 } 1857 1858 return ret; 1859 } 1860 #endif 1861 1862 /** 1863 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data 1864 * @phba: The Hba for which this call is being executed. 1865 * @sc: pointer to scsi command we're working on 1866 * @bpl: pointer to buffer list for protection groups 1867 * @datacnt: number of segments of data that have been dma mapped 1868 * 1869 * This function sets up BPL buffer list for protection groups of 1870 * type LPFC_PG_TYPE_NO_DIF 1871 * 1872 * This is usually used when the HBA is instructed to generate 1873 * DIFs and insert them into data stream (or strip DIF from 1874 * incoming data stream) 1875 * 1876 * The buffer list consists of just one protection group described 1877 * below: 1878 * +-------------------------+ 1879 * start of prot group --> | PDE_5 | 1880 * +-------------------------+ 1881 * | PDE_6 | 1882 * +-------------------------+ 1883 * | Data BDE | 1884 * +-------------------------+ 1885 * |more Data BDE's ... (opt)| 1886 * +-------------------------+ 1887 * 1888 * 1889 * Note: Data s/g buffers have been dma mapped 1890 * 1891 * Returns the number of BDEs added to the BPL. 1892 **/ 1893 static int 1894 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1895 struct ulp_bde64 *bpl, int datasegcnt) 1896 { 1897 struct scatterlist *sgde = NULL; /* s/g data entry */ 1898 struct lpfc_pde5 *pde5 = NULL; 1899 struct lpfc_pde6 *pde6 = NULL; 1900 dma_addr_t physaddr; 1901 int i = 0, num_bde = 0, status; 1902 int datadir = sc->sc_data_direction; 1903 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1904 uint32_t rc; 1905 #endif 1906 uint32_t checking = 1; 1907 uint32_t reftag; 1908 uint8_t txop, rxop; 1909 1910 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1911 if (status) 1912 goto out; 1913 1914 /* extract some info from the scsi command for pde*/ 1915 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ 1916 1917 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1918 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 1919 if (rc) { 1920 if (rc & BG_ERR_SWAP) 1921 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 1922 if (rc & BG_ERR_CHECK) 1923 checking = 0; 1924 } 1925 #endif 1926 1927 /* setup PDE5 with what we have */ 1928 pde5 = (struct lpfc_pde5 *) bpl; 1929 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1930 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1931 1932 /* Endianness conversion if necessary for PDE5 */ 1933 pde5->word0 = cpu_to_le32(pde5->word0); 1934 pde5->reftag = cpu_to_le32(reftag); 1935 1936 /* advance bpl and increment bde count */ 1937 num_bde++; 1938 bpl++; 1939 pde6 = (struct lpfc_pde6 *) bpl; 1940 1941 /* setup PDE6 with the rest of the info */ 1942 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1943 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1944 bf_set(pde6_optx, pde6, txop); 1945 bf_set(pde6_oprx, pde6, rxop); 1946 1947 /* 1948 * We only need to check the data on READs, for WRITEs 1949 * protection data is automatically generated, not checked. 1950 */ 1951 if (datadir == DMA_FROM_DEVICE) { 1952 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) 1953 bf_set(pde6_ce, pde6, checking); 1954 else 1955 bf_set(pde6_ce, pde6, 0); 1956 1957 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 1958 bf_set(pde6_re, pde6, checking); 1959 else 1960 bf_set(pde6_re, pde6, 0); 1961 } 1962 bf_set(pde6_ai, pde6, 1); 1963 bf_set(pde6_ae, pde6, 0); 1964 bf_set(pde6_apptagval, pde6, 0); 1965 1966 /* Endianness conversion if necessary for PDE6 */ 1967 pde6->word0 = cpu_to_le32(pde6->word0); 1968 pde6->word1 = cpu_to_le32(pde6->word1); 1969 pde6->word2 = cpu_to_le32(pde6->word2); 1970 1971 /* advance bpl and increment bde count */ 1972 num_bde++; 1973 bpl++; 1974 1975 /* assumption: caller has already run dma_map_sg on command data */ 1976 scsi_for_each_sg(sc, sgde, datasegcnt, i) { 1977 physaddr = sg_dma_address(sgde); 1978 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1979 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1980 bpl->tus.f.bdeSize = sg_dma_len(sgde); 1981 if (datadir == DMA_TO_DEVICE) 1982 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1983 else 1984 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1985 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1986 bpl++; 1987 num_bde++; 1988 } 1989 1990 out: 1991 return num_bde; 1992 } 1993 1994 /** 1995 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data 1996 * @phba: The Hba for which this call is being executed. 1997 * @sc: pointer to scsi command we're working on 1998 * @bpl: pointer to buffer list for protection groups 1999 * @datacnt: number of segments of data that have been dma mapped 2000 * @protcnt: number of segment of protection data that have been dma mapped 2001 * 2002 * This function sets up BPL buffer list for protection groups of 2003 * type LPFC_PG_TYPE_DIF 2004 * 2005 * This is usually used when DIFs are in their own buffers, 2006 * separate from the data. The HBA can then by instructed 2007 * to place the DIFs in the outgoing stream. For read operations, 2008 * The HBA could extract the DIFs and place it in DIF buffers. 2009 * 2010 * The buffer list for this type consists of one or more of the 2011 * protection groups described below: 2012 * +-------------------------+ 2013 * start of first prot group --> | PDE_5 | 2014 * +-------------------------+ 2015 * | PDE_6 | 2016 * +-------------------------+ 2017 * | PDE_7 (Prot BDE) | 2018 * +-------------------------+ 2019 * | Data BDE | 2020 * +-------------------------+ 2021 * |more Data BDE's ... (opt)| 2022 * +-------------------------+ 2023 * start of new prot group --> | PDE_5 | 2024 * +-------------------------+ 2025 * | ... | 2026 * +-------------------------+ 2027 * 2028 * Note: It is assumed that both data and protection s/g buffers have been 2029 * mapped for DMA 2030 * 2031 * Returns the number of BDEs added to the BPL. 2032 **/ 2033 static int 2034 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2035 struct ulp_bde64 *bpl, int datacnt, int protcnt) 2036 { 2037 struct scatterlist *sgde = NULL; /* s/g data entry */ 2038 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 2039 struct lpfc_pde5 *pde5 = NULL; 2040 struct lpfc_pde6 *pde6 = NULL; 2041 struct lpfc_pde7 *pde7 = NULL; 2042 dma_addr_t dataphysaddr, protphysaddr; 2043 unsigned short curr_data = 0, curr_prot = 0; 2044 unsigned int split_offset; 2045 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 2046 unsigned int protgrp_blks, protgrp_bytes; 2047 unsigned int remainder, subtotal; 2048 int status; 2049 int datadir = sc->sc_data_direction; 2050 unsigned char pgdone = 0, alldone = 0; 2051 unsigned blksize; 2052 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2053 uint32_t rc; 2054 #endif 2055 uint32_t checking = 1; 2056 uint32_t reftag; 2057 uint8_t txop, rxop; 2058 int num_bde = 0; 2059 2060 sgpe = scsi_prot_sglist(sc); 2061 sgde = scsi_sglist(sc); 2062 2063 if (!sgpe || !sgde) { 2064 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2065 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n", 2066 sgpe, sgde); 2067 return 0; 2068 } 2069 2070 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2071 if (status) 2072 goto out; 2073 2074 /* extract some info from the scsi command */ 2075 blksize = lpfc_cmd_blksize(sc); 2076 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ 2077 2078 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2079 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2080 if (rc) { 2081 if (rc & BG_ERR_SWAP) 2082 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2083 if (rc & BG_ERR_CHECK) 2084 checking = 0; 2085 } 2086 #endif 2087 2088 split_offset = 0; 2089 do { 2090 /* Check to see if we ran out of space */ 2091 if (num_bde >= (phba->cfg_total_seg_cnt - 2)) 2092 return num_bde + 3; 2093 2094 /* setup PDE5 with what we have */ 2095 pde5 = (struct lpfc_pde5 *) bpl; 2096 memset(pde5, 0, sizeof(struct lpfc_pde5)); 2097 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 2098 2099 /* Endianness conversion if necessary for PDE5 */ 2100 pde5->word0 = cpu_to_le32(pde5->word0); 2101 pde5->reftag = cpu_to_le32(reftag); 2102 2103 /* advance bpl and increment bde count */ 2104 num_bde++; 2105 bpl++; 2106 pde6 = (struct lpfc_pde6 *) bpl; 2107 2108 /* setup PDE6 with the rest of the info */ 2109 memset(pde6, 0, sizeof(struct lpfc_pde6)); 2110 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 2111 bf_set(pde6_optx, pde6, txop); 2112 bf_set(pde6_oprx, pde6, rxop); 2113 2114 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) 2115 bf_set(pde6_ce, pde6, checking); 2116 else 2117 bf_set(pde6_ce, pde6, 0); 2118 2119 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 2120 bf_set(pde6_re, pde6, checking); 2121 else 2122 bf_set(pde6_re, pde6, 0); 2123 2124 bf_set(pde6_ai, pde6, 1); 2125 bf_set(pde6_ae, pde6, 0); 2126 bf_set(pde6_apptagval, pde6, 0); 2127 2128 /* Endianness conversion if necessary for PDE6 */ 2129 pde6->word0 = cpu_to_le32(pde6->word0); 2130 pde6->word1 = cpu_to_le32(pde6->word1); 2131 pde6->word2 = cpu_to_le32(pde6->word2); 2132 2133 /* advance bpl and increment bde count */ 2134 num_bde++; 2135 bpl++; 2136 2137 /* setup the first BDE that points to protection buffer */ 2138 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 2139 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 2140 2141 /* must be integer multiple of the DIF block length */ 2142 BUG_ON(protgroup_len % 8); 2143 2144 pde7 = (struct lpfc_pde7 *) bpl; 2145 memset(pde7, 0, sizeof(struct lpfc_pde7)); 2146 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 2147 2148 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); 2149 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); 2150 2151 protgrp_blks = protgroup_len / 8; 2152 protgrp_bytes = protgrp_blks * blksize; 2153 2154 /* check if this pde is crossing the 4K boundary; if so split */ 2155 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { 2156 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 2157 protgroup_offset += protgroup_remainder; 2158 protgrp_blks = protgroup_remainder / 8; 2159 protgrp_bytes = protgrp_blks * blksize; 2160 } else { 2161 protgroup_offset = 0; 2162 curr_prot++; 2163 } 2164 2165 num_bde++; 2166 2167 /* setup BDE's for data blocks associated with DIF data */ 2168 pgdone = 0; 2169 subtotal = 0; /* total bytes processed for current prot grp */ 2170 while (!pgdone) { 2171 /* Check to see if we ran out of space */ 2172 if (num_bde >= phba->cfg_total_seg_cnt) 2173 return num_bde + 1; 2174 2175 if (!sgde) { 2176 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2177 "9065 BLKGRD:%s Invalid data segment\n", 2178 __func__); 2179 return 0; 2180 } 2181 bpl++; 2182 dataphysaddr = sg_dma_address(sgde) + split_offset; 2183 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); 2184 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); 2185 2186 remainder = sg_dma_len(sgde) - split_offset; 2187 2188 if ((subtotal + remainder) <= protgrp_bytes) { 2189 /* we can use this whole buffer */ 2190 bpl->tus.f.bdeSize = remainder; 2191 split_offset = 0; 2192 2193 if ((subtotal + remainder) == protgrp_bytes) 2194 pgdone = 1; 2195 } else { 2196 /* must split this buffer with next prot grp */ 2197 bpl->tus.f.bdeSize = protgrp_bytes - subtotal; 2198 split_offset += bpl->tus.f.bdeSize; 2199 } 2200 2201 subtotal += bpl->tus.f.bdeSize; 2202 2203 if (datadir == DMA_TO_DEVICE) 2204 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 2205 else 2206 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 2207 bpl->tus.w = le32_to_cpu(bpl->tus.w); 2208 2209 num_bde++; 2210 curr_data++; 2211 2212 if (split_offset) 2213 break; 2214 2215 /* Move to the next s/g segment if possible */ 2216 sgde = sg_next(sgde); 2217 2218 } 2219 2220 if (protgroup_offset) { 2221 /* update the reference tag */ 2222 reftag += protgrp_blks; 2223 bpl++; 2224 continue; 2225 } 2226 2227 /* are we done ? */ 2228 if (curr_prot == protcnt) { 2229 alldone = 1; 2230 } else if (curr_prot < protcnt) { 2231 /* advance to next prot buffer */ 2232 sgpe = sg_next(sgpe); 2233 bpl++; 2234 2235 /* update the reference tag */ 2236 reftag += protgrp_blks; 2237 } else { 2238 /* if we're here, we have a bug */ 2239 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2240 "9054 BLKGRD: bug in %s\n", __func__); 2241 } 2242 2243 } while (!alldone); 2244 out: 2245 2246 return num_bde; 2247 } 2248 2249 /** 2250 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data 2251 * @phba: The Hba for which this call is being executed. 2252 * @sc: pointer to scsi command we're working on 2253 * @sgl: pointer to buffer list for protection groups 2254 * @datacnt: number of segments of data that have been dma mapped 2255 * 2256 * This function sets up SGL buffer list for protection groups of 2257 * type LPFC_PG_TYPE_NO_DIF 2258 * 2259 * This is usually used when the HBA is instructed to generate 2260 * DIFs and insert them into data stream (or strip DIF from 2261 * incoming data stream) 2262 * 2263 * The buffer list consists of just one protection group described 2264 * below: 2265 * +-------------------------+ 2266 * start of prot group --> | DI_SEED | 2267 * +-------------------------+ 2268 * | Data SGE | 2269 * +-------------------------+ 2270 * |more Data SGE's ... (opt)| 2271 * +-------------------------+ 2272 * 2273 * 2274 * Note: Data s/g buffers have been dma mapped 2275 * 2276 * Returns the number of SGEs added to the SGL. 2277 **/ 2278 static int 2279 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2280 struct sli4_sge *sgl, int datasegcnt) 2281 { 2282 struct scatterlist *sgde = NULL; /* s/g data entry */ 2283 struct sli4_sge_diseed *diseed = NULL; 2284 dma_addr_t physaddr; 2285 int i = 0, num_sge = 0, status; 2286 uint32_t reftag; 2287 uint8_t txop, rxop; 2288 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2289 uint32_t rc; 2290 #endif 2291 uint32_t checking = 1; 2292 uint32_t dma_len; 2293 uint32_t dma_offset = 0; 2294 2295 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2296 if (status) 2297 goto out; 2298 2299 /* extract some info from the scsi command for pde*/ 2300 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ 2301 2302 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2303 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2304 if (rc) { 2305 if (rc & BG_ERR_SWAP) 2306 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2307 if (rc & BG_ERR_CHECK) 2308 checking = 0; 2309 } 2310 #endif 2311 2312 /* setup DISEED with what we have */ 2313 diseed = (struct sli4_sge_diseed *) sgl; 2314 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2315 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2316 2317 /* Endianness conversion if necessary */ 2318 diseed->ref_tag = cpu_to_le32(reftag); 2319 diseed->ref_tag_tran = diseed->ref_tag; 2320 2321 /* 2322 * We only need to check the data on READs, for WRITEs 2323 * protection data is automatically generated, not checked. 2324 */ 2325 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 2326 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) 2327 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2328 else 2329 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2330 2331 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 2332 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2333 else 2334 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2335 } 2336 2337 /* setup DISEED with the rest of the info */ 2338 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2339 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2340 2341 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2342 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2343 2344 /* Endianness conversion if necessary for DISEED */ 2345 diseed->word2 = cpu_to_le32(diseed->word2); 2346 diseed->word3 = cpu_to_le32(diseed->word3); 2347 2348 /* advance bpl and increment sge count */ 2349 num_sge++; 2350 sgl++; 2351 2352 /* assumption: caller has already run dma_map_sg on command data */ 2353 scsi_for_each_sg(sc, sgde, datasegcnt, i) { 2354 physaddr = sg_dma_address(sgde); 2355 dma_len = sg_dma_len(sgde); 2356 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2357 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2358 if ((i + 1) == datasegcnt) 2359 bf_set(lpfc_sli4_sge_last, sgl, 1); 2360 else 2361 bf_set(lpfc_sli4_sge_last, sgl, 0); 2362 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2363 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2364 2365 sgl->sge_len = cpu_to_le32(dma_len); 2366 dma_offset += dma_len; 2367 2368 sgl++; 2369 num_sge++; 2370 } 2371 2372 out: 2373 return num_sge; 2374 } 2375 2376 /** 2377 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data 2378 * @phba: The Hba for which this call is being executed. 2379 * @sc: pointer to scsi command we're working on 2380 * @sgl: pointer to buffer list for protection groups 2381 * @datacnt: number of segments of data that have been dma mapped 2382 * @protcnt: number of segment of protection data that have been dma mapped 2383 * 2384 * This function sets up SGL buffer list for protection groups of 2385 * type LPFC_PG_TYPE_DIF 2386 * 2387 * This is usually used when DIFs are in their own buffers, 2388 * separate from the data. The HBA can then by instructed 2389 * to place the DIFs in the outgoing stream. For read operations, 2390 * The HBA could extract the DIFs and place it in DIF buffers. 2391 * 2392 * The buffer list for this type consists of one or more of the 2393 * protection groups described below: 2394 * +-------------------------+ 2395 * start of first prot group --> | DISEED | 2396 * +-------------------------+ 2397 * | DIF (Prot SGE) | 2398 * +-------------------------+ 2399 * | Data SGE | 2400 * +-------------------------+ 2401 * |more Data SGE's ... (opt)| 2402 * +-------------------------+ 2403 * start of new prot group --> | DISEED | 2404 * +-------------------------+ 2405 * | ... | 2406 * +-------------------------+ 2407 * 2408 * Note: It is assumed that both data and protection s/g buffers have been 2409 * mapped for DMA 2410 * 2411 * Returns the number of SGEs added to the SGL. 2412 **/ 2413 static int 2414 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2415 struct sli4_sge *sgl, int datacnt, int protcnt) 2416 { 2417 struct scatterlist *sgde = NULL; /* s/g data entry */ 2418 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 2419 struct sli4_sge_diseed *diseed = NULL; 2420 dma_addr_t dataphysaddr, protphysaddr; 2421 unsigned short curr_data = 0, curr_prot = 0; 2422 unsigned int split_offset; 2423 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 2424 unsigned int protgrp_blks, protgrp_bytes; 2425 unsigned int remainder, subtotal; 2426 int status; 2427 unsigned char pgdone = 0, alldone = 0; 2428 unsigned blksize; 2429 uint32_t reftag; 2430 uint8_t txop, rxop; 2431 uint32_t dma_len; 2432 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2433 uint32_t rc; 2434 #endif 2435 uint32_t checking = 1; 2436 uint32_t dma_offset = 0; 2437 int num_sge = 0; 2438 2439 sgpe = scsi_prot_sglist(sc); 2440 sgde = scsi_sglist(sc); 2441 2442 if (!sgpe || !sgde) { 2443 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2444 "9082 Invalid s/g entry: data=0x%p prot=0x%p\n", 2445 sgpe, sgde); 2446 return 0; 2447 } 2448 2449 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2450 if (status) 2451 goto out; 2452 2453 /* extract some info from the scsi command */ 2454 blksize = lpfc_cmd_blksize(sc); 2455 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ 2456 2457 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2458 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2459 if (rc) { 2460 if (rc & BG_ERR_SWAP) 2461 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2462 if (rc & BG_ERR_CHECK) 2463 checking = 0; 2464 } 2465 #endif 2466 2467 split_offset = 0; 2468 do { 2469 /* Check to see if we ran out of space */ 2470 if (num_sge >= (phba->cfg_total_seg_cnt - 2)) 2471 return num_sge + 3; 2472 2473 /* setup DISEED with what we have */ 2474 diseed = (struct sli4_sge_diseed *) sgl; 2475 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2476 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2477 2478 /* Endianness conversion if necessary */ 2479 diseed->ref_tag = cpu_to_le32(reftag); 2480 diseed->ref_tag_tran = diseed->ref_tag; 2481 2482 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) { 2483 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2484 2485 } else { 2486 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2487 /* 2488 * When in this mode, the hardware will replace 2489 * the guard tag from the host with a 2490 * newly generated good CRC for the wire. 2491 * Switch to raw mode here to avoid this 2492 * behavior. What the host sends gets put on the wire. 2493 */ 2494 if (txop == BG_OP_IN_CRC_OUT_CRC) { 2495 txop = BG_OP_RAW_MODE; 2496 rxop = BG_OP_RAW_MODE; 2497 } 2498 } 2499 2500 2501 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 2502 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2503 else 2504 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2505 2506 /* setup DISEED with the rest of the info */ 2507 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2508 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2509 2510 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2511 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2512 2513 /* Endianness conversion if necessary for DISEED */ 2514 diseed->word2 = cpu_to_le32(diseed->word2); 2515 diseed->word3 = cpu_to_le32(diseed->word3); 2516 2517 /* advance sgl and increment bde count */ 2518 num_sge++; 2519 sgl++; 2520 2521 /* setup the first BDE that points to protection buffer */ 2522 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 2523 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 2524 2525 /* must be integer multiple of the DIF block length */ 2526 BUG_ON(protgroup_len % 8); 2527 2528 /* Now setup DIF SGE */ 2529 sgl->word2 = 0; 2530 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); 2531 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); 2532 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); 2533 sgl->word2 = cpu_to_le32(sgl->word2); 2534 2535 protgrp_blks = protgroup_len / 8; 2536 protgrp_bytes = protgrp_blks * blksize; 2537 2538 /* check if DIF SGE is crossing the 4K boundary; if so split */ 2539 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { 2540 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); 2541 protgroup_offset += protgroup_remainder; 2542 protgrp_blks = protgroup_remainder / 8; 2543 protgrp_bytes = protgrp_blks * blksize; 2544 } else { 2545 protgroup_offset = 0; 2546 curr_prot++; 2547 } 2548 2549 num_sge++; 2550 2551 /* setup SGE's for data blocks associated with DIF data */ 2552 pgdone = 0; 2553 subtotal = 0; /* total bytes processed for current prot grp */ 2554 while (!pgdone) { 2555 /* Check to see if we ran out of space */ 2556 if (num_sge >= phba->cfg_total_seg_cnt) 2557 return num_sge + 1; 2558 2559 if (!sgde) { 2560 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2561 "9086 BLKGRD:%s Invalid data segment\n", 2562 __func__); 2563 return 0; 2564 } 2565 sgl++; 2566 dataphysaddr = sg_dma_address(sgde) + split_offset; 2567 2568 remainder = sg_dma_len(sgde) - split_offset; 2569 2570 if ((subtotal + remainder) <= protgrp_bytes) { 2571 /* we can use this whole buffer */ 2572 dma_len = remainder; 2573 split_offset = 0; 2574 2575 if ((subtotal + remainder) == protgrp_bytes) 2576 pgdone = 1; 2577 } else { 2578 /* must split this buffer with next prot grp */ 2579 dma_len = protgrp_bytes - subtotal; 2580 split_offset += dma_len; 2581 } 2582 2583 subtotal += dma_len; 2584 2585 sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr)); 2586 sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr)); 2587 bf_set(lpfc_sli4_sge_last, sgl, 0); 2588 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2589 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2590 2591 sgl->sge_len = cpu_to_le32(dma_len); 2592 dma_offset += dma_len; 2593 2594 num_sge++; 2595 curr_data++; 2596 2597 if (split_offset) 2598 break; 2599 2600 /* Move to the next s/g segment if possible */ 2601 sgde = sg_next(sgde); 2602 } 2603 2604 if (protgroup_offset) { 2605 /* update the reference tag */ 2606 reftag += protgrp_blks; 2607 sgl++; 2608 continue; 2609 } 2610 2611 /* are we done ? */ 2612 if (curr_prot == protcnt) { 2613 bf_set(lpfc_sli4_sge_last, sgl, 1); 2614 alldone = 1; 2615 } else if (curr_prot < protcnt) { 2616 /* advance to next prot buffer */ 2617 sgpe = sg_next(sgpe); 2618 sgl++; 2619 2620 /* update the reference tag */ 2621 reftag += protgrp_blks; 2622 } else { 2623 /* if we're here, we have a bug */ 2624 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2625 "9085 BLKGRD: bug in %s\n", __func__); 2626 } 2627 2628 } while (!alldone); 2629 2630 out: 2631 2632 return num_sge; 2633 } 2634 2635 /** 2636 * lpfc_prot_group_type - Get prtotection group type of SCSI command 2637 * @phba: The Hba for which this call is being executed. 2638 * @sc: pointer to scsi command we're working on 2639 * 2640 * Given a SCSI command that supports DIF, determine composition of protection 2641 * groups involved in setting up buffer lists 2642 * 2643 * Returns: Protection group type (with or without DIF) 2644 * 2645 **/ 2646 static int 2647 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) 2648 { 2649 int ret = LPFC_PG_TYPE_INVALID; 2650 unsigned char op = scsi_get_prot_op(sc); 2651 2652 switch (op) { 2653 case SCSI_PROT_READ_STRIP: 2654 case SCSI_PROT_WRITE_INSERT: 2655 ret = LPFC_PG_TYPE_NO_DIF; 2656 break; 2657 case SCSI_PROT_READ_INSERT: 2658 case SCSI_PROT_WRITE_STRIP: 2659 case SCSI_PROT_READ_PASS: 2660 case SCSI_PROT_WRITE_PASS: 2661 ret = LPFC_PG_TYPE_DIF_BUF; 2662 break; 2663 default: 2664 if (phba) 2665 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2666 "9021 Unsupported protection op:%d\n", 2667 op); 2668 break; 2669 } 2670 return ret; 2671 } 2672 2673 /** 2674 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard 2675 * @phba: The Hba for which this call is being executed. 2676 * @lpfc_cmd: The scsi buffer which is going to be adjusted. 2677 * 2678 * Adjust the data length to account for how much data 2679 * is actually on the wire. 2680 * 2681 * returns the adjusted data length 2682 **/ 2683 static int 2684 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, 2685 struct lpfc_scsi_buf *lpfc_cmd) 2686 { 2687 struct scsi_cmnd *sc = lpfc_cmd->pCmd; 2688 int fcpdl; 2689 2690 fcpdl = scsi_bufflen(sc); 2691 2692 /* Check if there is protection data on the wire */ 2693 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 2694 /* Read check for protection data */ 2695 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) 2696 return fcpdl; 2697 2698 } else { 2699 /* Write check for protection data */ 2700 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) 2701 return fcpdl; 2702 } 2703 2704 /* 2705 * If we are in DIF Type 1 mode every data block has a 8 byte 2706 * DIF (trailer) attached to it. Must ajust FCP data length 2707 * to account for the protection data. 2708 */ 2709 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8; 2710 2711 return fcpdl; 2712 } 2713 2714 /** 2715 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 2716 * @phba: The Hba for which this call is being executed. 2717 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 2718 * 2719 * This is the protection/DIF aware version of 2720 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 2721 * two functions eventually, but for now, it's here 2722 **/ 2723 static int 2724 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, 2725 struct lpfc_scsi_buf *lpfc_cmd) 2726 { 2727 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 2728 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 2729 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 2730 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 2731 uint32_t num_bde = 0; 2732 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 2733 int prot_group_type = 0; 2734 int fcpdl; 2735 2736 /* 2737 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd 2738 * fcp_rsp regions to the first data bde entry 2739 */ 2740 bpl += 2; 2741 if (scsi_sg_count(scsi_cmnd)) { 2742 /* 2743 * The driver stores the segment count returned from pci_map_sg 2744 * because this a count of dma-mappings used to map the use_sg 2745 * pages. They are not guaranteed to be the same for those 2746 * architectures that implement an IOMMU. 2747 */ 2748 datasegcnt = dma_map_sg(&phba->pcidev->dev, 2749 scsi_sglist(scsi_cmnd), 2750 scsi_sg_count(scsi_cmnd), datadir); 2751 if (unlikely(!datasegcnt)) 2752 return 1; 2753 2754 lpfc_cmd->seg_cnt = datasegcnt; 2755 2756 /* First check if data segment count from SCSI Layer is good */ 2757 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) 2758 goto err; 2759 2760 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2761 2762 switch (prot_group_type) { 2763 case LPFC_PG_TYPE_NO_DIF: 2764 2765 /* Here we need to add a PDE5 and PDE6 to the count */ 2766 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) 2767 goto err; 2768 2769 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2770 datasegcnt); 2771 /* we should have 2 or more entries in buffer list */ 2772 if (num_bde < 2) 2773 goto err; 2774 break; 2775 2776 case LPFC_PG_TYPE_DIF_BUF: 2777 /* 2778 * This type indicates that protection buffers are 2779 * passed to the driver, so that needs to be prepared 2780 * for DMA 2781 */ 2782 protsegcnt = dma_map_sg(&phba->pcidev->dev, 2783 scsi_prot_sglist(scsi_cmnd), 2784 scsi_prot_sg_count(scsi_cmnd), datadir); 2785 if (unlikely(!protsegcnt)) { 2786 scsi_dma_unmap(scsi_cmnd); 2787 return 1; 2788 } 2789 2790 lpfc_cmd->prot_seg_cnt = protsegcnt; 2791 2792 /* 2793 * There is a minimun of 4 BPLs used for every 2794 * protection data segment. 2795 */ 2796 if ((lpfc_cmd->prot_seg_cnt * 4) > 2797 (phba->cfg_total_seg_cnt - 2)) 2798 goto err; 2799 2800 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2801 datasegcnt, protsegcnt); 2802 /* we should have 3 or more entries in buffer list */ 2803 if ((num_bde < 3) || 2804 (num_bde > phba->cfg_total_seg_cnt)) 2805 goto err; 2806 break; 2807 2808 case LPFC_PG_TYPE_INVALID: 2809 default: 2810 scsi_dma_unmap(scsi_cmnd); 2811 lpfc_cmd->seg_cnt = 0; 2812 2813 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2814 "9022 Unexpected protection group %i\n", 2815 prot_group_type); 2816 return 1; 2817 } 2818 } 2819 2820 /* 2821 * Finish initializing those IOCB fields that are dependent on the 2822 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly 2823 * reinitialized since all iocb memory resources are used many times 2824 * for transmit, receive, and continuation bpl's. 2825 */ 2826 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 2827 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64)); 2828 iocb_cmd->ulpBdeCount = 1; 2829 iocb_cmd->ulpLe = 1; 2830 2831 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 2832 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 2833 2834 /* 2835 * Due to difference in data length between DIF/non-DIF paths, 2836 * we need to set word 4 of IOCB here 2837 */ 2838 iocb_cmd->un.fcpi.fcpi_parm = fcpdl; 2839 2840 return 0; 2841 err: 2842 if (lpfc_cmd->seg_cnt) 2843 scsi_dma_unmap(scsi_cmnd); 2844 if (lpfc_cmd->prot_seg_cnt) 2845 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 2846 scsi_prot_sg_count(scsi_cmnd), 2847 scsi_cmnd->sc_data_direction); 2848 2849 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2850 "9023 Cannot setup S/G List for HBA" 2851 "IO segs %d/%d BPL %d SCSI %d: %d %d\n", 2852 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 2853 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 2854 prot_group_type, num_bde); 2855 2856 lpfc_cmd->seg_cnt = 0; 2857 lpfc_cmd->prot_seg_cnt = 0; 2858 return 1; 2859 } 2860 2861 /* 2862 * This function calcuates the T10 DIF guard tag 2863 * on the specified data using a CRC algorithmn 2864 * using crc_t10dif. 2865 */ 2866 static uint16_t 2867 lpfc_bg_crc(uint8_t *data, int count) 2868 { 2869 uint16_t crc = 0; 2870 uint16_t x; 2871 2872 crc = crc_t10dif(data, count); 2873 x = cpu_to_be16(crc); 2874 return x; 2875 } 2876 2877 /* 2878 * This function calcuates the T10 DIF guard tag 2879 * on the specified data using a CSUM algorithmn 2880 * using ip_compute_csum. 2881 */ 2882 static uint16_t 2883 lpfc_bg_csum(uint8_t *data, int count) 2884 { 2885 uint16_t ret; 2886 2887 ret = ip_compute_csum(data, count); 2888 return ret; 2889 } 2890 2891 /* 2892 * This function examines the protection data to try to determine 2893 * what type of T10-DIF error occurred. 2894 */ 2895 static void 2896 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 2897 { 2898 struct scatterlist *sgpe; /* s/g prot entry */ 2899 struct scatterlist *sgde; /* s/g data entry */ 2900 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2901 struct scsi_dif_tuple *src = NULL; 2902 uint8_t *data_src = NULL; 2903 uint16_t guard_tag; 2904 uint16_t start_app_tag, app_tag; 2905 uint32_t start_ref_tag, ref_tag; 2906 int prot, protsegcnt; 2907 int err_type, len, data_len; 2908 int chk_ref, chk_app, chk_guard; 2909 uint16_t sum; 2910 unsigned blksize; 2911 2912 err_type = BGS_GUARD_ERR_MASK; 2913 sum = 0; 2914 guard_tag = 0; 2915 2916 /* First check to see if there is protection data to examine */ 2917 prot = scsi_get_prot_op(cmd); 2918 if ((prot == SCSI_PROT_READ_STRIP) || 2919 (prot == SCSI_PROT_WRITE_INSERT) || 2920 (prot == SCSI_PROT_NORMAL)) 2921 goto out; 2922 2923 /* Currently the driver just supports ref_tag and guard_tag checking */ 2924 chk_ref = 1; 2925 chk_app = 0; 2926 chk_guard = 0; 2927 2928 /* Setup a ptr to the protection data provided by the SCSI host */ 2929 sgpe = scsi_prot_sglist(cmd); 2930 protsegcnt = lpfc_cmd->prot_seg_cnt; 2931 2932 if (sgpe && protsegcnt) { 2933 2934 /* 2935 * We will only try to verify guard tag if the segment 2936 * data length is a multiple of the blksize. 2937 */ 2938 sgde = scsi_sglist(cmd); 2939 blksize = lpfc_cmd_blksize(cmd); 2940 data_src = (uint8_t *)sg_virt(sgde); 2941 data_len = sgde->length; 2942 if ((data_len & (blksize - 1)) == 0) 2943 chk_guard = 1; 2944 2945 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2946 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */ 2947 start_app_tag = src->app_tag; 2948 len = sgpe->length; 2949 while (src && protsegcnt) { 2950 while (len) { 2951 2952 /* 2953 * First check to see if a protection data 2954 * check is valid 2955 */ 2956 if ((src->ref_tag == T10_PI_REF_ESCAPE) || 2957 (src->app_tag == T10_PI_APP_ESCAPE)) { 2958 start_ref_tag++; 2959 goto skipit; 2960 } 2961 2962 /* First Guard Tag checking */ 2963 if (chk_guard) { 2964 guard_tag = src->guard_tag; 2965 if (lpfc_cmd_guard_csum(cmd)) 2966 sum = lpfc_bg_csum(data_src, 2967 blksize); 2968 else 2969 sum = lpfc_bg_crc(data_src, 2970 blksize); 2971 if ((guard_tag != sum)) { 2972 err_type = BGS_GUARD_ERR_MASK; 2973 goto out; 2974 } 2975 } 2976 2977 /* Reference Tag checking */ 2978 ref_tag = be32_to_cpu(src->ref_tag); 2979 if (chk_ref && (ref_tag != start_ref_tag)) { 2980 err_type = BGS_REFTAG_ERR_MASK; 2981 goto out; 2982 } 2983 start_ref_tag++; 2984 2985 /* App Tag checking */ 2986 app_tag = src->app_tag; 2987 if (chk_app && (app_tag != start_app_tag)) { 2988 err_type = BGS_APPTAG_ERR_MASK; 2989 goto out; 2990 } 2991 skipit: 2992 len -= sizeof(struct scsi_dif_tuple); 2993 if (len < 0) 2994 len = 0; 2995 src++; 2996 2997 data_src += blksize; 2998 data_len -= blksize; 2999 3000 /* 3001 * Are we at the end of the Data segment? 3002 * The data segment is only used for Guard 3003 * tag checking. 3004 */ 3005 if (chk_guard && (data_len == 0)) { 3006 chk_guard = 0; 3007 sgde = sg_next(sgde); 3008 if (!sgde) 3009 goto out; 3010 3011 data_src = (uint8_t *)sg_virt(sgde); 3012 data_len = sgde->length; 3013 if ((data_len & (blksize - 1)) == 0) 3014 chk_guard = 1; 3015 } 3016 } 3017 3018 /* Goto the next Protection data segment */ 3019 sgpe = sg_next(sgpe); 3020 if (sgpe) { 3021 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 3022 len = sgpe->length; 3023 } else { 3024 src = NULL; 3025 } 3026 protsegcnt--; 3027 } 3028 } 3029 out: 3030 if (err_type == BGS_GUARD_ERR_MASK) { 3031 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 3032 0x10, 0x1); 3033 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 3034 SAM_STAT_CHECK_CONDITION; 3035 phba->bg_guard_err_cnt++; 3036 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3037 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n", 3038 (unsigned long)scsi_get_lba(cmd), 3039 sum, guard_tag); 3040 3041 } else if (err_type == BGS_REFTAG_ERR_MASK) { 3042 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 3043 0x10, 0x3); 3044 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 3045 SAM_STAT_CHECK_CONDITION; 3046 3047 phba->bg_reftag_err_cnt++; 3048 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3049 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n", 3050 (unsigned long)scsi_get_lba(cmd), 3051 ref_tag, start_ref_tag); 3052 3053 } else if (err_type == BGS_APPTAG_ERR_MASK) { 3054 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 3055 0x10, 0x2); 3056 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 3057 SAM_STAT_CHECK_CONDITION; 3058 3059 phba->bg_apptag_err_cnt++; 3060 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3061 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n", 3062 (unsigned long)scsi_get_lba(cmd), 3063 app_tag, start_app_tag); 3064 } 3065 } 3066 3067 3068 /* 3069 * This function checks for BlockGuard errors detected by 3070 * the HBA. In case of errors, the ASC/ASCQ fields in the 3071 * sense buffer will be set accordingly, paired with 3072 * ILLEGAL_REQUEST to signal to the kernel that the HBA 3073 * detected corruption. 3074 * 3075 * Returns: 3076 * 0 - No error found 3077 * 1 - BlockGuard error found 3078 * -1 - Internal error (bad profile, ...etc) 3079 */ 3080 static int 3081 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, 3082 struct lpfc_iocbq *pIocbOut) 3083 { 3084 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 3085 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg; 3086 int ret = 0; 3087 uint32_t bghm = bgf->bghm; 3088 uint32_t bgstat = bgf->bgstat; 3089 uint64_t failing_sector = 0; 3090 3091 spin_lock(&_dump_buf_lock); 3092 if (!_dump_buf_done) { 3093 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" 3094 " Data for %u blocks to debugfs\n", 3095 (cmd->cmnd[7] << 8 | cmd->cmnd[8])); 3096 lpfc_debug_save_data(phba, cmd); 3097 3098 /* If we have a prot sgl, save the DIF buffer */ 3099 if (lpfc_prot_group_type(phba, cmd) == 3100 LPFC_PG_TYPE_DIF_BUF) { 3101 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: " 3102 "Saving DIF for %u blocks to debugfs\n", 3103 (cmd->cmnd[7] << 8 | cmd->cmnd[8])); 3104 lpfc_debug_save_dif(phba, cmd); 3105 } 3106 3107 _dump_buf_done = 1; 3108 } 3109 spin_unlock(&_dump_buf_lock); 3110 3111 if (lpfc_bgs_get_invalid_prof(bgstat)) { 3112 cmd->result = DID_ERROR << 16; 3113 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3114 "9072 BLKGRD: Invalid BG Profile in cmd" 3115 " 0x%x lba 0x%llx blk cnt 0x%x " 3116 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3117 (unsigned long long)scsi_get_lba(cmd), 3118 blk_rq_sectors(cmd->request), bgstat, bghm); 3119 ret = (-1); 3120 goto out; 3121 } 3122 3123 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 3124 cmd->result = DID_ERROR << 16; 3125 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3126 "9073 BLKGRD: Invalid BG PDIF Block in cmd" 3127 " 0x%x lba 0x%llx blk cnt 0x%x " 3128 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3129 (unsigned long long)scsi_get_lba(cmd), 3130 blk_rq_sectors(cmd->request), bgstat, bghm); 3131 ret = (-1); 3132 goto out; 3133 } 3134 3135 if (lpfc_bgs_get_guard_err(bgstat)) { 3136 ret = 1; 3137 3138 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 3139 0x10, 0x1); 3140 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 3141 SAM_STAT_CHECK_CONDITION; 3142 phba->bg_guard_err_cnt++; 3143 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3144 "9055 BLKGRD: Guard Tag error in cmd" 3145 " 0x%x lba 0x%llx blk cnt 0x%x " 3146 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3147 (unsigned long long)scsi_get_lba(cmd), 3148 blk_rq_sectors(cmd->request), bgstat, bghm); 3149 } 3150 3151 if (lpfc_bgs_get_reftag_err(bgstat)) { 3152 ret = 1; 3153 3154 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 3155 0x10, 0x3); 3156 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 3157 SAM_STAT_CHECK_CONDITION; 3158 3159 phba->bg_reftag_err_cnt++; 3160 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3161 "9056 BLKGRD: Ref Tag error in cmd" 3162 " 0x%x lba 0x%llx blk cnt 0x%x " 3163 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3164 (unsigned long long)scsi_get_lba(cmd), 3165 blk_rq_sectors(cmd->request), bgstat, bghm); 3166 } 3167 3168 if (lpfc_bgs_get_apptag_err(bgstat)) { 3169 ret = 1; 3170 3171 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 3172 0x10, 0x2); 3173 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 3174 SAM_STAT_CHECK_CONDITION; 3175 3176 phba->bg_apptag_err_cnt++; 3177 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3178 "9061 BLKGRD: App Tag error in cmd" 3179 " 0x%x lba 0x%llx blk cnt 0x%x " 3180 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3181 (unsigned long long)scsi_get_lba(cmd), 3182 blk_rq_sectors(cmd->request), bgstat, bghm); 3183 } 3184 3185 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3186 /* 3187 * setup sense data descriptor 0 per SPC-4 as an information 3188 * field, and put the failing LBA in it. 3189 * This code assumes there was also a guard/app/ref tag error 3190 * indication. 3191 */ 3192 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 3193 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 3194 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 3195 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 3196 3197 /* bghm is a "on the wire" FC frame based count */ 3198 switch (scsi_get_prot_op(cmd)) { 3199 case SCSI_PROT_READ_INSERT: 3200 case SCSI_PROT_WRITE_STRIP: 3201 bghm /= cmd->device->sector_size; 3202 break; 3203 case SCSI_PROT_READ_STRIP: 3204 case SCSI_PROT_WRITE_INSERT: 3205 case SCSI_PROT_READ_PASS: 3206 case SCSI_PROT_WRITE_PASS: 3207 bghm /= (cmd->device->sector_size + 3208 sizeof(struct scsi_dif_tuple)); 3209 break; 3210 } 3211 3212 failing_sector = scsi_get_lba(cmd); 3213 failing_sector += bghm; 3214 3215 /* Descriptor Information */ 3216 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3217 } 3218 3219 if (!ret) { 3220 /* No error was reported - problem in FW? */ 3221 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3222 "9057 BLKGRD: Unknown error in cmd" 3223 " 0x%x lba 0x%llx blk cnt 0x%x " 3224 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3225 (unsigned long long)scsi_get_lba(cmd), 3226 blk_rq_sectors(cmd->request), bgstat, bghm); 3227 3228 /* Calcuate what type of error it was */ 3229 lpfc_calc_bg_err(phba, lpfc_cmd); 3230 } 3231 out: 3232 return ret; 3233 } 3234 3235 /** 3236 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3237 * @phba: The Hba for which this call is being executed. 3238 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3239 * 3240 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 3241 * field of @lpfc_cmd for device with SLI-4 interface spec. 3242 * 3243 * Return codes: 3244 * 1 - Error 3245 * 0 - Success 3246 **/ 3247 static int 3248 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 3249 { 3250 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3251 struct scatterlist *sgel = NULL; 3252 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3253 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 3254 struct sli4_sge *first_data_sgl; 3255 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 3256 dma_addr_t physaddr; 3257 uint32_t num_bde = 0; 3258 uint32_t dma_len; 3259 uint32_t dma_offset = 0; 3260 int nseg; 3261 struct ulp_bde64 *bde; 3262 3263 /* 3264 * There are three possibilities here - use scatter-gather segment, use 3265 * the single mapping, or neither. Start the lpfc command prep by 3266 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 3267 * data bde entry. 3268 */ 3269 if (scsi_sg_count(scsi_cmnd)) { 3270 /* 3271 * The driver stores the segment count returned from pci_map_sg 3272 * because this a count of dma-mappings used to map the use_sg 3273 * pages. They are not guaranteed to be the same for those 3274 * architectures that implement an IOMMU. 3275 */ 3276 3277 nseg = scsi_dma_map(scsi_cmnd); 3278 if (unlikely(nseg <= 0)) 3279 return 1; 3280 sgl += 1; 3281 /* clear the last flag in the fcp_rsp map entry */ 3282 sgl->word2 = le32_to_cpu(sgl->word2); 3283 bf_set(lpfc_sli4_sge_last, sgl, 0); 3284 sgl->word2 = cpu_to_le32(sgl->word2); 3285 sgl += 1; 3286 first_data_sgl = sgl; 3287 lpfc_cmd->seg_cnt = nseg; 3288 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3289 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" 3290 " %s: Too many sg segments from " 3291 "dma_map_sg. Config %d, seg_cnt %d\n", 3292 __func__, phba->cfg_sg_seg_cnt, 3293 lpfc_cmd->seg_cnt); 3294 lpfc_cmd->seg_cnt = 0; 3295 scsi_dma_unmap(scsi_cmnd); 3296 return 1; 3297 } 3298 3299 /* 3300 * The driver established a maximum scatter-gather segment count 3301 * during probe that limits the number of sg elements in any 3302 * single scsi command. Just run through the seg_cnt and format 3303 * the sge's. 3304 * When using SLI-3 the driver will try to fit all the BDEs into 3305 * the IOCB. If it can't then the BDEs get added to a BPL as it 3306 * does for SLI-2 mode. 3307 */ 3308 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 3309 physaddr = sg_dma_address(sgel); 3310 dma_len = sg_dma_len(sgel); 3311 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 3312 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 3313 sgl->word2 = le32_to_cpu(sgl->word2); 3314 if ((num_bde + 1) == nseg) 3315 bf_set(lpfc_sli4_sge_last, sgl, 1); 3316 else 3317 bf_set(lpfc_sli4_sge_last, sgl, 0); 3318 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 3319 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 3320 sgl->word2 = cpu_to_le32(sgl->word2); 3321 sgl->sge_len = cpu_to_le32(dma_len); 3322 dma_offset += dma_len; 3323 sgl++; 3324 } 3325 /* 3326 * Setup the first Payload BDE. For FCoE we just key off 3327 * Performance Hints, for FC we use lpfc_enable_pbde. 3328 * We populate words 13-15 of IOCB/WQE. 3329 */ 3330 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3331 phba->cfg_enable_pbde) { 3332 bde = (struct ulp_bde64 *) 3333 &(iocb_cmd->unsli3.sli3Words[5]); 3334 bde->addrLow = first_data_sgl->addr_lo; 3335 bde->addrHigh = first_data_sgl->addr_hi; 3336 bde->tus.f.bdeSize = 3337 le32_to_cpu(first_data_sgl->sge_len); 3338 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3339 bde->tus.w = cpu_to_le32(bde->tus.w); 3340 } 3341 } else { 3342 sgl += 1; 3343 /* clear the last flag in the fcp_rsp map entry */ 3344 sgl->word2 = le32_to_cpu(sgl->word2); 3345 bf_set(lpfc_sli4_sge_last, sgl, 1); 3346 sgl->word2 = cpu_to_le32(sgl->word2); 3347 3348 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3349 phba->cfg_enable_pbde) { 3350 bde = (struct ulp_bde64 *) 3351 &(iocb_cmd->unsli3.sli3Words[5]); 3352 memset(bde, 0, (sizeof(uint32_t) * 3)); 3353 } 3354 } 3355 3356 /* 3357 * Finish initializing those IOCB fields that are dependent on the 3358 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 3359 * explicitly reinitialized. 3360 * all iocb memory resources are reused. 3361 */ 3362 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 3363 3364 /* 3365 * Due to difference in data length between DIF/non-DIF paths, 3366 * we need to set word 4 of IOCB here 3367 */ 3368 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 3369 3370 /* 3371 * If the OAS driver feature is enabled and the lun is enabled for 3372 * OAS, set the oas iocb related flags. 3373 */ 3374 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3375 scsi_cmnd->device->hostdata)->oas_enabled) { 3376 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3377 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) 3378 scsi_cmnd->device->hostdata)->priority; 3379 } 3380 return 0; 3381 } 3382 3383 /** 3384 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3385 * @phba: The Hba for which this call is being executed. 3386 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3387 * 3388 * This is the protection/DIF aware version of 3389 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 3390 * two functions eventually, but for now, it's here 3391 **/ 3392 static int 3393 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, 3394 struct lpfc_scsi_buf *lpfc_cmd) 3395 { 3396 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3397 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3398 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); 3399 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 3400 uint32_t num_sge = 0; 3401 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3402 int prot_group_type = 0; 3403 int fcpdl; 3404 3405 /* 3406 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd 3407 * fcp_rsp regions to the first data sge entry 3408 */ 3409 if (scsi_sg_count(scsi_cmnd)) { 3410 /* 3411 * The driver stores the segment count returned from pci_map_sg 3412 * because this a count of dma-mappings used to map the use_sg 3413 * pages. They are not guaranteed to be the same for those 3414 * architectures that implement an IOMMU. 3415 */ 3416 datasegcnt = dma_map_sg(&phba->pcidev->dev, 3417 scsi_sglist(scsi_cmnd), 3418 scsi_sg_count(scsi_cmnd), datadir); 3419 if (unlikely(!datasegcnt)) 3420 return 1; 3421 3422 sgl += 1; 3423 /* clear the last flag in the fcp_rsp map entry */ 3424 sgl->word2 = le32_to_cpu(sgl->word2); 3425 bf_set(lpfc_sli4_sge_last, sgl, 0); 3426 sgl->word2 = cpu_to_le32(sgl->word2); 3427 3428 sgl += 1; 3429 lpfc_cmd->seg_cnt = datasegcnt; 3430 3431 /* First check if data segment count from SCSI Layer is good */ 3432 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) 3433 goto err; 3434 3435 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3436 3437 switch (prot_group_type) { 3438 case LPFC_PG_TYPE_NO_DIF: 3439 /* Here we need to add a DISEED to the count */ 3440 if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt) 3441 goto err; 3442 3443 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3444 datasegcnt); 3445 3446 /* we should have 2 or more entries in buffer list */ 3447 if (num_sge < 2) 3448 goto err; 3449 break; 3450 3451 case LPFC_PG_TYPE_DIF_BUF: 3452 /* 3453 * This type indicates that protection buffers are 3454 * passed to the driver, so that needs to be prepared 3455 * for DMA 3456 */ 3457 protsegcnt = dma_map_sg(&phba->pcidev->dev, 3458 scsi_prot_sglist(scsi_cmnd), 3459 scsi_prot_sg_count(scsi_cmnd), datadir); 3460 if (unlikely(!protsegcnt)) { 3461 scsi_dma_unmap(scsi_cmnd); 3462 return 1; 3463 } 3464 3465 lpfc_cmd->prot_seg_cnt = protsegcnt; 3466 /* 3467 * There is a minimun of 3 SGEs used for every 3468 * protection data segment. 3469 */ 3470 if ((lpfc_cmd->prot_seg_cnt * 3) > 3471 (phba->cfg_total_seg_cnt - 2)) 3472 goto err; 3473 3474 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3475 datasegcnt, protsegcnt); 3476 3477 /* we should have 3 or more entries in buffer list */ 3478 if ((num_sge < 3) || 3479 (num_sge > phba->cfg_total_seg_cnt)) 3480 goto err; 3481 break; 3482 3483 case LPFC_PG_TYPE_INVALID: 3484 default: 3485 scsi_dma_unmap(scsi_cmnd); 3486 lpfc_cmd->seg_cnt = 0; 3487 3488 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3489 "9083 Unexpected protection group %i\n", 3490 prot_group_type); 3491 return 1; 3492 } 3493 } 3494 3495 switch (scsi_get_prot_op(scsi_cmnd)) { 3496 case SCSI_PROT_WRITE_STRIP: 3497 case SCSI_PROT_READ_STRIP: 3498 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP; 3499 break; 3500 case SCSI_PROT_WRITE_INSERT: 3501 case SCSI_PROT_READ_INSERT: 3502 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT; 3503 break; 3504 case SCSI_PROT_WRITE_PASS: 3505 case SCSI_PROT_READ_PASS: 3506 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS; 3507 break; 3508 } 3509 3510 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 3511 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3512 3513 /* 3514 * Due to difference in data length between DIF/non-DIF paths, 3515 * we need to set word 4 of IOCB here 3516 */ 3517 iocb_cmd->un.fcpi.fcpi_parm = fcpdl; 3518 3519 /* 3520 * If the OAS driver feature is enabled and the lun is enabled for 3521 * OAS, set the oas iocb related flags. 3522 */ 3523 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3524 scsi_cmnd->device->hostdata)->oas_enabled) 3525 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3526 3527 return 0; 3528 err: 3529 if (lpfc_cmd->seg_cnt) 3530 scsi_dma_unmap(scsi_cmnd); 3531 if (lpfc_cmd->prot_seg_cnt) 3532 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 3533 scsi_prot_sg_count(scsi_cmnd), 3534 scsi_cmnd->sc_data_direction); 3535 3536 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3537 "9084 Cannot setup S/G List for HBA" 3538 "IO segs %d/%d SGL %d SCSI %d: %d %d\n", 3539 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 3540 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 3541 prot_group_type, num_sge); 3542 3543 lpfc_cmd->seg_cnt = 0; 3544 lpfc_cmd->prot_seg_cnt = 0; 3545 return 1; 3546 } 3547 3548 /** 3549 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3550 * @phba: The Hba for which this call is being executed. 3551 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3552 * 3553 * This routine wraps the actual DMA mapping function pointer from the 3554 * lpfc_hba struct. 3555 * 3556 * Return codes: 3557 * 1 - Error 3558 * 0 - Success 3559 **/ 3560 static inline int 3561 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 3562 { 3563 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 3564 } 3565 3566 /** 3567 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3568 * using BlockGuard. 3569 * @phba: The Hba for which this call is being executed. 3570 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3571 * 3572 * This routine wraps the actual DMA mapping function pointer from the 3573 * lpfc_hba struct. 3574 * 3575 * Return codes: 3576 * 1 - Error 3577 * 0 - Success 3578 **/ 3579 static inline int 3580 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 3581 { 3582 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 3583 } 3584 3585 /** 3586 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 3587 * @phba: Pointer to hba context object. 3588 * @vport: Pointer to vport object. 3589 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. 3590 * @rsp_iocb: Pointer to response iocb object which reported error. 3591 * 3592 * This function posts an event when there is a SCSI command reporting 3593 * error from the scsi device. 3594 **/ 3595 static void 3596 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, 3597 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) { 3598 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3599 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3600 uint32_t resp_info = fcprsp->rspStatus2; 3601 uint32_t scsi_status = fcprsp->rspStatus3; 3602 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; 3603 struct lpfc_fast_path_event *fast_path_evt = NULL; 3604 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 3605 unsigned long flags; 3606 3607 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 3608 return; 3609 3610 /* If there is queuefull or busy condition send a scsi event */ 3611 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 3612 (cmnd->result == SAM_STAT_BUSY)) { 3613 fast_path_evt = lpfc_alloc_fast_evt(phba); 3614 if (!fast_path_evt) 3615 return; 3616 fast_path_evt->un.scsi_evt.event_type = 3617 FC_REG_SCSI_EVENT; 3618 fast_path_evt->un.scsi_evt.subcategory = 3619 (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 3620 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; 3621 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; 3622 memcpy(&fast_path_evt->un.scsi_evt.wwpn, 3623 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3624 memcpy(&fast_path_evt->un.scsi_evt.wwnn, 3625 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3626 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && 3627 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { 3628 fast_path_evt = lpfc_alloc_fast_evt(phba); 3629 if (!fast_path_evt) 3630 return; 3631 fast_path_evt->un.check_cond_evt.scsi_event.event_type = 3632 FC_REG_SCSI_EVENT; 3633 fast_path_evt->un.check_cond_evt.scsi_event.subcategory = 3634 LPFC_EVENT_CHECK_COND; 3635 fast_path_evt->un.check_cond_evt.scsi_event.lun = 3636 cmnd->device->lun; 3637 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, 3638 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3639 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, 3640 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3641 fast_path_evt->un.check_cond_evt.sense_key = 3642 cmnd->sense_buffer[2] & 0xf; 3643 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; 3644 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; 3645 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 3646 fcpi_parm && 3647 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || 3648 ((scsi_status == SAM_STAT_GOOD) && 3649 !(resp_info & (RESID_UNDER | RESID_OVER))))) { 3650 /* 3651 * If status is good or resid does not match with fcp_param and 3652 * there is valid fcpi_parm, then there is a read_check error 3653 */ 3654 fast_path_evt = lpfc_alloc_fast_evt(phba); 3655 if (!fast_path_evt) 3656 return; 3657 fast_path_evt->un.read_check_error.header.event_type = 3658 FC_REG_FABRIC_EVENT; 3659 fast_path_evt->un.read_check_error.header.subcategory = 3660 LPFC_EVENT_FCPRDCHKERR; 3661 memcpy(&fast_path_evt->un.read_check_error.header.wwpn, 3662 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3663 memcpy(&fast_path_evt->un.read_check_error.header.wwnn, 3664 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3665 fast_path_evt->un.read_check_error.lun = cmnd->device->lun; 3666 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; 3667 fast_path_evt->un.read_check_error.fcpiparam = 3668 fcpi_parm; 3669 } else 3670 return; 3671 3672 fast_path_evt->vport = vport; 3673 spin_lock_irqsave(&phba->hbalock, flags); 3674 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 3675 spin_unlock_irqrestore(&phba->hbalock, flags); 3676 lpfc_worker_wake_up(phba); 3677 return; 3678 } 3679 3680 /** 3681 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev 3682 * @phba: The HBA for which this call is being executed. 3683 * @psb: The scsi buffer which is going to be un-mapped. 3684 * 3685 * This routine does DMA un-mapping of scatter gather list of scsi command 3686 * field of @lpfc_cmd for device with SLI-3 interface spec. 3687 **/ 3688 static void 3689 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 3690 { 3691 /* 3692 * There are only two special cases to consider. (1) the scsi command 3693 * requested scatter-gather usage or (2) the scsi command allocated 3694 * a request buffer, but did not request use_sg. There is a third 3695 * case, but it does not require resource deallocation. 3696 */ 3697 if (psb->seg_cnt > 0) 3698 scsi_dma_unmap(psb->pCmd); 3699 if (psb->prot_seg_cnt > 0) 3700 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), 3701 scsi_prot_sg_count(psb->pCmd), 3702 psb->pCmd->sc_data_direction); 3703 } 3704 3705 /** 3706 * lpfc_handler_fcp_err - FCP response handler 3707 * @vport: The virtual port for which this call is being executed. 3708 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 3709 * @rsp_iocb: The response IOCB which contains FCP error. 3710 * 3711 * This routine is called to process response IOCB with status field 3712 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command 3713 * based upon SCSI and FCP error. 3714 **/ 3715 static void 3716 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 3717 struct lpfc_iocbq *rsp_iocb) 3718 { 3719 struct lpfc_hba *phba = vport->phba; 3720 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3721 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 3722 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3723 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; 3724 uint32_t resp_info = fcprsp->rspStatus2; 3725 uint32_t scsi_status = fcprsp->rspStatus3; 3726 uint32_t *lp; 3727 uint32_t host_status = DID_OK; 3728 uint32_t rsplen = 0; 3729 uint32_t fcpDl; 3730 uint32_t logit = LOG_FCP | LOG_FCP_ERROR; 3731 3732 3733 /* 3734 * If this is a task management command, there is no 3735 * scsi packet associated with this lpfc_cmd. The driver 3736 * consumes it. 3737 */ 3738 if (fcpcmd->fcpCntl2) { 3739 scsi_status = 0; 3740 goto out; 3741 } 3742 3743 if (resp_info & RSP_LEN_VALID) { 3744 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3745 if (rsplen != 0 && rsplen != 4 && rsplen != 8) { 3746 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3747 "2719 Invalid response length: " 3748 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n", 3749 cmnd->device->id, 3750 cmnd->device->lun, cmnd->cmnd[0], 3751 rsplen); 3752 host_status = DID_ERROR; 3753 goto out; 3754 } 3755 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { 3756 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3757 "2757 Protocol failure detected during " 3758 "processing of FCP I/O op: " 3759 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", 3760 cmnd->device->id, 3761 cmnd->device->lun, cmnd->cmnd[0], 3762 fcprsp->rspInfo3); 3763 host_status = DID_ERROR; 3764 goto out; 3765 } 3766 } 3767 3768 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 3769 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 3770 if (snslen > SCSI_SENSE_BUFFERSIZE) 3771 snslen = SCSI_SENSE_BUFFERSIZE; 3772 3773 if (resp_info & RSP_LEN_VALID) 3774 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3775 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); 3776 } 3777 lp = (uint32_t *)cmnd->sense_buffer; 3778 3779 /* special handling for under run conditions */ 3780 if (!scsi_status && (resp_info & RESID_UNDER)) { 3781 /* don't log under runs if fcp set... */ 3782 if (vport->cfg_log_verbose & LOG_FCP) 3783 logit = LOG_FCP_ERROR; 3784 /* unless operator says so */ 3785 if (vport->cfg_log_verbose & LOG_FCP_UNDER) 3786 logit = LOG_FCP_UNDER; 3787 } 3788 3789 lpfc_printf_vlog(vport, KERN_WARNING, logit, 3790 "9024 FCP command x%x failed: x%x SNS x%x x%x " 3791 "Data: x%x x%x x%x x%x x%x\n", 3792 cmnd->cmnd[0], scsi_status, 3793 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 3794 be32_to_cpu(fcprsp->rspResId), 3795 be32_to_cpu(fcprsp->rspSnsLen), 3796 be32_to_cpu(fcprsp->rspRspLen), 3797 fcprsp->rspInfo3); 3798 3799 scsi_set_resid(cmnd, 0); 3800 fcpDl = be32_to_cpu(fcpcmd->fcpDl); 3801 if (resp_info & RESID_UNDER) { 3802 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 3803 3804 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, 3805 "9025 FCP Underrun, expected %d, " 3806 "residual %d Data: x%x x%x x%x\n", 3807 fcpDl, 3808 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], 3809 cmnd->underflow); 3810 3811 /* 3812 * If there is an under run, check if under run reported by 3813 * storage array is same as the under run reported by HBA. 3814 * If this is not same, there is a dropped frame. 3815 */ 3816 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) { 3817 lpfc_printf_vlog(vport, KERN_WARNING, 3818 LOG_FCP | LOG_FCP_ERROR, 3819 "9026 FCP Read Check Error " 3820 "and Underrun Data: x%x x%x x%x x%x\n", 3821 fcpDl, 3822 scsi_get_resid(cmnd), fcpi_parm, 3823 cmnd->cmnd[0]); 3824 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 3825 host_status = DID_ERROR; 3826 } 3827 /* 3828 * The cmnd->underflow is the minimum number of bytes that must 3829 * be transferred for this command. Provided a sense condition 3830 * is not present, make sure the actual amount transferred is at 3831 * least the underflow value or fail. 3832 */ 3833 if (!(resp_info & SNS_LEN_VALID) && 3834 (scsi_status == SAM_STAT_GOOD) && 3835 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) 3836 < cmnd->underflow)) { 3837 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3838 "9027 FCP command x%x residual " 3839 "underrun converted to error " 3840 "Data: x%x x%x x%x\n", 3841 cmnd->cmnd[0], scsi_bufflen(cmnd), 3842 scsi_get_resid(cmnd), cmnd->underflow); 3843 host_status = DID_ERROR; 3844 } 3845 } else if (resp_info & RESID_OVER) { 3846 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3847 "9028 FCP command x%x residual overrun error. " 3848 "Data: x%x x%x\n", cmnd->cmnd[0], 3849 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 3850 host_status = DID_ERROR; 3851 3852 /* 3853 * Check SLI validation that all the transfer was actually done 3854 * (fcpi_parm should be zero). Apply check only to reads. 3855 */ 3856 } else if (fcpi_parm) { 3857 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 3858 "9029 FCP %s Check Error xri x%x Data: " 3859 "x%x x%x x%x x%x x%x\n", 3860 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? 3861 "Read" : "Write"), 3862 ((phba->sli_rev == LPFC_SLI_REV4) ? 3863 lpfc_cmd->cur_iocbq.sli4_xritag : 3864 rsp_iocb->iocb.ulpContext), 3865 fcpDl, be32_to_cpu(fcprsp->rspResId), 3866 fcpi_parm, cmnd->cmnd[0], scsi_status); 3867 3868 /* There is some issue with the LPe12000 that causes it 3869 * to miscalculate the fcpi_parm and falsely trip this 3870 * recovery logic. Detect this case and don't error when true. 3871 */ 3872 if (fcpi_parm > fcpDl) 3873 goto out; 3874 3875 switch (scsi_status) { 3876 case SAM_STAT_GOOD: 3877 case SAM_STAT_CHECK_CONDITION: 3878 /* Fabric dropped a data frame. Fail any successful 3879 * command in which we detected dropped frames. 3880 * A status of good or some check conditions could 3881 * be considered a successful command. 3882 */ 3883 host_status = DID_ERROR; 3884 break; 3885 } 3886 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 3887 } 3888 3889 out: 3890 cmnd->result = host_status << 16 | scsi_status; 3891 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); 3892 } 3893 3894 /** 3895 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 3896 * @phba: Pointer to HBA context object. 3897 * 3898 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 3899 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 3900 * held. 3901 * If scsi-mq is enabled, get the default block layer mapping of software queues 3902 * to hardware queues. This information is saved in request tag. 3903 * 3904 * Return: index into SLI4 fast-path FCP queue index. 3905 **/ 3906 int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, 3907 struct lpfc_scsi_buf *lpfc_cmd) 3908 { 3909 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3910 struct lpfc_vector_map_info *cpup; 3911 int chann, cpu; 3912 uint32_t tag; 3913 uint16_t hwq; 3914 3915 if (cmnd && shost_use_blk_mq(cmnd->device->host)) { 3916 tag = blk_mq_unique_tag(cmnd->request); 3917 hwq = blk_mq_unique_tag_to_hwq(tag); 3918 3919 return hwq; 3920 } 3921 3922 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU 3923 && phba->cfg_fcp_io_channel > 1) { 3924 cpu = smp_processor_id(); 3925 if (cpu < phba->sli4_hba.num_present_cpu) { 3926 cpup = phba->sli4_hba.cpu_map; 3927 cpup += cpu; 3928 return cpup->channel_id; 3929 } 3930 } 3931 chann = atomic_add_return(1, &phba->fcp_qidx); 3932 chann = chann % phba->cfg_fcp_io_channel; 3933 return chann; 3934 } 3935 3936 3937 /** 3938 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 3939 * @phba: The Hba for which this call is being executed. 3940 * @pIocbIn: The command IOCBQ for the scsi cmnd. 3941 * @pIocbOut: The response IOCBQ for the scsi cmnd. 3942 * 3943 * This routine assigns scsi command result by looking into response IOCB 3944 * status field appropriately. This routine handles QUEUE FULL condition as 3945 * well by ramping down device queue depth. 3946 **/ 3947 static void 3948 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 3949 struct lpfc_iocbq *pIocbOut) 3950 { 3951 struct lpfc_scsi_buf *lpfc_cmd = 3952 (struct lpfc_scsi_buf *) pIocbIn->context1; 3953 struct lpfc_vport *vport = pIocbIn->vport; 3954 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 3955 struct lpfc_nodelist *pnode = rdata->pnode; 3956 struct scsi_cmnd *cmd; 3957 unsigned long flags; 3958 struct lpfc_fast_path_event *fast_path_evt; 3959 struct Scsi_Host *shost; 3960 uint32_t logit = LOG_FCP; 3961 3962 atomic_inc(&phba->fc4ScsiIoCmpls); 3963 3964 /* Sanity check on return of outstanding command */ 3965 cmd = lpfc_cmd->pCmd; 3966 if (!cmd) 3967 return; 3968 shost = cmd->device->host; 3969 3970 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); 3971 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 3972 /* pick up SLI4 exhange busy status from HBA */ 3973 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY; 3974 3975 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 3976 if (lpfc_cmd->prot_data_type) { 3977 struct scsi_dif_tuple *src = NULL; 3978 3979 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 3980 /* 3981 * Used to restore any changes to protection 3982 * data for error injection. 3983 */ 3984 switch (lpfc_cmd->prot_data_type) { 3985 case LPFC_INJERR_REFTAG: 3986 src->ref_tag = 3987 lpfc_cmd->prot_data; 3988 break; 3989 case LPFC_INJERR_APPTAG: 3990 src->app_tag = 3991 (uint16_t)lpfc_cmd->prot_data; 3992 break; 3993 case LPFC_INJERR_GUARD: 3994 src->guard_tag = 3995 (uint16_t)lpfc_cmd->prot_data; 3996 break; 3997 default: 3998 break; 3999 } 4000 4001 lpfc_cmd->prot_data = 0; 4002 lpfc_cmd->prot_data_type = 0; 4003 lpfc_cmd->prot_data_segment = NULL; 4004 } 4005 #endif 4006 4007 if (lpfc_cmd->status) { 4008 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4009 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4010 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4011 else if (lpfc_cmd->status >= IOSTAT_CNT) 4012 lpfc_cmd->status = IOSTAT_DEFAULT; 4013 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4014 !lpfc_cmd->fcp_rsp->rspStatus3 && 4015 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4016 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4017 logit = 0; 4018 else 4019 logit = LOG_FCP | LOG_FCP_UNDER; 4020 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4021 "9030 FCP cmd x%x failed <%d/%lld> " 4022 "status: x%x result: x%x " 4023 "sid: x%x did: x%x oxid: x%x " 4024 "Data: x%x x%x\n", 4025 cmd->cmnd[0], 4026 cmd->device ? cmd->device->id : 0xffff, 4027 cmd->device ? cmd->device->lun : 0xffff, 4028 lpfc_cmd->status, lpfc_cmd->result, 4029 vport->fc_myDID, 4030 (pnode) ? pnode->nlp_DID : 0, 4031 phba->sli_rev == LPFC_SLI_REV4 ? 4032 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 4033 pIocbOut->iocb.ulpContext, 4034 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4035 4036 switch (lpfc_cmd->status) { 4037 case IOSTAT_FCP_RSP_ERROR: 4038 /* Call FCP RSP handler to determine result */ 4039 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut); 4040 break; 4041 case IOSTAT_NPORT_BSY: 4042 case IOSTAT_FABRIC_BSY: 4043 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4044 fast_path_evt = lpfc_alloc_fast_evt(phba); 4045 if (!fast_path_evt) 4046 break; 4047 fast_path_evt->un.fabric_evt.event_type = 4048 FC_REG_FABRIC_EVENT; 4049 fast_path_evt->un.fabric_evt.subcategory = 4050 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 4051 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4052 if (pnode && NLP_CHK_NODE_ACT(pnode)) { 4053 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4054 &pnode->nlp_portname, 4055 sizeof(struct lpfc_name)); 4056 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4057 &pnode->nlp_nodename, 4058 sizeof(struct lpfc_name)); 4059 } 4060 fast_path_evt->vport = vport; 4061 fast_path_evt->work_evt.evt = 4062 LPFC_EVT_FASTPATH_MGMT_EVT; 4063 spin_lock_irqsave(&phba->hbalock, flags); 4064 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4065 &phba->work_list); 4066 spin_unlock_irqrestore(&phba->hbalock, flags); 4067 lpfc_worker_wake_up(phba); 4068 break; 4069 case IOSTAT_LOCAL_REJECT: 4070 case IOSTAT_REMOTE_STOP: 4071 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4072 lpfc_cmd->result == 4073 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4074 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4075 lpfc_cmd->result == 4076 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4077 cmd->result = DID_NO_CONNECT << 16; 4078 break; 4079 } 4080 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4081 lpfc_cmd->result == IOERR_NO_RESOURCES || 4082 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4083 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4084 cmd->result = DID_REQUEUE << 16; 4085 break; 4086 } 4087 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4088 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4089 pIocbOut->iocb.unsli3.sli3_bg.bgstat) { 4090 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 4091 /* 4092 * This is a response for a BG enabled 4093 * cmd. Parse BG error 4094 */ 4095 lpfc_parse_bg_err(phba, lpfc_cmd, 4096 pIocbOut); 4097 break; 4098 } else { 4099 lpfc_printf_vlog(vport, KERN_WARNING, 4100 LOG_BG, 4101 "9031 non-zero BGSTAT " 4102 "on unprotected cmd\n"); 4103 } 4104 } 4105 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) 4106 && (phba->sli_rev == LPFC_SLI_REV4) 4107 && (pnode && NLP_CHK_NODE_ACT(pnode))) { 4108 /* This IO was aborted by the target, we don't 4109 * know the rxid and because we did not send the 4110 * ABTS we cannot generate and RRQ. 4111 */ 4112 lpfc_set_rrq_active(phba, pnode, 4113 lpfc_cmd->cur_iocbq.sli4_lxritag, 4114 0, 0); 4115 } 4116 /* else: fall through */ 4117 default: 4118 cmd->result = DID_ERROR << 16; 4119 break; 4120 } 4121 4122 if (!pnode || !NLP_CHK_NODE_ACT(pnode) 4123 || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 4124 cmd->result = DID_TRANSPORT_DISRUPTED << 16 | 4125 SAM_STAT_BUSY; 4126 } else 4127 cmd->result = DID_OK << 16; 4128 4129 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4130 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 4131 4132 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4133 "0710 Iodone <%d/%llu> cmd %p, error " 4134 "x%x SNS x%x x%x Data: x%x x%x\n", 4135 cmd->device->id, cmd->device->lun, cmd, 4136 cmd->result, *lp, *(lp + 3), cmd->retries, 4137 scsi_get_resid(cmd)); 4138 } 4139 4140 lpfc_update_stats(phba, lpfc_cmd); 4141 if (vport->cfg_max_scsicmpl_time && 4142 time_after(jiffies, lpfc_cmd->start_time + 4143 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4144 spin_lock_irqsave(shost->host_lock, flags); 4145 if (pnode && NLP_CHK_NODE_ACT(pnode)) { 4146 if (pnode->cmd_qdepth > 4147 atomic_read(&pnode->cmd_pending) && 4148 (atomic_read(&pnode->cmd_pending) > 4149 LPFC_MIN_TGT_QDEPTH) && 4150 ((cmd->cmnd[0] == READ_10) || 4151 (cmd->cmnd[0] == WRITE_10))) 4152 pnode->cmd_qdepth = 4153 atomic_read(&pnode->cmd_pending); 4154 4155 pnode->last_change_time = jiffies; 4156 } 4157 spin_unlock_irqrestore(shost->host_lock, flags); 4158 } 4159 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4160 4161 spin_lock_irqsave(&phba->hbalock, flags); 4162 lpfc_cmd->pCmd = NULL; 4163 spin_unlock_irqrestore(&phba->hbalock, flags); 4164 4165 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4166 cmd->scsi_done(cmd); 4167 4168 /* 4169 * If there is a thread waiting for command completion 4170 * wake up the thread. 4171 */ 4172 spin_lock_irqsave(shost->host_lock, flags); 4173 if (lpfc_cmd->waitq) 4174 wake_up(lpfc_cmd->waitq); 4175 spin_unlock_irqrestore(shost->host_lock, flags); 4176 4177 lpfc_release_scsi_buf(phba, lpfc_cmd); 4178 } 4179 4180 /** 4181 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB 4182 * @data: A pointer to the immediate command data portion of the IOCB. 4183 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. 4184 * 4185 * The routine copies the entire FCP command from @fcp_cmnd to @data while 4186 * byte swapping the data to big endian format for transmission on the wire. 4187 **/ 4188 static void 4189 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) 4190 { 4191 int i, j; 4192 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); 4193 i += sizeof(uint32_t), j++) { 4194 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); 4195 } 4196 } 4197 4198 /** 4199 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit 4200 * @vport: The virtual port for which this call is being executed. 4201 * @lpfc_cmd: The scsi command which needs to send. 4202 * @pnode: Pointer to lpfc_nodelist. 4203 * 4204 * This routine initializes fcp_cmnd and iocb data structure from scsi command 4205 * to transfer for device with SLI3 interface spec. 4206 **/ 4207 static void 4208 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 4209 struct lpfc_nodelist *pnode) 4210 { 4211 struct lpfc_hba *phba = vport->phba; 4212 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4213 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4214 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 4215 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); 4216 int datadir = scsi_cmnd->sc_data_direction; 4217 uint8_t *ptr; 4218 bool sli4; 4219 uint32_t fcpdl; 4220 4221 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 4222 return; 4223 4224 lpfc_cmd->fcp_rsp->rspSnsLen = 0; 4225 /* clear task management bits */ 4226 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; 4227 4228 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 4229 &lpfc_cmd->fcp_cmnd->fcp_lun); 4230 4231 ptr = &fcp_cmnd->fcpCdb[0]; 4232 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 4233 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { 4234 ptr += scsi_cmnd->cmd_len; 4235 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); 4236 } 4237 4238 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 4239 4240 sli4 = (phba->sli_rev == LPFC_SLI_REV4); 4241 piocbq->iocb.un.fcpi.fcpi_XRdy = 0; 4242 4243 /* 4244 * There are three possibilities here - use scatter-gather segment, use 4245 * the single mapping, or neither. Start the lpfc command prep by 4246 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 4247 * data bde entry. 4248 */ 4249 if (scsi_sg_count(scsi_cmnd)) { 4250 if (datadir == DMA_TO_DEVICE) { 4251 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 4252 iocb_cmd->ulpPU = PARM_READ_CHECK; 4253 if (vport->cfg_first_burst_size && 4254 (pnode->nlp_flag & NLP_FIRSTBURST)) { 4255 fcpdl = scsi_bufflen(scsi_cmnd); 4256 if (fcpdl < vport->cfg_first_burst_size) 4257 piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl; 4258 else 4259 piocbq->iocb.un.fcpi.fcpi_XRdy = 4260 vport->cfg_first_burst_size; 4261 } 4262 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4263 atomic_inc(&phba->fc4ScsiOutputRequests); 4264 } else { 4265 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 4266 iocb_cmd->ulpPU = PARM_READ_CHECK; 4267 fcp_cmnd->fcpCntl3 = READ_DATA; 4268 atomic_inc(&phba->fc4ScsiInputRequests); 4269 } 4270 } else { 4271 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; 4272 iocb_cmd->un.fcpi.fcpi_parm = 0; 4273 iocb_cmd->ulpPU = 0; 4274 fcp_cmnd->fcpCntl3 = 0; 4275 atomic_inc(&phba->fc4ScsiControlRequests); 4276 } 4277 if (phba->sli_rev == 3 && 4278 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 4279 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); 4280 /* 4281 * Finish initializing those IOCB fields that are independent 4282 * of the scsi_cmnd request_buffer 4283 */ 4284 piocbq->iocb.ulpContext = pnode->nlp_rpi; 4285 if (sli4) 4286 piocbq->iocb.ulpContext = 4287 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]; 4288 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4289 piocbq->iocb.ulpFCP2Rcvy = 1; 4290 else 4291 piocbq->iocb.ulpFCP2Rcvy = 0; 4292 4293 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 4294 piocbq->context1 = lpfc_cmd; 4295 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 4296 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout; 4297 piocbq->vport = vport; 4298 } 4299 4300 /** 4301 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit 4302 * @vport: The virtual port for which this call is being executed. 4303 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 4304 * @lun: Logical unit number. 4305 * @task_mgmt_cmd: SCSI task management command. 4306 * 4307 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 4308 * for device with SLI-3 interface spec. 4309 * 4310 * Return codes: 4311 * 0 - Error 4312 * 1 - Success 4313 **/ 4314 static int 4315 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 4316 struct lpfc_scsi_buf *lpfc_cmd, 4317 uint64_t lun, 4318 uint8_t task_mgmt_cmd) 4319 { 4320 struct lpfc_iocbq *piocbq; 4321 IOCB_t *piocb; 4322 struct fcp_cmnd *fcp_cmnd; 4323 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4324 struct lpfc_nodelist *ndlp = rdata->pnode; 4325 4326 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 4327 ndlp->nlp_state != NLP_STE_MAPPED_NODE) 4328 return 0; 4329 4330 piocbq = &(lpfc_cmd->cur_iocbq); 4331 piocbq->vport = vport; 4332 4333 piocb = &piocbq->iocb; 4334 4335 fcp_cmnd = lpfc_cmd->fcp_cmnd; 4336 /* Clear out any old data in the FCP command area */ 4337 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 4338 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 4339 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 4340 if (vport->phba->sli_rev == 3 && 4341 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 4342 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 4343 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 4344 piocb->ulpContext = ndlp->nlp_rpi; 4345 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 4346 piocb->ulpContext = 4347 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 4348 } 4349 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; 4350 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 4351 piocb->ulpPU = 0; 4352 piocb->un.fcpi.fcpi_parm = 0; 4353 4354 /* ulpTimeout is only one byte */ 4355 if (lpfc_cmd->timeout > 0xff) { 4356 /* 4357 * Do not timeout the command at the firmware level. 4358 * The driver will provide the timeout mechanism. 4359 */ 4360 piocb->ulpTimeout = 0; 4361 } else 4362 piocb->ulpTimeout = lpfc_cmd->timeout; 4363 4364 if (vport->phba->sli_rev == LPFC_SLI_REV4) 4365 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd); 4366 4367 return 1; 4368 } 4369 4370 /** 4371 * lpfc_scsi_api_table_setup - Set up scsi api function jump table 4372 * @phba: The hba struct for which this call is being executed. 4373 * @dev_grp: The HBA PCI-Device group number. 4374 * 4375 * This routine sets up the SCSI interface API function jump table in @phba 4376 * struct. 4377 * Returns: 0 - success, -ENODEV - failure. 4378 **/ 4379 int 4380 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4381 { 4382 4383 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; 4384 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd; 4385 4386 switch (dev_grp) { 4387 case LPFC_PCI_DEV_LP: 4388 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; 4389 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; 4390 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; 4391 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; 4392 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; 4393 break; 4394 case LPFC_PCI_DEV_OC: 4395 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; 4396 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; 4397 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; 4398 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; 4399 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; 4400 break; 4401 default: 4402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4403 "1418 Invalid HBA PCI-device group: 0x%x\n", 4404 dev_grp); 4405 return -ENODEV; 4406 break; 4407 } 4408 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; 4409 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 4410 return 0; 4411 } 4412 4413 /** 4414 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command 4415 * @phba: The Hba for which this call is being executed. 4416 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 4417 * @rspiocbq: Pointer to lpfc_iocbq data structure. 4418 * 4419 * This routine is IOCB completion routine for device reset and target reset 4420 * routine. This routine release scsi buffer associated with lpfc_cmd. 4421 **/ 4422 static void 4423 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, 4424 struct lpfc_iocbq *cmdiocbq, 4425 struct lpfc_iocbq *rspiocbq) 4426 { 4427 struct lpfc_scsi_buf *lpfc_cmd = 4428 (struct lpfc_scsi_buf *) cmdiocbq->context1; 4429 if (lpfc_cmd) 4430 lpfc_release_scsi_buf(phba, lpfc_cmd); 4431 return; 4432 } 4433 4434 /** 4435 * lpfc_info - Info entry point of scsi_host_template data structure 4436 * @host: The scsi host for which this call is being executed. 4437 * 4438 * This routine provides module information about hba. 4439 * 4440 * Reutrn code: 4441 * Pointer to char - Success. 4442 **/ 4443 const char * 4444 lpfc_info(struct Scsi_Host *host) 4445 { 4446 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; 4447 struct lpfc_hba *phba = vport->phba; 4448 int len, link_speed = 0; 4449 static char lpfcinfobuf[384]; 4450 4451 memset(lpfcinfobuf,0,384); 4452 if (phba && phba->pcidev){ 4453 strncpy(lpfcinfobuf, phba->ModelDesc, 256); 4454 len = strlen(lpfcinfobuf); 4455 snprintf(lpfcinfobuf + len, 4456 384-len, 4457 " on PCI bus %02x device %02x irq %d", 4458 phba->pcidev->bus->number, 4459 phba->pcidev->devfn, 4460 phba->pcidev->irq); 4461 len = strlen(lpfcinfobuf); 4462 if (phba->Port[0]) { 4463 snprintf(lpfcinfobuf + len, 4464 384-len, 4465 " port %s", 4466 phba->Port); 4467 } 4468 len = strlen(lpfcinfobuf); 4469 link_speed = lpfc_sli_port_speed_get(phba); 4470 if (link_speed != 0) 4471 snprintf(lpfcinfobuf + len, 384-len, 4472 " Logical Link Speed: %d Mbps", link_speed); 4473 } 4474 return lpfcinfobuf; 4475 } 4476 4477 /** 4478 * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba 4479 * @phba: The Hba for which this call is being executed. 4480 * 4481 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. 4482 * The default value of cfg_poll_tmo is 10 milliseconds. 4483 **/ 4484 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) 4485 { 4486 unsigned long poll_tmo_expires = 4487 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); 4488 4489 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq)) 4490 mod_timer(&phba->fcp_poll_timer, 4491 poll_tmo_expires); 4492 } 4493 4494 /** 4495 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA 4496 * @phba: The Hba for which this call is being executed. 4497 * 4498 * This routine starts the fcp_poll_timer of @phba. 4499 **/ 4500 void lpfc_poll_start_timer(struct lpfc_hba * phba) 4501 { 4502 lpfc_poll_rearm_timer(phba); 4503 } 4504 4505 /** 4506 * lpfc_poll_timeout - Restart polling timer 4507 * @ptr: Map to lpfc_hba data structure pointer. 4508 * 4509 * This routine restarts fcp_poll timer, when FCP ring polling is enable 4510 * and FCP Ring interrupt is disable. 4511 **/ 4512 4513 void lpfc_poll_timeout(struct timer_list *t) 4514 { 4515 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); 4516 4517 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 4518 lpfc_sli_handle_fast_ring_event(phba, 4519 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 4520 4521 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 4522 lpfc_poll_rearm_timer(phba); 4523 } 4524 } 4525 4526 /** 4527 * lpfc_queuecommand - scsi_host_template queuecommand entry point 4528 * @cmnd: Pointer to scsi_cmnd data structure. 4529 * @done: Pointer to done routine. 4530 * 4531 * Driver registers this routine to scsi midlayer to submit a @cmd to process. 4532 * This routine prepares an IOCB from scsi command and provides to firmware. 4533 * The @done callback is invoked after driver finished processing the command. 4534 * 4535 * Return value : 4536 * 0 - Success 4537 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. 4538 **/ 4539 static int 4540 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 4541 { 4542 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4543 struct lpfc_hba *phba = vport->phba; 4544 struct lpfc_rport_data *rdata; 4545 struct lpfc_nodelist *ndlp; 4546 struct lpfc_scsi_buf *lpfc_cmd; 4547 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 4548 int err; 4549 4550 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 4551 4552 /* sanity check on references */ 4553 if (unlikely(!rdata) || unlikely(!rport)) 4554 goto out_fail_command; 4555 4556 err = fc_remote_port_chkready(rport); 4557 if (err) { 4558 cmnd->result = err; 4559 goto out_fail_command; 4560 } 4561 ndlp = rdata->pnode; 4562 4563 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && 4564 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { 4565 4566 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4567 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" 4568 " op:%02x str=%s without registering for" 4569 " BlockGuard - Rejecting command\n", 4570 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 4571 dif_op_str[scsi_get_prot_op(cmnd)]); 4572 goto out_fail_command; 4573 } 4574 4575 /* 4576 * Catch race where our node has transitioned, but the 4577 * transport is still transitioning. 4578 */ 4579 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 4580 goto out_tgt_busy; 4581 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 4582 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { 4583 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 4584 "3377 Target Queue Full, scsi Id:%d " 4585 "Qdepth:%d Pending command:%d" 4586 " WWNN:%02x:%02x:%02x:%02x:" 4587 "%02x:%02x:%02x:%02x, " 4588 " WWPN:%02x:%02x:%02x:%02x:" 4589 "%02x:%02x:%02x:%02x", 4590 ndlp->nlp_sid, ndlp->cmd_qdepth, 4591 atomic_read(&ndlp->cmd_pending), 4592 ndlp->nlp_nodename.u.wwn[0], 4593 ndlp->nlp_nodename.u.wwn[1], 4594 ndlp->nlp_nodename.u.wwn[2], 4595 ndlp->nlp_nodename.u.wwn[3], 4596 ndlp->nlp_nodename.u.wwn[4], 4597 ndlp->nlp_nodename.u.wwn[5], 4598 ndlp->nlp_nodename.u.wwn[6], 4599 ndlp->nlp_nodename.u.wwn[7], 4600 ndlp->nlp_portname.u.wwn[0], 4601 ndlp->nlp_portname.u.wwn[1], 4602 ndlp->nlp_portname.u.wwn[2], 4603 ndlp->nlp_portname.u.wwn[3], 4604 ndlp->nlp_portname.u.wwn[4], 4605 ndlp->nlp_portname.u.wwn[5], 4606 ndlp->nlp_portname.u.wwn[6], 4607 ndlp->nlp_portname.u.wwn[7]); 4608 goto out_tgt_busy; 4609 } 4610 } 4611 4612 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); 4613 if (lpfc_cmd == NULL) { 4614 lpfc_rampdown_queue_depth(phba); 4615 4616 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 4617 "0707 driver's buffer pool is empty, " 4618 "IO busied\n"); 4619 goto out_host_busy; 4620 } 4621 4622 /* 4623 * Store the midlayer's command structure for the completion phase 4624 * and complete the command initialization. 4625 */ 4626 lpfc_cmd->pCmd = cmnd; 4627 lpfc_cmd->rdata = rdata; 4628 lpfc_cmd->ndlp = ndlp; 4629 lpfc_cmd->timeout = 0; 4630 lpfc_cmd->start_time = jiffies; 4631 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 4632 4633 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 4634 if (vport->phba->cfg_enable_bg) { 4635 lpfc_printf_vlog(vport, 4636 KERN_INFO, LOG_SCSI_CMD, 4637 "9033 BLKGRD: rcvd %s cmd:x%x " 4638 "sector x%llx cnt %u pt %x\n", 4639 dif_op_str[scsi_get_prot_op(cmnd)], 4640 cmnd->cmnd[0], 4641 (unsigned long long)scsi_get_lba(cmnd), 4642 blk_rq_sectors(cmnd->request), 4643 (cmnd->cmnd[1]>>5)); 4644 } 4645 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 4646 } else { 4647 if (vport->phba->cfg_enable_bg) { 4648 lpfc_printf_vlog(vport, 4649 KERN_INFO, LOG_SCSI_CMD, 4650 "9038 BLKGRD: rcvd PROT_NORMAL cmd: " 4651 "x%x sector x%llx cnt %u pt %x\n", 4652 cmnd->cmnd[0], 4653 (unsigned long long)scsi_get_lba(cmnd), 4654 blk_rq_sectors(cmnd->request), 4655 (cmnd->cmnd[1]>>5)); 4656 } 4657 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 4658 } 4659 4660 if (err) 4661 goto out_host_busy_free_buf; 4662 4663 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 4664 4665 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, 4666 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 4667 if (err) { 4668 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4669 "3376 FCP could not issue IOCB err %x" 4670 "FCP cmd x%x <%d/%llu> " 4671 "sid: x%x did: x%x oxid: x%x " 4672 "Data: x%x x%x x%x x%x\n", 4673 err, cmnd->cmnd[0], 4674 cmnd->device ? cmnd->device->id : 0xffff, 4675 cmnd->device ? cmnd->device->lun : (u64) -1, 4676 vport->fc_myDID, ndlp->nlp_DID, 4677 phba->sli_rev == LPFC_SLI_REV4 ? 4678 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 4679 lpfc_cmd->cur_iocbq.iocb.ulpContext, 4680 lpfc_cmd->cur_iocbq.iocb.ulpIoTag, 4681 lpfc_cmd->cur_iocbq.iocb.ulpTimeout, 4682 (uint32_t) 4683 (cmnd->request->timeout / 1000)); 4684 4685 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { 4686 case WRITE_DATA: 4687 atomic_dec(&phba->fc4ScsiOutputRequests); 4688 break; 4689 case READ_DATA: 4690 atomic_dec(&phba->fc4ScsiInputRequests); 4691 break; 4692 default: 4693 atomic_dec(&phba->fc4ScsiControlRequests); 4694 } 4695 goto out_host_busy_free_buf; 4696 } 4697 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 4698 lpfc_sli_handle_fast_ring_event(phba, 4699 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 4700 4701 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 4702 lpfc_poll_rearm_timer(phba); 4703 } 4704 4705 return 0; 4706 4707 out_host_busy_free_buf: 4708 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4709 lpfc_release_scsi_buf(phba, lpfc_cmd); 4710 out_host_busy: 4711 return SCSI_MLQUEUE_HOST_BUSY; 4712 4713 out_tgt_busy: 4714 return SCSI_MLQUEUE_TARGET_BUSY; 4715 4716 out_fail_command: 4717 cmnd->scsi_done(cmnd); 4718 return 0; 4719 } 4720 4721 4722 /** 4723 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 4724 * @cmnd: Pointer to scsi_cmnd data structure. 4725 * 4726 * This routine aborts @cmnd pending in base driver. 4727 * 4728 * Return code : 4729 * 0x2003 - Error 4730 * 0x2002 - Success 4731 **/ 4732 static int 4733 lpfc_abort_handler(struct scsi_cmnd *cmnd) 4734 { 4735 struct Scsi_Host *shost = cmnd->device->host; 4736 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4737 struct lpfc_hba *phba = vport->phba; 4738 struct lpfc_iocbq *iocb; 4739 struct lpfc_iocbq *abtsiocb; 4740 struct lpfc_scsi_buf *lpfc_cmd; 4741 IOCB_t *cmd, *icmd; 4742 int ret = SUCCESS, status = 0; 4743 struct lpfc_sli_ring *pring_s4 = NULL; 4744 int ret_val; 4745 unsigned long flags; 4746 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 4747 4748 status = fc_block_scsi_eh(cmnd); 4749 if (status != 0 && status != SUCCESS) 4750 return status; 4751 4752 spin_lock_irqsave(&phba->hbalock, flags); 4753 /* driver queued commands are in process of being flushed */ 4754 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 4755 spin_unlock_irqrestore(&phba->hbalock, flags); 4756 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4757 "3168 SCSI Layer abort requested I/O has been " 4758 "flushed by LLD.\n"); 4759 return FAILED; 4760 } 4761 4762 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 4763 if (!lpfc_cmd || !lpfc_cmd->pCmd) { 4764 spin_unlock_irqrestore(&phba->hbalock, flags); 4765 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4766 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 4767 "x%x ID %d LUN %llu\n", 4768 SUCCESS, cmnd->device->id, cmnd->device->lun); 4769 return SUCCESS; 4770 } 4771 4772 iocb = &lpfc_cmd->cur_iocbq; 4773 if (phba->sli_rev == LPFC_SLI_REV4) { 4774 if (!(phba->cfg_fof) || 4775 (!(iocb->iocb_flag & LPFC_IO_FOF))) { 4776 pring_s4 = 4777 phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring; 4778 } else { 4779 iocb->hba_wqidx = 0; 4780 pring_s4 = phba->sli4_hba.oas_wq->pring; 4781 } 4782 if (!pring_s4) { 4783 ret = FAILED; 4784 goto out_unlock; 4785 } 4786 spin_lock(&pring_s4->ring_lock); 4787 } 4788 /* the command is in process of being cancelled */ 4789 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 4790 if (phba->sli_rev == LPFC_SLI_REV4) 4791 spin_unlock(&pring_s4->ring_lock); 4792 spin_unlock_irqrestore(&phba->hbalock, flags); 4793 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4794 "3169 SCSI Layer abort requested I/O has been " 4795 "cancelled by LLD.\n"); 4796 return FAILED; 4797 } 4798 /* 4799 * If pCmd field of the corresponding lpfc_scsi_buf structure 4800 * points to a different SCSI command, then the driver has 4801 * already completed this command, but the midlayer did not 4802 * see the completion before the eh fired. Just return SUCCESS. 4803 */ 4804 if (lpfc_cmd->pCmd != cmnd) { 4805 if (phba->sli_rev == LPFC_SLI_REV4) 4806 spin_unlock(&pring_s4->ring_lock); 4807 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4808 "3170 SCSI Layer abort requested I/O has been " 4809 "completed by LLD.\n"); 4810 goto out_unlock; 4811 } 4812 4813 BUG_ON(iocb->context1 != lpfc_cmd); 4814 4815 /* abort issued in recovery is still in progress */ 4816 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { 4817 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4818 "3389 SCSI Layer I/O Abort Request is pending\n"); 4819 if (phba->sli_rev == LPFC_SLI_REV4) 4820 spin_unlock(&pring_s4->ring_lock); 4821 spin_unlock_irqrestore(&phba->hbalock, flags); 4822 goto wait_for_cmpl; 4823 } 4824 4825 abtsiocb = __lpfc_sli_get_iocbq(phba); 4826 if (abtsiocb == NULL) { 4827 ret = FAILED; 4828 if (phba->sli_rev == LPFC_SLI_REV4) 4829 spin_unlock(&pring_s4->ring_lock); 4830 goto out_unlock; 4831 } 4832 4833 /* Indicate the IO is being aborted by the driver. */ 4834 iocb->iocb_flag |= LPFC_DRIVER_ABORTED; 4835 4836 /* 4837 * The scsi command can not be in txq and it is in flight because the 4838 * pCmd is still pointig at the SCSI command we have to abort. There 4839 * is no need to search the txcmplq. Just send an abort to the FW. 4840 */ 4841 4842 cmd = &iocb->iocb; 4843 icmd = &abtsiocb->iocb; 4844 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 4845 icmd->un.acxri.abortContextTag = cmd->ulpContext; 4846 if (phba->sli_rev == LPFC_SLI_REV4) 4847 icmd->un.acxri.abortIoTag = iocb->sli4_xritag; 4848 else 4849 icmd->un.acxri.abortIoTag = cmd->ulpIoTag; 4850 4851 icmd->ulpLe = 1; 4852 icmd->ulpClass = cmd->ulpClass; 4853 4854 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 4855 abtsiocb->hba_wqidx = iocb->hba_wqidx; 4856 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 4857 if (iocb->iocb_flag & LPFC_IO_FOF) 4858 abtsiocb->iocb_flag |= LPFC_IO_FOF; 4859 4860 if (lpfc_is_link_up(phba)) 4861 icmd->ulpCommand = CMD_ABORT_XRI_CN; 4862 else 4863 icmd->ulpCommand = CMD_CLOSE_XRI_CN; 4864 4865 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 4866 abtsiocb->vport = vport; 4867 lpfc_cmd->waitq = &waitq; 4868 if (phba->sli_rev == LPFC_SLI_REV4) { 4869 /* Note: both hbalock and ring_lock must be set here */ 4870 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 4871 abtsiocb, 0); 4872 spin_unlock(&pring_s4->ring_lock); 4873 } else { 4874 ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, 4875 abtsiocb, 0); 4876 } 4877 /* no longer need the lock after this point */ 4878 spin_unlock_irqrestore(&phba->hbalock, flags); 4879 4880 4881 if (ret_val == IOCB_ERROR) { 4882 if (phba->sli_rev == LPFC_SLI_REV4) 4883 spin_lock_irqsave(&pring_s4->ring_lock, flags); 4884 else 4885 spin_lock_irqsave(&phba->hbalock, flags); 4886 /* Indicate the IO is not being aborted by the driver. */ 4887 iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 4888 lpfc_cmd->waitq = NULL; 4889 if (phba->sli_rev == LPFC_SLI_REV4) 4890 spin_unlock_irqrestore(&pring_s4->ring_lock, flags); 4891 else 4892 spin_unlock_irqrestore(&phba->hbalock, flags); 4893 lpfc_sli_release_iocbq(phba, abtsiocb); 4894 ret = FAILED; 4895 goto out; 4896 } 4897 4898 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 4899 lpfc_sli_handle_fast_ring_event(phba, 4900 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 4901 4902 wait_for_cmpl: 4903 /* Wait for abort to complete */ 4904 wait_event_timeout(waitq, 4905 (lpfc_cmd->pCmd != cmnd), 4906 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); 4907 4908 spin_lock_irqsave(shost->host_lock, flags); 4909 lpfc_cmd->waitq = NULL; 4910 spin_unlock_irqrestore(shost->host_lock, flags); 4911 4912 if (lpfc_cmd->pCmd == cmnd) { 4913 ret = FAILED; 4914 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 4915 "0748 abort handler timed out waiting " 4916 "for aborting I/O (xri:x%x) to complete: " 4917 "ret %#x, ID %d, LUN %llu\n", 4918 iocb->sli4_xritag, ret, 4919 cmnd->device->id, cmnd->device->lun); 4920 } 4921 goto out; 4922 4923 out_unlock: 4924 spin_unlock_irqrestore(&phba->hbalock, flags); 4925 out: 4926 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4927 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 4928 "LUN %llu\n", ret, cmnd->device->id, 4929 cmnd->device->lun); 4930 return ret; 4931 } 4932 4933 static char * 4934 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) 4935 { 4936 switch (task_mgmt_cmd) { 4937 case FCP_ABORT_TASK_SET: 4938 return "ABORT_TASK_SET"; 4939 case FCP_CLEAR_TASK_SET: 4940 return "FCP_CLEAR_TASK_SET"; 4941 case FCP_BUS_RESET: 4942 return "FCP_BUS_RESET"; 4943 case FCP_LUN_RESET: 4944 return "FCP_LUN_RESET"; 4945 case FCP_TARGET_RESET: 4946 return "FCP_TARGET_RESET"; 4947 case FCP_CLEAR_ACA: 4948 return "FCP_CLEAR_ACA"; 4949 case FCP_TERMINATE_TASK: 4950 return "FCP_TERMINATE_TASK"; 4951 default: 4952 return "unknown"; 4953 } 4954 } 4955 4956 4957 /** 4958 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed 4959 * @vport: The virtual port for which this call is being executed. 4960 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 4961 * 4962 * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded 4963 * 4964 * Return code : 4965 * 0x2003 - Error 4966 * 0x2002 - Success 4967 **/ 4968 static int 4969 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd) 4970 { 4971 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 4972 uint32_t rsp_info; 4973 uint32_t rsp_len; 4974 uint8_t rsp_info_code; 4975 int ret = FAILED; 4976 4977 4978 if (fcprsp == NULL) 4979 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4980 "0703 fcp_rsp is missing\n"); 4981 else { 4982 rsp_info = fcprsp->rspStatus2; 4983 rsp_len = be32_to_cpu(fcprsp->rspRspLen); 4984 rsp_info_code = fcprsp->rspInfo3; 4985 4986 4987 lpfc_printf_vlog(vport, KERN_INFO, 4988 LOG_FCP, 4989 "0706 fcp_rsp valid 0x%x," 4990 " rsp len=%d code 0x%x\n", 4991 rsp_info, 4992 rsp_len, rsp_info_code); 4993 4994 if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) { 4995 switch (rsp_info_code) { 4996 case RSP_NO_FAILURE: 4997 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4998 "0715 Task Mgmt No Failure\n"); 4999 ret = SUCCESS; 5000 break; 5001 case RSP_TM_NOT_SUPPORTED: /* TM rejected */ 5002 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5003 "0716 Task Mgmt Target " 5004 "reject\n"); 5005 break; 5006 case RSP_TM_NOT_COMPLETED: /* TM failed */ 5007 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5008 "0717 Task Mgmt Target " 5009 "failed TM\n"); 5010 break; 5011 case RSP_TM_INVALID_LU: /* TM to invalid LU! */ 5012 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5013 "0718 Task Mgmt to invalid " 5014 "LUN\n"); 5015 break; 5016 } 5017 } 5018 } 5019 return ret; 5020 } 5021 5022 5023 /** 5024 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler 5025 * @vport: The virtual port for which this call is being executed. 5026 * @rdata: Pointer to remote port local data 5027 * @tgt_id: Target ID of remote device. 5028 * @lun_id: Lun number for the TMF 5029 * @task_mgmt_cmd: type of TMF to send 5030 * 5031 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to 5032 * a remote port. 5033 * 5034 * Return Code: 5035 * 0x2003 - Error 5036 * 0x2002 - Success. 5037 **/ 5038 static int 5039 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, 5040 unsigned int tgt_id, uint64_t lun_id, 5041 uint8_t task_mgmt_cmd) 5042 { 5043 struct lpfc_hba *phba = vport->phba; 5044 struct lpfc_scsi_buf *lpfc_cmd; 5045 struct lpfc_iocbq *iocbq; 5046 struct lpfc_iocbq *iocbqrsp; 5047 struct lpfc_rport_data *rdata; 5048 struct lpfc_nodelist *pnode; 5049 int ret; 5050 int status; 5051 5052 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5053 if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) 5054 return FAILED; 5055 pnode = rdata->pnode; 5056 5057 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode); 5058 if (lpfc_cmd == NULL) 5059 return FAILED; 5060 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; 5061 lpfc_cmd->rdata = rdata; 5062 lpfc_cmd->pCmd = cmnd; 5063 lpfc_cmd->ndlp = pnode; 5064 5065 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 5066 task_mgmt_cmd); 5067 if (!status) { 5068 lpfc_release_scsi_buf(phba, lpfc_cmd); 5069 return FAILED; 5070 } 5071 5072 iocbq = &lpfc_cmd->cur_iocbq; 5073 iocbqrsp = lpfc_sli_get_iocbq(phba); 5074 if (iocbqrsp == NULL) { 5075 lpfc_release_scsi_buf(phba, lpfc_cmd); 5076 return FAILED; 5077 } 5078 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 5079 5080 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5081 "0702 Issue %s to TGT %d LUN %llu " 5082 "rpi x%x nlp_flag x%x Data: x%x x%x\n", 5083 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 5084 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, 5085 iocbq->iocb_flag); 5086 5087 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 5088 iocbq, iocbqrsp, lpfc_cmd->timeout); 5089 if ((status != IOCB_SUCCESS) || 5090 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { 5091 if (status != IOCB_SUCCESS || 5092 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR) 5093 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5094 "0727 TMF %s to TGT %d LUN %llu " 5095 "failed (%d, %d) iocb_flag x%x\n", 5096 lpfc_taskmgmt_name(task_mgmt_cmd), 5097 tgt_id, lun_id, 5098 iocbqrsp->iocb.ulpStatus, 5099 iocbqrsp->iocb.un.ulpWord[4], 5100 iocbq->iocb_flag); 5101 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ 5102 if (status == IOCB_SUCCESS) { 5103 if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 5104 /* Something in the FCP_RSP was invalid. 5105 * Check conditions */ 5106 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); 5107 else 5108 ret = FAILED; 5109 } else if (status == IOCB_TIMEDOUT) { 5110 ret = TIMEOUT_ERROR; 5111 } else { 5112 ret = FAILED; 5113 } 5114 } else 5115 ret = SUCCESS; 5116 5117 lpfc_sli_release_iocbq(phba, iocbqrsp); 5118 5119 if (ret != TIMEOUT_ERROR) 5120 lpfc_release_scsi_buf(phba, lpfc_cmd); 5121 5122 return ret; 5123 } 5124 5125 /** 5126 * lpfc_chk_tgt_mapped - 5127 * @vport: The virtual port to check on 5128 * @cmnd: Pointer to scsi_cmnd data structure. 5129 * 5130 * This routine delays until the scsi target (aka rport) for the 5131 * command exists (is present and logged in) or we declare it non-existent. 5132 * 5133 * Return code : 5134 * 0x2003 - Error 5135 * 0x2002 - Success 5136 **/ 5137 static int 5138 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 5139 { 5140 struct lpfc_rport_data *rdata; 5141 struct lpfc_nodelist *pnode; 5142 unsigned long later; 5143 5144 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5145 if (!rdata) { 5146 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5147 "0797 Tgt Map rport failure: rdata x%p\n", rdata); 5148 return FAILED; 5149 } 5150 pnode = rdata->pnode; 5151 /* 5152 * If target is not in a MAPPED state, delay until 5153 * target is rediscovered or devloss timeout expires. 5154 */ 5155 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 5156 while (time_after(later, jiffies)) { 5157 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 5158 return FAILED; 5159 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 5160 return SUCCESS; 5161 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 5162 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5163 if (!rdata) 5164 return FAILED; 5165 pnode = rdata->pnode; 5166 } 5167 if (!pnode || !NLP_CHK_NODE_ACT(pnode) || 5168 (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 5169 return FAILED; 5170 return SUCCESS; 5171 } 5172 5173 /** 5174 * lpfc_reset_flush_io_context - 5175 * @vport: The virtual port (scsi_host) for the flush context 5176 * @tgt_id: If aborting by Target contect - specifies the target id 5177 * @lun_id: If aborting by Lun context - specifies the lun id 5178 * @context: specifies the context level to flush at. 5179 * 5180 * After a reset condition via TMF, we need to flush orphaned i/o 5181 * contexts from the adapter. This routine aborts any contexts 5182 * outstanding, then waits for their completions. The wait is 5183 * bounded by devloss_tmo though. 5184 * 5185 * Return code : 5186 * 0x2003 - Error 5187 * 0x2002 - Success 5188 **/ 5189 static int 5190 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, 5191 uint64_t lun_id, lpfc_ctx_cmd context) 5192 { 5193 struct lpfc_hba *phba = vport->phba; 5194 unsigned long later; 5195 int cnt; 5196 5197 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 5198 if (cnt) 5199 lpfc_sli_abort_taskmgmt(vport, 5200 &phba->sli.sli3_ring[LPFC_FCP_RING], 5201 tgt_id, lun_id, context); 5202 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 5203 while (time_after(later, jiffies) && cnt) { 5204 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 5205 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 5206 } 5207 if (cnt) { 5208 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5209 "0724 I/O flush failure for context %s : cnt x%x\n", 5210 ((context == LPFC_CTX_LUN) ? "LUN" : 5211 ((context == LPFC_CTX_TGT) ? "TGT" : 5212 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))), 5213 cnt); 5214 return FAILED; 5215 } 5216 return SUCCESS; 5217 } 5218 5219 /** 5220 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point 5221 * @cmnd: Pointer to scsi_cmnd data structure. 5222 * 5223 * This routine does a device reset by sending a LUN_RESET task management 5224 * command. 5225 * 5226 * Return code : 5227 * 0x2003 - Error 5228 * 0x2002 - Success 5229 **/ 5230 static int 5231 lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 5232 { 5233 struct Scsi_Host *shost = cmnd->device->host; 5234 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5235 struct lpfc_rport_data *rdata; 5236 struct lpfc_nodelist *pnode; 5237 unsigned tgt_id = cmnd->device->id; 5238 uint64_t lun_id = cmnd->device->lun; 5239 struct lpfc_scsi_event_header scsi_event; 5240 int status; 5241 5242 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5243 if (!rdata || !rdata->pnode) { 5244 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5245 "0798 Device Reset rport failure: rdata x%p\n", 5246 rdata); 5247 return FAILED; 5248 } 5249 pnode = rdata->pnode; 5250 status = fc_block_scsi_eh(cmnd); 5251 if (status != 0 && status != SUCCESS) 5252 return status; 5253 5254 status = lpfc_chk_tgt_mapped(vport, cmnd); 5255 if (status == FAILED) { 5256 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5257 "0721 Device Reset rport failure: rdata x%p\n", rdata); 5258 return FAILED; 5259 } 5260 5261 scsi_event.event_type = FC_REG_SCSI_EVENT; 5262 scsi_event.subcategory = LPFC_EVENT_LUNRESET; 5263 scsi_event.lun = lun_id; 5264 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 5265 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 5266 5267 fc_host_post_vendor_event(shost, fc_get_event_number(), 5268 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 5269 5270 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 5271 FCP_LUN_RESET); 5272 5273 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5274 "0713 SCSI layer issued Device Reset (%d, %llu) " 5275 "return x%x\n", tgt_id, lun_id, status); 5276 5277 /* 5278 * We have to clean up i/o as : they may be orphaned by the TMF; 5279 * or if the TMF failed, they may be in an indeterminate state. 5280 * So, continue on. 5281 * We will report success if all the i/o aborts successfully. 5282 */ 5283 if (status == SUCCESS) 5284 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 5285 LPFC_CTX_LUN); 5286 5287 return status; 5288 } 5289 5290 /** 5291 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point 5292 * @cmnd: Pointer to scsi_cmnd data structure. 5293 * 5294 * This routine does a target reset by sending a TARGET_RESET task management 5295 * command. 5296 * 5297 * Return code : 5298 * 0x2003 - Error 5299 * 0x2002 - Success 5300 **/ 5301 static int 5302 lpfc_target_reset_handler(struct scsi_cmnd *cmnd) 5303 { 5304 struct Scsi_Host *shost = cmnd->device->host; 5305 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5306 struct lpfc_rport_data *rdata; 5307 struct lpfc_nodelist *pnode; 5308 unsigned tgt_id = cmnd->device->id; 5309 uint64_t lun_id = cmnd->device->lun; 5310 struct lpfc_scsi_event_header scsi_event; 5311 int status; 5312 5313 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5314 if (!rdata) { 5315 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5316 "0799 Target Reset rport failure: rdata x%p\n", rdata); 5317 return FAILED; 5318 } 5319 pnode = rdata->pnode; 5320 status = fc_block_scsi_eh(cmnd); 5321 if (status != 0 && status != SUCCESS) 5322 return status; 5323 5324 status = lpfc_chk_tgt_mapped(vport, cmnd); 5325 if (status == FAILED) { 5326 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5327 "0722 Target Reset rport failure: rdata x%p\n", rdata); 5328 if (pnode) { 5329 spin_lock_irq(shost->host_lock); 5330 pnode->nlp_flag &= ~NLP_NPR_ADISC; 5331 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 5332 spin_unlock_irq(shost->host_lock); 5333 } 5334 lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 5335 LPFC_CTX_TGT); 5336 return FAST_IO_FAIL; 5337 } 5338 5339 scsi_event.event_type = FC_REG_SCSI_EVENT; 5340 scsi_event.subcategory = LPFC_EVENT_TGTRESET; 5341 scsi_event.lun = 0; 5342 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 5343 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 5344 5345 fc_host_post_vendor_event(shost, fc_get_event_number(), 5346 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 5347 5348 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 5349 FCP_TARGET_RESET); 5350 5351 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5352 "0723 SCSI layer issued Target Reset (%d, %llu) " 5353 "return x%x\n", tgt_id, lun_id, status); 5354 5355 /* 5356 * We have to clean up i/o as : they may be orphaned by the TMF; 5357 * or if the TMF failed, they may be in an indeterminate state. 5358 * So, continue on. 5359 * We will report success if all the i/o aborts successfully. 5360 */ 5361 if (status == SUCCESS) 5362 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 5363 LPFC_CTX_TGT); 5364 return status; 5365 } 5366 5367 /** 5368 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point 5369 * @cmnd: Pointer to scsi_cmnd data structure. 5370 * 5371 * This routine does target reset to all targets on @cmnd->device->host. 5372 * This emulates Parallel SCSI Bus Reset Semantics. 5373 * 5374 * Return code : 5375 * 0x2003 - Error 5376 * 0x2002 - Success 5377 **/ 5378 static int 5379 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 5380 { 5381 struct Scsi_Host *shost = cmnd->device->host; 5382 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5383 struct lpfc_nodelist *ndlp = NULL; 5384 struct lpfc_scsi_event_header scsi_event; 5385 int match; 5386 int ret = SUCCESS, status, i; 5387 5388 scsi_event.event_type = FC_REG_SCSI_EVENT; 5389 scsi_event.subcategory = LPFC_EVENT_BUSRESET; 5390 scsi_event.lun = 0; 5391 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 5392 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 5393 5394 fc_host_post_vendor_event(shost, fc_get_event_number(), 5395 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 5396 5397 status = fc_block_scsi_eh(cmnd); 5398 if (status != 0 && status != SUCCESS) 5399 return status; 5400 5401 /* 5402 * Since the driver manages a single bus device, reset all 5403 * targets known to the driver. Should any target reset 5404 * fail, this routine returns failure to the midlayer. 5405 */ 5406 for (i = 0; i < LPFC_MAX_TARGET; i++) { 5407 /* Search for mapped node by target ID */ 5408 match = 0; 5409 spin_lock_irq(shost->host_lock); 5410 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 5411 if (!NLP_CHK_NODE_ACT(ndlp)) 5412 continue; 5413 if (vport->phba->cfg_fcp2_no_tgt_reset && 5414 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) 5415 continue; 5416 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 5417 ndlp->nlp_sid == i && 5418 ndlp->rport && 5419 ndlp->nlp_type & NLP_FCP_TARGET) { 5420 match = 1; 5421 break; 5422 } 5423 } 5424 spin_unlock_irq(shost->host_lock); 5425 if (!match) 5426 continue; 5427 5428 status = lpfc_send_taskmgmt(vport, cmnd, 5429 i, 0, FCP_TARGET_RESET); 5430 5431 if (status != SUCCESS) { 5432 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5433 "0700 Bus Reset on target %d failed\n", 5434 i); 5435 ret = FAILED; 5436 } 5437 } 5438 /* 5439 * We have to clean up i/o as : they may be orphaned by the TMFs 5440 * above; or if any of the TMFs failed, they may be in an 5441 * indeterminate state. 5442 * We will report success if all the i/o aborts successfully. 5443 */ 5444 5445 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST); 5446 if (status != SUCCESS) 5447 ret = FAILED; 5448 5449 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5450 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 5451 return ret; 5452 } 5453 5454 /** 5455 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt 5456 * @cmnd: Pointer to scsi_cmnd data structure. 5457 * 5458 * This routine does host reset to the adaptor port. It brings the HBA 5459 * offline, performs a board restart, and then brings the board back online. 5460 * The lpfc_offline calls lpfc_sli_hba_down which will abort and local 5461 * reject all outstanding SCSI commands to the host and error returned 5462 * back to SCSI mid-level. As this will be SCSI mid-level's last resort 5463 * of error handling, it will only return error if resetting of the adapter 5464 * is not successful; in all other cases, will return success. 5465 * 5466 * Return code : 5467 * 0x2003 - Error 5468 * 0x2002 - Success 5469 **/ 5470 static int 5471 lpfc_host_reset_handler(struct scsi_cmnd *cmnd) 5472 { 5473 struct Scsi_Host *shost = cmnd->device->host; 5474 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5475 struct lpfc_hba *phba = vport->phba; 5476 int rc, ret = SUCCESS; 5477 5478 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5479 "3172 SCSI layer issued Host Reset Data:\n"); 5480 5481 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 5482 lpfc_offline(phba); 5483 rc = lpfc_sli_brdrestart(phba); 5484 if (rc) 5485 ret = FAILED; 5486 rc = lpfc_online(phba); 5487 if (rc) 5488 ret = FAILED; 5489 lpfc_unblock_mgmt_io(phba); 5490 5491 if (ret == FAILED) { 5492 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5493 "3323 Failed host reset, bring it offline\n"); 5494 lpfc_sli4_offline_eratt(phba); 5495 } 5496 return ret; 5497 } 5498 5499 /** 5500 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point 5501 * @sdev: Pointer to scsi_device. 5502 * 5503 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's 5504 * globally available list of scsi buffers. This routine also makes sure scsi 5505 * buffer is not allocated more than HBA limit conveyed to midlayer. This list 5506 * of scsi buffer exists for the lifetime of the driver. 5507 * 5508 * Return codes: 5509 * non-0 - Error 5510 * 0 - Success 5511 **/ 5512 static int 5513 lpfc_slave_alloc(struct scsi_device *sdev) 5514 { 5515 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 5516 struct lpfc_hba *phba = vport->phba; 5517 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 5518 uint32_t total = 0; 5519 uint32_t num_to_alloc = 0; 5520 int num_allocated = 0; 5521 uint32_t sdev_cnt; 5522 struct lpfc_device_data *device_data; 5523 unsigned long flags; 5524 struct lpfc_name target_wwpn; 5525 5526 if (!rport || fc_remote_port_chkready(rport)) 5527 return -ENXIO; 5528 5529 if (phba->cfg_fof) { 5530 5531 /* 5532 * Check to see if the device data structure for the lun 5533 * exists. If not, create one. 5534 */ 5535 5536 u64_to_wwn(rport->port_name, target_wwpn.u.wwn); 5537 spin_lock_irqsave(&phba->devicelock, flags); 5538 device_data = __lpfc_get_device_data(phba, 5539 &phba->luns, 5540 &vport->fc_portname, 5541 &target_wwpn, 5542 sdev->lun); 5543 if (!device_data) { 5544 spin_unlock_irqrestore(&phba->devicelock, flags); 5545 device_data = lpfc_create_device_data(phba, 5546 &vport->fc_portname, 5547 &target_wwpn, 5548 sdev->lun, 5549 phba->cfg_XLanePriority, 5550 true); 5551 if (!device_data) 5552 return -ENOMEM; 5553 spin_lock_irqsave(&phba->devicelock, flags); 5554 list_add_tail(&device_data->listentry, &phba->luns); 5555 } 5556 device_data->rport_data = rport->dd_data; 5557 device_data->available = true; 5558 spin_unlock_irqrestore(&phba->devicelock, flags); 5559 sdev->hostdata = device_data; 5560 } else { 5561 sdev->hostdata = rport->dd_data; 5562 } 5563 sdev_cnt = atomic_inc_return(&phba->sdev_cnt); 5564 5565 /* 5566 * Populate the cmds_per_lun count scsi_bufs into this host's globally 5567 * available list of scsi buffers. Don't allocate more than the 5568 * HBA limit conveyed to the midlayer via the host structure. The 5569 * formula accounts for the lun_queue_depth + error handlers + 1 5570 * extra. This list of scsi bufs exists for the lifetime of the driver. 5571 */ 5572 total = phba->total_scsi_bufs; 5573 num_to_alloc = vport->cfg_lun_queue_depth + 2; 5574 5575 /* If allocated buffers are enough do nothing */ 5576 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total) 5577 return 0; 5578 5579 /* Allow some exchanges to be available always to complete discovery */ 5580 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 5581 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5582 "0704 At limitation of %d preallocated " 5583 "command buffers\n", total); 5584 return 0; 5585 /* Allow some exchanges to be available always to complete discovery */ 5586 } else if (total + num_to_alloc > 5587 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 5588 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5589 "0705 Allocation request of %d " 5590 "command buffers will exceed max of %d. " 5591 "Reducing allocation request to %d.\n", 5592 num_to_alloc, phba->cfg_hba_queue_depth, 5593 (phba->cfg_hba_queue_depth - total)); 5594 num_to_alloc = phba->cfg_hba_queue_depth - total; 5595 } 5596 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); 5597 if (num_to_alloc != num_allocated) { 5598 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5599 "0708 Allocation request of %d " 5600 "command buffers did not succeed. " 5601 "Allocated %d buffers.\n", 5602 num_to_alloc, num_allocated); 5603 } 5604 if (num_allocated > 0) 5605 phba->total_scsi_bufs += num_allocated; 5606 return 0; 5607 } 5608 5609 /** 5610 * lpfc_slave_configure - scsi_host_template slave_configure entry point 5611 * @sdev: Pointer to scsi_device. 5612 * 5613 * This routine configures following items 5614 * - Tag command queuing support for @sdev if supported. 5615 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 5616 * 5617 * Return codes: 5618 * 0 - Success 5619 **/ 5620 static int 5621 lpfc_slave_configure(struct scsi_device *sdev) 5622 { 5623 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 5624 struct lpfc_hba *phba = vport->phba; 5625 5626 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth); 5627 5628 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5629 lpfc_sli_handle_fast_ring_event(phba, 5630 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5631 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5632 lpfc_poll_rearm_timer(phba); 5633 } 5634 5635 return 0; 5636 } 5637 5638 /** 5639 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure 5640 * @sdev: Pointer to scsi_device. 5641 * 5642 * This routine sets @sdev hostatdata filed to null. 5643 **/ 5644 static void 5645 lpfc_slave_destroy(struct scsi_device *sdev) 5646 { 5647 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 5648 struct lpfc_hba *phba = vport->phba; 5649 unsigned long flags; 5650 struct lpfc_device_data *device_data = sdev->hostdata; 5651 5652 atomic_dec(&phba->sdev_cnt); 5653 if ((phba->cfg_fof) && (device_data)) { 5654 spin_lock_irqsave(&phba->devicelock, flags); 5655 device_data->available = false; 5656 if (!device_data->oas_enabled) 5657 lpfc_delete_device_data(phba, device_data); 5658 spin_unlock_irqrestore(&phba->devicelock, flags); 5659 } 5660 sdev->hostdata = NULL; 5661 return; 5662 } 5663 5664 /** 5665 * lpfc_create_device_data - creates and initializes device data structure for OAS 5666 * @pha: Pointer to host bus adapter structure. 5667 * @vport_wwpn: Pointer to vport's wwpn information 5668 * @target_wwpn: Pointer to target's wwpn information 5669 * @lun: Lun on target 5670 * @atomic_create: Flag to indicate if memory should be allocated using the 5671 * GFP_ATOMIC flag or not. 5672 * 5673 * This routine creates a device data structure which will contain identifying 5674 * information for the device (host wwpn, target wwpn, lun), state of OAS, 5675 * whether or not the corresponding lun is available by the system, 5676 * and pointer to the rport data. 5677 * 5678 * Return codes: 5679 * NULL - Error 5680 * Pointer to lpfc_device_data - Success 5681 **/ 5682 struct lpfc_device_data* 5683 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 5684 struct lpfc_name *target_wwpn, uint64_t lun, 5685 uint32_t pri, bool atomic_create) 5686 { 5687 5688 struct lpfc_device_data *lun_info; 5689 int memory_flags; 5690 5691 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5692 !(phba->cfg_fof)) 5693 return NULL; 5694 5695 /* Attempt to create the device data to contain lun info */ 5696 5697 if (atomic_create) 5698 memory_flags = GFP_ATOMIC; 5699 else 5700 memory_flags = GFP_KERNEL; 5701 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags); 5702 if (!lun_info) 5703 return NULL; 5704 INIT_LIST_HEAD(&lun_info->listentry); 5705 lun_info->rport_data = NULL; 5706 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, 5707 sizeof(struct lpfc_name)); 5708 memcpy(&lun_info->device_id.target_wwpn, target_wwpn, 5709 sizeof(struct lpfc_name)); 5710 lun_info->device_id.lun = lun; 5711 lun_info->oas_enabled = false; 5712 lun_info->priority = pri; 5713 lun_info->available = false; 5714 return lun_info; 5715 } 5716 5717 /** 5718 * lpfc_delete_device_data - frees a device data structure for OAS 5719 * @pha: Pointer to host bus adapter structure. 5720 * @lun_info: Pointer to device data structure to free. 5721 * 5722 * This routine frees the previously allocated device data structure passed. 5723 * 5724 **/ 5725 void 5726 lpfc_delete_device_data(struct lpfc_hba *phba, 5727 struct lpfc_device_data *lun_info) 5728 { 5729 5730 if (unlikely(!phba) || !lun_info || 5731 !(phba->cfg_fof)) 5732 return; 5733 5734 if (!list_empty(&lun_info->listentry)) 5735 list_del(&lun_info->listentry); 5736 mempool_free(lun_info, phba->device_data_mem_pool); 5737 return; 5738 } 5739 5740 /** 5741 * __lpfc_get_device_data - returns the device data for the specified lun 5742 * @pha: Pointer to host bus adapter structure. 5743 * @list: Point to list to search. 5744 * @vport_wwpn: Pointer to vport's wwpn information 5745 * @target_wwpn: Pointer to target's wwpn information 5746 * @lun: Lun on target 5747 * 5748 * This routine searches the list passed for the specified lun's device data. 5749 * This function does not hold locks, it is the responsibility of the caller 5750 * to ensure the proper lock is held before calling the function. 5751 * 5752 * Return codes: 5753 * NULL - Error 5754 * Pointer to lpfc_device_data - Success 5755 **/ 5756 struct lpfc_device_data* 5757 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, 5758 struct lpfc_name *vport_wwpn, 5759 struct lpfc_name *target_wwpn, uint64_t lun) 5760 { 5761 5762 struct lpfc_device_data *lun_info; 5763 5764 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || 5765 !phba->cfg_fof) 5766 return NULL; 5767 5768 /* Check to see if the lun is already enabled for OAS. */ 5769 5770 list_for_each_entry(lun_info, list, listentry) { 5771 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 5772 sizeof(struct lpfc_name)) == 0) && 5773 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 5774 sizeof(struct lpfc_name)) == 0) && 5775 (lun_info->device_id.lun == lun)) 5776 return lun_info; 5777 } 5778 5779 return NULL; 5780 } 5781 5782 /** 5783 * lpfc_find_next_oas_lun - searches for the next oas lun 5784 * @pha: Pointer to host bus adapter structure. 5785 * @vport_wwpn: Pointer to vport's wwpn information 5786 * @target_wwpn: Pointer to target's wwpn information 5787 * @starting_lun: Pointer to the lun to start searching for 5788 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information 5789 * @found_target_wwpn: Pointer to the found lun's target wwpn information 5790 * @found_lun: Pointer to the found lun. 5791 * @found_lun_status: Pointer to status of the found lun. 5792 * 5793 * This routine searches the luns list for the specified lun 5794 * or the first lun for the vport/target. If the vport wwpn contains 5795 * a zero value then a specific vport is not specified. In this case 5796 * any vport which contains the lun will be considered a match. If the 5797 * target wwpn contains a zero value then a specific target is not specified. 5798 * In this case any target which contains the lun will be considered a 5799 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status 5800 * are returned. The function will also return the next lun if available. 5801 * If the next lun is not found, starting_lun parameter will be set to 5802 * NO_MORE_OAS_LUN. 5803 * 5804 * Return codes: 5805 * non-0 - Error 5806 * 0 - Success 5807 **/ 5808 bool 5809 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 5810 struct lpfc_name *target_wwpn, uint64_t *starting_lun, 5811 struct lpfc_name *found_vport_wwpn, 5812 struct lpfc_name *found_target_wwpn, 5813 uint64_t *found_lun, 5814 uint32_t *found_lun_status, 5815 uint32_t *found_lun_pri) 5816 { 5817 5818 unsigned long flags; 5819 struct lpfc_device_data *lun_info; 5820 struct lpfc_device_id *device_id; 5821 uint64_t lun; 5822 bool found = false; 5823 5824 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5825 !starting_lun || !found_vport_wwpn || 5826 !found_target_wwpn || !found_lun || !found_lun_status || 5827 (*starting_lun == NO_MORE_OAS_LUN) || 5828 !phba->cfg_fof) 5829 return false; 5830 5831 lun = *starting_lun; 5832 *found_lun = NO_MORE_OAS_LUN; 5833 *starting_lun = NO_MORE_OAS_LUN; 5834 5835 /* Search for lun or the lun closet in value */ 5836 5837 spin_lock_irqsave(&phba->devicelock, flags); 5838 list_for_each_entry(lun_info, &phba->luns, listentry) { 5839 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || 5840 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 5841 sizeof(struct lpfc_name)) == 0)) && 5842 ((wwn_to_u64(target_wwpn->u.wwn) == 0) || 5843 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 5844 sizeof(struct lpfc_name)) == 0)) && 5845 (lun_info->oas_enabled)) { 5846 device_id = &lun_info->device_id; 5847 if ((!found) && 5848 ((lun == FIND_FIRST_OAS_LUN) || 5849 (device_id->lun == lun))) { 5850 *found_lun = device_id->lun; 5851 memcpy(found_vport_wwpn, 5852 &device_id->vport_wwpn, 5853 sizeof(struct lpfc_name)); 5854 memcpy(found_target_wwpn, 5855 &device_id->target_wwpn, 5856 sizeof(struct lpfc_name)); 5857 if (lun_info->available) 5858 *found_lun_status = 5859 OAS_LUN_STATUS_EXISTS; 5860 else 5861 *found_lun_status = 0; 5862 *found_lun_pri = lun_info->priority; 5863 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) 5864 memset(vport_wwpn, 0x0, 5865 sizeof(struct lpfc_name)); 5866 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) 5867 memset(target_wwpn, 0x0, 5868 sizeof(struct lpfc_name)); 5869 found = true; 5870 } else if (found) { 5871 *starting_lun = device_id->lun; 5872 memcpy(vport_wwpn, &device_id->vport_wwpn, 5873 sizeof(struct lpfc_name)); 5874 memcpy(target_wwpn, &device_id->target_wwpn, 5875 sizeof(struct lpfc_name)); 5876 break; 5877 } 5878 } 5879 } 5880 spin_unlock_irqrestore(&phba->devicelock, flags); 5881 return found; 5882 } 5883 5884 /** 5885 * lpfc_enable_oas_lun - enables a lun for OAS operations 5886 * @pha: Pointer to host bus adapter structure. 5887 * @vport_wwpn: Pointer to vport's wwpn information 5888 * @target_wwpn: Pointer to target's wwpn information 5889 * @lun: Lun 5890 * 5891 * This routine enables a lun for oas operations. The routines does so by 5892 * doing the following : 5893 * 5894 * 1) Checks to see if the device data for the lun has been created. 5895 * 2) If found, sets the OAS enabled flag if not set and returns. 5896 * 3) Otherwise, creates a device data structure. 5897 * 4) If successfully created, indicates the device data is for an OAS lun, 5898 * indicates the lun is not available and add to the list of luns. 5899 * 5900 * Return codes: 5901 * false - Error 5902 * true - Success 5903 **/ 5904 bool 5905 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 5906 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 5907 { 5908 5909 struct lpfc_device_data *lun_info; 5910 unsigned long flags; 5911 5912 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5913 !phba->cfg_fof) 5914 return false; 5915 5916 spin_lock_irqsave(&phba->devicelock, flags); 5917 5918 /* Check to see if the device data for the lun has been created */ 5919 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, 5920 target_wwpn, lun); 5921 if (lun_info) { 5922 if (!lun_info->oas_enabled) 5923 lun_info->oas_enabled = true; 5924 lun_info->priority = pri; 5925 spin_unlock_irqrestore(&phba->devicelock, flags); 5926 return true; 5927 } 5928 5929 /* Create an lun info structure and add to list of luns */ 5930 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, 5931 pri, false); 5932 if (lun_info) { 5933 lun_info->oas_enabled = true; 5934 lun_info->priority = pri; 5935 lun_info->available = false; 5936 list_add_tail(&lun_info->listentry, &phba->luns); 5937 spin_unlock_irqrestore(&phba->devicelock, flags); 5938 return true; 5939 } 5940 spin_unlock_irqrestore(&phba->devicelock, flags); 5941 return false; 5942 } 5943 5944 /** 5945 * lpfc_disable_oas_lun - disables a lun for OAS operations 5946 * @pha: Pointer to host bus adapter structure. 5947 * @vport_wwpn: Pointer to vport's wwpn information 5948 * @target_wwpn: Pointer to target's wwpn information 5949 * @lun: Lun 5950 * 5951 * This routine disables a lun for oas operations. The routines does so by 5952 * doing the following : 5953 * 5954 * 1) Checks to see if the device data for the lun is created. 5955 * 2) If present, clears the flag indicating this lun is for OAS. 5956 * 3) If the lun is not available by the system, the device data is 5957 * freed. 5958 * 5959 * Return codes: 5960 * false - Error 5961 * true - Success 5962 **/ 5963 bool 5964 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 5965 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 5966 { 5967 5968 struct lpfc_device_data *lun_info; 5969 unsigned long flags; 5970 5971 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5972 !phba->cfg_fof) 5973 return false; 5974 5975 spin_lock_irqsave(&phba->devicelock, flags); 5976 5977 /* Check to see if the lun is available. */ 5978 lun_info = __lpfc_get_device_data(phba, 5979 &phba->luns, vport_wwpn, 5980 target_wwpn, lun); 5981 if (lun_info) { 5982 lun_info->oas_enabled = false; 5983 lun_info->priority = pri; 5984 if (!lun_info->available) 5985 lpfc_delete_device_data(phba, lun_info); 5986 spin_unlock_irqrestore(&phba->devicelock, flags); 5987 return true; 5988 } 5989 5990 spin_unlock_irqrestore(&phba->devicelock, flags); 5991 return false; 5992 } 5993 5994 static int 5995 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 5996 { 5997 return SCSI_MLQUEUE_HOST_BUSY; 5998 } 5999 6000 static int 6001 lpfc_no_handler(struct scsi_cmnd *cmnd) 6002 { 6003 return FAILED; 6004 } 6005 6006 static int 6007 lpfc_no_slave(struct scsi_device *sdev) 6008 { 6009 return -ENODEV; 6010 } 6011 6012 struct scsi_host_template lpfc_template_nvme = { 6013 .module = THIS_MODULE, 6014 .name = LPFC_DRIVER_NAME, 6015 .proc_name = LPFC_DRIVER_NAME, 6016 .info = lpfc_info, 6017 .queuecommand = lpfc_no_command, 6018 .eh_abort_handler = lpfc_no_handler, 6019 .eh_device_reset_handler = lpfc_no_handler, 6020 .eh_target_reset_handler = lpfc_no_handler, 6021 .eh_bus_reset_handler = lpfc_no_handler, 6022 .eh_host_reset_handler = lpfc_no_handler, 6023 .slave_alloc = lpfc_no_slave, 6024 .slave_configure = lpfc_no_slave, 6025 .scan_finished = lpfc_scan_finished, 6026 .this_id = -1, 6027 .sg_tablesize = 1, 6028 .cmd_per_lun = 1, 6029 .use_clustering = ENABLE_CLUSTERING, 6030 .shost_attrs = lpfc_hba_attrs, 6031 .max_sectors = 0xFFFF, 6032 .vendor_id = LPFC_NL_VENDOR_ID, 6033 .track_queue_depth = 0, 6034 }; 6035 6036 struct scsi_host_template lpfc_template_no_hr = { 6037 .module = THIS_MODULE, 6038 .name = LPFC_DRIVER_NAME, 6039 .proc_name = LPFC_DRIVER_NAME, 6040 .info = lpfc_info, 6041 .queuecommand = lpfc_queuecommand, 6042 .eh_timed_out = fc_eh_timed_out, 6043 .eh_abort_handler = lpfc_abort_handler, 6044 .eh_device_reset_handler = lpfc_device_reset_handler, 6045 .eh_target_reset_handler = lpfc_target_reset_handler, 6046 .eh_bus_reset_handler = lpfc_bus_reset_handler, 6047 .slave_alloc = lpfc_slave_alloc, 6048 .slave_configure = lpfc_slave_configure, 6049 .slave_destroy = lpfc_slave_destroy, 6050 .scan_finished = lpfc_scan_finished, 6051 .this_id = -1, 6052 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 6053 .cmd_per_lun = LPFC_CMD_PER_LUN, 6054 .use_clustering = ENABLE_CLUSTERING, 6055 .shost_attrs = lpfc_hba_attrs, 6056 .max_sectors = 0xFFFF, 6057 .vendor_id = LPFC_NL_VENDOR_ID, 6058 .change_queue_depth = scsi_change_queue_depth, 6059 .track_queue_depth = 1, 6060 }; 6061 6062 struct scsi_host_template lpfc_template = { 6063 .module = THIS_MODULE, 6064 .name = LPFC_DRIVER_NAME, 6065 .proc_name = LPFC_DRIVER_NAME, 6066 .info = lpfc_info, 6067 .queuecommand = lpfc_queuecommand, 6068 .eh_timed_out = fc_eh_timed_out, 6069 .eh_abort_handler = lpfc_abort_handler, 6070 .eh_device_reset_handler = lpfc_device_reset_handler, 6071 .eh_target_reset_handler = lpfc_target_reset_handler, 6072 .eh_bus_reset_handler = lpfc_bus_reset_handler, 6073 .eh_host_reset_handler = lpfc_host_reset_handler, 6074 .slave_alloc = lpfc_slave_alloc, 6075 .slave_configure = lpfc_slave_configure, 6076 .slave_destroy = lpfc_slave_destroy, 6077 .scan_finished = lpfc_scan_finished, 6078 .this_id = -1, 6079 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 6080 .cmd_per_lun = LPFC_CMD_PER_LUN, 6081 .use_clustering = ENABLE_CLUSTERING, 6082 .shost_attrs = lpfc_hba_attrs, 6083 .max_sectors = 0xFFFF, 6084 .vendor_id = LPFC_NL_VENDOR_ID, 6085 .change_queue_depth = scsi_change_queue_depth, 6086 .track_queue_depth = 1, 6087 }; 6088 6089 struct scsi_host_template lpfc_vport_template = { 6090 .module = THIS_MODULE, 6091 .name = LPFC_DRIVER_NAME, 6092 .proc_name = LPFC_DRIVER_NAME, 6093 .info = lpfc_info, 6094 .queuecommand = lpfc_queuecommand, 6095 .eh_timed_out = fc_eh_timed_out, 6096 .eh_abort_handler = lpfc_abort_handler, 6097 .eh_device_reset_handler = lpfc_device_reset_handler, 6098 .eh_target_reset_handler = lpfc_target_reset_handler, 6099 .slave_alloc = lpfc_slave_alloc, 6100 .slave_configure = lpfc_slave_configure, 6101 .slave_destroy = lpfc_slave_destroy, 6102 .scan_finished = lpfc_scan_finished, 6103 .this_id = -1, 6104 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 6105 .cmd_per_lun = LPFC_CMD_PER_LUN, 6106 .use_clustering = ENABLE_CLUSTERING, 6107 .shost_attrs = lpfc_vport_attrs, 6108 .max_sectors = 0xFFFF, 6109 .change_queue_depth = scsi_change_queue_depth, 6110 .track_queue_depth = 1, 6111 }; 6112