/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <linux/blk-cgroup.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
static void
lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
			   struct lpfc_vmid *vmp);
static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
				   *cmd, struct lpfc_vmid *vmp,
				   union lpfc_vmid_io_tag *tag);
static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
				    struct lpfc_vmid *vmid);

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
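/*
 * Illustrative sketch (not part of the driver): for a 512-byte formatted
 * device, each logical block carries one 8-byte struct scsi_dif_tuple of
 * protection data, so a hypothetical 4 KB transfer works out to
 *
 *	blksize = lpfc_cmd_blksize(sc);                    // 512
 *	numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;  // 8
 *	protlen = numblks * sizeof(struct scsi_dif_tuple); // 64 bytes
 *
 * The error-injection and BlockGuard setup paths below rely on this
 * 8-bytes-per-block relationship when they walk the protection
 * scatter/gather list.
 */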
#define LPFC_CHECK_PROTECT_GUARD	1
#define LPFC_CHECK_PROTECT_REF		2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}

static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

#define LPFC_INVALID_REFTAG ((u32)-1)

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @vport: The virtual port on which this call is executing.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
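/*
 * Worked example (assumed values, for illustration only): with
 * LPFC_LINEAR_BUCKET, bucket_base = 0 and bucket_step = 10 (ms), the
 * index computed above behaves as follows:
 *
 *	latency =   5 ms -> i = (5 + 10 - 1 - 0) / 10   = 1
 *	latency =  10 ms -> i = (10 + 10 - 1 - 0) / 10  = 1
 *	latency = 200 ms -> i = (200 + 10 - 1 - 0) / 10 = 20
 *
 * and is then clamped to [0, LPFC_MAX_BUCKET_COUNT - 1].  The
 * power-of-two bucket type instead picks the first i for which
 * latency <= bucket_base + (1 << i) * bucket_step.
 */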
/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for all scsi devices on
 * each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
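/*
 * Worked example (hypothetical numbers): if a device sits at queue_depth 32
 * when the handler runs with num_rsrc_err = 8 and num_cmd_success = 24,
 * the arithmetic above gives
 *
 *	new_queue_depth = 32 * 8 / (8 + 24) = 8;   // amount to shed
 *	new_queue_depth = 32 - 8 = 24;             // depth actually set
 *
 * i.e. the depth is reduced roughly in proportion to the share of resource
 * errors seen since the last ramp-down.
 */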
/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}


		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_io_buf_list, list) {
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
				continue;

			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri.
 **/
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
			 struct sli4_wcqe_xri_aborted *axri, int idx)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
	struct scsi_cmnd *cmd;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_io_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&qp->lpfc_abts_io_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&psb->list);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			psb->status = IOSTAT_SUCCESS;
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
				qp->abts_nvme_io_bufs--;
				spin_unlock(&qp->abts_io_buf_list_lock);
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
				return;
			}
			qp->abts_scsi_io_bufs--;
			spin_unlock(&qp->abts_io_buf_list_lock);

			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}

			if (phba->cfg_fcp_wait_abts_rsp) {
				spin_lock_irqsave(&psb->buf_lock, iflag);
				cmd = psb->pCmd;
				psb->pCmd = NULL;
				spin_unlock_irqrestore(&psb->buf_lock, iflag);

				/* The sdev is not guaranteed to be valid post
				 * scsi_done upcall.
				 */
				if (cmd)
					cmd->scsi_done(cmd);

				/*
				 * We expect there is an abort thread waiting
				 * for command completion wake up the thread.
				 */
				spin_lock_irqsave(&psb->buf_lock, iflag);
				psb->cur_iocbq.iocb_flag &=
					~LPFC_DRIVER_ABORTED;
				if (psb->waitq)
					wake_up(psb->waitq);
				spin_unlock_irqrestore(&psb->buf_lock, iflag);
			}

			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&qp->abts_io_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		psb->flags &= ~LPFC_SBUF_XBUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return  lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;

	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		tag = blk_mq_unique_tag(cmnd->request);
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/* Setup key fields in buffer that may have been changed
	 * if other protocols used this buffer.
	 */
	lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list bdes.  Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return  lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
	return  phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}
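/*
 * Note on the SLI-3 two-list scheme (informal sketch): free buffers are
 * consumed from lpfc_scsi_buf_list_get by lpfc_get_scsi_buf_s3() and
 * returned to lpfc_scsi_buf_list_put by lpfc_release_scsi_buf_s3(), each
 * list guarded by its own spinlock.  Only when the get list runs dry does
 * the allocator take the put lock and splice the put list across, so the
 * common allocate and release paths normally do not contend on the same
 * lock.
 */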
/**
 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @hdwq io_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if the buffer
 * was aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
	if (psb->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		if (!phba->cfg_fcp_wait_abts_rsp)
			psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}

/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
		atomic_dec(&psb->ndlp->cmd_pending);

	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}
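/*
 * Illustrative example (not driver code): the copy above moves the FCP_CMND
 * payload one 32-bit word at a time, byte-swapping each word to big-endian
 * wire order.  On a little-endian host a word read as 0x11223344 is written
 * back as 0x44332211 in host byte order, which places the bytes
 * 11 22 33 44 on the wire; on big-endian hosts cpu_to_be32() is a no-op and
 * the copy is verbatim.
 */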
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for a device with SLI-3 interface spec. This
 * routine scans through the sg elements and formats the bdes. This routine
 * also initializes all IOCB fields which are dependent on the scsi command
 * request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9064 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg.  Config %d, seg_cnt"
					" %d\n", __func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	u32 lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = t10_pi_ref_tag(sc->request);
	if (lba == LPFC_INVALID_REFTAG)
		return 0;

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if (phba->lpfc_injerr_lba < (u64)lba ||
		    (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - (u64)lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid  &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}


	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				fallthrough;

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
#endif
/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}
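/*
 * Summary of the mapping above for the IP-checksum guard case, kept here
 * for reference; rxop/txop describe what the port does on the receive and
 * transmit legs of the exchange:
 *
 *	READ_INSERT / WRITE_STRIP : rx IN_NODIF_OUT_CSUM, tx IN_CSUM_OUT_NODIF
 *	READ_STRIP  / WRITE_INSERT: rx IN_CRC_OUT_NODIF,  tx IN_NODIF_OUT_CRC
 *	READ_PASS   / WRITE_PASS  : rx IN_CRC_OUT_CSUM,   tx IN_CSUM_OUT_CRC
 *
 * With a CRC guard (the else branch) the CSUM variants are replaced by
 * their CRC counterparts.
 */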
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif

/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	reftag = t10_pi_ref_tag(sc->request);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
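/*
 * Sizing sketch (hypothetical command): for a no-DIF protection group the
 * BPL built above always spends two BDEs on PDE5/PDE6 and then one BDE per
 * mapped data segment, so a command whose data maps to 4 segments returns
 *
 *	num_bde = 2 + 4 = 6;
 *
 * which must stay within the segment limits the driver established at
 * probe time.
 */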
/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = t10_pi_ref_tag(sc->request);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
necessary for PDE5 */ 1821 pde5->word0 = cpu_to_le32(pde5->word0); 1822 pde5->reftag = cpu_to_le32(reftag); 1823 1824 /* advance bpl and increment bde count */ 1825 num_bde++; 1826 bpl++; 1827 pde6 = (struct lpfc_pde6 *) bpl; 1828 1829 /* setup PDE6 with the rest of the info */ 1830 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1831 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1832 bf_set(pde6_optx, pde6, txop); 1833 bf_set(pde6_oprx, pde6, rxop); 1834 1835 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) 1836 bf_set(pde6_ce, pde6, checking); 1837 else 1838 bf_set(pde6_ce, pde6, 0); 1839 1840 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 1841 bf_set(pde6_re, pde6, checking); 1842 else 1843 bf_set(pde6_re, pde6, 0); 1844 1845 bf_set(pde6_ai, pde6, 1); 1846 bf_set(pde6_ae, pde6, 0); 1847 bf_set(pde6_apptagval, pde6, 0); 1848 1849 /* Endianness conversion if necessary for PDE6 */ 1850 pde6->word0 = cpu_to_le32(pde6->word0); 1851 pde6->word1 = cpu_to_le32(pde6->word1); 1852 pde6->word2 = cpu_to_le32(pde6->word2); 1853 1854 /* advance bpl and increment bde count */ 1855 num_bde++; 1856 bpl++; 1857 1858 /* setup the first BDE that points to protection buffer */ 1859 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 1860 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 1861 1862 /* must be integer multiple of the DIF block length */ 1863 BUG_ON(protgroup_len % 8); 1864 1865 pde7 = (struct lpfc_pde7 *) bpl; 1866 memset(pde7, 0, sizeof(struct lpfc_pde7)); 1867 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 1868 1869 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); 1870 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); 1871 1872 protgrp_blks = protgroup_len / 8; 1873 protgrp_bytes = protgrp_blks * blksize; 1874 1875 /* check if this pde is crossing the 4K boundary; if so split */ 1876 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { 1877 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 1878 protgroup_offset += protgroup_remainder; 1879 protgrp_blks = protgroup_remainder / 8; 1880 protgrp_bytes = protgrp_blks * blksize; 1881 } else { 1882 protgroup_offset = 0; 1883 curr_prot++; 1884 } 1885 1886 num_bde++; 1887 1888 /* setup BDE's for data blocks associated with DIF data */ 1889 pgdone = 0; 1890 subtotal = 0; /* total bytes processed for current prot grp */ 1891 while (!pgdone) { 1892 /* Check to see if we ran out of space */ 1893 if (num_bde >= phba->cfg_total_seg_cnt) 1894 return num_bde + 1; 1895 1896 if (!sgde) { 1897 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1898 "9065 BLKGRD:%s Invalid data segment\n", 1899 __func__); 1900 return 0; 1901 } 1902 bpl++; 1903 dataphysaddr = sg_dma_address(sgde) + split_offset; 1904 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); 1905 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); 1906 1907 remainder = sg_dma_len(sgde) - split_offset; 1908 1909 if ((subtotal + remainder) <= protgrp_bytes) { 1910 /* we can use this whole buffer */ 1911 bpl->tus.f.bdeSize = remainder; 1912 split_offset = 0; 1913 1914 if ((subtotal + remainder) == protgrp_bytes) 1915 pgdone = 1; 1916 } else { 1917 /* must split this buffer with next prot grp */ 1918 bpl->tus.f.bdeSize = protgrp_bytes - subtotal; 1919 split_offset += bpl->tus.f.bdeSize; 1920 } 1921 1922 subtotal += bpl->tus.f.bdeSize; 1923 1924 if (datadir == DMA_TO_DEVICE) 1925 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1926 else 1927 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1928 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1929 1930 num_bde++; 1931 curr_data++; 1932 1933 if 
(split_offset) 1934 break; 1935 1936 /* Move to the next s/g segment if possible */ 1937 sgde = sg_next(sgde); 1938 1939 } 1940 1941 if (protgroup_offset) { 1942 /* update the reference tag */ 1943 reftag += protgrp_blks; 1944 bpl++; 1945 continue; 1946 } 1947 1948 /* are we done ? */ 1949 if (curr_prot == protcnt) { 1950 alldone = 1; 1951 } else if (curr_prot < protcnt) { 1952 /* advance to next prot buffer */ 1953 sgpe = sg_next(sgpe); 1954 bpl++; 1955 1956 /* update the reference tag */ 1957 reftag += protgrp_blks; 1958 } else { 1959 /* if we're here, we have a bug */ 1960 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1961 "9054 BLKGRD: bug in %s\n", __func__); 1962 } 1963 1964 } while (!alldone); 1965 out: 1966 1967 return num_bde; 1968 } 1969 1970 /** 1971 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data 1972 * @phba: The Hba for which this call is being executed. 1973 * @sc: pointer to scsi command we're working on 1974 * @sgl: pointer to buffer list for protection groups 1975 * @datasegcnt: number of segments of data that have been dma mapped 1976 * @lpfc_cmd: lpfc scsi command object pointer. 1977 * 1978 * This function sets up SGL buffer list for protection groups of 1979 * type LPFC_PG_TYPE_NO_DIF 1980 * 1981 * This is usually used when the HBA is instructed to generate 1982 * DIFs and insert them into data stream (or strip DIF from 1983 * incoming data stream) 1984 * 1985 * The buffer list consists of just one protection group described 1986 * below: 1987 * +-------------------------+ 1988 * start of prot group --> | DI_SEED | 1989 * +-------------------------+ 1990 * | Data SGE | 1991 * +-------------------------+ 1992 * |more Data SGE's ... (opt)| 1993 * +-------------------------+ 1994 * 1995 * 1996 * Note: Data s/g buffers have been dma mapped 1997 * 1998 * Returns the number of SGEs added to the SGL. 1999 **/ 2000 static int 2001 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2002 struct sli4_sge *sgl, int datasegcnt, 2003 struct lpfc_io_buf *lpfc_cmd) 2004 { 2005 struct scatterlist *sgde = NULL; /* s/g data entry */ 2006 struct sli4_sge_diseed *diseed = NULL; 2007 dma_addr_t physaddr; 2008 int i = 0, num_sge = 0, status; 2009 uint32_t reftag; 2010 uint8_t txop, rxop; 2011 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2012 uint32_t rc; 2013 #endif 2014 uint32_t checking = 1; 2015 uint32_t dma_len; 2016 uint32_t dma_offset = 0; 2017 struct sli4_hybrid_sgl *sgl_xtra = NULL; 2018 int j; 2019 bool lsp_just_set = false; 2020 2021 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2022 if (status) 2023 goto out; 2024 2025 /* extract some info from the scsi command for pde*/ 2026 reftag = t10_pi_ref_tag(sc->request); 2027 if (reftag == LPFC_INVALID_REFTAG) 2028 goto out; 2029 2030 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2031 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2032 if (rc) { 2033 if (rc & BG_ERR_SWAP) 2034 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2035 if (rc & BG_ERR_CHECK) 2036 checking = 0; 2037 } 2038 #endif 2039 2040 /* setup DISEED with what we have */ 2041 diseed = (struct sli4_sge_diseed *) sgl; 2042 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2043 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2044 2045 /* Endianness conversion if necessary */ 2046 diseed->ref_tag = cpu_to_le32(reftag); 2047 diseed->ref_tag_tran = diseed->ref_tag; 2048 2049 /* 2050 * We only need to check the data on READs, for WRITEs 2051 * protection data is automatically generated, not checked. 
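 * (This routine handles the strip/insert cases, so on a write the HBA
 * inserts freshly generated protection data and there is nothing
 * host-supplied for it to verify.)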
2052 */ 2053 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 2054 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) 2055 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2056 else 2057 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2058 2059 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 2060 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2061 else 2062 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2063 } 2064 2065 /* setup DISEED with the rest of the info */ 2066 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2067 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2068 2069 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2070 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2071 2072 /* Endianness conversion if necessary for DISEED */ 2073 diseed->word2 = cpu_to_le32(diseed->word2); 2074 diseed->word3 = cpu_to_le32(diseed->word3); 2075 2076 /* advance bpl and increment sge count */ 2077 num_sge++; 2078 sgl++; 2079 2080 /* assumption: caller has already run dma_map_sg on command data */ 2081 sgde = scsi_sglist(sc); 2082 j = 3; 2083 for (i = 0; i < datasegcnt; i++) { 2084 /* clear it */ 2085 sgl->word2 = 0; 2086 2087 /* do we need to expand the segment */ 2088 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && 2089 ((datasegcnt - 1) != i)) { 2090 /* set LSP type */ 2091 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2092 2093 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2094 2095 if (unlikely(!sgl_xtra)) { 2096 lpfc_cmd->seg_cnt = 0; 2097 return 0; 2098 } 2099 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2100 sgl_xtra->dma_phys_sgl)); 2101 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2102 sgl_xtra->dma_phys_sgl)); 2103 2104 } else { 2105 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2106 } 2107 2108 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { 2109 if ((datasegcnt - 1) == i) 2110 bf_set(lpfc_sli4_sge_last, sgl, 1); 2111 physaddr = sg_dma_address(sgde); 2112 dma_len = sg_dma_len(sgde); 2113 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2114 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2115 2116 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2117 sgl->word2 = cpu_to_le32(sgl->word2); 2118 sgl->sge_len = cpu_to_le32(dma_len); 2119 2120 dma_offset += dma_len; 2121 sgde = sg_next(sgde); 2122 2123 sgl++; 2124 num_sge++; 2125 lsp_just_set = false; 2126 2127 } else { 2128 sgl->word2 = cpu_to_le32(sgl->word2); 2129 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2130 2131 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2132 i = i - 1; 2133 2134 lsp_just_set = true; 2135 } 2136 2137 j++; 2138 2139 } 2140 2141 out: 2142 return num_sge; 2143 } 2144 2145 /** 2146 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data 2147 * @phba: The Hba for which this call is being executed. 2148 * @sc: pointer to scsi command we're working on 2149 * @sgl: pointer to buffer list for protection groups 2150 * @datacnt: number of segments of data that have been dma mapped 2151 * @protcnt: number of segment of protection data that have been dma mapped 2152 * @lpfc_cmd: lpfc scsi command object pointer. 2153 * 2154 * This function sets up SGL buffer list for protection groups of 2155 * type LPFC_PG_TYPE_DIF 2156 * 2157 * This is usually used when DIFs are in their own buffers, 2158 * separate from the data. The HBA can then by instructed 2159 * to place the DIFs in the outgoing stream. For read operations, 2160 * The HBA could extract the DIFs and place it in DIF buffers. 
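 * (These are the LPFC_PG_TYPE_DIF_BUF cases - READ_INSERT, WRITE_STRIP,
 * READ_PASS and WRITE_PASS - where a separate protection scatterlist is
 * handed to the driver alongside the data.)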
2161 * 2162 * The buffer list for this type consists of one or more of the 2163 * protection groups described below: 2164 * +-------------------------+ 2165 * start of first prot group --> | DISEED | 2166 * +-------------------------+ 2167 * | DIF (Prot SGE) | 2168 * +-------------------------+ 2169 * | Data SGE | 2170 * +-------------------------+ 2171 * |more Data SGE's ... (opt)| 2172 * +-------------------------+ 2173 * start of new prot group --> | DISEED | 2174 * +-------------------------+ 2175 * | ... | 2176 * +-------------------------+ 2177 * 2178 * Note: It is assumed that both data and protection s/g buffers have been 2179 * mapped for DMA 2180 * 2181 * Returns the number of SGEs added to the SGL. 2182 **/ 2183 static int 2184 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2185 struct sli4_sge *sgl, int datacnt, int protcnt, 2186 struct lpfc_io_buf *lpfc_cmd) 2187 { 2188 struct scatterlist *sgde = NULL; /* s/g data entry */ 2189 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 2190 struct sli4_sge_diseed *diseed = NULL; 2191 dma_addr_t dataphysaddr, protphysaddr; 2192 unsigned short curr_data = 0, curr_prot = 0; 2193 unsigned int split_offset; 2194 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 2195 unsigned int protgrp_blks, protgrp_bytes; 2196 unsigned int remainder, subtotal; 2197 int status; 2198 unsigned char pgdone = 0, alldone = 0; 2199 unsigned blksize; 2200 uint32_t reftag; 2201 uint8_t txop, rxop; 2202 uint32_t dma_len; 2203 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2204 uint32_t rc; 2205 #endif 2206 uint32_t checking = 1; 2207 uint32_t dma_offset = 0; 2208 int num_sge = 0, j = 2; 2209 struct sli4_hybrid_sgl *sgl_xtra = NULL; 2210 2211 sgpe = scsi_prot_sglist(sc); 2212 sgde = scsi_sglist(sc); 2213 2214 if (!sgpe || !sgde) { 2215 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2216 "9082 Invalid s/g entry: data=x%px prot=x%px\n", 2217 sgpe, sgde); 2218 return 0; 2219 } 2220 2221 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2222 if (status) 2223 goto out; 2224 2225 /* extract some info from the scsi command */ 2226 blksize = lpfc_cmd_blksize(sc); 2227 reftag = t10_pi_ref_tag(sc->request); 2228 if (reftag == LPFC_INVALID_REFTAG) 2229 goto out; 2230 2231 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2232 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2233 if (rc) { 2234 if (rc & BG_ERR_SWAP) 2235 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2236 if (rc & BG_ERR_CHECK) 2237 checking = 0; 2238 } 2239 #endif 2240 2241 split_offset = 0; 2242 do { 2243 /* Check to see if we ran out of space */ 2244 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && 2245 !(phba->cfg_xpsgl)) 2246 return num_sge + 3; 2247 2248 /* DISEED and DIF have to be together */ 2249 if (!((j + 1) % phba->border_sge_num) || 2250 !((j + 2) % phba->border_sge_num) || 2251 !((j + 3) % phba->border_sge_num)) { 2252 sgl->word2 = 0; 2253 2254 /* set LSP type */ 2255 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2256 2257 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2258 2259 if (unlikely(!sgl_xtra)) { 2260 goto out; 2261 } else { 2262 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2263 sgl_xtra->dma_phys_sgl)); 2264 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2265 sgl_xtra->dma_phys_sgl)); 2266 } 2267 2268 sgl->word2 = cpu_to_le32(sgl->word2); 2269 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2270 2271 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2272 j = 0; 2273 } 2274 2275 /* setup DISEED with what we have */ 2276 diseed = (struct sli4_sge_diseed *) sgl; 
2277 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2278 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2279 2280 /* Endianness conversion if necessary */ 2281 diseed->ref_tag = cpu_to_le32(reftag); 2282 diseed->ref_tag_tran = diseed->ref_tag; 2283 2284 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) { 2285 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2286 2287 } else { 2288 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2289 /* 2290 * When in this mode, the hardware will replace 2291 * the guard tag from the host with a 2292 * newly generated good CRC for the wire. 2293 * Switch to raw mode here to avoid this 2294 * behavior. What the host sends gets put on the wire. 2295 */ 2296 if (txop == BG_OP_IN_CRC_OUT_CRC) { 2297 txop = BG_OP_RAW_MODE; 2298 rxop = BG_OP_RAW_MODE; 2299 } 2300 } 2301 2302 2303 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 2304 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2305 else 2306 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2307 2308 /* setup DISEED with the rest of the info */ 2309 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2310 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2311 2312 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2313 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2314 2315 /* Endianness conversion if necessary for DISEED */ 2316 diseed->word2 = cpu_to_le32(diseed->word2); 2317 diseed->word3 = cpu_to_le32(diseed->word3); 2318 2319 /* advance sgl and increment bde count */ 2320 num_sge++; 2321 2322 sgl++; 2323 j++; 2324 2325 /* setup the first BDE that points to protection buffer */ 2326 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 2327 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 2328 2329 /* must be integer multiple of the DIF block length */ 2330 BUG_ON(protgroup_len % 8); 2331 2332 /* Now setup DIF SGE */ 2333 sgl->word2 = 0; 2334 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); 2335 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); 2336 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); 2337 sgl->word2 = cpu_to_le32(sgl->word2); 2338 sgl->sge_len = 0; 2339 2340 protgrp_blks = protgroup_len / 8; 2341 protgrp_bytes = protgrp_blks * blksize; 2342 2343 /* check if DIF SGE is crossing the 4K boundary; if so split */ 2344 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { 2345 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); 2346 protgroup_offset += protgroup_remainder; 2347 protgrp_blks = protgroup_remainder / 8; 2348 protgrp_bytes = protgrp_blks * blksize; 2349 } else { 2350 protgroup_offset = 0; 2351 curr_prot++; 2352 } 2353 2354 num_sge++; 2355 2356 /* setup SGE's for data blocks associated with DIF data */ 2357 pgdone = 0; 2358 subtotal = 0; /* total bytes processed for current prot grp */ 2359 2360 sgl++; 2361 j++; 2362 2363 while (!pgdone) { 2364 /* Check to see if we ran out of space */ 2365 if ((num_sge >= phba->cfg_total_seg_cnt) && 2366 !phba->cfg_xpsgl) 2367 return num_sge + 1; 2368 2369 if (!sgde) { 2370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2371 "9086 BLKGRD:%s Invalid data segment\n", 2372 __func__); 2373 return 0; 2374 } 2375 2376 if (!((j + 1) % phba->border_sge_num)) { 2377 sgl->word2 = 0; 2378 2379 /* set LSP type */ 2380 bf_set(lpfc_sli4_sge_type, sgl, 2381 LPFC_SGE_TYPE_LSP); 2382 2383 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, 2384 lpfc_cmd); 2385 2386 if (unlikely(!sgl_xtra)) { 2387 goto out; 2388 } else { 2389 sgl->addr_lo = cpu_to_le32( 2390 putPaddrLow(sgl_xtra->dma_phys_sgl)); 2391 sgl->addr_hi = cpu_to_le32( 2392 putPaddrHigh(sgl_xtra->dma_phys_sgl)); 2393 } 2394 
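/*
 * The LSP SGE covers the entire external SGL page; finish it below,
 * then continue building data SGEs from that new page.
 */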
2395 sgl->word2 = cpu_to_le32(sgl->word2); 2396 sgl->sge_len = cpu_to_le32( 2397 phba->cfg_sg_dma_buf_size); 2398 2399 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2400 } else { 2401 dataphysaddr = sg_dma_address(sgde) + 2402 split_offset; 2403 2404 remainder = sg_dma_len(sgde) - split_offset; 2405 2406 if ((subtotal + remainder) <= protgrp_bytes) { 2407 /* we can use this whole buffer */ 2408 dma_len = remainder; 2409 split_offset = 0; 2410 2411 if ((subtotal + remainder) == 2412 protgrp_bytes) 2413 pgdone = 1; 2414 } else { 2415 /* must split this buffer with next 2416 * prot grp 2417 */ 2418 dma_len = protgrp_bytes - subtotal; 2419 split_offset += dma_len; 2420 } 2421 2422 subtotal += dma_len; 2423 2424 sgl->word2 = 0; 2425 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2426 dataphysaddr)); 2427 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2428 dataphysaddr)); 2429 bf_set(lpfc_sli4_sge_last, sgl, 0); 2430 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2431 bf_set(lpfc_sli4_sge_type, sgl, 2432 LPFC_SGE_TYPE_DATA); 2433 2434 sgl->sge_len = cpu_to_le32(dma_len); 2435 dma_offset += dma_len; 2436 2437 num_sge++; 2438 curr_data++; 2439 2440 if (split_offset) { 2441 sgl++; 2442 j++; 2443 break; 2444 } 2445 2446 /* Move to the next s/g segment if possible */ 2447 sgde = sg_next(sgde); 2448 2449 sgl++; 2450 } 2451 2452 j++; 2453 } 2454 2455 if (protgroup_offset) { 2456 /* update the reference tag */ 2457 reftag += protgrp_blks; 2458 continue; 2459 } 2460 2461 /* are we done ? */ 2462 if (curr_prot == protcnt) { 2463 /* mark the last SGL */ 2464 sgl--; 2465 bf_set(lpfc_sli4_sge_last, sgl, 1); 2466 alldone = 1; 2467 } else if (curr_prot < protcnt) { 2468 /* advance to next prot buffer */ 2469 sgpe = sg_next(sgpe); 2470 2471 /* update the reference tag */ 2472 reftag += protgrp_blks; 2473 } else { 2474 /* if we're here, we have a bug */ 2475 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2476 "9085 BLKGRD: bug in %s\n", __func__); 2477 } 2478 2479 } while (!alldone); 2480 2481 out: 2482 2483 return num_sge; 2484 } 2485 2486 /** 2487 * lpfc_prot_group_type - Get prtotection group type of SCSI command 2488 * @phba: The Hba for which this call is being executed. 2489 * @sc: pointer to scsi command we're working on 2490 * 2491 * Given a SCSI command that supports DIF, determine composition of protection 2492 * groups involved in setting up buffer lists 2493 * 2494 * Returns: Protection group type (with or without DIF) 2495 * 2496 **/ 2497 static int 2498 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) 2499 { 2500 int ret = LPFC_PG_TYPE_INVALID; 2501 unsigned char op = scsi_get_prot_op(sc); 2502 2503 switch (op) { 2504 case SCSI_PROT_READ_STRIP: 2505 case SCSI_PROT_WRITE_INSERT: 2506 ret = LPFC_PG_TYPE_NO_DIF; 2507 break; 2508 case SCSI_PROT_READ_INSERT: 2509 case SCSI_PROT_WRITE_STRIP: 2510 case SCSI_PROT_READ_PASS: 2511 case SCSI_PROT_WRITE_PASS: 2512 ret = LPFC_PG_TYPE_DIF_BUF; 2513 break; 2514 default: 2515 if (phba) 2516 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2517 "9021 Unsupported protection op:%d\n", 2518 op); 2519 break; 2520 } 2521 return ret; 2522 } 2523 2524 /** 2525 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard 2526 * @phba: The Hba for which this call is being executed. 2527 * @lpfc_cmd: The scsi buffer which is going to be adjusted. 2528 * 2529 * Adjust the data length to account for how much data 2530 * is actually on the wire. 
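 * For example (assuming 512-byte blocks with DIF on the wire), a 4 KB
 * transfer becomes 4096 + (4096 / 512) * 8 = 4160 bytes once the 8-byte
 * DIF trailers are counted.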
2531 * 2532 * returns the adjusted data length 2533 **/ 2534 static int 2535 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, 2536 struct lpfc_io_buf *lpfc_cmd) 2537 { 2538 struct scsi_cmnd *sc = lpfc_cmd->pCmd; 2539 int fcpdl; 2540 2541 fcpdl = scsi_bufflen(sc); 2542 2543 /* Check if there is protection data on the wire */ 2544 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 2545 /* Read check for protection data */ 2546 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) 2547 return fcpdl; 2548 2549 } else { 2550 /* Write check for protection data */ 2551 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) 2552 return fcpdl; 2553 } 2554 2555 /* 2556 * If we are in DIF Type 1 mode every data block has a 8 byte 2557 * DIF (trailer) attached to it. Must ajust FCP data length 2558 * to account for the protection data. 2559 */ 2560 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8; 2561 2562 return fcpdl; 2563 } 2564 2565 /** 2566 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 2567 * @phba: The Hba for which this call is being executed. 2568 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 2569 * 2570 * This is the protection/DIF aware version of 2571 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 2572 * two functions eventually, but for now, it's here. 2573 * RETURNS 0 - SUCCESS, 2574 * 1 - Failed DMA map, retry. 2575 * 2 - Invalid scsi cmd or prot-type. Do not rety. 2576 **/ 2577 static int 2578 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, 2579 struct lpfc_io_buf *lpfc_cmd) 2580 { 2581 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 2582 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 2583 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; 2584 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 2585 uint32_t num_bde = 0; 2586 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 2587 int prot_group_type = 0; 2588 int fcpdl; 2589 int ret = 1; 2590 struct lpfc_vport *vport = phba->pport; 2591 2592 /* 2593 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd 2594 * fcp_rsp regions to the first data bde entry 2595 */ 2596 bpl += 2; 2597 if (scsi_sg_count(scsi_cmnd)) { 2598 /* 2599 * The driver stores the segment count returned from pci_map_sg 2600 * because this a count of dma-mappings used to map the use_sg 2601 * pages. They are not guaranteed to be the same for those 2602 * architectures that implement an IOMMU. 
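 * (An IOMMU may coalesce physically discontiguous pages into fewer DMA
 * segments, so the returned count can be smaller than scsi_sg_count().)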
2603 */ 2604 datasegcnt = dma_map_sg(&phba->pcidev->dev, 2605 scsi_sglist(scsi_cmnd), 2606 scsi_sg_count(scsi_cmnd), datadir); 2607 if (unlikely(!datasegcnt)) 2608 return 1; 2609 2610 lpfc_cmd->seg_cnt = datasegcnt; 2611 2612 /* First check if data segment count from SCSI Layer is good */ 2613 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 2614 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 2615 ret = 2; 2616 goto err; 2617 } 2618 2619 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2620 2621 switch (prot_group_type) { 2622 case LPFC_PG_TYPE_NO_DIF: 2623 2624 /* Here we need to add a PDE5 and PDE6 to the count */ 2625 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { 2626 ret = 2; 2627 goto err; 2628 } 2629 2630 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2631 datasegcnt); 2632 /* we should have 2 or more entries in buffer list */ 2633 if (num_bde < 2) { 2634 ret = 2; 2635 goto err; 2636 } 2637 break; 2638 2639 case LPFC_PG_TYPE_DIF_BUF: 2640 /* 2641 * This type indicates that protection buffers are 2642 * passed to the driver, so that needs to be prepared 2643 * for DMA 2644 */ 2645 protsegcnt = dma_map_sg(&phba->pcidev->dev, 2646 scsi_prot_sglist(scsi_cmnd), 2647 scsi_prot_sg_count(scsi_cmnd), datadir); 2648 if (unlikely(!protsegcnt)) { 2649 scsi_dma_unmap(scsi_cmnd); 2650 return 1; 2651 } 2652 2653 lpfc_cmd->prot_seg_cnt = protsegcnt; 2654 2655 /* 2656 * There is a minimun of 4 BPLs used for every 2657 * protection data segment. 2658 */ 2659 if ((lpfc_cmd->prot_seg_cnt * 4) > 2660 (phba->cfg_total_seg_cnt - 2)) { 2661 ret = 2; 2662 goto err; 2663 } 2664 2665 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2666 datasegcnt, protsegcnt); 2667 /* we should have 3 or more entries in buffer list */ 2668 if ((num_bde < 3) || 2669 (num_bde > phba->cfg_total_seg_cnt)) { 2670 ret = 2; 2671 goto err; 2672 } 2673 break; 2674 2675 case LPFC_PG_TYPE_INVALID: 2676 default: 2677 scsi_dma_unmap(scsi_cmnd); 2678 lpfc_cmd->seg_cnt = 0; 2679 2680 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2681 "9022 Unexpected protection group %i\n", 2682 prot_group_type); 2683 return 2; 2684 } 2685 } 2686 2687 /* 2688 * Finish initializing those IOCB fields that are dependent on the 2689 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly 2690 * reinitialized since all iocb memory resources are used many times 2691 * for transmit, receive, and continuation bpl's. 
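 * The BDL size set below covers the two BDEs for the fcp_cmnd/fcp_rsp
 * buffers plus one BDE for every entry added by the BlockGuard setup
 * above.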
2692 */ 2693 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 2694 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64)); 2695 iocb_cmd->ulpBdeCount = 1; 2696 iocb_cmd->ulpLe = 1; 2697 2698 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 2699 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 2700 2701 /* 2702 * Due to difference in data length between DIF/non-DIF paths, 2703 * we need to set word 4 of IOCB here 2704 */ 2705 iocb_cmd->un.fcpi.fcpi_parm = fcpdl; 2706 2707 /* 2708 * For First burst, we may need to adjust the initial transfer 2709 * length for DIF 2710 */ 2711 if (iocb_cmd->un.fcpi.fcpi_XRdy && 2712 (fcpdl < vport->cfg_first_burst_size)) 2713 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; 2714 2715 return 0; 2716 err: 2717 if (lpfc_cmd->seg_cnt) 2718 scsi_dma_unmap(scsi_cmnd); 2719 if (lpfc_cmd->prot_seg_cnt) 2720 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 2721 scsi_prot_sg_count(scsi_cmnd), 2722 scsi_cmnd->sc_data_direction); 2723 2724 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2725 "9023 Cannot setup S/G List for HBA" 2726 "IO segs %d/%d BPL %d SCSI %d: %d %d\n", 2727 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 2728 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 2729 prot_group_type, num_bde); 2730 2731 lpfc_cmd->seg_cnt = 0; 2732 lpfc_cmd->prot_seg_cnt = 0; 2733 return ret; 2734 } 2735 2736 /* 2737 * This function calcuates the T10 DIF guard tag 2738 * on the specified data using a CRC algorithmn 2739 * using crc_t10dif. 2740 */ 2741 static uint16_t 2742 lpfc_bg_crc(uint8_t *data, int count) 2743 { 2744 uint16_t crc = 0; 2745 uint16_t x; 2746 2747 crc = crc_t10dif(data, count); 2748 x = cpu_to_be16(crc); 2749 return x; 2750 } 2751 2752 /* 2753 * This function calcuates the T10 DIF guard tag 2754 * on the specified data using a CSUM algorithmn 2755 * using ip_compute_csum. 2756 */ 2757 static uint16_t 2758 lpfc_bg_csum(uint8_t *data, int count) 2759 { 2760 uint16_t ret; 2761 2762 ret = ip_compute_csum(data, count); 2763 return ret; 2764 } 2765 2766 /* 2767 * This function examines the protection data to try to determine 2768 * what type of T10-DIF error occurred. 2769 */ 2770 static void 2771 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 2772 { 2773 struct scatterlist *sgpe; /* s/g prot entry */ 2774 struct scatterlist *sgde; /* s/g data entry */ 2775 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2776 struct scsi_dif_tuple *src = NULL; 2777 uint8_t *data_src = NULL; 2778 uint16_t guard_tag; 2779 uint16_t start_app_tag, app_tag; 2780 uint32_t start_ref_tag, ref_tag; 2781 int prot, protsegcnt; 2782 int err_type, len, data_len; 2783 int chk_ref, chk_app, chk_guard; 2784 uint16_t sum; 2785 unsigned blksize; 2786 2787 err_type = BGS_GUARD_ERR_MASK; 2788 sum = 0; 2789 guard_tag = 0; 2790 2791 /* First check to see if there is protection data to examine */ 2792 prot = scsi_get_prot_op(cmd); 2793 if ((prot == SCSI_PROT_READ_STRIP) || 2794 (prot == SCSI_PROT_WRITE_INSERT) || 2795 (prot == SCSI_PROT_NORMAL)) 2796 goto out; 2797 2798 /* Currently the driver just supports ref_tag and guard_tag checking */ 2799 chk_ref = 1; 2800 chk_app = 0; 2801 chk_guard = 0; 2802 2803 /* Setup a ptr to the protection data provided by the SCSI host */ 2804 sgpe = scsi_prot_sglist(cmd); 2805 protsegcnt = lpfc_cmd->prot_seg_cnt; 2806 2807 if (sgpe && protsegcnt) { 2808 2809 /* 2810 * We will only try to verify guard tag if the segment 2811 * data length is a multiple of the blksize. 
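 * Otherwise a logical block could straddle two s/g entries and its
 * guard checksum could not be computed from a single mapped buffer.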
2812 */ 2813 sgde = scsi_sglist(cmd); 2814 blksize = lpfc_cmd_blksize(cmd); 2815 data_src = (uint8_t *)sg_virt(sgde); 2816 data_len = sgde->length; 2817 if ((data_len & (blksize - 1)) == 0) 2818 chk_guard = 1; 2819 2820 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2821 start_ref_tag = t10_pi_ref_tag(cmd->request); 2822 if (start_ref_tag == LPFC_INVALID_REFTAG) 2823 goto out; 2824 start_app_tag = src->app_tag; 2825 len = sgpe->length; 2826 while (src && protsegcnt) { 2827 while (len) { 2828 2829 /* 2830 * First check to see if a protection data 2831 * check is valid 2832 */ 2833 if ((src->ref_tag == T10_PI_REF_ESCAPE) || 2834 (src->app_tag == T10_PI_APP_ESCAPE)) { 2835 start_ref_tag++; 2836 goto skipit; 2837 } 2838 2839 /* First Guard Tag checking */ 2840 if (chk_guard) { 2841 guard_tag = src->guard_tag; 2842 if (lpfc_cmd_guard_csum(cmd)) 2843 sum = lpfc_bg_csum(data_src, 2844 blksize); 2845 else 2846 sum = lpfc_bg_crc(data_src, 2847 blksize); 2848 if ((guard_tag != sum)) { 2849 err_type = BGS_GUARD_ERR_MASK; 2850 goto out; 2851 } 2852 } 2853 2854 /* Reference Tag checking */ 2855 ref_tag = be32_to_cpu(src->ref_tag); 2856 if (chk_ref && (ref_tag != start_ref_tag)) { 2857 err_type = BGS_REFTAG_ERR_MASK; 2858 goto out; 2859 } 2860 start_ref_tag++; 2861 2862 /* App Tag checking */ 2863 app_tag = src->app_tag; 2864 if (chk_app && (app_tag != start_app_tag)) { 2865 err_type = BGS_APPTAG_ERR_MASK; 2866 goto out; 2867 } 2868 skipit: 2869 len -= sizeof(struct scsi_dif_tuple); 2870 if (len < 0) 2871 len = 0; 2872 src++; 2873 2874 data_src += blksize; 2875 data_len -= blksize; 2876 2877 /* 2878 * Are we at the end of the Data segment? 2879 * The data segment is only used for Guard 2880 * tag checking. 2881 */ 2882 if (chk_guard && (data_len == 0)) { 2883 chk_guard = 0; 2884 sgde = sg_next(sgde); 2885 if (!sgde) 2886 goto out; 2887 2888 data_src = (uint8_t *)sg_virt(sgde); 2889 data_len = sgde->length; 2890 if ((data_len & (blksize - 1)) == 0) 2891 chk_guard = 1; 2892 } 2893 } 2894 2895 /* Goto the next Protection data segment */ 2896 sgpe = sg_next(sgpe); 2897 if (sgpe) { 2898 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2899 len = sgpe->length; 2900 } else { 2901 src = NULL; 2902 } 2903 protsegcnt--; 2904 } 2905 } 2906 out: 2907 if (err_type == BGS_GUARD_ERR_MASK) { 2908 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2909 set_host_byte(cmd, DID_ABORT); 2910 phba->bg_guard_err_cnt++; 2911 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2912 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", 2913 t10_pi_ref_tag(cmd->request), 2914 sum, guard_tag); 2915 2916 } else if (err_type == BGS_REFTAG_ERR_MASK) { 2917 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2918 set_host_byte(cmd, DID_ABORT); 2919 2920 phba->bg_reftag_err_cnt++; 2921 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2922 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n", 2923 t10_pi_ref_tag(cmd->request), 2924 ref_tag, start_ref_tag); 2925 2926 } else if (err_type == BGS_APPTAG_ERR_MASK) { 2927 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2928 set_host_byte(cmd, DID_ABORT); 2929 2930 phba->bg_apptag_err_cnt++; 2931 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2932 "9041 BLKGRD: reftag %x app_tag err %x != %x\n", 2933 t10_pi_ref_tag(cmd->request), 2934 app_tag, start_app_tag); 2935 } 2936 } 2937 2938 /* 2939 * This function checks for BlockGuard errors detected by 2940 * the HBA. 
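 * Guard, reference tag and application tag failures are reported as
 * separate status bits in the completion WCQE.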
In case of errors, the ASC/ASCQ fields in the 2941 * sense buffer will be set accordingly, paired with 2942 * ILLEGAL_REQUEST to signal to the kernel that the HBA 2943 * detected corruption. 2944 * 2945 * Returns: 2946 * 0 - No error found 2947 * 1 - BlockGuard error found 2948 * -1 - Internal error (bad profile, ...etc) 2949 */ 2950 static int 2951 lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, 2952 struct lpfc_wcqe_complete *wcqe) 2953 { 2954 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2955 int ret = 0; 2956 u32 status = bf_get(lpfc_wcqe_c_status, wcqe); 2957 u32 bghm = 0; 2958 u32 bgstat = 0; 2959 u64 failing_sector = 0; 2960 2961 if (status == CQE_STATUS_DI_ERROR) { 2962 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 2963 bgstat |= BGS_GUARD_ERR_MASK; 2964 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */ 2965 bgstat |= BGS_APPTAG_ERR_MASK; 2966 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */ 2967 bgstat |= BGS_REFTAG_ERR_MASK; 2968 2969 /* Check to see if there was any good data before the error */ 2970 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 2971 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK; 2972 bghm = wcqe->total_data_placed; 2973 } 2974 2975 /* 2976 * Set ALL the error bits to indicate we don't know what 2977 * type of error it is. 2978 */ 2979 if (!bgstat) 2980 bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 2981 BGS_GUARD_ERR_MASK); 2982 } 2983 2984 if (lpfc_bgs_get_guard_err(bgstat)) { 2985 ret = 1; 2986 2987 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2988 set_host_byte(cmd, DID_ABORT); 2989 phba->bg_guard_err_cnt++; 2990 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2991 "9059 BLKGRD: Guard Tag error in cmd" 2992 " 0x%x lba 0x%llx blk cnt 0x%x " 2993 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2994 (unsigned long long)scsi_get_lba(cmd), 2995 blk_rq_sectors(cmd->request), bgstat, bghm); 2996 } 2997 2998 if (lpfc_bgs_get_reftag_err(bgstat)) { 2999 ret = 1; 3000 3001 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 3002 set_host_byte(cmd, DID_ABORT); 3003 3004 phba->bg_reftag_err_cnt++; 3005 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3006 "9060 BLKGRD: Ref Tag error in cmd" 3007 " 0x%x lba 0x%llx blk cnt 0x%x " 3008 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3009 (unsigned long long)scsi_get_lba(cmd), 3010 blk_rq_sectors(cmd->request), bgstat, bghm); 3011 } 3012 3013 if (lpfc_bgs_get_apptag_err(bgstat)) { 3014 ret = 1; 3015 3016 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 3017 set_host_byte(cmd, DID_ABORT); 3018 3019 phba->bg_apptag_err_cnt++; 3020 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3021 "9062 BLKGRD: App Tag error in cmd" 3022 " 0x%x lba 0x%llx blk cnt 0x%x " 3023 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3024 (unsigned long long)scsi_get_lba(cmd), 3025 blk_rq_sectors(cmd->request), bgstat, bghm); 3026 } 3027 3028 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3029 /* 3030 * setup sense data descriptor 0 per SPC-4 as an information 3031 * field, and put the failing LBA in it. 3032 * This code assumes there was also a guard/app/ref tag error 3033 * indication. 
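 * The information descriptor occupies sense bytes 8-19; the failing
 * LBA is stored big-endian in bytes 12-19 below.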
3034 */ 3035 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 3036 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 3037 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 3038 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 3039 3040 /* bghm is a "on the wire" FC frame based count */ 3041 switch (scsi_get_prot_op(cmd)) { 3042 case SCSI_PROT_READ_INSERT: 3043 case SCSI_PROT_WRITE_STRIP: 3044 bghm /= cmd->device->sector_size; 3045 break; 3046 case SCSI_PROT_READ_STRIP: 3047 case SCSI_PROT_WRITE_INSERT: 3048 case SCSI_PROT_READ_PASS: 3049 case SCSI_PROT_WRITE_PASS: 3050 bghm /= (cmd->device->sector_size + 3051 sizeof(struct scsi_dif_tuple)); 3052 break; 3053 } 3054 3055 failing_sector = scsi_get_lba(cmd); 3056 failing_sector += bghm; 3057 3058 /* Descriptor Information */ 3059 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3060 } 3061 3062 if (!ret) { 3063 /* No error was reported - problem in FW? */ 3064 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3065 "9068 BLKGRD: Unknown error in cmd" 3066 " 0x%x lba 0x%llx blk cnt 0x%x " 3067 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3068 (unsigned long long)scsi_get_lba(cmd), 3069 blk_rq_sectors(cmd->request), bgstat, bghm); 3070 3071 /* Calcuate what type of error it was */ 3072 lpfc_calc_bg_err(phba, lpfc_cmd); 3073 } 3074 return ret; 3075 } 3076 3077 /* 3078 * This function checks for BlockGuard errors detected by 3079 * the HBA. In case of errors, the ASC/ASCQ fields in the 3080 * sense buffer will be set accordingly, paired with 3081 * ILLEGAL_REQUEST to signal to the kernel that the HBA 3082 * detected corruption. 3083 * 3084 * Returns: 3085 * 0 - No error found 3086 * 1 - BlockGuard error found 3087 * -1 - Internal error (bad profile, ...etc) 3088 */ 3089 static int 3090 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, 3091 struct lpfc_iocbq *pIocbOut) 3092 { 3093 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 3094 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg; 3095 int ret = 0; 3096 uint32_t bghm = bgf->bghm; 3097 uint32_t bgstat = bgf->bgstat; 3098 uint64_t failing_sector = 0; 3099 3100 if (lpfc_bgs_get_invalid_prof(bgstat)) { 3101 cmd->result = DID_ERROR << 16; 3102 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3103 "9072 BLKGRD: Invalid BG Profile in cmd " 3104 "0x%x reftag 0x%x blk cnt 0x%x " 3105 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3106 t10_pi_ref_tag(cmd->request), 3107 blk_rq_sectors(cmd->request), bgstat, bghm); 3108 ret = (-1); 3109 goto out; 3110 } 3111 3112 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 3113 cmd->result = DID_ERROR << 16; 3114 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3115 "9073 BLKGRD: Invalid BG PDIF Block in cmd " 3116 "0x%x reftag 0x%x blk cnt 0x%x " 3117 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3118 t10_pi_ref_tag(cmd->request), 3119 blk_rq_sectors(cmd->request), bgstat, bghm); 3120 ret = (-1); 3121 goto out; 3122 } 3123 3124 if (lpfc_bgs_get_guard_err(bgstat)) { 3125 ret = 1; 3126 3127 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 3128 set_host_byte(cmd, DID_ABORT); 3129 phba->bg_guard_err_cnt++; 3130 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3131 "9055 BLKGRD: Guard Tag error in cmd " 3132 "0x%x reftag 0x%x blk cnt 0x%x " 3133 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3134 t10_pi_ref_tag(cmd->request), 3135 blk_rq_sectors(cmd->request), bgstat, bghm); 3136 } 3137 3138 if (lpfc_bgs_get_reftag_err(bgstat)) { 3139 ret = 1; 3140 3141 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 3142 
set_host_byte(cmd, DID_ABORT); 3143 3144 phba->bg_reftag_err_cnt++; 3145 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3146 "9056 BLKGRD: Ref Tag error in cmd " 3147 "0x%x reftag 0x%x blk cnt 0x%x " 3148 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3149 t10_pi_ref_tag(cmd->request), 3150 blk_rq_sectors(cmd->request), bgstat, bghm); 3151 } 3152 3153 if (lpfc_bgs_get_apptag_err(bgstat)) { 3154 ret = 1; 3155 3156 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 3157 set_host_byte(cmd, DID_ABORT); 3158 3159 phba->bg_apptag_err_cnt++; 3160 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3161 "9061 BLKGRD: App Tag error in cmd " 3162 "0x%x reftag 0x%x blk cnt 0x%x " 3163 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3164 t10_pi_ref_tag(cmd->request), 3165 blk_rq_sectors(cmd->request), bgstat, bghm); 3166 } 3167 3168 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3169 /* 3170 * setup sense data descriptor 0 per SPC-4 as an information 3171 * field, and put the failing LBA in it. 3172 * This code assumes there was also a guard/app/ref tag error 3173 * indication. 3174 */ 3175 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 3176 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 3177 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 3178 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 3179 3180 /* bghm is a "on the wire" FC frame based count */ 3181 switch (scsi_get_prot_op(cmd)) { 3182 case SCSI_PROT_READ_INSERT: 3183 case SCSI_PROT_WRITE_STRIP: 3184 bghm /= cmd->device->sector_size; 3185 break; 3186 case SCSI_PROT_READ_STRIP: 3187 case SCSI_PROT_WRITE_INSERT: 3188 case SCSI_PROT_READ_PASS: 3189 case SCSI_PROT_WRITE_PASS: 3190 bghm /= (cmd->device->sector_size + 3191 sizeof(struct scsi_dif_tuple)); 3192 break; 3193 } 3194 3195 failing_sector = scsi_get_lba(cmd); 3196 failing_sector += bghm; 3197 3198 /* Descriptor Information */ 3199 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3200 } 3201 3202 if (!ret) { 3203 /* No error was reported - problem in FW? */ 3204 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3205 "9057 BLKGRD: Unknown error in cmd " 3206 "0x%x reftag 0x%x blk cnt 0x%x " 3207 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3208 t10_pi_ref_tag(cmd->request), 3209 blk_rq_sectors(cmd->request), bgstat, bghm); 3210 3211 /* Calcuate what type of error it was */ 3212 lpfc_calc_bg_err(phba, lpfc_cmd); 3213 } 3214 out: 3215 return ret; 3216 } 3217 3218 /** 3219 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3220 * @phba: The Hba for which this call is being executed. 3221 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3222 * 3223 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 3224 * field of @lpfc_cmd for device with SLI-4 interface spec. 
3225 * 3226 * Return codes: 3227 * 2 - Error - Do not retry 3228 * 1 - Error - Retry 3229 * 0 - Success 3230 **/ 3231 static int 3232 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3233 { 3234 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3235 struct scatterlist *sgel = NULL; 3236 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3237 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 3238 struct sli4_sge *first_data_sgl; 3239 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3240 struct lpfc_vport *vport = phba->pport; 3241 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3242 dma_addr_t physaddr; 3243 uint32_t num_bde = 0; 3244 uint32_t dma_len; 3245 uint32_t dma_offset = 0; 3246 int nseg, i, j; 3247 struct ulp_bde64 *bde; 3248 bool lsp_just_set = false; 3249 struct sli4_hybrid_sgl *sgl_xtra = NULL; 3250 3251 /* 3252 * There are three possibilities here - use scatter-gather segment, use 3253 * the single mapping, or neither. Start the lpfc command prep by 3254 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 3255 * data bde entry. 3256 */ 3257 if (scsi_sg_count(scsi_cmnd)) { 3258 /* 3259 * The driver stores the segment count returned from pci_map_sg 3260 * because this a count of dma-mappings used to map the use_sg 3261 * pages. They are not guaranteed to be the same for those 3262 * architectures that implement an IOMMU. 3263 */ 3264 3265 nseg = scsi_dma_map(scsi_cmnd); 3266 if (unlikely(nseg <= 0)) 3267 return 1; 3268 sgl += 1; 3269 /* clear the last flag in the fcp_rsp map entry */ 3270 sgl->word2 = le32_to_cpu(sgl->word2); 3271 bf_set(lpfc_sli4_sge_last, sgl, 0); 3272 sgl->word2 = cpu_to_le32(sgl->word2); 3273 sgl += 1; 3274 first_data_sgl = sgl; 3275 lpfc_cmd->seg_cnt = nseg; 3276 if (!phba->cfg_xpsgl && 3277 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3279 "9074 BLKGRD:" 3280 " %s: Too many sg segments from " 3281 "dma_map_sg. Config %d, seg_cnt %d\n", 3282 __func__, phba->cfg_sg_seg_cnt, 3283 lpfc_cmd->seg_cnt); 3284 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3285 lpfc_cmd->seg_cnt = 0; 3286 scsi_dma_unmap(scsi_cmnd); 3287 return 2; 3288 } 3289 3290 /* 3291 * The driver established a maximum scatter-gather segment count 3292 * during probe that limits the number of sg elements in any 3293 * single scsi command. Just run through the seg_cnt and format 3294 * the sge's. 3295 * When using SLI-3 the driver will try to fit all the BDEs into 3296 * the IOCB. If it can't then the BDEs get added to a BPL as it 3297 * does for SLI-2 mode. 
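 * On SLI-4 (this routine) each data segment gets its own SGE; when the
 * current SGL page fills up, an LSP SGE chains to an extra page
 * obtained from lpfc_get_sgl_per_hdwq().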
3298 */ 3299 3300 /* for tracking segment boundaries */ 3301 sgel = scsi_sglist(scsi_cmnd); 3302 j = 2; 3303 for (i = 0; i < nseg; i++) { 3304 sgl->word2 = 0; 3305 if ((num_bde + 1) == nseg) { 3306 bf_set(lpfc_sli4_sge_last, sgl, 1); 3307 bf_set(lpfc_sli4_sge_type, sgl, 3308 LPFC_SGE_TYPE_DATA); 3309 } else { 3310 bf_set(lpfc_sli4_sge_last, sgl, 0); 3311 3312 /* do we need to expand the segment */ 3313 if (!lsp_just_set && 3314 !((j + 1) % phba->border_sge_num) && 3315 ((nseg - 1) != i)) { 3316 /* set LSP type */ 3317 bf_set(lpfc_sli4_sge_type, sgl, 3318 LPFC_SGE_TYPE_LSP); 3319 3320 sgl_xtra = lpfc_get_sgl_per_hdwq( 3321 phba, lpfc_cmd); 3322 3323 if (unlikely(!sgl_xtra)) { 3324 lpfc_cmd->seg_cnt = 0; 3325 scsi_dma_unmap(scsi_cmnd); 3326 return 1; 3327 } 3328 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3329 sgl_xtra->dma_phys_sgl)); 3330 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3331 sgl_xtra->dma_phys_sgl)); 3332 3333 } else { 3334 bf_set(lpfc_sli4_sge_type, sgl, 3335 LPFC_SGE_TYPE_DATA); 3336 } 3337 } 3338 3339 if (!(bf_get(lpfc_sli4_sge_type, sgl) & 3340 LPFC_SGE_TYPE_LSP)) { 3341 if ((nseg - 1) == i) 3342 bf_set(lpfc_sli4_sge_last, sgl, 1); 3343 3344 physaddr = sg_dma_address(sgel); 3345 dma_len = sg_dma_len(sgel); 3346 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3347 physaddr)); 3348 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3349 physaddr)); 3350 3351 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 3352 sgl->word2 = cpu_to_le32(sgl->word2); 3353 sgl->sge_len = cpu_to_le32(dma_len); 3354 3355 dma_offset += dma_len; 3356 sgel = sg_next(sgel); 3357 3358 sgl++; 3359 lsp_just_set = false; 3360 3361 } else { 3362 sgl->word2 = cpu_to_le32(sgl->word2); 3363 sgl->sge_len = cpu_to_le32( 3364 phba->cfg_sg_dma_buf_size); 3365 3366 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 3367 i = i - 1; 3368 3369 lsp_just_set = true; 3370 } 3371 3372 j++; 3373 } 3374 /* 3375 * Setup the first Payload BDE. For FCoE we just key off 3376 * Performance Hints, for FC we use lpfc_enable_pbde. 3377 * We populate words 13-15 of IOCB/WQE. 3378 */ 3379 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3380 phba->cfg_enable_pbde) { 3381 bde = (struct ulp_bde64 *) 3382 &wqe->words[13]; 3383 bde->addrLow = first_data_sgl->addr_lo; 3384 bde->addrHigh = first_data_sgl->addr_hi; 3385 bde->tus.f.bdeSize = 3386 le32_to_cpu(first_data_sgl->sge_len); 3387 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3388 bde->tus.w = cpu_to_le32(bde->tus.w); 3389 3390 } else { 3391 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 3392 } 3393 } else { 3394 sgl += 1; 3395 /* clear the last flag in the fcp_rsp map entry */ 3396 sgl->word2 = le32_to_cpu(sgl->word2); 3397 bf_set(lpfc_sli4_sge_last, sgl, 1); 3398 sgl->word2 = cpu_to_le32(sgl->word2); 3399 3400 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3401 phba->cfg_enable_pbde) { 3402 bde = (struct ulp_bde64 *) 3403 &wqe->words[13]; 3404 memset(bde, 0, (sizeof(uint32_t) * 3)); 3405 } 3406 } 3407 3408 /* Word 11 */ 3409 if (phba->cfg_enable_pbde) 3410 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 3411 3412 /* 3413 * Finish initializing those IOCB fields that are dependent on the 3414 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 3415 * explicitly reinitialized. 3416 * all iocb memory resources are reused. 
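 * fcpDl is carried big-endian in the FCP_CMND IU, hence the
 * cpu_to_be32() below.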
3417 */ 3418 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 3419 /* Set first-burst provided it was successfully negotiated */ 3420 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3421 vport->cfg_first_burst_size && 3422 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3423 u32 init_len, total_len; 3424 3425 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3426 init_len = min(total_len, vport->cfg_first_burst_size); 3427 3428 /* Word 4 & 5 */ 3429 wqe->fcp_iwrite.initial_xfer_len = init_len; 3430 wqe->fcp_iwrite.total_xfer_len = total_len; 3431 } else { 3432 /* Word 4 */ 3433 wqe->fcp_iwrite.total_xfer_len = 3434 be32_to_cpu(fcp_cmnd->fcpDl); 3435 } 3436 3437 /* 3438 * If the OAS driver feature is enabled and the lun is enabled for 3439 * OAS, set the oas iocb related flags. 3440 */ 3441 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3442 scsi_cmnd->device->hostdata)->oas_enabled) { 3443 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3444 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) 3445 scsi_cmnd->device->hostdata)->priority; 3446 3447 /* Word 10 */ 3448 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3449 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3450 3451 if (lpfc_cmd->cur_iocbq.priority) 3452 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3453 (lpfc_cmd->cur_iocbq.priority << 1)); 3454 else 3455 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3456 (phba->cfg_XLanePriority << 1)); 3457 } 3458 3459 return 0; 3460 } 3461 3462 /** 3463 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3464 * @phba: The Hba for which this call is being executed. 3465 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3466 * 3467 * This is the protection/DIF aware version of 3468 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 3469 * two functions eventually, but for now, it's here 3470 * Return codes: 3471 * 2 - Error - Do not retry 3472 * 1 - Error - Retry 3473 * 0 - Success 3474 **/ 3475 static int 3476 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, 3477 struct lpfc_io_buf *lpfc_cmd) 3478 { 3479 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3480 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3481 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); 3482 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3483 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3484 uint32_t num_sge = 0; 3485 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3486 int prot_group_type = 0; 3487 int fcpdl; 3488 int ret = 1; 3489 struct lpfc_vport *vport = phba->pport; 3490 3491 /* 3492 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd 3493 * fcp_rsp regions to the first data sge entry 3494 */ 3495 if (scsi_sg_count(scsi_cmnd)) { 3496 /* 3497 * The driver stores the segment count returned from pci_map_sg 3498 * because this a count of dma-mappings used to map the use_sg 3499 * pages. They are not guaranteed to be the same for those 3500 * architectures that implement an IOMMU. 
3501 */ 3502 datasegcnt = dma_map_sg(&phba->pcidev->dev, 3503 scsi_sglist(scsi_cmnd), 3504 scsi_sg_count(scsi_cmnd), datadir); 3505 if (unlikely(!datasegcnt)) 3506 return 1; 3507 3508 sgl += 1; 3509 /* clear the last flag in the fcp_rsp map entry */ 3510 sgl->word2 = le32_to_cpu(sgl->word2); 3511 bf_set(lpfc_sli4_sge_last, sgl, 0); 3512 sgl->word2 = cpu_to_le32(sgl->word2); 3513 3514 sgl += 1; 3515 lpfc_cmd->seg_cnt = datasegcnt; 3516 3517 /* First check if data segment count from SCSI Layer is good */ 3518 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && 3519 !phba->cfg_xpsgl) { 3520 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3521 ret = 2; 3522 goto err; 3523 } 3524 3525 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3526 3527 switch (prot_group_type) { 3528 case LPFC_PG_TYPE_NO_DIF: 3529 /* Here we need to add a DISEED to the count */ 3530 if (((lpfc_cmd->seg_cnt + 1) > 3531 phba->cfg_total_seg_cnt) && 3532 !phba->cfg_xpsgl) { 3533 ret = 2; 3534 goto err; 3535 } 3536 3537 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3538 datasegcnt, lpfc_cmd); 3539 3540 /* we should have 2 or more entries in buffer list */ 3541 if (num_sge < 2) { 3542 ret = 2; 3543 goto err; 3544 } 3545 break; 3546 3547 case LPFC_PG_TYPE_DIF_BUF: 3548 /* 3549 * This type indicates that protection buffers are 3550 * passed to the driver, so that needs to be prepared 3551 * for DMA 3552 */ 3553 protsegcnt = dma_map_sg(&phba->pcidev->dev, 3554 scsi_prot_sglist(scsi_cmnd), 3555 scsi_prot_sg_count(scsi_cmnd), datadir); 3556 if (unlikely(!protsegcnt)) { 3557 scsi_dma_unmap(scsi_cmnd); 3558 return 1; 3559 } 3560 3561 lpfc_cmd->prot_seg_cnt = protsegcnt; 3562 /* 3563 * There is a minimun of 3 SGEs used for every 3564 * protection data segment. 3565 */ 3566 if (((lpfc_cmd->prot_seg_cnt * 3) > 3567 (phba->cfg_total_seg_cnt - 2)) && 3568 !phba->cfg_xpsgl) { 3569 ret = 2; 3570 goto err; 3571 } 3572 3573 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3574 datasegcnt, protsegcnt, lpfc_cmd); 3575 3576 /* we should have 3 or more entries in buffer list */ 3577 if (num_sge < 3 || 3578 (num_sge > phba->cfg_total_seg_cnt && 3579 !phba->cfg_xpsgl)) { 3580 ret = 2; 3581 goto err; 3582 } 3583 break; 3584 3585 case LPFC_PG_TYPE_INVALID: 3586 default: 3587 scsi_dma_unmap(scsi_cmnd); 3588 lpfc_cmd->seg_cnt = 0; 3589 3590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3591 "9083 Unexpected protection group %i\n", 3592 prot_group_type); 3593 return 2; 3594 } 3595 } 3596 3597 switch (scsi_get_prot_op(scsi_cmnd)) { 3598 case SCSI_PROT_WRITE_STRIP: 3599 case SCSI_PROT_READ_STRIP: 3600 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP; 3601 break; 3602 case SCSI_PROT_WRITE_INSERT: 3603 case SCSI_PROT_READ_INSERT: 3604 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT; 3605 break; 3606 case SCSI_PROT_WRITE_PASS: 3607 case SCSI_PROT_READ_PASS: 3608 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS; 3609 break; 3610 } 3611 3612 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 3613 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3614 3615 /* Set first-burst provided it was successfully negotiated */ 3616 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3617 vport->cfg_first_burst_size && 3618 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3619 u32 init_len, total_len; 3620 3621 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3622 init_len = min(total_len, vport->cfg_first_burst_size); 3623 3624 /* Word 4 & 5 */ 3625 wqe->fcp_iwrite.initial_xfer_len = init_len; 3626 wqe->fcp_iwrite.total_xfer_len = total_len; 3627 } else { 3628 /* Word 4 
*/ 3629 wqe->fcp_iwrite.total_xfer_len = 3630 be32_to_cpu(fcp_cmnd->fcpDl); 3631 } 3632 3633 /* 3634 * If the OAS driver feature is enabled and the lun is enabled for 3635 * OAS, set the oas iocb related flags. 3636 */ 3637 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3638 scsi_cmnd->device->hostdata)->oas_enabled) { 3639 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3640 3641 /* Word 10 */ 3642 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3643 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3644 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3645 (phba->cfg_XLanePriority << 1)); 3646 } 3647 3648 /* Word 7. DIF Flags */ 3649 if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS) 3650 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 3651 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP) 3652 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 3653 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT) 3654 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 3655 3656 lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS | 3657 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT); 3658 3659 return 0; 3660 err: 3661 if (lpfc_cmd->seg_cnt) 3662 scsi_dma_unmap(scsi_cmnd); 3663 if (lpfc_cmd->prot_seg_cnt) 3664 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 3665 scsi_prot_sg_count(scsi_cmnd), 3666 scsi_cmnd->sc_data_direction); 3667 3668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3669 "9084 Cannot setup S/G List for HBA" 3670 "IO segs %d/%d SGL %d SCSI %d: %d %d\n", 3671 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 3672 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 3673 prot_group_type, num_sge); 3674 3675 lpfc_cmd->seg_cnt = 0; 3676 lpfc_cmd->prot_seg_cnt = 0; 3677 return ret; 3678 } 3679 3680 /** 3681 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3682 * @phba: The Hba for which this call is being executed. 3683 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3684 * 3685 * This routine wraps the actual DMA mapping function pointer from the 3686 * lpfc_hba struct. 3687 * 3688 * Return codes: 3689 * 1 - Error 3690 * 0 - Success 3691 **/ 3692 static inline int 3693 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3694 { 3695 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 3696 } 3697 3698 /** 3699 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3700 * using BlockGuard. 3701 * @phba: The Hba for which this call is being executed. 3702 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3703 * 3704 * This routine wraps the actual DMA mapping function pointer from the 3705 * lpfc_hba struct. 3706 * 3707 * Return codes: 3708 * 1 - Error 3709 * 0 - Success 3710 **/ 3711 static inline int 3712 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3713 { 3714 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 3715 } 3716 3717 /** 3718 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi 3719 * buffer 3720 * @vport: Pointer to vport object. 3721 * @lpfc_cmd: The scsi buffer which is going to be mapped. 
3722 * @tmo: Timeout value for IO 3723 * 3724 * This routine initializes IOCB/WQE data structure from scsi command 3725 * 3726 * Return codes: 3727 * 1 - Error 3728 * 0 - Success 3729 **/ 3730 static inline int 3731 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3732 uint8_t tmo) 3733 { 3734 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo); 3735 } 3736 3737 /** 3738 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 3739 * @phba: Pointer to hba context object. 3740 * @vport: Pointer to vport object. 3741 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. 3742 * @fcpi_parm: FCP Initiator parameter. 3743 * 3744 * This function posts an event when there is a SCSI command reporting 3745 * error from the scsi device. 3746 **/ 3747 static void 3748 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, 3749 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { 3750 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3751 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3752 uint32_t resp_info = fcprsp->rspStatus2; 3753 uint32_t scsi_status = fcprsp->rspStatus3; 3754 struct lpfc_fast_path_event *fast_path_evt = NULL; 3755 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 3756 unsigned long flags; 3757 3758 if (!pnode) 3759 return; 3760 3761 /* If there is queuefull or busy condition send a scsi event */ 3762 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 3763 (cmnd->result == SAM_STAT_BUSY)) { 3764 fast_path_evt = lpfc_alloc_fast_evt(phba); 3765 if (!fast_path_evt) 3766 return; 3767 fast_path_evt->un.scsi_evt.event_type = 3768 FC_REG_SCSI_EVENT; 3769 fast_path_evt->un.scsi_evt.subcategory = 3770 (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 3771 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; 3772 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; 3773 memcpy(&fast_path_evt->un.scsi_evt.wwpn, 3774 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3775 memcpy(&fast_path_evt->un.scsi_evt.wwnn, 3776 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3777 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && 3778 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { 3779 fast_path_evt = lpfc_alloc_fast_evt(phba); 3780 if (!fast_path_evt) 3781 return; 3782 fast_path_evt->un.check_cond_evt.scsi_event.event_type = 3783 FC_REG_SCSI_EVENT; 3784 fast_path_evt->un.check_cond_evt.scsi_event.subcategory = 3785 LPFC_EVENT_CHECK_COND; 3786 fast_path_evt->un.check_cond_evt.scsi_event.lun = 3787 cmnd->device->lun; 3788 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, 3789 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3790 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, 3791 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3792 fast_path_evt->un.check_cond_evt.sense_key = 3793 cmnd->sense_buffer[2] & 0xf; 3794 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; 3795 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; 3796 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 3797 fcpi_parm && 3798 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || 3799 ((scsi_status == SAM_STAT_GOOD) && 3800 !(resp_info & (RESID_UNDER | RESID_OVER))))) { 3801 /* 3802 * If status is good or resid does not match with fcp_param and 3803 * there is valid fcpi_parm, then there is a read_check error 3804 */ 3805 fast_path_evt = lpfc_alloc_fast_evt(phba); 3806 if (!fast_path_evt) 3807 return; 3808 fast_path_evt->un.read_check_error.header.event_type = 3809 FC_REG_FABRIC_EVENT; 3810 
fast_path_evt->un.read_check_error.header.subcategory = 3811 LPFC_EVENT_FCPRDCHKERR; 3812 memcpy(&fast_path_evt->un.read_check_error.header.wwpn, 3813 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3814 memcpy(&fast_path_evt->un.read_check_error.header.wwnn, 3815 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3816 fast_path_evt->un.read_check_error.lun = cmnd->device->lun; 3817 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; 3818 fast_path_evt->un.read_check_error.fcpiparam = 3819 fcpi_parm; 3820 } else 3821 return; 3822 3823 fast_path_evt->vport = vport; 3824 spin_lock_irqsave(&phba->hbalock, flags); 3825 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 3826 spin_unlock_irqrestore(&phba->hbalock, flags); 3827 lpfc_worker_wake_up(phba); 3828 return; 3829 } 3830 3831 /** 3832 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev 3833 * @phba: The HBA for which this call is being executed. 3834 * @psb: The scsi buffer which is going to be un-mapped. 3835 * 3836 * This routine does DMA un-mapping of scatter gather list of scsi command 3837 * field of @lpfc_cmd for device with SLI-3 interface spec. 3838 **/ 3839 static void 3840 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 3841 { 3842 /* 3843 * There are only two special cases to consider. (1) the scsi command 3844 * requested scatter-gather usage or (2) the scsi command allocated 3845 * a request buffer, but did not request use_sg. There is a third 3846 * case, but it does not require resource deallocation. 3847 */ 3848 if (psb->seg_cnt > 0) 3849 scsi_dma_unmap(psb->pCmd); 3850 if (psb->prot_seg_cnt > 0) 3851 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), 3852 scsi_prot_sg_count(psb->pCmd), 3853 psb->pCmd->sc_data_direction); 3854 } 3855 3856 /** 3857 * lpfc_handle_fcp_err - FCP response handler 3858 * @vport: The virtual port for which this call is being executed. 3859 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 3860 * @fcpi_parm: FCP Initiator parameter. 3861 * 3862 * This routine is called to process response IOCB with status field 3863 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command 3864 * based upon SCSI and FCP error. 3865 **/ 3866 static void 3867 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3868 uint32_t fcpi_parm) 3869 { 3870 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3871 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 3872 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3873 uint32_t resp_info = fcprsp->rspStatus2; 3874 uint32_t scsi_status = fcprsp->rspStatus3; 3875 uint32_t *lp; 3876 uint32_t host_status = DID_OK; 3877 uint32_t rsplen = 0; 3878 uint32_t fcpDl; 3879 uint32_t logit = LOG_FCP | LOG_FCP_ERROR; 3880 3881 3882 /* 3883 * If this is a task management command, there is no 3884 * scsi packet associated with this lpfc_cmd. The driver 3885 * consumes it. 
3886 */ 3887 if (fcpcmd->fcpCntl2) { 3888 scsi_status = 0; 3889 goto out; 3890 } 3891 3892 if (resp_info & RSP_LEN_VALID) { 3893 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3894 if (rsplen != 0 && rsplen != 4 && rsplen != 8) { 3895 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3896 "2719 Invalid response length: " 3897 "tgt x%x lun x%llx cmnd x%x rsplen " 3898 "x%x\n", cmnd->device->id, 3899 cmnd->device->lun, cmnd->cmnd[0], 3900 rsplen); 3901 host_status = DID_ERROR; 3902 goto out; 3903 } 3904 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { 3905 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3906 "2757 Protocol failure detected during " 3907 "processing of FCP I/O op: " 3908 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", 3909 cmnd->device->id, 3910 cmnd->device->lun, cmnd->cmnd[0], 3911 fcprsp->rspInfo3); 3912 host_status = DID_ERROR; 3913 goto out; 3914 } 3915 } 3916 3917 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 3918 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 3919 if (snslen > SCSI_SENSE_BUFFERSIZE) 3920 snslen = SCSI_SENSE_BUFFERSIZE; 3921 3922 if (resp_info & RSP_LEN_VALID) 3923 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3924 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); 3925 } 3926 lp = (uint32_t *)cmnd->sense_buffer; 3927 3928 /* special handling for under run conditions */ 3929 if (!scsi_status && (resp_info & RESID_UNDER)) { 3930 /* don't log under runs if fcp set... */ 3931 if (vport->cfg_log_verbose & LOG_FCP) 3932 logit = LOG_FCP_ERROR; 3933 /* unless operator says so */ 3934 if (vport->cfg_log_verbose & LOG_FCP_UNDER) 3935 logit = LOG_FCP_UNDER; 3936 } 3937 3938 lpfc_printf_vlog(vport, KERN_WARNING, logit, 3939 "9024 FCP command x%x failed: x%x SNS x%x x%x " 3940 "Data: x%x x%x x%x x%x x%x\n", 3941 cmnd->cmnd[0], scsi_status, 3942 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 3943 be32_to_cpu(fcprsp->rspResId), 3944 be32_to_cpu(fcprsp->rspSnsLen), 3945 be32_to_cpu(fcprsp->rspRspLen), 3946 fcprsp->rspInfo3); 3947 3948 scsi_set_resid(cmnd, 0); 3949 fcpDl = be32_to_cpu(fcpcmd->fcpDl); 3950 if (resp_info & RESID_UNDER) { 3951 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 3952 3953 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, 3954 "9025 FCP Underrun, expected %d, " 3955 "residual %d Data: x%x x%x x%x\n", 3956 fcpDl, 3957 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], 3958 cmnd->underflow); 3959 3960 /* 3961 * If there is an under run, check if under run reported by 3962 * storage array is same as the under run reported by HBA. 3963 * If this is not same, there is a dropped frame. 3964 */ 3965 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) { 3966 lpfc_printf_vlog(vport, KERN_WARNING, 3967 LOG_FCP | LOG_FCP_ERROR, 3968 "9026 FCP Read Check Error " 3969 "and Underrun Data: x%x x%x x%x x%x\n", 3970 fcpDl, 3971 scsi_get_resid(cmnd), fcpi_parm, 3972 cmnd->cmnd[0]); 3973 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 3974 host_status = DID_ERROR; 3975 } 3976 /* 3977 * The cmnd->underflow is the minimum number of bytes that must 3978 * be transferred for this command. Provided a sense condition 3979 * is not present, make sure the actual amount transferred is at 3980 * least the underflow value or fail. 
3981 */ 3982 if (!(resp_info & SNS_LEN_VALID) && 3983 (scsi_status == SAM_STAT_GOOD) && 3984 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) 3985 < cmnd->underflow)) { 3986 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3987 "9027 FCP command x%x residual " 3988 "underrun converted to error " 3989 "Data: x%x x%x x%x\n", 3990 cmnd->cmnd[0], scsi_bufflen(cmnd), 3991 scsi_get_resid(cmnd), cmnd->underflow); 3992 host_status = DID_ERROR; 3993 } 3994 } else if (resp_info & RESID_OVER) { 3995 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3996 "9028 FCP command x%x residual overrun error. " 3997 "Data: x%x x%x\n", cmnd->cmnd[0], 3998 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 3999 host_status = DID_ERROR; 4000 4001 /* 4002 * Check SLI validation that all the transfer was actually done 4003 * (fcpi_parm should be zero). Apply check only to reads. 4004 */ 4005 } else if (fcpi_parm) { 4006 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 4007 "9029 FCP %s Check Error Data: " 4008 "x%x x%x x%x x%x x%x\n", 4009 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? 4010 "Read" : "Write"), 4011 fcpDl, be32_to_cpu(fcprsp->rspResId), 4012 fcpi_parm, cmnd->cmnd[0], scsi_status); 4013 4014 /* There is some issue with the LPe12000 that causes it 4015 * to miscalculate the fcpi_parm and falsely trip this 4016 * recovery logic. Detect this case and don't error when true. 4017 */ 4018 if (fcpi_parm > fcpDl) 4019 goto out; 4020 4021 switch (scsi_status) { 4022 case SAM_STAT_GOOD: 4023 case SAM_STAT_CHECK_CONDITION: 4024 /* Fabric dropped a data frame. Fail any successful 4025 * command in which we detected dropped frames. 4026 * A status of good or some check conditions could 4027 * be considered a successful command. 4028 */ 4029 host_status = DID_ERROR; 4030 break; 4031 } 4032 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 4033 } 4034 4035 out: 4036 cmnd->result = host_status << 16 | scsi_status; 4037 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm); 4038 } 4039 4040 /** 4041 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO 4042 * @phba: The hba for which this call is being executed. 4043 * @pwqeIn: The command WQE for the scsi cmnd. 4044 * @wcqe: Pointer to driver response CQE object. 4045 * 4046 * This routine assigns scsi command result by looking into response WQE 4047 * status field appropriately. This routine handles QUEUE FULL condition as 4048 * well by ramping down device queue depth. 4049 **/ 4050 static void 4051 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 4052 struct lpfc_wcqe_complete *wcqe) 4053 { 4054 struct lpfc_io_buf *lpfc_cmd = 4055 (struct lpfc_io_buf *)pwqeIn->context1; 4056 struct lpfc_vport *vport = pwqeIn->vport; 4057 struct lpfc_rport_data *rdata; 4058 struct lpfc_nodelist *ndlp; 4059 struct scsi_cmnd *cmd; 4060 unsigned long flags; 4061 struct lpfc_fast_path_event *fast_path_evt; 4062 struct Scsi_Host *shost; 4063 u32 logit = LOG_FCP; 4064 u32 status, idx; 4065 unsigned long iflags = 0; 4066 u8 wait_xb_clr = 0; 4067 4068 /* Sanity check on return of outstanding command */ 4069 if (!lpfc_cmd) { 4070 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4071 "9032 Null lpfc_cmd pointer. No " 4072 "release, skip completion\n"); 4073 return; 4074 } 4075 4076 rdata = lpfc_cmd->rdata; 4077 ndlp = rdata->pnode; 4078 4079 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 4080 /* TOREMOVE - currently this flag is checked during 4081 * the release of lpfc_iocbq. Remove once we move 4082 * to lpfc_wqe_job construct. 
4083 * 4084 * This needs to be done outside buf_lock 4085 */ 4086 spin_lock_irqsave(&phba->hbalock, iflags); 4087 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY; 4088 spin_unlock_irqrestore(&phba->hbalock, iflags); 4089 } 4090 4091 /* Guard against abort handler being called at same time */ 4092 spin_lock(&lpfc_cmd->buf_lock); 4093 4094 /* Sanity check on return of outstanding command */ 4095 cmd = lpfc_cmd->pCmd; 4096 if (!cmd) { 4097 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4098 "9042 I/O completion: Not an active IO\n"); 4099 spin_unlock(&lpfc_cmd->buf_lock); 4100 lpfc_release_scsi_buf(phba, lpfc_cmd); 4101 return; 4102 } 4103 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4104 if (phba->sli4_hba.hdwq) 4105 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4106 4107 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4108 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4109 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4110 #endif 4111 shost = cmd->device->host; 4112 4113 status = bf_get(lpfc_wcqe_c_status, wcqe); 4114 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK); 4115 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); 4116 4117 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4118 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 4119 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4120 if (phba->cfg_fcp_wait_abts_rsp) 4121 wait_xb_clr = 1; 4122 } 4123 4124 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4125 if (lpfc_cmd->prot_data_type) { 4126 struct scsi_dif_tuple *src = NULL; 4127 4128 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4129 /* 4130 * Used to restore any changes to protection 4131 * data for error injection. 4132 */ 4133 switch (lpfc_cmd->prot_data_type) { 4134 case LPFC_INJERR_REFTAG: 4135 src->ref_tag = 4136 lpfc_cmd->prot_data; 4137 break; 4138 case LPFC_INJERR_APPTAG: 4139 src->app_tag = 4140 (uint16_t)lpfc_cmd->prot_data; 4141 break; 4142 case LPFC_INJERR_GUARD: 4143 src->guard_tag = 4144 (uint16_t)lpfc_cmd->prot_data; 4145 break; 4146 default: 4147 break; 4148 } 4149 4150 lpfc_cmd->prot_data = 0; 4151 lpfc_cmd->prot_data_type = 0; 4152 lpfc_cmd->prot_data_segment = NULL; 4153 } 4154 #endif 4155 if (unlikely(lpfc_cmd->status)) { 4156 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4157 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4158 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4159 else if (lpfc_cmd->status >= IOSTAT_CNT) 4160 lpfc_cmd->status = IOSTAT_DEFAULT; 4161 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4162 !lpfc_cmd->fcp_rsp->rspStatus3 && 4163 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4164 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4165 logit = 0; 4166 else 4167 logit = LOG_FCP | LOG_FCP_UNDER; 4168 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4169 "9034 FCP cmd x%x failed <%d/%lld> " 4170 "status: x%x result: x%x " 4171 "sid: x%x did: x%x oxid: x%x " 4172 "Data: x%x x%x x%x\n", 4173 cmd->cmnd[0], 4174 cmd->device ? cmd->device->id : 0xffff, 4175 cmd->device ? cmd->device->lun : 0xffff, 4176 lpfc_cmd->status, lpfc_cmd->result, 4177 vport->fc_myDID, 4178 (ndlp) ? 
ndlp->nlp_DID : 0, 4179 lpfc_cmd->cur_iocbq.sli4_xritag, 4180 wcqe->parameter, wcqe->total_data_placed, 4181 lpfc_cmd->cur_iocbq.iotag); 4182 } 4183 4184 switch (lpfc_cmd->status) { 4185 case IOSTAT_SUCCESS: 4186 cmd->result = DID_OK << 16; 4187 break; 4188 case IOSTAT_FCP_RSP_ERROR: 4189 lpfc_handle_fcp_err(vport, lpfc_cmd, 4190 pwqeIn->wqe.fcp_iread.total_xfer_len - 4191 wcqe->total_data_placed); 4192 break; 4193 case IOSTAT_NPORT_BSY: 4194 case IOSTAT_FABRIC_BSY: 4195 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4196 fast_path_evt = lpfc_alloc_fast_evt(phba); 4197 if (!fast_path_evt) 4198 break; 4199 fast_path_evt->un.fabric_evt.event_type = 4200 FC_REG_FABRIC_EVENT; 4201 fast_path_evt->un.fabric_evt.subcategory = 4202 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 4203 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4204 if (ndlp) { 4205 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4206 &ndlp->nlp_portname, 4207 sizeof(struct lpfc_name)); 4208 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4209 &ndlp->nlp_nodename, 4210 sizeof(struct lpfc_name)); 4211 } 4212 fast_path_evt->vport = vport; 4213 fast_path_evt->work_evt.evt = 4214 LPFC_EVT_FASTPATH_MGMT_EVT; 4215 spin_lock_irqsave(&phba->hbalock, flags); 4216 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4217 &phba->work_list); 4218 spin_unlock_irqrestore(&phba->hbalock, flags); 4219 lpfc_worker_wake_up(phba); 4220 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4221 "9035 Fabric/Node busy FCP cmd x%x failed" 4222 " <%d/%lld> " 4223 "status: x%x result: x%x " 4224 "sid: x%x did: x%x oxid: x%x " 4225 "Data: x%x x%x x%x\n", 4226 cmd->cmnd[0], 4227 cmd->device ? cmd->device->id : 0xffff, 4228 cmd->device ? cmd->device->lun : 0xffff, 4229 lpfc_cmd->status, lpfc_cmd->result, 4230 vport->fc_myDID, 4231 (ndlp) ? ndlp->nlp_DID : 0, 4232 lpfc_cmd->cur_iocbq.sli4_xritag, 4233 wcqe->parameter, 4234 wcqe->total_data_placed, 4235 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4236 break; 4237 case IOSTAT_REMOTE_STOP: 4238 if (ndlp) { 4239 /* This I/O was aborted by the target, we don't 4240 * know the rxid and because we did not send the 4241 * ABTS we cannot generate and RRQ. 4242 */ 4243 lpfc_set_rrq_active(phba, ndlp, 4244 lpfc_cmd->cur_iocbq.sli4_lxritag, 4245 0, 0); 4246 } 4247 fallthrough; 4248 case IOSTAT_LOCAL_REJECT: 4249 if (lpfc_cmd->result & IOERR_DRVR_MASK) 4250 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4251 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4252 lpfc_cmd->result == 4253 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4254 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4255 lpfc_cmd->result == 4256 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4257 cmd->result = DID_NO_CONNECT << 16; 4258 break; 4259 } 4260 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4261 lpfc_cmd->result == IOERR_NO_RESOURCES || 4262 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4263 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4264 cmd->result = DID_REQUEUE << 16; 4265 break; 4266 } 4267 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4268 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4269 status == CQE_STATUS_DI_ERROR) { 4270 if (scsi_get_prot_op(cmd) != 4271 SCSI_PROT_NORMAL) { 4272 /* 4273 * This is a response for a BG enabled 4274 * cmd. 
Parse BG error 4275 */ 4276 lpfc_sli4_parse_bg_err(phba, lpfc_cmd, 4277 wcqe); 4278 break; 4279 } 4280 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4281 "9040 non-zero BGSTAT on unprotected cmd\n"); 4282 } 4283 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4284 "9036 Local Reject FCP cmd x%x failed" 4285 " <%d/%lld> " 4286 "status: x%x result: x%x " 4287 "sid: x%x did: x%x oxid: x%x " 4288 "Data: x%x x%x x%x\n", 4289 cmd->cmnd[0], 4290 cmd->device ? cmd->device->id : 0xffff, 4291 cmd->device ? cmd->device->lun : 0xffff, 4292 lpfc_cmd->status, lpfc_cmd->result, 4293 vport->fc_myDID, 4294 (ndlp) ? ndlp->nlp_DID : 0, 4295 lpfc_cmd->cur_iocbq.sli4_xritag, 4296 wcqe->parameter, 4297 wcqe->total_data_placed, 4298 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4299 fallthrough; 4300 default: 4301 if (lpfc_cmd->status >= IOSTAT_CNT) 4302 lpfc_cmd->status = IOSTAT_DEFAULT; 4303 cmd->result = DID_ERROR << 16; 4304 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 4305 "9037 FCP Completion Error: xri %x " 4306 "status x%x result x%x [x%x] " 4307 "placed x%x\n", 4308 lpfc_cmd->cur_iocbq.sli4_xritag, 4309 lpfc_cmd->status, lpfc_cmd->result, 4310 wcqe->parameter, 4311 wcqe->total_data_placed); 4312 } 4313 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4314 u32 *lp = (u32 *)cmd->sense_buffer; 4315 4316 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4317 "9039 Iodone <%d/%llu> cmd x%px, error " 4318 "x%x SNS x%x x%x Data: x%x x%x\n", 4319 cmd->device->id, cmd->device->lun, cmd, 4320 cmd->result, *lp, *(lp + 3), cmd->retries, 4321 scsi_get_resid(cmd)); 4322 } 4323 4324 lpfc_update_stats(vport, lpfc_cmd); 4325 4326 if (vport->cfg_max_scsicmpl_time && 4327 time_after(jiffies, lpfc_cmd->start_time + 4328 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4329 spin_lock_irqsave(shost->host_lock, flags); 4330 if (ndlp) { 4331 if (ndlp->cmd_qdepth > 4332 atomic_read(&ndlp->cmd_pending) && 4333 (atomic_read(&ndlp->cmd_pending) > 4334 LPFC_MIN_TGT_QDEPTH) && 4335 (cmd->cmnd[0] == READ_10 || 4336 cmd->cmnd[0] == WRITE_10)) 4337 ndlp->cmd_qdepth = 4338 atomic_read(&ndlp->cmd_pending); 4339 4340 ndlp->last_change_time = jiffies; 4341 } 4342 spin_unlock_irqrestore(shost->host_lock, flags); 4343 } 4344 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4345 4346 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4347 if (lpfc_cmd->ts_cmd_start) { 4348 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp; 4349 lpfc_cmd->ts_data_io = ktime_get_ns(); 4350 phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4351 lpfc_io_ktime(phba, lpfc_cmd); 4352 } 4353 #endif 4354 if (wait_xb_clr) 4355 goto out; 4356 lpfc_cmd->pCmd = NULL; 4357 spin_unlock(&lpfc_cmd->buf_lock); 4358 4359 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4360 cmd->scsi_done(cmd); 4361 4362 /* 4363 * If there is an abort thread waiting for command completion 4364 * wake up the thread. 4365 */ 4366 spin_lock(&lpfc_cmd->buf_lock); 4367 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; 4368 if (lpfc_cmd->waitq) 4369 wake_up(lpfc_cmd->waitq); 4370 out: 4371 spin_unlock(&lpfc_cmd->buf_lock); 4372 lpfc_release_scsi_buf(phba, lpfc_cmd); 4373 } 4374 4375 /** 4376 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 4377 * @phba: The Hba for which this call is being executed. 4378 * @pIocbIn: The command IOCBQ for the scsi cmnd. 4379 * @pIocbOut: The response IOCBQ for the scsi cmnd. 4380 * 4381 * This routine assigns scsi command result by looking into response IOCB 4382 * status field appropriately. 
This routine handles QUEUE FULL condition as 4383 * well by ramping down device queue depth. 4384 **/ 4385 static void 4386 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 4387 struct lpfc_iocbq *pIocbOut) 4388 { 4389 struct lpfc_io_buf *lpfc_cmd = 4390 (struct lpfc_io_buf *) pIocbIn->context1; 4391 struct lpfc_vport *vport = pIocbIn->vport; 4392 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4393 struct lpfc_nodelist *pnode = rdata->pnode; 4394 struct scsi_cmnd *cmd; 4395 unsigned long flags; 4396 struct lpfc_fast_path_event *fast_path_evt; 4397 struct Scsi_Host *shost; 4398 int idx; 4399 uint32_t logit = LOG_FCP; 4400 4401 /* Guard against abort handler being called at same time */ 4402 spin_lock(&lpfc_cmd->buf_lock); 4403 4404 /* Sanity check on return of outstanding command */ 4405 cmd = lpfc_cmd->pCmd; 4406 if (!cmd || !phba) { 4407 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4408 "2621 IO completion: Not an active IO\n"); 4409 spin_unlock(&lpfc_cmd->buf_lock); 4410 return; 4411 } 4412 4413 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4414 if (phba->sli4_hba.hdwq) 4415 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4416 4417 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4418 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4419 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4420 #endif 4421 shost = cmd->device->host; 4422 4423 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); 4424 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 4425 /* pick up SLI4 exchange busy status from HBA */ 4426 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4427 if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY) 4428 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4429 4430 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4431 if (lpfc_cmd->prot_data_type) { 4432 struct scsi_dif_tuple *src = NULL; 4433 4434 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4435 /* 4436 * Used to restore any changes to protection 4437 * data for error injection. 4438 */ 4439 switch (lpfc_cmd->prot_data_type) { 4440 case LPFC_INJERR_REFTAG: 4441 src->ref_tag = 4442 lpfc_cmd->prot_data; 4443 break; 4444 case LPFC_INJERR_APPTAG: 4445 src->app_tag = 4446 (uint16_t)lpfc_cmd->prot_data; 4447 break; 4448 case LPFC_INJERR_GUARD: 4449 src->guard_tag = 4450 (uint16_t)lpfc_cmd->prot_data; 4451 break; 4452 default: 4453 break; 4454 } 4455 4456 lpfc_cmd->prot_data = 0; 4457 lpfc_cmd->prot_data_type = 0; 4458 lpfc_cmd->prot_data_segment = NULL; 4459 } 4460 #endif 4461 4462 if (unlikely(lpfc_cmd->status)) { 4463 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4464 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4465 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4466 else if (lpfc_cmd->status >= IOSTAT_CNT) 4467 lpfc_cmd->status = IOSTAT_DEFAULT; 4468 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4469 !lpfc_cmd->fcp_rsp->rspStatus3 && 4470 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4471 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4472 logit = 0; 4473 else 4474 logit = LOG_FCP | LOG_FCP_UNDER; 4475 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4476 "9030 FCP cmd x%x failed <%d/%lld> " 4477 "status: x%x result: x%x " 4478 "sid: x%x did: x%x oxid: x%x " 4479 "Data: x%x x%x\n", 4480 cmd->cmnd[0], 4481 cmd->device ? cmd->device->id : 0xffff, 4482 cmd->device ? cmd->device->lun : 0xffff, 4483 lpfc_cmd->status, lpfc_cmd->result, 4484 vport->fc_myDID, 4485 (pnode) ? pnode->nlp_DID : 0, 4486 phba->sli_rev == LPFC_SLI_REV4 ? 
4487 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 4488 pIocbOut->iocb.ulpContext, 4489 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4490 4491 switch (lpfc_cmd->status) { 4492 case IOSTAT_FCP_RSP_ERROR: 4493 /* Call FCP RSP handler to determine result */ 4494 lpfc_handle_fcp_err(vport, lpfc_cmd, 4495 pIocbOut->iocb.un.fcpi.fcpi_parm); 4496 break; 4497 case IOSTAT_NPORT_BSY: 4498 case IOSTAT_FABRIC_BSY: 4499 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4500 fast_path_evt = lpfc_alloc_fast_evt(phba); 4501 if (!fast_path_evt) 4502 break; 4503 fast_path_evt->un.fabric_evt.event_type = 4504 FC_REG_FABRIC_EVENT; 4505 fast_path_evt->un.fabric_evt.subcategory = 4506 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 4507 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4508 if (pnode) { 4509 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4510 &pnode->nlp_portname, 4511 sizeof(struct lpfc_name)); 4512 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4513 &pnode->nlp_nodename, 4514 sizeof(struct lpfc_name)); 4515 } 4516 fast_path_evt->vport = vport; 4517 fast_path_evt->work_evt.evt = 4518 LPFC_EVT_FASTPATH_MGMT_EVT; 4519 spin_lock_irqsave(&phba->hbalock, flags); 4520 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4521 &phba->work_list); 4522 spin_unlock_irqrestore(&phba->hbalock, flags); 4523 lpfc_worker_wake_up(phba); 4524 break; 4525 case IOSTAT_LOCAL_REJECT: 4526 case IOSTAT_REMOTE_STOP: 4527 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4528 lpfc_cmd->result == 4529 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4530 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4531 lpfc_cmd->result == 4532 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4533 cmd->result = DID_NO_CONNECT << 16; 4534 break; 4535 } 4536 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4537 lpfc_cmd->result == IOERR_NO_RESOURCES || 4538 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4539 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4540 cmd->result = DID_REQUEUE << 16; 4541 break; 4542 } 4543 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4544 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4545 pIocbOut->iocb.unsli3.sli3_bg.bgstat) { 4546 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 4547 /* 4548 * This is a response for a BG enabled 4549 * cmd. Parse BG error 4550 */ 4551 lpfc_parse_bg_err(phba, lpfc_cmd, 4552 pIocbOut); 4553 break; 4554 } else { 4555 lpfc_printf_vlog(vport, KERN_WARNING, 4556 LOG_BG, 4557 "9031 non-zero BGSTAT " 4558 "on unprotected cmd\n"); 4559 } 4560 } 4561 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) 4562 && (phba->sli_rev == LPFC_SLI_REV4) 4563 && pnode) { 4564 /* This IO was aborted by the target, we don't 4565 * know the rxid and because we did not send the 4566 * ABTS we cannot generate and RRQ. 
4567 */ 4568 lpfc_set_rrq_active(phba, pnode, 4569 lpfc_cmd->cur_iocbq.sli4_lxritag, 4570 0, 0); 4571 } 4572 fallthrough; 4573 default: 4574 cmd->result = DID_ERROR << 16; 4575 break; 4576 } 4577 4578 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 4579 cmd->result = DID_TRANSPORT_DISRUPTED << 16 | 4580 SAM_STAT_BUSY; 4581 } else 4582 cmd->result = DID_OK << 16; 4583 4584 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4585 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 4586 4587 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4588 "0710 Iodone <%d/%llu> cmd x%px, error " 4589 "x%x SNS x%x x%x Data: x%x x%x\n", 4590 cmd->device->id, cmd->device->lun, cmd, 4591 cmd->result, *lp, *(lp + 3), cmd->retries, 4592 scsi_get_resid(cmd)); 4593 } 4594 4595 lpfc_update_stats(vport, lpfc_cmd); 4596 if (vport->cfg_max_scsicmpl_time && 4597 time_after(jiffies, lpfc_cmd->start_time + 4598 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4599 spin_lock_irqsave(shost->host_lock, flags); 4600 if (pnode) { 4601 if (pnode->cmd_qdepth > 4602 atomic_read(&pnode->cmd_pending) && 4603 (atomic_read(&pnode->cmd_pending) > 4604 LPFC_MIN_TGT_QDEPTH) && 4605 ((cmd->cmnd[0] == READ_10) || 4606 (cmd->cmnd[0] == WRITE_10))) 4607 pnode->cmd_qdepth = 4608 atomic_read(&pnode->cmd_pending); 4609 4610 pnode->last_change_time = jiffies; 4611 } 4612 spin_unlock_irqrestore(shost->host_lock, flags); 4613 } 4614 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4615 4616 lpfc_cmd->pCmd = NULL; 4617 spin_unlock(&lpfc_cmd->buf_lock); 4618 4619 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4620 if (lpfc_cmd->ts_cmd_start) { 4621 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; 4622 lpfc_cmd->ts_data_io = ktime_get_ns(); 4623 phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4624 lpfc_io_ktime(phba, lpfc_cmd); 4625 } 4626 #endif 4627 4628 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4629 cmd->scsi_done(cmd); 4630 4631 /* 4632 * If there is an abort thread waiting for command completion 4633 * wake up the thread. 4634 */ 4635 spin_lock(&lpfc_cmd->buf_lock); 4636 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; 4637 if (lpfc_cmd->waitq) 4638 wake_up(lpfc_cmd->waitq); 4639 spin_unlock(&lpfc_cmd->buf_lock); 4640 4641 lpfc_release_scsi_buf(phba, lpfc_cmd); 4642 } 4643 4644 /** 4645 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO 4646 * @vport: Pointer to vport object. 4647 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4648 * @tmo: timeout value for the IO 4649 * 4650 * Based on the data-direction of the command, initialize IOCB 4651 * in the I/O buffer. Fill in the IOCB fields which are independent 4652 * of the scsi buffer 4653 * 4654 * RETURNS 0 - SUCCESS, 4655 **/ 4656 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, 4657 struct lpfc_io_buf *lpfc_cmd, 4658 uint8_t tmo) 4659 { 4660 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 4661 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq; 4662 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4663 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4664 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4665 int datadir = scsi_cmnd->sc_data_direction; 4666 u32 fcpdl; 4667 4668 piocbq->iocb.un.fcpi.fcpi_XRdy = 0; 4669 4670 /* 4671 * There are three possibilities here - use scatter-gather segment, use 4672 * the single mapping, or neither. Start the lpfc command prep by 4673 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 4674 * data bde entry. 
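 * For writes to targets that negotiated first burst, fcpi_XRdy is
 * preset to the smaller of the transfer length and cfg_first_burst_size.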
4675 */ 4676 if (scsi_sg_count(scsi_cmnd)) { 4677 if (datadir == DMA_TO_DEVICE) { 4678 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 4679 iocb_cmd->ulpPU = PARM_READ_CHECK; 4680 if (vport->cfg_first_burst_size && 4681 (pnode->nlp_flag & NLP_FIRSTBURST)) { 4682 u32 xrdy_len; 4683 4684 fcpdl = scsi_bufflen(scsi_cmnd); 4685 xrdy_len = min(fcpdl, 4686 vport->cfg_first_burst_size); 4687 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; 4688 } 4689 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4690 } else { 4691 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 4692 iocb_cmd->ulpPU = PARM_READ_CHECK; 4693 fcp_cmnd->fcpCntl3 = READ_DATA; 4694 } 4695 } else { 4696 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; 4697 iocb_cmd->un.fcpi.fcpi_parm = 0; 4698 iocb_cmd->ulpPU = 0; 4699 fcp_cmnd->fcpCntl3 = 0; 4700 } 4701 4702 /* 4703 * Finish initializing those IOCB fields that are independent 4704 * of the scsi_cmnd request_buffer 4705 */ 4706 piocbq->iocb.ulpContext = pnode->nlp_rpi; 4707 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4708 piocbq->iocb.ulpFCP2Rcvy = 1; 4709 else 4710 piocbq->iocb.ulpFCP2Rcvy = 0; 4711 4712 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 4713 piocbq->context1 = lpfc_cmd; 4714 if (!piocbq->iocb_cmpl) 4715 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 4716 piocbq->iocb.ulpTimeout = tmo; 4717 piocbq->vport = vport; 4718 return 0; 4719 } 4720 4721 /** 4722 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO 4723 * @vport: Pointer to vport object. 4724 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4725 * @tmo: timeout value for the IO 4726 * 4727 * Based on the data-direction of the command copy WQE template 4728 * to I/O buffer WQE. Fill in the WQE fields which are independent 4729 * of the scsi buffer 4730 * 4731 * RETURNS 0 - SUCCESS, 4732 **/ 4733 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, 4734 struct lpfc_io_buf *lpfc_cmd, 4735 uint8_t tmo) 4736 { 4737 struct lpfc_hba *phba = vport->phba; 4738 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4739 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4740 struct lpfc_sli4_hdw_queue *hdwq = NULL; 4741 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 4742 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4743 union lpfc_wqe128 *wqe = &pwqeq->wqe; 4744 u16 idx = lpfc_cmd->hdwq_no; 4745 int datadir = scsi_cmnd->sc_data_direction; 4746 4747 hdwq = &phba->sli4_hba.hdwq[idx]; 4748 4749 /* Initialize 64 bytes only */ 4750 memset(wqe, 0, sizeof(union lpfc_wqe128)); 4751 4752 /* 4753 * There are three possibilities here - use scatter-gather segment, use 4754 * the single mapping, or neither. 
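 * The iwrite, iread, or icmnd WQE template words are copied accordingly
 * and the matching per-queue statistics counter is bumped.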
 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			if (hdwq)
				hdwq->scsi_cstat.output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 7 */
			bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);

			fcp_cmnd->fcpCntl3 = READ_DATA;
			if (hdwq)
				hdwq->scsi_cstat.input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Word 7 */
		bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);

		fcp_cmnd->fcpCntl3 = 0;
		if (hdwq)
			hdwq->scsi_cstat.control_requests++;
	}

	/*
	 * Finish initializing those WQE fields that are independent
	 * of the request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 7 */
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);

	bf_set(wqe_class, &wqe->generic.wqe_com,
	       (pnode->nlp_fcp_info & 0x0f));

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	pwqeq->vport = vport;
	pwqeq->context1 = lpfc_cmd;
	pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
	pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;

	return 0;
}

/**
 * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to be sent.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes the fcp_cmnd and IOCB/WQE data structures from
 * the scsi command before it is issued to the device.
 **/
static int
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	u8 *ptr;

	if (!pnode)
		return 0;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);

	return 0;
}

/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
4872 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 4873 * @lun: Logical unit number. 4874 * @task_mgmt_cmd: SCSI task management command. 4875 * 4876 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 4877 * for device with SLI-3 interface spec. 4878 * 4879 * Return codes: 4880 * 0 - Error 4881 * 1 - Success 4882 **/ 4883 static int 4884 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 4885 struct lpfc_io_buf *lpfc_cmd, 4886 uint64_t lun, 4887 uint8_t task_mgmt_cmd) 4888 { 4889 struct lpfc_iocbq *piocbq; 4890 IOCB_t *piocb; 4891 struct fcp_cmnd *fcp_cmnd; 4892 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4893 struct lpfc_nodelist *ndlp = rdata->pnode; 4894 4895 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) 4896 return 0; 4897 4898 piocbq = &(lpfc_cmd->cur_iocbq); 4899 piocbq->vport = vport; 4900 4901 piocb = &piocbq->iocb; 4902 4903 fcp_cmnd = lpfc_cmd->fcp_cmnd; 4904 /* Clear out any old data in the FCP command area */ 4905 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 4906 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 4907 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 4908 if (vport->phba->sli_rev == 3 && 4909 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 4910 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 4911 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 4912 piocb->ulpContext = ndlp->nlp_rpi; 4913 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 4914 piocb->ulpContext = 4915 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 4916 } 4917 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; 4918 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 4919 piocb->ulpPU = 0; 4920 piocb->un.fcpi.fcpi_parm = 0; 4921 4922 /* ulpTimeout is only one byte */ 4923 if (lpfc_cmd->timeout > 0xff) { 4924 /* 4925 * Do not timeout the command at the firmware level. 4926 * The driver will provide the timeout mechanism. 4927 */ 4928 piocb->ulpTimeout = 0; 4929 } else 4930 piocb->ulpTimeout = lpfc_cmd->timeout; 4931 4932 if (vport->phba->sli_rev == LPFC_SLI_REV4) 4933 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd); 4934 4935 return 1; 4936 } 4937 4938 /** 4939 * lpfc_scsi_api_table_setup - Set up scsi api function jump table 4940 * @phba: The hba struct for which this call is being executed. 4941 * @dev_grp: The HBA PCI-Device group number. 4942 * 4943 * This routine sets up the SCSI interface API function jump table in @phba 4944 * struct. 4945 * Returns: 0 - success, -ENODEV - failure. 
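 * The LPFC_PCI_DEV_LP group is wired to the SLI-3 (_s3) routines and the
 * LPFC_PCI_DEV_OC group to the SLI-4 (_s4) routines.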
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}

/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for device reset and target
 * reset. It releases the scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
 *                             if issuing a pci_bus_reset is possibly unsafe
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * Walks the bus_list to ensure that only PCI devices with the Emulex
 * vendor ID, device IDs that support hot reset, and at most one
 * occurrence of function 0 are present.
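 * Any other device found on the bus fails the check, since a PCI bus
 * reset would then be considered unsafe.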
 *
 * Returns:
 *	-EBADSLT, detected invalid device
 *	0, successful
 */
int
lpfc_check_pci_resettable(struct lpfc_hba *phba)
{
	const struct pci_dev *pdev = phba->pcidev;
	struct pci_dev *ptr = NULL;
	u8 counter = 0;

	/* Walk the list of devices on the pci_dev's bus */
	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
		/* Check for Emulex Vendor ID */
		if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"8346 Non-Emulex vendor found: "
					"0x%04x\n", ptr->vendor);
			return -EBADSLT;
		}

		/* Check for valid Emulex Device ID */
		switch (ptr->device) {
		case PCI_DEVICE_ID_LANCER_FC:
		case PCI_DEVICE_ID_LANCER_G6_FC:
		case PCI_DEVICE_ID_LANCER_G7_FC:
			break;
		default:
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"8347 Incapable PCI reset device: "
					"0x%04x\n", ptr->device);
			return -EBADSLT;
		}

		/* Check for only one function 0 ID to ensure only one HBA on
		 * secondary bus
		 */
		if (ptr->devfn == 0) {
			if (++counter > 1) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"8348 More than one device on "
						"secondary bus found\n");
				return -EBADSLT;
			}
		}
	}

	return 0;
}

/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about the hba.
 *
 * Return code:
 *	Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int link_speed = 0;
	static char lpfcinfobuf[384];
	char tmp[384] = {0};

	memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
	if (phba && phba->pcidev) {
		/* Model Description */
		scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc);
		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
		    sizeof(lpfcinfobuf))
			goto buffer_done;

		/* PCI Info */
		scnprintf(tmp, sizeof(tmp),
			  " on PCI bus %02x device %02x irq %d",
			  phba->pcidev->bus->number, phba->pcidev->devfn,
			  phba->pcidev->irq);
		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
		    sizeof(lpfcinfobuf))
			goto buffer_done;

		/* Port Number */
		if (phba->Port[0]) {
			scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
			    sizeof(lpfcinfobuf))
				goto buffer_done;
		}

		/* Link Speed */
		link_speed = lpfc_sli_port_speed_get(phba);
		if (link_speed != 0) {
			scnprintf(tmp, sizeof(tmp),
				  " Logical Link Speed: %d Mbps", link_speed);
			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
			    sizeof(lpfcinfobuf))
				goto buffer_done;
		}

		/* PCI resettable */
		if (!lpfc_check_pci_resettable(phba)) {
			scnprintf(tmp, sizeof(tmp), " PCI resettable");
			strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
		}
	}

buffer_done:
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
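 * The timer is only rearmed while the FCP ring txcmplq still has
 * outstanding commands.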
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart polling timer
 * @t: Timer construct where lpfc_hba data structure pointer is obtained.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and FCP ring interrupts are disabled.
 **/
void lpfc_poll_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/*
 * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
 * @vport: The virtual port for which this call is being executed.
 * @hash: calculated hash value
 * @buf: uuid associated with the VE
 *
 * Returns the VMID entry associated with the UUID.
 * Make sure to acquire the appropriate lock before invoking this routine.
 */
struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
					       u32 hash, u8 *buf)
{
	struct lpfc_vmid *vmp;

	hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
		if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
			return vmp;
	}
	return NULL;
}

/*
 * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
 * @vport: The virtual port for which this call is being executed.
 * @hash: calculated hash value
 * @vmp: Pointer to a VMID entry representing a VM sending I/O
 *
 * This routine will insert the newly acquired VMID entity in the hash table.
 * Make sure to acquire the appropriate lock before invoking this routine.
 */
static void
lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
			   struct lpfc_vmid *vmp)
{
	hash_add(vport->hash_table, &vmp->hnode, hash);
}

/*
 * lpfc_vmid_hash_fn - create a hash value of the UUID
 * @vmid: uuid associated with the VE
 * @len: length of the VMID string
 *
 * Returns the calculated hash value.
 */
int lpfc_vmid_hash_fn(const char *vmid, int len)
{
	int c;
	int hash = 0;

	if (len == 0)
		return 0;
	while (len--) {
		c = *vmid++;
		if (c >= 'A' && c <= 'Z')
			c += 'a' - 'A';

		hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
			(c >> LPFC_VMID_HASH_SHIFT)) * 19;
	}

	return hash & LPFC_VMID_HASH_MASK;
}

/*
 * lpfc_vmid_update_entry - update the vmid entry in the hash table
 * @vport: The virtual port for which this call is being executed.
5236 * @cmd: address of scsi cmd descriptor 5237 * @vmp: Pointer to a VMID entry representing a VM sending I/O 5238 * @tag: VMID tag 5239 */ 5240 static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd 5241 *cmd, struct lpfc_vmid *vmp, 5242 union lpfc_vmid_io_tag *tag) 5243 { 5244 u64 *lta; 5245 5246 if (vport->vmid_priority_tagging) 5247 tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid; 5248 else 5249 tag->app_id = vmp->un.app_id; 5250 5251 if (cmd->sc_data_direction == DMA_TO_DEVICE) 5252 vmp->io_wr_cnt++; 5253 else 5254 vmp->io_rd_cnt++; 5255 5256 /* update the last access timestamp in the table */ 5257 lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id()); 5258 *lta = jiffies; 5259 } 5260 5261 static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, 5262 struct lpfc_vmid *vmid) 5263 { 5264 u32 hash; 5265 struct lpfc_vmid *pvmid; 5266 5267 if (vport->port_type == LPFC_PHYSICAL_PORT) { 5268 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); 5269 } else { 5270 hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len); 5271 pvmid = 5272 lpfc_get_vmid_from_hashtable(vport->phba->pport, hash, 5273 vmid->host_vmid); 5274 if (pvmid) 5275 vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid; 5276 else 5277 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); 5278 } 5279 } 5280 5281 /* 5282 * lpfc_vmid_get_appid - get the VMID associated with the UUID 5283 * @vport: The virtual port for which this call is being executed. 5284 * @uuid: UUID associated with the VE 5285 * @cmd: address of scsi_cmd descriptor 5286 * @tag: VMID tag 5287 * Returns status of the function 5288 */ 5289 static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct 5290 scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag) 5291 { 5292 struct lpfc_vmid *vmp = NULL; 5293 int hash, len, rc, i; 5294 5295 /* check if QFPA is complete */ 5296 if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag & 5297 LPFC_VMID_QFPA_CMPL)) { 5298 vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; 5299 return -EAGAIN; 5300 } 5301 5302 /* search if the UUID has already been mapped to the VMID */ 5303 len = strlen(uuid); 5304 hash = lpfc_vmid_hash_fn(uuid, len); 5305 5306 /* search for the VMID in the table */ 5307 read_lock(&vport->vmid_lock); 5308 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); 5309 5310 /* if found, check if its already registered */ 5311 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { 5312 read_unlock(&vport->vmid_lock); 5313 lpfc_vmid_update_entry(vport, cmd, vmp, tag); 5314 rc = 0; 5315 } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER || 5316 vmp->flag & LPFC_VMID_DE_REGISTER)) { 5317 /* else if register or dereg request has already been sent */ 5318 /* Hence VMID tag will not be added for this I/O */ 5319 read_unlock(&vport->vmid_lock); 5320 rc = -EBUSY; 5321 } else { 5322 /* The VMID was not found in the hashtable. 
At this point, */ 5323 /* drop the read lock first before proceeding further */ 5324 read_unlock(&vport->vmid_lock); 5325 /* start the process to obtain one as per the */ 5326 /* type of the VMID indicated */ 5327 write_lock(&vport->vmid_lock); 5328 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); 5329 5330 /* while the read lock was released, in case the entry was */ 5331 /* added by other context or is in process of being added */ 5332 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { 5333 lpfc_vmid_update_entry(vport, cmd, vmp, tag); 5334 write_unlock(&vport->vmid_lock); 5335 return 0; 5336 } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) { 5337 write_unlock(&vport->vmid_lock); 5338 return -EBUSY; 5339 } 5340 5341 /* else search and allocate a free slot in the hash table */ 5342 if (vport->cur_vmid_cnt < vport->max_vmid) { 5343 for (i = 0; i < vport->max_vmid; i++) { 5344 vmp = vport->vmid + i; 5345 if (vmp->flag == LPFC_VMID_SLOT_FREE) 5346 break; 5347 } 5348 if (i == vport->max_vmid) 5349 vmp = NULL; 5350 } else { 5351 vmp = NULL; 5352 } 5353 5354 if (!vmp) { 5355 write_unlock(&vport->vmid_lock); 5356 return -ENOMEM; 5357 } 5358 5359 /* Add the vmid and register */ 5360 lpfc_put_vmid_in_hashtable(vport, hash, vmp); 5361 vmp->vmid_len = len; 5362 memcpy(vmp->host_vmid, uuid, vmp->vmid_len); 5363 vmp->io_rd_cnt = 0; 5364 vmp->io_wr_cnt = 0; 5365 vmp->flag = LPFC_VMID_SLOT_USED; 5366 5367 vmp->delete_inactive = 5368 vport->vmid_inactivity_timeout ? 1 : 0; 5369 5370 /* if type priority tag, get next available VMID */ 5371 if (lpfc_vmid_is_type_priority_tag(vport)) 5372 lpfc_vmid_assign_cs_ctl(vport, vmp); 5373 5374 /* allocate the per cpu variable for holding */ 5375 /* the last access time stamp only if VMID is enabled */ 5376 if (!vmp->last_io_time) 5377 vmp->last_io_time = __alloc_percpu(sizeof(u64), 5378 __alignof__(struct 5379 lpfc_vmid)); 5380 if (!vmp->last_io_time) { 5381 hash_del(&vmp->hnode); 5382 vmp->flag = LPFC_VMID_SLOT_FREE; 5383 write_unlock(&vport->vmid_lock); 5384 return -EIO; 5385 } 5386 5387 write_unlock(&vport->vmid_lock); 5388 5389 /* complete transaction with switch */ 5390 if (lpfc_vmid_is_type_priority_tag(vport)) 5391 rc = lpfc_vmid_uvem(vport, vmp, true); 5392 else 5393 rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp); 5394 if (!rc) { 5395 write_lock(&vport->vmid_lock); 5396 vport->cur_vmid_cnt++; 5397 vmp->flag |= LPFC_VMID_REQ_REGISTER; 5398 write_unlock(&vport->vmid_lock); 5399 } else { 5400 write_lock(&vport->vmid_lock); 5401 hash_del(&vmp->hnode); 5402 vmp->flag = LPFC_VMID_SLOT_FREE; 5403 free_percpu(vmp->last_io_time); 5404 write_unlock(&vport->vmid_lock); 5405 return -EIO; 5406 } 5407 5408 /* finally, enable the idle timer once */ 5409 if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) { 5410 mod_timer(&vport->phba->inactive_vmid_poll, 5411 jiffies + 5412 msecs_to_jiffies(1000 * LPFC_VMID_TIMER)); 5413 vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD; 5414 } 5415 } 5416 return rc; 5417 } 5418 5419 /* 5420 * lpfc_is_command_vm_io - get the UUID from blk cgroup 5421 * @cmd: Pointer to scsi_cmnd data structure 5422 * Returns UUID if present, otherwise NULL 5423 */ 5424 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) 5425 { 5426 char *uuid = NULL; 5427 5428 if (cmd->request) { 5429 if (cmd->request->bio) 5430 uuid = blkcg_get_fc_appid(cmd->request->bio); 5431 } 5432 return uuid; 5433 } 5434 5435 /** 5436 * lpfc_queuecommand - scsi_host_template queuecommand entry point 5437 * @shost: kernel scsi host pointer. 
5438 * @cmnd: Pointer to scsi_cmnd data structure. 5439 * 5440 * Driver registers this routine to scsi midlayer to submit a @cmd to process. 5441 * This routine prepares an IOCB from scsi command and provides to firmware. 5442 * The @done callback is invoked after driver finished processing the command. 5443 * 5444 * Return value : 5445 * 0 - Success 5446 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. 5447 **/ 5448 static int 5449 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 5450 { 5451 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5452 struct lpfc_hba *phba = vport->phba; 5453 struct lpfc_rport_data *rdata; 5454 struct lpfc_nodelist *ndlp; 5455 struct lpfc_io_buf *lpfc_cmd; 5456 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5457 int err, idx; 5458 u8 *uuid = NULL; 5459 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5460 uint64_t start = 0L; 5461 5462 if (phba->ktime_on) 5463 start = ktime_get_ns(); 5464 #endif 5465 5466 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5467 5468 /* sanity check on references */ 5469 if (unlikely(!rdata) || unlikely(!rport)) 5470 goto out_fail_command; 5471 5472 err = fc_remote_port_chkready(rport); 5473 if (err) { 5474 cmnd->result = err; 5475 goto out_fail_command; 5476 } 5477 ndlp = rdata->pnode; 5478 5479 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && 5480 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { 5481 5482 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5483 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" 5484 " op:%02x str=%s without registering for" 5485 " BlockGuard - Rejecting command\n", 5486 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 5487 dif_op_str[scsi_get_prot_op(cmnd)]); 5488 goto out_fail_command; 5489 } 5490 5491 /* 5492 * Catch race where our node has transitioned, but the 5493 * transport is still transitioning. 5494 */ 5495 if (!ndlp) 5496 goto out_tgt_busy; 5497 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 5498 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { 5499 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5500 "3377 Target Queue Full, scsi Id:%d " 5501 "Qdepth:%d Pending command:%d" 5502 " WWNN:%02x:%02x:%02x:%02x:" 5503 "%02x:%02x:%02x:%02x, " 5504 " WWPN:%02x:%02x:%02x:%02x:" 5505 "%02x:%02x:%02x:%02x", 5506 ndlp->nlp_sid, ndlp->cmd_qdepth, 5507 atomic_read(&ndlp->cmd_pending), 5508 ndlp->nlp_nodename.u.wwn[0], 5509 ndlp->nlp_nodename.u.wwn[1], 5510 ndlp->nlp_nodename.u.wwn[2], 5511 ndlp->nlp_nodename.u.wwn[3], 5512 ndlp->nlp_nodename.u.wwn[4], 5513 ndlp->nlp_nodename.u.wwn[5], 5514 ndlp->nlp_nodename.u.wwn[6], 5515 ndlp->nlp_nodename.u.wwn[7], 5516 ndlp->nlp_portname.u.wwn[0], 5517 ndlp->nlp_portname.u.wwn[1], 5518 ndlp->nlp_portname.u.wwn[2], 5519 ndlp->nlp_portname.u.wwn[3], 5520 ndlp->nlp_portname.u.wwn[4], 5521 ndlp->nlp_portname.u.wwn[5], 5522 ndlp->nlp_portname.u.wwn[6], 5523 ndlp->nlp_portname.u.wwn[7]); 5524 goto out_tgt_busy; 5525 } 5526 } 5527 5528 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); 5529 if (lpfc_cmd == NULL) { 5530 lpfc_rampdown_queue_depth(phba); 5531 5532 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5533 "0707 driver's buffer pool is empty, " 5534 "IO busied\n"); 5535 goto out_host_busy; 5536 } 5537 5538 /* 5539 * Store the midlayer's command structure for the completion phase 5540 * and complete the command initialization. 
5541 */ 5542 lpfc_cmd->pCmd = cmnd; 5543 lpfc_cmd->rdata = rdata; 5544 lpfc_cmd->ndlp = ndlp; 5545 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 5546 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 5547 5548 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 5549 if (err) 5550 goto out_host_busy_release_buf; 5551 5552 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 5553 if (vport->phba->cfg_enable_bg) { 5554 lpfc_printf_vlog(vport, 5555 KERN_INFO, LOG_SCSI_CMD, 5556 "9033 BLKGRD: rcvd %s cmd:x%x " 5557 "reftag x%x cnt %u pt %x\n", 5558 dif_op_str[scsi_get_prot_op(cmnd)], 5559 cmnd->cmnd[0], 5560 t10_pi_ref_tag(cmnd->request), 5561 blk_rq_sectors(cmnd->request), 5562 (cmnd->cmnd[1]>>5)); 5563 } 5564 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 5565 } else { 5566 if (vport->phba->cfg_enable_bg) { 5567 lpfc_printf_vlog(vport, 5568 KERN_INFO, LOG_SCSI_CMD, 5569 "9038 BLKGRD: rcvd PROT_NORMAL cmd: " 5570 "x%x reftag x%x cnt %u pt %x\n", 5571 cmnd->cmnd[0], 5572 t10_pi_ref_tag(cmnd->request), 5573 blk_rq_sectors(cmnd->request), 5574 (cmnd->cmnd[1]>>5)); 5575 } 5576 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 5577 } 5578 5579 if (unlikely(err)) { 5580 if (err == 2) { 5581 cmnd->result = DID_ERROR << 16; 5582 goto out_fail_command_release_buf; 5583 } 5584 goto out_host_busy_free_buf; 5585 } 5586 5587 5588 /* check the necessary and sufficient condition to support VMID */ 5589 if (lpfc_is_vmid_enabled(phba) && 5590 (ndlp->vmid_support || 5591 phba->pport->vmid_priority_tagging == 5592 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { 5593 /* is the I/O generated by a VM, get the associated virtual */ 5594 /* entity id */ 5595 uuid = lpfc_is_command_vm_io(cmnd); 5596 5597 if (uuid) { 5598 err = lpfc_vmid_get_appid(vport, uuid, cmnd, 5599 (union lpfc_vmid_io_tag *) 5600 &lpfc_cmd->cur_iocbq.vmid_tag); 5601 if (!err) 5602 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID; 5603 } 5604 } 5605 5606 atomic_inc(&ndlp->cmd_pending); 5607 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5608 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 5609 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 5610 #endif 5611 /* Issue I/O to adapter */ 5612 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, 5613 &lpfc_cmd->cur_iocbq, 5614 SLI_IOCB_RET_IOCB); 5615 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5616 if (start) { 5617 lpfc_cmd->ts_cmd_start = start; 5618 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; 5619 lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); 5620 } else { 5621 lpfc_cmd->ts_cmd_start = 0; 5622 } 5623 #endif 5624 if (err) { 5625 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5626 "3376 FCP could not issue IOCB err %x " 5627 "FCP cmd x%x <%d/%llu> " 5628 "sid: x%x did: x%x oxid: x%x " 5629 "Data: x%x x%x x%x x%x\n", 5630 err, cmnd->cmnd[0], 5631 cmnd->device ? cmnd->device->id : 0xffff, 5632 cmnd->device ? cmnd->device->lun : (u64)-1, 5633 vport->fc_myDID, ndlp->nlp_DID, 5634 phba->sli_rev == LPFC_SLI_REV4 ? 5635 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 5636 phba->sli_rev == LPFC_SLI_REV4 ? 5637 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] : 5638 lpfc_cmd->cur_iocbq.iocb.ulpContext, 5639 lpfc_cmd->cur_iocbq.iotag, 5640 phba->sli_rev == LPFC_SLI_REV4 ? 
5641 bf_get(wqe_tmo, 5642 &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) : 5643 lpfc_cmd->cur_iocbq.iocb.ulpTimeout, 5644 (uint32_t) 5645 (cmnd->request->timeout / 1000)); 5646 5647 goto out_host_busy_free_buf; 5648 } 5649 5650 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5651 lpfc_sli_handle_fast_ring_event(phba, 5652 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5653 5654 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5655 lpfc_poll_rearm_timer(phba); 5656 } 5657 5658 if (phba->cfg_xri_rebalancing) 5659 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); 5660 5661 return 0; 5662 5663 out_host_busy_free_buf: 5664 idx = lpfc_cmd->hdwq_no; 5665 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 5666 if (phba->sli4_hba.hdwq) { 5667 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { 5668 case WRITE_DATA: 5669 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; 5670 break; 5671 case READ_DATA: 5672 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; 5673 break; 5674 default: 5675 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; 5676 } 5677 } 5678 out_host_busy_release_buf: 5679 lpfc_release_scsi_buf(phba, lpfc_cmd); 5680 out_host_busy: 5681 return SCSI_MLQUEUE_HOST_BUSY; 5682 5683 out_tgt_busy: 5684 return SCSI_MLQUEUE_TARGET_BUSY; 5685 5686 out_fail_command_release_buf: 5687 lpfc_release_scsi_buf(phba, lpfc_cmd); 5688 5689 out_fail_command: 5690 cmnd->scsi_done(cmnd); 5691 return 0; 5692 } 5693 5694 /* 5695 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport 5696 * @vport: The virtual port for which this call is being executed. 5697 */ 5698 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) 5699 { 5700 u32 bucket; 5701 struct lpfc_vmid *cur; 5702 5703 if (vport->port_type == LPFC_PHYSICAL_PORT) 5704 del_timer_sync(&vport->phba->inactive_vmid_poll); 5705 5706 kfree(vport->qfpa_res); 5707 kfree(vport->vmid_priority.vmid_range); 5708 kfree(vport->vmid); 5709 5710 if (!hash_empty(vport->hash_table)) 5711 hash_for_each(vport->hash_table, bucket, cur, hnode) 5712 hash_del(&cur->hnode); 5713 5714 vport->qfpa_res = NULL; 5715 vport->vmid_priority.vmid_range = NULL; 5716 vport->vmid = NULL; 5717 vport->cur_vmid_cnt = 0; 5718 } 5719 5720 /** 5721 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 5722 * @cmnd: Pointer to scsi_cmnd data structure. 5723 * 5724 * This routine aborts @cmnd pending in base driver. 
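 * The lpfc_io_buf is looked up from cmnd->host_scribble, an abort is issued
 * for the outstanding IOCB/WQE, and the handler then waits on a local
 * waitqueue (bounded by 2 * devloss_tmo) for the I/O to complete.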
5725 * 5726 * Return code : 5727 * 0x2003 - Error 5728 * 0x2002 - Success 5729 **/ 5730 static int 5731 lpfc_abort_handler(struct scsi_cmnd *cmnd) 5732 { 5733 struct Scsi_Host *shost = cmnd->device->host; 5734 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5735 struct lpfc_hba *phba = vport->phba; 5736 struct lpfc_iocbq *iocb; 5737 struct lpfc_io_buf *lpfc_cmd; 5738 int ret = SUCCESS, status = 0; 5739 struct lpfc_sli_ring *pring_s4 = NULL; 5740 struct lpfc_sli_ring *pring = NULL; 5741 int ret_val; 5742 unsigned long flags; 5743 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 5744 5745 status = fc_block_scsi_eh(cmnd); 5746 if (status != 0 && status != SUCCESS) 5747 return status; 5748 5749 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; 5750 if (!lpfc_cmd) 5751 return ret; 5752 5753 spin_lock_irqsave(&phba->hbalock, flags); 5754 /* driver queued commands are in process of being flushed */ 5755 if (phba->hba_flag & HBA_IOQ_FLUSH) { 5756 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5757 "3168 SCSI Layer abort requested I/O has been " 5758 "flushed by LLD.\n"); 5759 ret = FAILED; 5760 goto out_unlock; 5761 } 5762 5763 /* Guard against IO completion being called at same time */ 5764 spin_lock(&lpfc_cmd->buf_lock); 5765 5766 if (!lpfc_cmd->pCmd) { 5767 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5768 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 5769 "x%x ID %d LUN %llu\n", 5770 SUCCESS, cmnd->device->id, cmnd->device->lun); 5771 goto out_unlock_buf; 5772 } 5773 5774 iocb = &lpfc_cmd->cur_iocbq; 5775 if (phba->sli_rev == LPFC_SLI_REV4) { 5776 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; 5777 if (!pring_s4) { 5778 ret = FAILED; 5779 goto out_unlock_buf; 5780 } 5781 spin_lock(&pring_s4->ring_lock); 5782 } 5783 /* the command is in process of being cancelled */ 5784 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 5785 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5786 "3169 SCSI Layer abort requested I/O has been " 5787 "cancelled by LLD.\n"); 5788 ret = FAILED; 5789 goto out_unlock_ring; 5790 } 5791 /* 5792 * If pCmd field of the corresponding lpfc_io_buf structure 5793 * points to a different SCSI command, then the driver has 5794 * already completed this command, but the midlayer did not 5795 * see the completion before the eh fired. Just return SUCCESS. 5796 */ 5797 if (lpfc_cmd->pCmd != cmnd) { 5798 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5799 "3170 SCSI Layer abort requested I/O has been " 5800 "completed by LLD.\n"); 5801 goto out_unlock_ring; 5802 } 5803 5804 BUG_ON(iocb->context1 != lpfc_cmd); 5805 5806 /* abort issued in recovery is still in progress */ 5807 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { 5808 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5809 "3389 SCSI Layer I/O Abort Request is pending\n"); 5810 if (phba->sli_rev == LPFC_SLI_REV4) 5811 spin_unlock(&pring_s4->ring_lock); 5812 spin_unlock(&lpfc_cmd->buf_lock); 5813 spin_unlock_irqrestore(&phba->hbalock, flags); 5814 goto wait_for_cmpl; 5815 } 5816 5817 lpfc_cmd->waitq = &waitq; 5818 if (phba->sli_rev == LPFC_SLI_REV4) { 5819 spin_unlock(&pring_s4->ring_lock); 5820 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb, 5821 lpfc_sli4_abort_fcp_cmpl); 5822 } else { 5823 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 5824 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, 5825 lpfc_sli_abort_fcp_cmpl); 5826 } 5827 5828 /* Make sure HBA is alive */ 5829 lpfc_issue_hb_tmo(phba); 5830 5831 if (ret_val != IOCB_SUCCESS) { 5832 /* Indicate the IO is not being aborted by the driver. 
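 * Clearing waitq keeps the completion path from waking the on-stack
 * waitqueue after this handler has given up on the abort.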
*/ 5833 lpfc_cmd->waitq = NULL; 5834 spin_unlock(&lpfc_cmd->buf_lock); 5835 spin_unlock_irqrestore(&phba->hbalock, flags); 5836 ret = FAILED; 5837 goto out; 5838 } 5839 5840 /* no longer need the lock after this point */ 5841 spin_unlock(&lpfc_cmd->buf_lock); 5842 spin_unlock_irqrestore(&phba->hbalock, flags); 5843 5844 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5845 lpfc_sli_handle_fast_ring_event(phba, 5846 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5847 5848 wait_for_cmpl: 5849 /* 5850 * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait 5851 * for abort to complete. 5852 */ 5853 wait_event_timeout(waitq, 5854 (lpfc_cmd->pCmd != cmnd), 5855 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); 5856 5857 spin_lock(&lpfc_cmd->buf_lock); 5858 5859 if (lpfc_cmd->pCmd == cmnd) { 5860 ret = FAILED; 5861 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5862 "0748 abort handler timed out waiting " 5863 "for aborting I/O (xri:x%x) to complete: " 5864 "ret %#x, ID %d, LUN %llu\n", 5865 iocb->sli4_xritag, ret, 5866 cmnd->device->id, cmnd->device->lun); 5867 } 5868 5869 lpfc_cmd->waitq = NULL; 5870 5871 spin_unlock(&lpfc_cmd->buf_lock); 5872 goto out; 5873 5874 out_unlock_ring: 5875 if (phba->sli_rev == LPFC_SLI_REV4) 5876 spin_unlock(&pring_s4->ring_lock); 5877 out_unlock_buf: 5878 spin_unlock(&lpfc_cmd->buf_lock); 5879 out_unlock: 5880 spin_unlock_irqrestore(&phba->hbalock, flags); 5881 out: 5882 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5883 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 5884 "LUN %llu\n", ret, cmnd->device->id, 5885 cmnd->device->lun); 5886 return ret; 5887 } 5888 5889 static char * 5890 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) 5891 { 5892 switch (task_mgmt_cmd) { 5893 case FCP_ABORT_TASK_SET: 5894 return "ABORT_TASK_SET"; 5895 case FCP_CLEAR_TASK_SET: 5896 return "FCP_CLEAR_TASK_SET"; 5897 case FCP_BUS_RESET: 5898 return "FCP_BUS_RESET"; 5899 case FCP_LUN_RESET: 5900 return "FCP_LUN_RESET"; 5901 case FCP_TARGET_RESET: 5902 return "FCP_TARGET_RESET"; 5903 case FCP_CLEAR_ACA: 5904 return "FCP_CLEAR_ACA"; 5905 case FCP_TERMINATE_TASK: 5906 return "FCP_TERMINATE_TASK"; 5907 default: 5908 return "unknown"; 5909 } 5910 } 5911 5912 5913 /** 5914 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed 5915 * @vport: The virtual port for which this call is being executed. 5916 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 5917 * 5918 * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded 5919 * 5920 * Return code : 5921 * 0x2003 - Error 5922 * 0x2002 - Success 5923 **/ 5924 static int 5925 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) 5926 { 5927 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 5928 uint32_t rsp_info; 5929 uint32_t rsp_len; 5930 uint8_t rsp_info_code; 5931 int ret = FAILED; 5932 5933 5934 if (fcprsp == NULL) 5935 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5936 "0703 fcp_rsp is missing\n"); 5937 else { 5938 rsp_info = fcprsp->rspStatus2; 5939 rsp_len = be32_to_cpu(fcprsp->rspRspLen); 5940 rsp_info_code = fcprsp->rspInfo3; 5941 5942 5943 lpfc_printf_vlog(vport, KERN_INFO, 5944 LOG_FCP, 5945 "0706 fcp_rsp valid 0x%x," 5946 " rsp len=%d code 0x%x\n", 5947 rsp_info, 5948 rsp_len, rsp_info_code); 5949 5950 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN 5951 * field specifies the number of valid bytes of FCP_RSP_INFO. 
5952 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08 5953 */ 5954 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && 5955 ((rsp_len == 8) || (rsp_len == 4))) { 5956 switch (rsp_info_code) { 5957 case RSP_NO_FAILURE: 5958 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5959 "0715 Task Mgmt No Failure\n"); 5960 ret = SUCCESS; 5961 break; 5962 case RSP_TM_NOT_SUPPORTED: /* TM rejected */ 5963 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5964 "0716 Task Mgmt Target " 5965 "reject\n"); 5966 break; 5967 case RSP_TM_NOT_COMPLETED: /* TM failed */ 5968 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5969 "0717 Task Mgmt Target " 5970 "failed TM\n"); 5971 break; 5972 case RSP_TM_INVALID_LU: /* TM to invalid LU! */ 5973 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5974 "0718 Task Mgmt to invalid " 5975 "LUN\n"); 5976 break; 5977 } 5978 } 5979 } 5980 return ret; 5981 } 5982 5983 5984 /** 5985 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler 5986 * @vport: The virtual port for which this call is being executed. 5987 * @cmnd: Pointer to scsi_cmnd data structure. 5988 * @tgt_id: Target ID of remote device. 5989 * @lun_id: Lun number for the TMF 5990 * @task_mgmt_cmd: type of TMF to send 5991 * 5992 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to 5993 * a remote port. 5994 * 5995 * Return Code: 5996 * 0x2003 - Error 5997 * 0x2002 - Success. 5998 **/ 5999 static int 6000 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, 6001 unsigned int tgt_id, uint64_t lun_id, 6002 uint8_t task_mgmt_cmd) 6003 { 6004 struct lpfc_hba *phba = vport->phba; 6005 struct lpfc_io_buf *lpfc_cmd; 6006 struct lpfc_iocbq *iocbq; 6007 struct lpfc_iocbq *iocbqrsp; 6008 struct lpfc_rport_data *rdata; 6009 struct lpfc_nodelist *pnode; 6010 int ret; 6011 int status; 6012 6013 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6014 if (!rdata || !rdata->pnode) 6015 return FAILED; 6016 pnode = rdata->pnode; 6017 6018 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL); 6019 if (lpfc_cmd == NULL) 6020 return FAILED; 6021 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; 6022 lpfc_cmd->rdata = rdata; 6023 lpfc_cmd->pCmd = cmnd; 6024 lpfc_cmd->ndlp = pnode; 6025 6026 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 6027 task_mgmt_cmd); 6028 if (!status) { 6029 lpfc_release_scsi_buf(phba, lpfc_cmd); 6030 return FAILED; 6031 } 6032 6033 iocbq = &lpfc_cmd->cur_iocbq; 6034 iocbqrsp = lpfc_sli_get_iocbq(phba); 6035 if (iocbqrsp == NULL) { 6036 lpfc_release_scsi_buf(phba, lpfc_cmd); 6037 return FAILED; 6038 } 6039 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 6040 6041 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6042 "0702 Issue %s to TGT %d LUN %llu " 6043 "rpi x%x nlp_flag x%x Data: x%x x%x\n", 6044 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 6045 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, 6046 iocbq->iocb_flag); 6047 6048 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 6049 iocbq, iocbqrsp, lpfc_cmd->timeout); 6050 if ((status != IOCB_SUCCESS) || 6051 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { 6052 if (status != IOCB_SUCCESS || 6053 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR) 6054 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6055 "0727 TMF %s to TGT %d LUN %llu " 6056 "failed (%d, %d) iocb_flag x%x\n", 6057 lpfc_taskmgmt_name(task_mgmt_cmd), 6058 tgt_id, lun_id, 6059 iocbqrsp->iocb.ulpStatus, 6060 iocbqrsp->iocb.un.ulpWord[4], 6061 iocbq->iocb_flag); 6062 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ 6063 if (status == IOCB_SUCCESS) { 6064 if 
(iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 6065 /* Something in the FCP_RSP was invalid. 6066 * Check conditions */ 6067 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); 6068 else 6069 ret = FAILED; 6070 } else if (status == IOCB_TIMEDOUT) { 6071 ret = TIMEOUT_ERROR; 6072 } else { 6073 ret = FAILED; 6074 } 6075 } else 6076 ret = SUCCESS; 6077 6078 lpfc_sli_release_iocbq(phba, iocbqrsp); 6079 6080 if (ret != TIMEOUT_ERROR) 6081 lpfc_release_scsi_buf(phba, lpfc_cmd); 6082 6083 return ret; 6084 } 6085 6086 /** 6087 * lpfc_chk_tgt_mapped - 6088 * @vport: The virtual port to check on 6089 * @cmnd: Pointer to scsi_cmnd data structure. 6090 * 6091 * This routine delays until the scsi target (aka rport) for the 6092 * command exists (is present and logged in) or we declare it non-existent. 6093 * 6094 * Return code : 6095 * 0x2003 - Error 6096 * 0x2002 - Success 6097 **/ 6098 static int 6099 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 6100 { 6101 struct lpfc_rport_data *rdata; 6102 struct lpfc_nodelist *pnode; 6103 unsigned long later; 6104 6105 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6106 if (!rdata) { 6107 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6108 "0797 Tgt Map rport failure: rdata x%px\n", rdata); 6109 return FAILED; 6110 } 6111 pnode = rdata->pnode; 6112 /* 6113 * If target is not in a MAPPED state, delay until 6114 * target is rediscovered or devloss timeout expires. 6115 */ 6116 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 6117 while (time_after(later, jiffies)) { 6118 if (!pnode) 6119 return FAILED; 6120 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 6121 return SUCCESS; 6122 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 6123 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6124 if (!rdata) 6125 return FAILED; 6126 pnode = rdata->pnode; 6127 } 6128 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 6129 return FAILED; 6130 return SUCCESS; 6131 } 6132 6133 /** 6134 * lpfc_reset_flush_io_context - 6135 * @vport: The virtual port (scsi_host) for the flush context 6136 * @tgt_id: If aborting by Target contect - specifies the target id 6137 * @lun_id: If aborting by Lun context - specifies the lun id 6138 * @context: specifies the context level to flush at. 6139 * 6140 * After a reset condition via TMF, we need to flush orphaned i/o 6141 * contexts from the adapter. This routine aborts any contexts 6142 * outstanding, then waits for their completions. The wait is 6143 * bounded by devloss_tmo though. 6144 * 6145 * Return code : 6146 * 0x2003 - Error 6147 * 0x2002 - Success 6148 **/ 6149 static int 6150 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, 6151 uint64_t lun_id, lpfc_ctx_cmd context) 6152 { 6153 struct lpfc_hba *phba = vport->phba; 6154 unsigned long later; 6155 int cnt; 6156 6157 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 6158 if (cnt) 6159 lpfc_sli_abort_taskmgmt(vport, 6160 &phba->sli.sli3_ring[LPFC_FCP_RING], 6161 tgt_id, lun_id, context); 6162 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 6163 while (time_after(later, jiffies) && cnt) { 6164 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 6165 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 6166 } 6167 if (cnt) { 6168 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6169 "0724 I/O flush failure for context %s : cnt x%x\n", 6170 ((context == LPFC_CTX_LUN) ? "LUN" : 6171 ((context == LPFC_CTX_TGT) ? "TGT" : 6172 ((context == LPFC_CTX_HOST) ? 
"HOST" : "Unknown"))), 6173 cnt); 6174 return FAILED; 6175 } 6176 return SUCCESS; 6177 } 6178 6179 /** 6180 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point 6181 * @cmnd: Pointer to scsi_cmnd data structure. 6182 * 6183 * This routine does a device reset by sending a LUN_RESET task management 6184 * command. 6185 * 6186 * Return code : 6187 * 0x2003 - Error 6188 * 0x2002 - Success 6189 **/ 6190 static int 6191 lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 6192 { 6193 struct Scsi_Host *shost = cmnd->device->host; 6194 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6195 struct lpfc_rport_data *rdata; 6196 struct lpfc_nodelist *pnode; 6197 unsigned tgt_id = cmnd->device->id; 6198 uint64_t lun_id = cmnd->device->lun; 6199 struct lpfc_scsi_event_header scsi_event; 6200 int status; 6201 u32 logit = LOG_FCP; 6202 6203 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6204 if (!rdata || !rdata->pnode) { 6205 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6206 "0798 Device Reset rdata failure: rdata x%px\n", 6207 rdata); 6208 return FAILED; 6209 } 6210 pnode = rdata->pnode; 6211 status = fc_block_scsi_eh(cmnd); 6212 if (status != 0 && status != SUCCESS) 6213 return status; 6214 6215 status = lpfc_chk_tgt_mapped(vport, cmnd); 6216 if (status == FAILED) { 6217 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6218 "0721 Device Reset rport failure: rdata x%px\n", rdata); 6219 return FAILED; 6220 } 6221 6222 scsi_event.event_type = FC_REG_SCSI_EVENT; 6223 scsi_event.subcategory = LPFC_EVENT_LUNRESET; 6224 scsi_event.lun = lun_id; 6225 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 6226 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 6227 6228 fc_host_post_vendor_event(shost, fc_get_event_number(), 6229 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6230 6231 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 6232 FCP_LUN_RESET); 6233 if (status != SUCCESS) 6234 logit = LOG_TRACE_EVENT; 6235 6236 lpfc_printf_vlog(vport, KERN_ERR, logit, 6237 "0713 SCSI layer issued Device Reset (%d, %llu) " 6238 "return x%x\n", tgt_id, lun_id, status); 6239 6240 /* 6241 * We have to clean up i/o as : they may be orphaned by the TMF; 6242 * or if the TMF failed, they may be in an indeterminate state. 6243 * So, continue on. 6244 * We will report success if all the i/o aborts successfully. 6245 */ 6246 if (status == SUCCESS) 6247 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6248 LPFC_CTX_LUN); 6249 6250 return status; 6251 } 6252 6253 /** 6254 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point 6255 * @cmnd: Pointer to scsi_cmnd data structure. 6256 * 6257 * This routine does a target reset by sending a TARGET_RESET task management 6258 * command. 
6259 * 6260 * Return code : 6261 * 0x2003 - Error 6262 * 0x2002 - Success 6263 **/ 6264 static int 6265 lpfc_target_reset_handler(struct scsi_cmnd *cmnd) 6266 { 6267 struct Scsi_Host *shost = cmnd->device->host; 6268 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6269 struct lpfc_rport_data *rdata; 6270 struct lpfc_nodelist *pnode; 6271 unsigned tgt_id = cmnd->device->id; 6272 uint64_t lun_id = cmnd->device->lun; 6273 struct lpfc_scsi_event_header scsi_event; 6274 int status; 6275 u32 logit = LOG_FCP; 6276 unsigned long flags; 6277 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 6278 6279 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6280 if (!rdata || !rdata->pnode) { 6281 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6282 "0799 Target Reset rdata failure: rdata x%px\n", 6283 rdata); 6284 return FAILED; 6285 } 6286 pnode = rdata->pnode; 6287 status = fc_block_scsi_eh(cmnd); 6288 if (status != 0 && status != SUCCESS) 6289 return status; 6290 6291 status = lpfc_chk_tgt_mapped(vport, cmnd); 6292 if (status == FAILED) { 6293 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6294 "0722 Target Reset rport failure: rdata x%px\n", rdata); 6295 if (pnode) { 6296 spin_lock_irqsave(&pnode->lock, flags); 6297 pnode->nlp_flag &= ~NLP_NPR_ADISC; 6298 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6299 spin_unlock_irqrestore(&pnode->lock, flags); 6300 } 6301 lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6302 LPFC_CTX_TGT); 6303 return FAST_IO_FAIL; 6304 } 6305 6306 scsi_event.event_type = FC_REG_SCSI_EVENT; 6307 scsi_event.subcategory = LPFC_EVENT_TGTRESET; 6308 scsi_event.lun = 0; 6309 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 6310 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 6311 6312 fc_host_post_vendor_event(shost, fc_get_event_number(), 6313 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6314 6315 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 6316 FCP_TARGET_RESET); 6317 if (status != SUCCESS) 6318 logit = LOG_TRACE_EVENT; 6319 spin_lock_irqsave(&pnode->lock, flags); 6320 if (status != SUCCESS && 6321 (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) && 6322 !pnode->logo_waitq) { 6323 pnode->logo_waitq = &waitq; 6324 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6325 pnode->nlp_flag |= NLP_ISSUE_LOGO; 6326 pnode->upcall_flags |= NLP_WAIT_FOR_LOGO; 6327 spin_unlock_irqrestore(&pnode->lock, flags); 6328 lpfc_unreg_rpi(vport, pnode); 6329 wait_event_timeout(waitq, 6330 (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)), 6331 msecs_to_jiffies(vport->cfg_devloss_tmo * 6332 1000)); 6333 6334 if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) { 6335 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6336 "0725 SCSI layer TGTRST failed & LOGO TMO " 6337 " (%d, %llu) return x%x\n", tgt_id, 6338 lun_id, status); 6339 spin_lock_irqsave(&pnode->lock, flags); 6340 pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO; 6341 } else { 6342 spin_lock_irqsave(&pnode->lock, flags); 6343 } 6344 pnode->logo_waitq = NULL; 6345 spin_unlock_irqrestore(&pnode->lock, flags); 6346 status = SUCCESS; 6347 } else { 6348 status = FAILED; 6349 spin_unlock_irqrestore(&pnode->lock, flags); 6350 } 6351 6352 lpfc_printf_vlog(vport, KERN_ERR, logit, 6353 "0723 SCSI layer issued Target Reset (%d, %llu) " 6354 "return x%x\n", tgt_id, lun_id, status); 6355 6356 /* 6357 * We have to clean up i/o as : they may be orphaned by the TMF; 6358 * or if the TMF failed, they may be in an indeterminate state. 6359 * So, continue on. 
6360 * We will report success if all the i/o aborts successfully. 6361 */ 6362 if (status == SUCCESS) 6363 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6364 LPFC_CTX_TGT); 6365 return status; 6366 } 6367 6368 /** 6369 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point 6370 * @cmnd: Pointer to scsi_cmnd data structure. 6371 * 6372 * This routine does target reset to all targets on @cmnd->device->host. 6373 * This emulates Parallel SCSI Bus Reset Semantics. 6374 * 6375 * Return code : 6376 * 0x2003 - Error 6377 * 0x2002 - Success 6378 **/ 6379 static int 6380 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 6381 { 6382 struct Scsi_Host *shost = cmnd->device->host; 6383 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6384 struct lpfc_nodelist *ndlp = NULL; 6385 struct lpfc_scsi_event_header scsi_event; 6386 int match; 6387 int ret = SUCCESS, status, i; 6388 u32 logit = LOG_FCP; 6389 6390 scsi_event.event_type = FC_REG_SCSI_EVENT; 6391 scsi_event.subcategory = LPFC_EVENT_BUSRESET; 6392 scsi_event.lun = 0; 6393 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 6394 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 6395 6396 fc_host_post_vendor_event(shost, fc_get_event_number(), 6397 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6398 6399 status = fc_block_scsi_eh(cmnd); 6400 if (status != 0 && status != SUCCESS) 6401 return status; 6402 6403 /* 6404 * Since the driver manages a single bus device, reset all 6405 * targets known to the driver. Should any target reset 6406 * fail, this routine returns failure to the midlayer. 6407 */ 6408 for (i = 0; i < LPFC_MAX_TARGET; i++) { 6409 /* Search for mapped node by target ID */ 6410 match = 0; 6411 spin_lock_irq(shost->host_lock); 6412 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6413 6414 if (vport->phba->cfg_fcp2_no_tgt_reset && 6415 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) 6416 continue; 6417 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 6418 ndlp->nlp_sid == i && 6419 ndlp->rport && 6420 ndlp->nlp_type & NLP_FCP_TARGET) { 6421 match = 1; 6422 break; 6423 } 6424 } 6425 spin_unlock_irq(shost->host_lock); 6426 if (!match) 6427 continue; 6428 6429 status = lpfc_send_taskmgmt(vport, cmnd, 6430 i, 0, FCP_TARGET_RESET); 6431 6432 if (status != SUCCESS) { 6433 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6434 "0700 Bus Reset on target %d failed\n", 6435 i); 6436 ret = FAILED; 6437 } 6438 } 6439 /* 6440 * We have to clean up i/o as : they may be orphaned by the TMFs 6441 * above; or if any of the TMFs failed, they may be in an 6442 * indeterminate state. 6443 * We will report success if all the i/o aborts successfully. 6444 */ 6445 6446 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST); 6447 if (status != SUCCESS) 6448 ret = FAILED; 6449 if (ret == FAILED) 6450 logit = LOG_TRACE_EVENT; 6451 6452 lpfc_printf_vlog(vport, KERN_ERR, logit, 6453 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 6454 return ret; 6455 } 6456 6457 /** 6458 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt 6459 * @cmnd: Pointer to scsi_cmnd data structure. 6460 * 6461 * This routine does host reset to the adaptor port. It brings the HBA 6462 * offline, performs a board restart, and then brings the board back online. 6463 * The lpfc_offline calls lpfc_sli_hba_down which will abort and local 6464 * reject all outstanding SCSI commands to the host and error returned 6465 * back to SCSI mid-level. 
As this will be SCSI mid-level's last resort 6466 * of error handling, it will only return error if resetting of the adapter 6467 * is not successful; in all other cases, will return success. 6468 * 6469 * Return code : 6470 * 0x2003 - Error 6471 * 0x2002 - Success 6472 **/ 6473 static int 6474 lpfc_host_reset_handler(struct scsi_cmnd *cmnd) 6475 { 6476 struct Scsi_Host *shost = cmnd->device->host; 6477 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6478 struct lpfc_hba *phba = vport->phba; 6479 int rc, ret = SUCCESS; 6480 6481 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 6482 "3172 SCSI layer issued Host Reset Data:\n"); 6483 6484 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6485 lpfc_offline(phba); 6486 rc = lpfc_sli_brdrestart(phba); 6487 if (rc) 6488 goto error; 6489 6490 rc = lpfc_online(phba); 6491 if (rc) 6492 goto error; 6493 6494 lpfc_unblock_mgmt_io(phba); 6495 6496 return ret; 6497 error: 6498 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6499 "3323 Failed host reset\n"); 6500 lpfc_unblock_mgmt_io(phba); 6501 return FAILED; 6502 } 6503 6504 /** 6505 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point 6506 * @sdev: Pointer to scsi_device. 6507 * 6508 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's 6509 * globally available list of scsi buffers. This routine also makes sure scsi 6510 * buffer is not allocated more than HBA limit conveyed to midlayer. This list 6511 * of scsi buffer exists for the lifetime of the driver. 6512 * 6513 * Return codes: 6514 * non-0 - Error 6515 * 0 - Success 6516 **/ 6517 static int 6518 lpfc_slave_alloc(struct scsi_device *sdev) 6519 { 6520 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6521 struct lpfc_hba *phba = vport->phba; 6522 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 6523 uint32_t total = 0; 6524 uint32_t num_to_alloc = 0; 6525 int num_allocated = 0; 6526 uint32_t sdev_cnt; 6527 struct lpfc_device_data *device_data; 6528 unsigned long flags; 6529 struct lpfc_name target_wwpn; 6530 6531 if (!rport || fc_remote_port_chkready(rport)) 6532 return -ENXIO; 6533 6534 if (phba->cfg_fof) { 6535 6536 /* 6537 * Check to see if the device data structure for the lun 6538 * exists. If not, create one. 6539 */ 6540 6541 u64_to_wwn(rport->port_name, target_wwpn.u.wwn); 6542 spin_lock_irqsave(&phba->devicelock, flags); 6543 device_data = __lpfc_get_device_data(phba, 6544 &phba->luns, 6545 &vport->fc_portname, 6546 &target_wwpn, 6547 sdev->lun); 6548 if (!device_data) { 6549 spin_unlock_irqrestore(&phba->devicelock, flags); 6550 device_data = lpfc_create_device_data(phba, 6551 &vport->fc_portname, 6552 &target_wwpn, 6553 sdev->lun, 6554 phba->cfg_XLanePriority, 6555 true); 6556 if (!device_data) 6557 return -ENOMEM; 6558 spin_lock_irqsave(&phba->devicelock, flags); 6559 list_add_tail(&device_data->listentry, &phba->luns); 6560 } 6561 device_data->rport_data = rport->dd_data; 6562 device_data->available = true; 6563 spin_unlock_irqrestore(&phba->devicelock, flags); 6564 sdev->hostdata = device_data; 6565 } else { 6566 sdev->hostdata = rport->dd_data; 6567 } 6568 sdev_cnt = atomic_inc_return(&phba->sdev_cnt); 6569 6570 /* For SLI4, all IO buffers are pre-allocated */ 6571 if (phba->sli_rev == LPFC_SLI_REV4) 6572 return 0; 6573 6574 /* This code path is now ONLY for SLI3 adapters */ 6575 6576 /* 6577 * Populate the cmds_per_lun count scsi_bufs into this host's globally 6578 * available list of scsi buffers. 
Don't allocate more than the 6579 * HBA limit conveyed to the midlayer via the host structure. The 6580 * formula accounts for the lun_queue_depth + error handlers + 1 6581 * extra. This list of scsi bufs exists for the lifetime of the driver. 6582 */ 6583 total = phba->total_scsi_bufs; 6584 num_to_alloc = vport->cfg_lun_queue_depth + 2; 6585 6586 /* If allocated buffers are enough, do nothing */ 6587 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total) 6588 return 0; 6589 6590 /* Allow some exchanges to be available always to complete discovery */ 6591 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 6592 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6593 "0704 At limitation of %d preallocated " 6594 "command buffers\n", total); 6595 return 0; 6596 /* Allow some exchanges to be available always to complete discovery */ 6597 } else if (total + num_to_alloc > 6598 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 6599 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6600 "0705 Allocation request of %d " 6601 "command buffers will exceed max of %d. " 6602 "Reducing allocation request to %d.\n", 6603 num_to_alloc, phba->cfg_hba_queue_depth, 6604 (phba->cfg_hba_queue_depth - total)); 6605 num_to_alloc = phba->cfg_hba_queue_depth - total; 6606 } 6607 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); 6608 if (num_to_alloc != num_allocated) { 6609 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6610 "0708 Allocation request of %d " 6611 "command buffers did not succeed. " 6612 "Allocated %d buffers.\n", 6613 num_to_alloc, num_allocated); 6614 } 6615 if (num_allocated > 0) 6616 phba->total_scsi_bufs += num_allocated; 6617 return 0; 6618 } 6619 6620 /** 6621 * lpfc_slave_configure - scsi_host_template slave_configure entry point 6622 * @sdev: Pointer to scsi_device. 6623 * 6624 * This routine configures the following items: 6625 * - Tag command queuing support for @sdev if supported. 6626 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 6627 * 6628 * Return codes: 6629 * 0 - Success 6630 **/ 6631 static int 6632 lpfc_slave_configure(struct scsi_device *sdev) 6633 { 6634 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6635 struct lpfc_hba *phba = vport->phba; 6636 6637 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth); 6638 6639 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 6640 lpfc_sli_handle_fast_ring_event(phba, 6641 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 6642 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 6643 lpfc_poll_rearm_timer(phba); 6644 } 6645 6646 return 0; 6647 } 6648 6649 /** 6650 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure 6651 * @sdev: Pointer to scsi_device. 6652 * 6653 * This routine sets the @sdev hostdata field to null.
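 * On OAS-capable (cfg_fof) adapters it also marks the lun's
 * lpfc_device_data as unavailable and frees it if OAS is not enabled
 * on that lun.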
6654 **/ 6655 static void 6656 lpfc_slave_destroy(struct scsi_device *sdev) 6657 { 6658 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6659 struct lpfc_hba *phba = vport->phba; 6660 unsigned long flags; 6661 struct lpfc_device_data *device_data = sdev->hostdata; 6662 6663 atomic_dec(&phba->sdev_cnt); 6664 if ((phba->cfg_fof) && (device_data)) { 6665 spin_lock_irqsave(&phba->devicelock, flags); 6666 device_data->available = false; 6667 if (!device_data->oas_enabled) 6668 lpfc_delete_device_data(phba, device_data); 6669 spin_unlock_irqrestore(&phba->devicelock, flags); 6670 } 6671 sdev->hostdata = NULL; 6672 return; 6673 } 6674 6675 /** 6676 * lpfc_create_device_data - creates and initializes device data structure for OAS 6677 * @phba: Pointer to host bus adapter structure. 6678 * @vport_wwpn: Pointer to vport's wwpn information 6679 * @target_wwpn: Pointer to target's wwpn information 6680 * @lun: Lun on target 6681 * @pri: Priority 6682 * @atomic_create: Flag to indicate if memory should be allocated using the 6683 * GFP_ATOMIC flag or not. 6684 * 6685 * This routine creates a device data structure which will contain identifying 6686 * information for the device (host wwpn, target wwpn, lun), state of OAS, 6687 * whether or not the corresponding lun is available by the system, 6688 * and pointer to the rport data. 6689 * 6690 * Return codes: 6691 * NULL - Error 6692 * Pointer to lpfc_device_data - Success 6693 **/ 6694 struct lpfc_device_data* 6695 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6696 struct lpfc_name *target_wwpn, uint64_t lun, 6697 uint32_t pri, bool atomic_create) 6698 { 6699 6700 struct lpfc_device_data *lun_info; 6701 int memory_flags; 6702 6703 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6704 !(phba->cfg_fof)) 6705 return NULL; 6706 6707 /* Attempt to create the device data to contain lun info */ 6708 6709 if (atomic_create) 6710 memory_flags = GFP_ATOMIC; 6711 else 6712 memory_flags = GFP_KERNEL; 6713 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags); 6714 if (!lun_info) 6715 return NULL; 6716 INIT_LIST_HEAD(&lun_info->listentry); 6717 lun_info->rport_data = NULL; 6718 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, 6719 sizeof(struct lpfc_name)); 6720 memcpy(&lun_info->device_id.target_wwpn, target_wwpn, 6721 sizeof(struct lpfc_name)); 6722 lun_info->device_id.lun = lun; 6723 lun_info->oas_enabled = false; 6724 lun_info->priority = pri; 6725 lun_info->available = false; 6726 return lun_info; 6727 } 6728 6729 /** 6730 * lpfc_delete_device_data - frees a device data structure for OAS 6731 * @phba: Pointer to host bus adapter structure. 6732 * @lun_info: Pointer to device data structure to free. 6733 * 6734 * This routine frees the previously allocated device data structure passed. 6735 * 6736 **/ 6737 void 6738 lpfc_delete_device_data(struct lpfc_hba *phba, 6739 struct lpfc_device_data *lun_info) 6740 { 6741 6742 if (unlikely(!phba) || !lun_info || 6743 !(phba->cfg_fof)) 6744 return; 6745 6746 if (!list_empty(&lun_info->listentry)) 6747 list_del(&lun_info->listentry); 6748 mempool_free(lun_info, phba->device_data_mem_pool); 6749 return; 6750 } 6751 6752 /** 6753 * __lpfc_get_device_data - returns the device data for the specified lun 6754 * @phba: Pointer to host bus adapter structure. 6755 * @list: Point to list to search. 
6756 * @vport_wwpn: Pointer to vport's wwpn information 6757 * @target_wwpn: Pointer to target's wwpn information 6758 * @lun: Lun on target 6759 * 6760 * This routine searches the list passed for the specified lun's device data. 6761 * This function does not hold locks, it is the responsibility of the caller 6762 * to ensure the proper lock is held before calling the function. 6763 * 6764 * Return codes: 6765 * NULL - Error 6766 * Pointer to lpfc_device_data - Success 6767 **/ 6768 struct lpfc_device_data* 6769 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, 6770 struct lpfc_name *vport_wwpn, 6771 struct lpfc_name *target_wwpn, uint64_t lun) 6772 { 6773 6774 struct lpfc_device_data *lun_info; 6775 6776 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || 6777 !phba->cfg_fof) 6778 return NULL; 6779 6780 /* Check to see if the lun is already enabled for OAS. */ 6781 6782 list_for_each_entry(lun_info, list, listentry) { 6783 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 6784 sizeof(struct lpfc_name)) == 0) && 6785 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 6786 sizeof(struct lpfc_name)) == 0) && 6787 (lun_info->device_id.lun == lun)) 6788 return lun_info; 6789 } 6790 6791 return NULL; 6792 } 6793 6794 /** 6795 * lpfc_find_next_oas_lun - searches for the next oas lun 6796 * @phba: Pointer to host bus adapter structure. 6797 * @vport_wwpn: Pointer to vport's wwpn information 6798 * @target_wwpn: Pointer to target's wwpn information 6799 * @starting_lun: Pointer to the lun to start searching for 6800 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information 6801 * @found_target_wwpn: Pointer to the found lun's target wwpn information 6802 * @found_lun: Pointer to the found lun. 6803 * @found_lun_status: Pointer to status of the found lun. 6804 * @found_lun_pri: Pointer to priority of the found lun. 6805 * 6806 * This routine searches the luns list for the specified lun 6807 * or the first lun for the vport/target. If the vport wwpn contains 6808 * a zero value then a specific vport is not specified. In this case 6809 * any vport which contains the lun will be considered a match. If the 6810 * target wwpn contains a zero value then a specific target is not specified. 6811 * In this case any target which contains the lun will be considered a 6812 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status 6813 * are returned. The function will also return the next lun if available. 6814 * If the next lun is not found, starting_lun parameter will be set to 6815 * NO_MORE_OAS_LUN. 
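 * The list walk is performed under phba->devicelock, which this routine
 * acquires and releases itself.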
6816 * 6817 * Return codes: 6818 * non-0 - Error 6819 * 0 - Success 6820 **/ 6821 bool 6822 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6823 struct lpfc_name *target_wwpn, uint64_t *starting_lun, 6824 struct lpfc_name *found_vport_wwpn, 6825 struct lpfc_name *found_target_wwpn, 6826 uint64_t *found_lun, 6827 uint32_t *found_lun_status, 6828 uint32_t *found_lun_pri) 6829 { 6830 6831 unsigned long flags; 6832 struct lpfc_device_data *lun_info; 6833 struct lpfc_device_id *device_id; 6834 uint64_t lun; 6835 bool found = false; 6836 6837 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6838 !starting_lun || !found_vport_wwpn || 6839 !found_target_wwpn || !found_lun || !found_lun_status || 6840 (*starting_lun == NO_MORE_OAS_LUN) || 6841 !phba->cfg_fof) 6842 return false; 6843 6844 lun = *starting_lun; 6845 *found_lun = NO_MORE_OAS_LUN; 6846 *starting_lun = NO_MORE_OAS_LUN; 6847 6848 /* Search for the lun or the lun closest in value */ 6849 6850 spin_lock_irqsave(&phba->devicelock, flags); 6851 list_for_each_entry(lun_info, &phba->luns, listentry) { 6852 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || 6853 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 6854 sizeof(struct lpfc_name)) == 0)) && 6855 ((wwn_to_u64(target_wwpn->u.wwn) == 0) || 6856 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 6857 sizeof(struct lpfc_name)) == 0)) && 6858 (lun_info->oas_enabled)) { 6859 device_id = &lun_info->device_id; 6860 if ((!found) && 6861 ((lun == FIND_FIRST_OAS_LUN) || 6862 (device_id->lun == lun))) { 6863 *found_lun = device_id->lun; 6864 memcpy(found_vport_wwpn, 6865 &device_id->vport_wwpn, 6866 sizeof(struct lpfc_name)); 6867 memcpy(found_target_wwpn, 6868 &device_id->target_wwpn, 6869 sizeof(struct lpfc_name)); 6870 if (lun_info->available) 6871 *found_lun_status = 6872 OAS_LUN_STATUS_EXISTS; 6873 else 6874 *found_lun_status = 0; 6875 *found_lun_pri = lun_info->priority; 6876 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) 6877 memset(vport_wwpn, 0x0, 6878 sizeof(struct lpfc_name)); 6879 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) 6880 memset(target_wwpn, 0x0, 6881 sizeof(struct lpfc_name)); 6882 found = true; 6883 } else if (found) { 6884 *starting_lun = device_id->lun; 6885 memcpy(vport_wwpn, &device_id->vport_wwpn, 6886 sizeof(struct lpfc_name)); 6887 memcpy(target_wwpn, &device_id->target_wwpn, 6888 sizeof(struct lpfc_name)); 6889 break; 6890 } 6891 } 6892 } 6893 spin_unlock_irqrestore(&phba->devicelock, flags); 6894 return found; 6895 } 6896 6897 /** 6898 * lpfc_enable_oas_lun - enables a lun for OAS operations 6899 * @phba: Pointer to host bus adapter structure. 6900 * @vport_wwpn: Pointer to vport's wwpn information 6901 * @target_wwpn: Pointer to target's wwpn information 6902 * @lun: Lun 6903 * @pri: Priority 6904 * 6905 * This routine enables a lun for OAS operations. The routine does so by 6906 * doing the following: 6907 * 6908 * 1) Checks to see if the device data for the lun has been created. 6909 * 2) If found, sets the OAS enabled flag if not set and returns. 6910 * 3) Otherwise, creates a device data structure. 6911 * 4) If successfully created, indicates the device data is for an OAS lun, 6912 * indicates the lun is not available and adds it to the list of luns.
6913 * 6914 * Return codes: 6915 * false - Error 6916 * true - Success 6917 **/ 6918 bool 6919 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6920 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 6921 { 6922 6923 struct lpfc_device_data *lun_info; 6924 unsigned long flags; 6925 6926 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6927 !phba->cfg_fof) 6928 return false; 6929 6930 spin_lock_irqsave(&phba->devicelock, flags); 6931 6932 /* Check to see if the device data for the lun has been created */ 6933 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, 6934 target_wwpn, lun); 6935 if (lun_info) { 6936 if (!lun_info->oas_enabled) 6937 lun_info->oas_enabled = true; 6938 lun_info->priority = pri; 6939 spin_unlock_irqrestore(&phba->devicelock, flags); 6940 return true; 6941 } 6942 6943 /* Create a lun info structure and add it to the list of luns */ 6944 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, 6945 pri, true); 6946 if (lun_info) { 6947 lun_info->oas_enabled = true; 6948 lun_info->priority = pri; 6949 lun_info->available = false; 6950 list_add_tail(&lun_info->listentry, &phba->luns); 6951 spin_unlock_irqrestore(&phba->devicelock, flags); 6952 return true; 6953 } 6954 spin_unlock_irqrestore(&phba->devicelock, flags); 6955 return false; 6956 } 6957 6958 /** 6959 * lpfc_disable_oas_lun - disables a lun for OAS operations 6960 * @phba: Pointer to host bus adapter structure. 6961 * @vport_wwpn: Pointer to vport's wwpn information 6962 * @target_wwpn: Pointer to target's wwpn information 6963 * @lun: Lun 6964 * @pri: Priority 6965 * 6966 * This routine disables a lun for OAS operations. The routine does so by 6967 * doing the following: 6968 * 6969 * 1) Checks to see if the device data for the lun is created. 6970 * 2) If present, clears the flag indicating this lun is for OAS. 6971 * 3) If the lun is not available to the system, the device data is 6972 * freed. 6973 * 6974 * Return codes: 6975 * false - Error 6976 * true - Success 6977 **/ 6978 bool 6979 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6980 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 6981 { 6982 6983 struct lpfc_device_data *lun_info; 6984 unsigned long flags; 6985 6986 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6987 !phba->cfg_fof) 6988 return false; 6989 6990 spin_lock_irqsave(&phba->devicelock, flags); 6991 6992 /* Check to see if the lun is available.
*/ 6993 lun_info = __lpfc_get_device_data(phba, 6994 &phba->luns, vport_wwpn, 6995 target_wwpn, lun); 6996 if (lun_info) { 6997 lun_info->oas_enabled = false; 6998 lun_info->priority = pri; 6999 if (!lun_info->available) 7000 lpfc_delete_device_data(phba, lun_info); 7001 spin_unlock_irqrestore(&phba->devicelock, flags); 7002 return true; 7003 } 7004 7005 spin_unlock_irqrestore(&phba->devicelock, flags); 7006 return false; 7007 } 7008 7009 static int 7010 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 7011 { 7012 return SCSI_MLQUEUE_HOST_BUSY; 7013 } 7014 7015 static int 7016 lpfc_no_handler(struct scsi_cmnd *cmnd) 7017 { 7018 return FAILED; 7019 } 7020 7021 static int 7022 lpfc_no_slave(struct scsi_device *sdev) 7023 { 7024 return -ENODEV; 7025 } 7026 7027 struct scsi_host_template lpfc_template_nvme = { 7028 .module = THIS_MODULE, 7029 .name = LPFC_DRIVER_NAME, 7030 .proc_name = LPFC_DRIVER_NAME, 7031 .info = lpfc_info, 7032 .queuecommand = lpfc_no_command, 7033 .eh_abort_handler = lpfc_no_handler, 7034 .eh_device_reset_handler = lpfc_no_handler, 7035 .eh_target_reset_handler = lpfc_no_handler, 7036 .eh_bus_reset_handler = lpfc_no_handler, 7037 .eh_host_reset_handler = lpfc_no_handler, 7038 .slave_alloc = lpfc_no_slave, 7039 .slave_configure = lpfc_no_slave, 7040 .scan_finished = lpfc_scan_finished, 7041 .this_id = -1, 7042 .sg_tablesize = 1, 7043 .cmd_per_lun = 1, 7044 .shost_attrs = lpfc_hba_attrs, 7045 .max_sectors = 0xFFFFFFFF, 7046 .vendor_id = LPFC_NL_VENDOR_ID, 7047 .track_queue_depth = 0, 7048 }; 7049 7050 struct scsi_host_template lpfc_template = { 7051 .module = THIS_MODULE, 7052 .name = LPFC_DRIVER_NAME, 7053 .proc_name = LPFC_DRIVER_NAME, 7054 .info = lpfc_info, 7055 .queuecommand = lpfc_queuecommand, 7056 .eh_timed_out = fc_eh_timed_out, 7057 .eh_should_retry_cmd = fc_eh_should_retry_cmd, 7058 .eh_abort_handler = lpfc_abort_handler, 7059 .eh_device_reset_handler = lpfc_device_reset_handler, 7060 .eh_target_reset_handler = lpfc_target_reset_handler, 7061 .eh_bus_reset_handler = lpfc_bus_reset_handler, 7062 .eh_host_reset_handler = lpfc_host_reset_handler, 7063 .slave_alloc = lpfc_slave_alloc, 7064 .slave_configure = lpfc_slave_configure, 7065 .slave_destroy = lpfc_slave_destroy, 7066 .scan_finished = lpfc_scan_finished, 7067 .this_id = -1, 7068 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 7069 .cmd_per_lun = LPFC_CMD_PER_LUN, 7070 .shost_attrs = lpfc_hba_attrs, 7071 .max_sectors = 0xFFFFFFFF, 7072 .vendor_id = LPFC_NL_VENDOR_ID, 7073 .change_queue_depth = scsi_change_queue_depth, 7074 .track_queue_depth = 1, 7075 }; 7076