/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_scsi.h"
#include "csio_init.h"

int csio_scsi_eqsize = 65536;
int csio_scsi_iqlen = 128;
int csio_scsi_ioreqs = 2048;
uint32_t csio_max_scan_tmo;
uint32_t csio_delta_scan_tmo = 5;
int csio_lun_qdepth = 32;

static int csio_ddp_descs = 128;

static int csio_do_abrt_cls(struct csio_hw *,
			    struct csio_ioreq *, bool);

static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);
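/*
 * Rough sketch of the ioreq state machine implemented by the handlers
 * declared above (derived from the handlers themselves; see the SCSI SM
 * section below for the authoritative transitions):
 *
 *	uninit ---START_IO---> io_active ---ABORT---> aborting
 *	uninit ---START_TM---> tm_active ---CLOSE---> closing
 *	io_active --COMPLETED (I-T nexus lost)--> shost_cmpl_await
 *	aborting --ABORTED--> uninit;  closing --CLOSED--> uninit
 *
 * COMPLETED normally returns a request straight to uninit, and
 * DRVCLEANUP forces any state back to uninit with FW_HOSTERROR.
 */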
81 * 82 */ 83 static bool 84 csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld) 85 { 86 struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq); 87 88 switch (sld->level) { 89 case CSIO_LEV_LUN: 90 if (scmnd == NULL) 91 return false; 92 93 return ((ioreq->lnode == sld->lnode) && 94 (ioreq->rnode == sld->rnode) && 95 ((uint64_t)scmnd->device->lun == sld->oslun)); 96 97 case CSIO_LEV_RNODE: 98 return ((ioreq->lnode == sld->lnode) && 99 (ioreq->rnode == sld->rnode)); 100 case CSIO_LEV_LNODE: 101 return (ioreq->lnode == sld->lnode); 102 case CSIO_LEV_ALL: 103 return true; 104 default: 105 return false; 106 } 107 } 108 109 /* 110 * csio_scsi_gather_active_ios - Gather active I/Os based on level 111 * @scm: SCSI module 112 * @sld: Level information 113 * @dest: The queue where these I/Os have to be gathered. 114 * 115 * Should be called with lock held. 116 */ 117 static void 118 csio_scsi_gather_active_ios(struct csio_scsim *scm, 119 struct csio_scsi_level_data *sld, 120 struct list_head *dest) 121 { 122 struct list_head *tmp, *next; 123 124 if (list_empty(&scm->active_q)) 125 return; 126 127 /* Just splice the entire active_q into dest */ 128 if (sld->level == CSIO_LEV_ALL) { 129 list_splice_tail_init(&scm->active_q, dest); 130 return; 131 } 132 133 list_for_each_safe(tmp, next, &scm->active_q) { 134 if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) { 135 list_del_init(tmp); 136 list_add_tail(tmp, dest); 137 } 138 } 139 } 140 141 static inline bool 142 csio_scsi_itnexus_loss_error(uint16_t error) 143 { 144 switch (error) { 145 case FW_ERR_LINK_DOWN: 146 case FW_RDEV_NOT_READY: 147 case FW_ERR_RDEV_LOST: 148 case FW_ERR_RDEV_LOGO: 149 case FW_ERR_RDEV_IMPL_LOGO: 150 return 1; 151 } 152 return 0; 153 } 154 155 /* 156 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command paylod. 157 * @req: IO req structure. 158 * @addr: DMA location to place the payload. 159 * 160 * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests. 161 */ 162 static inline void 163 csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr) 164 { 165 struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr; 166 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); 167 168 /* Check for Task Management */ 169 if (likely(scmnd->SCp.Message == 0)) { 170 int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun); 171 fcp_cmnd->fc_tm_flags = 0; 172 fcp_cmnd->fc_cmdref = 0; 173 174 memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16); 175 if (scmnd->flags & SCMD_TAGGED) 176 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; 177 else 178 fcp_cmnd->fc_pri_ta = 0; 179 fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd)); 180 181 if (req->nsge) 182 if (req->datadir == DMA_TO_DEVICE) 183 fcp_cmnd->fc_flags = FCP_CFL_WRDATA; 184 else 185 fcp_cmnd->fc_flags = FCP_CFL_RDDATA; 186 else 187 fcp_cmnd->fc_flags = 0; 188 } else { 189 memset(fcp_cmnd, 0, sizeof(*fcp_cmnd)); 190 int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun); 191 fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message; 192 } 193 } 194 195 /* 196 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR. 197 * @req: IO req structure. 198 * @addr: DMA location to place the payload. 199 * @size: Size of WR (including FW WR + immed data + rsp SG entry 200 * 201 * Wrapper for populating fw_scsi_cmd_wr. 
/*
 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry).
 *
 * Wrapper for populating fw_scsi_cmd_wr.
 */
static inline void
csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) |
				     FW_SCSI_CMD_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(
					       DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	wr->r3 = 0;
	memset(&wr->r5, 0, 8);

	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r6 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r4_lo[0] = 0;
	wr->u.fcoe.r4_lo[1] = 0;

	/* Frame a FCP command */
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
					 sizeof(struct fw_scsi_cmd_wr)));
}

#define CSIO_SCSI_CMD_WR_SZ(_imm)					\
	(sizeof(struct fw_scsi_cmd_wr) +	/* WR size */		\
	 ALIGN((_imm), 16))			/* Immed data */

#define CSIO_SCSI_CMD_WR_SZ_16(_imm)					\
			(ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))
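/*
 * A worked example of the sizing above (assuming, for illustration, a
 * 32-byte FCP_CMND payload, i.e. proto_cmd_len == sizeof(struct fcp_cmnd)):
 *
 *	CSIO_SCSI_CMD_WR_SZ(32)    = sizeof(struct fw_scsi_cmd_wr) + 32
 *	CSIO_SCSI_CMD_WR_SZ_16(32) = the same, rounded up to a multiple of
 *				     16 bytes, since WR lengths are expressed
 *				     in 16-byte units (see FW_WR_LEN16_V).
 */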
/*
 * csio_scsi_cmd - Create a SCSI CMD WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with a SCSI CMD WR.
 *
 */
static inline void
csio_scsi_cmd(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (unlikely(req->drv_status != 0))
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_cmd_wr(req, wrp.addr1, size);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}
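/*
 * Note on the two-part copy above (a sketch of the wrap case): when the
 * requested WR size crosses the end of the circular egress queue,
 * csio_wr_get() hands back two fragments -- wrp.addr1/size1 at the tail
 * of the queue and wrp.addr2 at its head. E.g. with size == 48 and only
 * 32 bytes left at the tail:
 *
 *	memcpy(wrp.addr1, tmpwr, 32);		// tail fragment
 *	memcpy(wrp.addr2, tmpwr + 32, 16);	// wraps to queue head
 *
 * The WR is first built contiguously in the scratch buffer returned by
 * csio_q_eq_wrap() and then split across the two fragments.
 */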
/*
 * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
 * @hw: HW module
 * @req: IO request
 * @sgl: ULP TX SGL pointer.
 *
 */
static inline void
csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
			   struct ulptx_sgl *sgl)
{
	struct ulptx_sge_pair *sge_pair = NULL;
	struct scatterlist *sgel;
	uint32_t i = 0;
	uint32_t xfer_len;
	struct list_head *tmp;
	struct csio_dma_buf *dma_buf;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE |
			      ULPTX_NSGE(req->nsge));
	/* Now add the data SGLs */
	if (likely(!req->dcopy)) {
		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
				sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
				continue;
			}
			if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[1] = cpu_to_be32(
							sg_dma_len(sgel));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[0] = cpu_to_be32(
							sg_dma_len(sgel));
			}
		}
	} else {
		/* Program sg elements with driver's DDP buffer */
		xfer_len = scsi_bufflen(scmnd);
		list_for_each(tmp, &req->gen_list) {
			dma_buf = (struct csio_dma_buf *)tmp;
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(dma_buf->paddr);
				sgl->len0 = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
			} else if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[1] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[0] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
			}
			xfer_len -= min(xfer_len, dma_buf->len);
			i++;
		}
	}
}
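/*
 * Layout produced by csio_scsi_init_ultptx_dsgl() (informational sketch):
 *
 *	struct ulptx_sgl      { cmd_nsge, len0, addr0 }	<- first SGE
 *	struct ulptx_sge_pair { len[2], addr[2] }	<- SGEs 2 and 3
 *	struct ulptx_sge_pair { ... }			<- SGEs 4 and 5, etc.
 *
 * which is why CSIO_SCSI_DATA_WRSZ() below charges one ulptx_sgl plus one
 * ulptx_sge_pair for every two SGEs beyond the first.
 */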
/*
 * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
 *
 * Wrapper for populating fw_scsi_read_wr.
 */
static inline void
csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) |
				     FW_SCSI_READ_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					 sizeof(struct fw_scsi_read_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
			sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/*
 * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
 *
 * Wrapper for populating fw_scsi_write_wr.
 */
static inline void
csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) |
				     FW_SCSI_WRITE_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					 sizeof(struct fw_scsi_write_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
			sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)				\
do {									\
	(sz) = sizeof(struct fw_scsi_##oper##_wr) +	/* WR size */	\
	       ALIGN((imm), 16) +			/* Immed data */\
	       sizeof(struct ulptx_sgl);		/* ulptx_sgl */	\
									\
	if (unlikely((req)->nsge > 1))					\
		(sz) += (sizeof(struct ulptx_sge_pair) *		\
				(ALIGN(((req)->nsge - 1), 2) / 2));	\
							/* Data SGE */	\
} while (0)
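/*
 * Worked example (illustrative numbers): for a READ with req->nsge == 5,
 * CSIO_SCSI_DATA_WRSZ() yields
 *
 *	sz = sizeof(struct fw_scsi_read_wr)
 *	   + ALIGN(imm, 16)			// immediate FCP_CMND
 *	   + sizeof(struct ulptx_sgl)		// SGE 1
 *	   + 2 * sizeof(struct ulptx_sge_pair);	// SGEs 2-5: ALIGN(4, 2)/2 = 2
 *
 * csio_scsi_read()/csio_scsi_write() then round sz up to a 16-byte
 * multiple before reserving the WR slot.
 */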
/*
 * csio_scsi_read - Create a SCSI READ WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with
 * SCSI READ WR.
 *
 */
static inline void
csio_scsi_read(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_read_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_read_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_scsi_write - Create a SCSI WRITE WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with
 * SCSI WRITE WR.
 *
 */
static inline void
csio_scsi_write(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_write_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_write_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_setup_ddp - Setup DDP buffers for Read request.
 * @req: IO req structure.
 *
 * Checks whether the SGL/data buffers are virtually contiguous, as required
 * for DDP. If they are, the driver posts the SGLs directly in the WR;
 * otherwise it posts internal DDP buffers for the request.
 */
static inline void
csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
{
#ifdef __CSIO_DEBUG__
	struct csio_hw *hw = req->lnode->hwp;
#endif
	struct scatterlist *sgel = NULL;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
	uint64_t sg_addr = 0;
	uint32_t ddp_pagesz = 4096;
	uint32_t buf_off;
	struct csio_dma_buf *dma_buf = NULL;
	uint32_t alloc_len = 0;
	uint32_t xfer_len = 0;
	uint32_t sg_len = 0;
	uint32_t i;

	scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
		sg_addr = sg_dma_address(sgel);
		sg_len = sg_dma_len(sgel);

		buf_off = sg_addr & (ddp_pagesz - 1);

		/* Except the 1st buffer, all buffer addrs have to be page aligned */
		if (i != 0 && buf_off) {
			csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
				 sg_addr, sg_len);
			goto unaligned;
		}

		/* Except the last buffer, all buffers must end on a page boundary */
		if ((i != (req->nsge - 1)) &&
		    ((buf_off + sg_len) & (ddp_pagesz - 1))) {
			csio_dbg(hw,
				 "SGL addr not ending on page boundary (%llx:%d)\n",
				 sg_addr, sg_len);
			goto unaligned;
		}
	}

	/* SGLs are virtually contiguous. HW will DDP to SGLs */
	req->dcopy = 0;
	csio_scsi_read(req);

	return;

unaligned:
	CSIO_INC_STATS(scsim, n_unaligned);
	/*
	 * For unaligned SGLs, the driver allocates internal DDP buffers.
	 * Once the command completes, data from the DDP buffers is copied
	 * back into the SGLs.
	 */
	req->dcopy = 1;

	/* Use gen_list to store the DDP buffers */
	INIT_LIST_HEAD(&req->gen_list);
	xfer_len = scsi_bufflen(scmnd);

	i = 0;
	/* Allocate ddp buffers for this request */
	while (alloc_len < xfer_len) {
		dma_buf = csio_get_scsi_ddp(scsim);
		if (dma_buf == NULL || i > scsim->max_sge) {
			req->drv_status = -EBUSY;
			break;
		}
		alloc_len += dma_buf->len;
		/* Added to IO req */
		list_add_tail(&dma_buf->list, &req->gen_list);
		i++;
	}

	if (!req->drv_status) {
		/* set number of ddp bufs used */
		req->nsge = i;
		csio_scsi_read(req);
		return;
	}

	/* release dma descs */
	if (i > 0)
		csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
}
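/*
 * DDP alignment rules checked above, in short (assuming the 4K DDP page
 * size used here): with buffers A, B, C making up the transfer,
 *
 *	A: may start at any offset, must end on a 4K boundary
 *	B: must start and end on 4K boundaries
 *	C: must start on a 4K boundary, may end anywhere
 *
 * Only the first buffer may be offset into a page, and only the last may
 * fall short of one; anything else forces the dcopy (bounce buffer) path.
 */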
/*
 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR
 * @abort: abort OR close
 *
 * Wrapper for populating fw_scsi_abrt_cls_wr.
 */
static inline void
csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
			   bool abort)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(
					       DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	/* 0 for CHK_ALL_IO tells FW to look up t_cookie */
	wr->sub_opcode_to_chk_all_io =
				(FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
				 FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
	wr->r3[0] = 0;
	wr->r3[1] = 0;
	wr->r3[2] = 0;
	wr->r3[3] = 0;
	/* Since we re-use the same ioreq for abort as well */
	wr->t_cookie = (uintptr_t) req;
}

static inline void
csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (req->drv_status != 0)
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}
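/*
 * A note on SCSI_ABORT vs. SCSI_CLOSE as used throughout this file: both
 * are issued through the same fw_scsi_abrt_cls_wr, distinguished only by
 * the sub_opcode. The driver picks an abort while the lnode is ready (so
 * the abort can actually go out on the wire) and a close once the link is
 * down, when the exchange only needs to be torn down locally -- see the
 * (ready ? SCSI_ABORT : SCSI_CLOSE) selection in csio_abrt_cls() and
 * csio_eh_abort_handler() below.
 */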
/*****************************************************************************/
/* START: SCSI SM                                                            */
/*****************************************************************************/
static void
csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_START_IO:

		if (req->nsge) {
			if (req->datadir == DMA_TO_DEVICE) {
				req->dcopy = 0;
				csio_scsi_write(req);
			} else
				csio_setup_ddp(scsim, req);
		} else {
			csio_scsi_cmd(req);
		}

		if (likely(req->drv_status == 0)) {
			/* change state and enqueue on active_q */
			csio_set_state(&req->sm, csio_scsis_io_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_active);

			return;
		}
		break;

	case CSIO_SCSIE_START_TM:
		csio_scsi_cmd(req);
		if (req->drv_status == 0) {
			/*
			 * NOTE: We collect the affected I/Os prior to issuing
			 * LUN reset, and not after it. This is to prevent
			 * aborting I/Os that get issued after the LUN reset,
			 * but prior to LUN reset completion (in the event that
			 * the host stack has not blocked I/Os to a LUN that is
			 * being reset).
			 */
			csio_set_state(&req->sm, csio_scsis_tm_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_tm_active);
		}
		return;

	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * NOTE:
		 * We could get here due to:
		 * - a window in the cleanup path of the SCSI module
		 *   (csio_scsi_abort_io()). Please see NOTE in this function.
		 * - a window in the time we tried to issue an abort/close
		 *   of a request to FW, and the FW completed the request
		 *   itself.
		 * Print a message for now, and return INVAL either way.
		 */
		req->drv_status = -EINVAL;
		csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);
	struct csio_rnode *rn;

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		/*
		 * In MSIX mode, with multiple queues, the SCSI completions
		 * could reach us sooner than the FW events sent to indicate
		 * I-T nexus loss (link down, remote device logo etc.). We
		 * don't want to be returning such I/Os to the upper layer
		 * immediately, since we wouldn't have reported the I-T nexus
		 * loss itself. This forces us to serialize such completions
		 * with the reporting of the I-T nexus loss. Therefore, we
		 * internally queue up such completions in the rnode. The
		 * reporting of I-T nexus loss to the upper layer is then
		 * followed by the returning of I/Os in this internal queue.
		 * Having another state along with another queue helps us take
		 * actions for events such as ABORT received while we are
		 * in this rnode queue.
		 */
		if (unlikely(req->wr_status != FW_SUCCESS)) {
			rn = req->rnode;
			/*
			 * FW says remote device is lost, but rnode
			 * doesn't reflect it.
			 */
			if (csio_scsi_itnexus_loss_error(req->wr_status) &&
			    csio_is_rnode_ready(rn)) {
				csio_set_state(&req->sm,
					       csio_scsis_shost_cmpl_await);
				list_add_tail(&req->sm.sm_list,
					      &rn->host_cmpl_q);
			}
		}

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}
static void
csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_tm_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_tm_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}
static void
csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in aborting st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the ABORTED event that
		 * the original I/O was returned to the driver by FW.
		 * We don't really care if the I/O was returned with success by
		 * FW (because the ABORT and completion of the I/O crossed each
		 * other), or any other return value. Once we are in aborting
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_ABORT:
		CSIO_INC_STATS(scm, n_abrt_dups);
		break;

	case CSIO_SCSIE_ABORTED:

		csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
			 req, req->wr_status, req->drv_status);
		/*
		 * Check if original I/O WR completed before the Abort
		 * completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_warn(hw,
				  "Abort completed before original I/O,"
				  " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * There are the following possible scenarios:
		 * 1. The abort completed successfully, FW returned FW_SUCCESS.
		 * 2. The completion of an I/O and the receipt of the
		 *    abort for that I/O by the FW crossed each other.
		 *    The FW returned FW_EINVAL. The original I/O would have
		 *    returned with FW_SUCCESS or any other SCSI error.
		 * 3. The FW couldn't send the abort out on the wire, as there
		 *    was an I-T nexus loss (link down, remote device logged
		 *    out etc.). FW sent back an appropriate I-T nexus loss
		 *    status for the abort.
		 * 4. FW sent an abort, but the abort timed out (remote device
		 *    didn't respond). FW replied back with
		 *    FW_SCSI_ABORT_TIMEDOUT.
		 * 5. FW couldn't genuinely abort the request for some reason,
		 *    and sent us an error.
		 *
		 * The first 3 scenarios are treated as successful abort
		 * operations by the host, while the last 2 are failed attempts
		 * to abort. Manipulate the return value of the request
		 * appropriately, so that the host can convey these results
		 * back to the upper layer.
		 */
		if ((req->wr_status == FW_SUCCESS) ||
		    (req->wr_status == FW_EINVAL) ||
		    csio_scsi_itnexus_loss_error(req->wr_status))
			req->wr_status = FW_SCSI_ABORT_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		/*
		 * We can receive this event from the module
		 * cleanup paths, if the FW forgot to reply to the ABORT WR
		 * and left this ioreq in this state. For now, just ignore
		 * the event. The CLOSE event is sent to this state, as
		 * the LINK may have already gone down.
		 */
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in closing st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the CLOSED event that
		 * the original I/O was returned to the driver by FW.
		 * We don't really care if the I/O was returned with success by
		 * FW (because the CLOSE and completion of the I/O crossed each
		 * other), or any other return value. Once we are in closing
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_CLOSED:
		/*
		 * Check if original I/O WR completed before the Close
		 * completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_fatal(hw,
				   "Close completed before original I/O,"
				   " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * Either the close succeeded, or we issued a close to the FW
		 * at the same time the FW completed the I/O to us. Either way,
		 * the I/O is closed.
		 */
		CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
			       (req->wr_status == FW_EINVAL));
		req->wr_status = FW_SCSI_CLOSE_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	switch (evt) {
	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * Just succeed the abort request, and hope that
		 * the remote device unregister path will cleanup
		 * this I/O to the upper layer within a sane
		 * amount of time.
		 */
		/*
		 * A close can come in during a LINK DOWN. The FW would have
		 * returned us the I/O back, but not the remote device lost
		 * FW event. In this interval, if the I/O times out at the
		 * upper layer, a close can come in. Take the same action as
		 * abort: return success, and hope that the remote device
		 * unregister path will clean up this I/O. If the FW still
		 * doesn't send the msg, the close times out, and the upper
		 * layer resorts to the next level of error recovery.
		 */
		req->drv_status = 0;
		break;
	case CSIO_SCSIE_DRVCLEANUP:
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
	default:
		csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
			 evt, req);
		CSIO_DB_ASSERT(0);
	}
}
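/*
 * Ingress completion layout consumed by csio_scsi_cmpl_handler() below
 * (a sketch based on how the handler walks the message):
 *
 *	[ 8-byte RSS header ][ struct cpl_fw6_msg ][ SCSI WR copy ... ]
 *	                       ^opcode == CPL_FW6_MSG  ^cpl->data; its first
 *	                                                byte is the WR opcode
 *
 * The ioreq pointer is recovered from the WR's cookie field, which was
 * seeded with the ioreq address when the WR was built.
 */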
/*
 * csio_scsi_cmpl_handler - WR completion handler for SCSI.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @priv: Private object
 * @scsiwr: Pointer to SCSI WR.
 *
 * This is the WR completion handler called per completion from the
 * ISR. It is called with lock held. It walks past the RSS and CPL message
 * header where the actual WR is present.
 * It then gets the status, WR handle (ioreq pointer) and the len of
 * the WR, based on WR opcode. Only on a non-good status is the entire
 * WR copied into the WR cache (ioreq->fw_wr).
 * The ioreq corresponding to the WR is returned to the caller.
 * NOTE: The SCSI queue doesn't allocate a freelist today, hence
 * no freelist buffer is expected.
 */
struct csio_ioreq *
csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
		       struct csio_fl_dma_buf *flb, void *priv,
		       uint8_t **scsiwr)
{
	struct csio_ioreq *ioreq = NULL;
	struct cpl_fw6_msg *cpl;
	uint8_t *tempwr;
	uint8_t status;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	/* skip RSS header */
	cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));

	if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
		csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
			  cpl->opcode);
		CSIO_INC_STATS(scm, n_inval_cplop);
		return NULL;
	}

	tempwr = (uint8_t *)(cpl->data);
	status = csio_wr_status(tempwr);
	*scsiwr = tempwr;

	if (likely((*tempwr == FW_SCSI_READ_WR) ||
		   (*tempwr == FW_SCSI_WRITE_WR) ||
		   (*tempwr == FW_SCSI_CMD_WR))) {
		ioreq = (struct csio_ioreq *)((uintptr_t)
				(((struct fw_scsi_read_wr *)tempwr)->cookie));
		CSIO_DB_ASSERT(virt_addr_valid(ioreq));

		ioreq->wr_status = status;

		return ioreq;
	}

	if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
		ioreq = (struct csio_ioreq *)((uintptr_t)
			 (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
		CSIO_DB_ASSERT(virt_addr_valid(ioreq));

		ioreq->wr_status = status;
		return ioreq;
	}

	csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
	CSIO_INC_STATS(scm, n_inval_scsiop);
	return NULL;
}

/*
 * csio_scsi_cleanup_io_q - Cleanup the given queue.
 * @scm: SCSI module.
 * @q: Queue to be cleaned up.
 *
 * Called with lock held. Has to exit with lock held.
 */
void
csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
{
	struct csio_hw *hw = scm->hw;
	struct csio_ioreq *ioreq;
	struct list_head *tmp, *next;
	struct scsi_cmnd *scmnd;

	/* Call back the completion routines of the active_q */
	list_for_each_safe(tmp, next, q) {
		ioreq = (struct csio_ioreq *)tmp;
		csio_scsi_drvcleanup(ioreq);
		list_del_init(&ioreq->sm.sm_list);
		scmnd = csio_scsi_cmnd(ioreq);
		spin_unlock_irq(&hw->lock);

		/*
		 * Upper layers may have cleared this command, hence this
		 * check to avoid accessing stale references.
		 */
		if (scmnd != NULL)
			ioreq->io_cbfn(hw, ioreq);

		spin_lock_irq(&scm->freelist_lock);
		csio_put_scsi_ioreq(scm, ioreq);
		spin_unlock_irq(&scm->freelist_lock);

		spin_lock_irq(&hw->lock);
	}
}

#define CSIO_SCSI_ABORT_Q_POLL_MS	2000

static void
csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
{
	struct csio_lnode *ln = ioreq->lnode;
	struct csio_hw *hw = ln->hwp;
	int ready = 0;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	int rv;

	if (csio_scsi_cmnd(ioreq) != scmnd) {
		CSIO_INC_STATS(scsim, n_abrt_race_comp);
		return;
	}

	ready = csio_is_lnode_ready(ln);

	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
	if (rv != 0) {
		if (ready)
			CSIO_INC_STATS(scsim, n_abrt_busy_error);
		else
			CSIO_INC_STATS(scsim, n_cls_busy_error);
	}
}
/*
 * csio_scsi_abort_io_q - Abort all I/Os on given queue
 * @scm: SCSI module.
 * @q: Queue to abort.
 * @tmo: Timeout in ms
 *
 * Attempt to abort all I/Os on given queue, and wait for a max
 * of tmo milliseconds for them to complete. Returns success
 * if all I/Os are aborted. Else returns -ETIMEDOUT.
 * Should be entered with lock held. Exits with lock held.
 * NOTE:
 * Lock has to be held across the loop that aborts I/Os, since dropping the
 * lock in between can cause the list to be corrupted. As a result, the
 * caller of this function has to ensure that the number of I/Os to be
 * aborted is finite enough to not cause lock-held-for-too-long issues.
 */
static int
csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
{
	struct csio_hw *hw = scm->hw;
	struct list_head *tmp, *next;
	int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
	struct scsi_cmnd *scmnd;

	if (list_empty(q))
		return 0;

	csio_dbg(hw, "Aborting SCSI I/Os\n");

	/* Now abort/close I/Os in the queue passed */
	list_for_each_safe(tmp, next, q) {
		scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
		csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
	}

	/* Wait till all active I/Os are completed/aborted/closed */
	while (!list_empty(q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all aborts completed */
	if (list_empty(q))
		return 0;

	return -ETIMEDOUT;
}

/*
 * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
 * @scm: SCSI module.
 * @abort: abort required.
 * Called with lock held, should exit with lock held.
 * Can sleep when waiting for I/Os to complete.
 */
int
csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
{
	struct csio_hw *hw = scm->hw;
	int rv = 0;
	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

	/* No I/Os pending */
	if (list_empty(&scm->active_q))
		return 0;

	/* Wait until all active I/Os are completed */
	while (!list_empty(&scm->active_q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all I/Os completed */
	if (list_empty(&scm->active_q))
		return 0;

	/* Else abort */
	if (abort) {
		rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
		if (rv == 0)
			return rv;
		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
	}

	csio_scsi_cleanup_io_q(scm, &scm->active_q);

	CSIO_DB_ASSERT(list_empty(&scm->active_q));

	return rv;
}
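/*
 * Timing of the cleanup above, for reference: csio_scsim_cleanup_io()
 * polls in CSIO_SCSI_ABORT_Q_POLL_MS (2000 ms) steps, so
 * DIV_ROUND_UP(60 * 1000, 2000) = 30 polls, i.e. up to a minute for
 * outstanding I/Os to drain on their own, followed by up to 30 seconds
 * (the 30000 ms passed to csio_scsi_abort_io_q()) for explicit aborts.
 */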
/*
 * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
 * @scm: SCSI module.
 * @lnode: lnode
 *
 * Called with lock held, should exit with lock held.
 * Can sleep (with dropped lock) when waiting for I/Os to complete.
 */
int
csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
{
	struct csio_hw *hw = scm->hw;
	struct csio_scsi_level_data sld;
	int rv;
	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

	csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);

	sld.level = CSIO_LEV_LNODE;
	sld.lnode = ln;
	INIT_LIST_HEAD(&ln->cmpl_q);
	csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);

	/* No I/Os pending on this lnode */
	if (list_empty(&ln->cmpl_q))
		return 0;

	/* Wait until all active I/Os on this lnode are completed */
	while (!list_empty(&ln->cmpl_q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all I/Os completed */
	if (list_empty(&ln->cmpl_q))
		return 0;

	csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);

	/* I/Os are pending, abort them */
	rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
	if (rv != 0) {
		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
		csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
	}

	CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));

	return rv;
}

static ssize_t
csio_show_hw_state(struct device *dev,
		   struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (csio_is_hw_ready(hw))
		return snprintf(buf, PAGE_SIZE, "ready\n");
	else
		return snprintf(buf, PAGE_SIZE, "not ready\n");
}

/* Device reset */
static ssize_t
csio_device_reset(struct device *dev,
		  struct device_attribute *attr, const char *buf, size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (*buf != '1')
		return -EINVAL;

	/* Delete NPIV lnodes */
	csio_lnodes_exit(hw, 1);

	/* Block upper IOs */
	csio_lnodes_block_request(hw);

	spin_lock_irq(&hw->lock);
	csio_hw_reset(hw);
	spin_unlock_irq(&hw->lock);

	/* Unblock upper IOs */
	csio_lnodes_unblock_request(hw);
	return count;
}
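/*
 * The hw_state, device_reset, disable_port and dbg_level attributes defined
 * in this section are exported through the Scsi_Host's sysfs attribute
 * arrays below. Assuming they are wired up as shost attributes, usage from
 * userspace would look roughly like this (hostN depends on enumeration):
 *
 *	cat /sys/class/scsi_host/hostN/hw_state
 *	echo 1 > /sys/class/scsi_host/hostN/device_reset
 *	echo 1 > /sys/class/scsi_host/hostN/disable_port
 */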
/* disable port */
static ssize_t
csio_disable_port(struct device *dev,
		  struct device_attribute *attr, const char *buf, size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	bool disable;

	if (*buf == '1' || *buf == '0')
		disable = (*buf == '1') ? true : false;
	else
		return -EINVAL;

	/* Block upper IOs */
	csio_lnodes_block_by_port(hw, ln->portid);

	spin_lock_irq(&hw->lock);
	csio_disable_lnodes(hw, ln->portid, disable);
	spin_unlock_irq(&hw->lock);

	/* Unblock upper IOs */
	csio_lnodes_unblock_by_port(hw, ln->portid);
	return count;
}

/* Show debug level */
static ssize_t
csio_show_dbg_level(struct device *dev,
		    struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
}

/* Store debug level */
static ssize_t
csio_store_dbg_level(struct device *dev,
		     struct device_attribute *attr, const char *buf,
		     size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	uint32_t dbg_level = 0;

	if (!isdigit(buf[0]))
		return -EINVAL;

	/* sscanf() returns the number of conversions; exactly one expected */
	if (sscanf(buf, "%i", &dbg_level) != 1)
		return -EINVAL;

	ln->params.log_level = dbg_level;
	hw->params.log_level = dbg_level;

	return count;
}

static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset);
static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
		   csio_store_dbg_level);

static struct device_attribute *csio_fcoe_lport_attrs[] = {
	&dev_attr_hw_state,
	&dev_attr_device_reset,
	&dev_attr_disable_port,
	&dev_attr_dbg_level,
	NULL,
};

static ssize_t
csio_show_num_reg_rnodes(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
}

static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);

static struct device_attribute *csio_fcoe_vport_attrs[] = {
	&dev_attr_num_reg_rnodes,
	&dev_attr_dbg_level,
	NULL,
};
static inline uint32_t
csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	struct scatterlist *sg;
	uint32_t bytes_left;
	uint32_t bytes_copy;
	uint32_t buf_off = 0;
	uint32_t start_off = 0;
	uint32_t sg_off = 0;
	void *sg_addr;
	void *buf_addr;
	struct csio_dma_buf *dma_buf;

	bytes_left = scsi_bufflen(scmnd);
	sg = scsi_sglist(scmnd);
	dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);

	/* Copy data from driver buffer to SGs of SCSI CMD */
	while (bytes_left > 0 && sg && dma_buf) {
		if (buf_off >= dma_buf->len) {
			buf_off = 0;
			dma_buf = (struct csio_dma_buf *)
					csio_list_next(dma_buf);
			continue;
		}

		if (start_off >= sg->length) {
			start_off -= sg->length;
			sg = sg_next(sg);
			continue;
		}

		buf_addr = dma_buf->vaddr + buf_off;
		sg_off = sg->offset + start_off;
		bytes_copy = min((dma_buf->len - buf_off),
				 sg->length - start_off);
		bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
				 bytes_copy);

		sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
		if (!sg_addr) {
			csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
				 sg, req);
			break;
		}

		csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
			 sg_addr, sg_off, buf_addr, bytes_copy);
		memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
		kunmap_atomic(sg_addr);

		start_off += bytes_copy;
		buf_off += bytes_copy;
		bytes_left -= bytes_copy;
	}

	if (bytes_left > 0)
		return DID_ERROR;
	else
		return DID_OK;
}
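/*
 * The copy loop above bounds each memcpy by three limits at once: what is
 * left in the current DDP buffer (dma_buf->len - buf_off), what is left in
 * the current SG entry (sg->length - start_off), and what is left in the
 * currently kmapped page (PAGE_SIZE - (sg_off & ~PAGE_MASK)). Whichever is
 * smallest decides bytes_copy, so a single iteration never crosses a
 * buffer, SG-entry or page boundary.
 */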
/*
 * csio_scsi_err_handler - SCSI error handler.
 * @hw: HW module.
 * @req: IO request.
 *
 */
static inline void
csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	struct csio_scsim *scm = csio_hw_to_scsim(hw);
	struct fcp_resp_with_ext *fcp_resp;
	struct fcp_resp_rsp_info *rsp_info;
	struct csio_dma_buf *dma_buf;
	uint8_t flags, scsi_status = 0;
	uint32_t host_status = DID_OK;
	uint32_t rsp_len = 0, sns_len = 0;
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);


	switch (req->wr_status) {
	case FW_HOSTERROR:
		if (unlikely(!csio_is_hw_ready(hw)))
			return;

		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_hosterror);

		break;
	case FW_SCSI_RSP_ERR:
		dma_buf = &req->dma_buf;
		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
		rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
		flags = fcp_resp->resp.fr_flags;
		scsi_status = fcp_resp->resp.fr_status;

		if (flags & FCP_RSP_LEN_VAL) {
			rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
			if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
			    (rsp_info->rsp_code != FCP_TMF_CMPL)) {
				host_status = DID_ERROR;
				goto out;
			}
		}

		if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
			sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
			if (sns_len > SCSI_SENSE_BUFFERSIZE)
				sns_len = SCSI_SENSE_BUFFERSIZE;

			memcpy(cmnd->sense_buffer,
			       &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
			CSIO_INC_STATS(scm, n_autosense);
		}

		scsi_set_resid(cmnd, 0);

		/* Under run */
		if (flags & FCP_RESID_UNDER) {
			scsi_set_resid(cmnd,
				       be32_to_cpu(fcp_resp->ext.fr_resid));

			if (!(flags & FCP_SNS_LEN_VAL) &&
			    (scsi_status == SAM_STAT_GOOD) &&
			    ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
							< cmnd->underflow))
				host_status = DID_ERROR;
		} else if (flags & FCP_RESID_OVER)
			host_status = DID_ERROR;

		CSIO_INC_STATS(scm, n_rsperror);
		break;

	case FW_SCSI_OVER_FLOW_ERR:
		csio_warn(hw,
			  "Over-flow error, cmnd:0x%x expected len:0x%x"
			  " resid:0x%x\n", cmnd->cmnd[0],
			  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_ovflerror);
		break;

	case FW_SCSI_UNDER_FLOW_ERR:
		csio_warn(hw,
			  "Under-flow error, cmnd:0x%x expected"
			  " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n",
			  cmnd->cmnd[0], scsi_bufflen(cmnd),
			  scsi_get_resid(cmnd), cmnd->device->lun,
			  rn->flowid);
		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_unflerror);
		break;

	case FW_SCSI_ABORT_REQUESTED:
	case FW_SCSI_ABORTED:
	case FW_SCSI_CLOSE_REQUESTED:
		csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
			 cmnd->cmnd[0],
			 (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
			 "closed" : "aborted");
		/*
		 * csio_eh_abort_handler checks this value to
		 * succeed or fail the abort request.
		 */
		host_status = DID_REQUEUE;
		if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
			CSIO_INC_STATS(scm, n_closed);
		else
			CSIO_INC_STATS(scm, n_aborted);
		break;

	case FW_SCSI_ABORT_TIMEDOUT:
		/* FW timed out the abort itself */
		csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
			 req, cmnd, req->wr_status);
		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_abrt_timedout);
		break;

	case FW_RDEV_NOT_READY:
		/*
		 * In firmware, an RDEV can get into this state
		 * temporarily, before moving into the disappeared/lost
		 * state. So, the driver should complete the request
		 * equivalent to device-disappeared!
		 */
		CSIO_INC_STATS(scm, n_rdev_nr_error);
		host_status = DID_ERROR;
		break;

	case FW_ERR_RDEV_LOST:
		CSIO_INC_STATS(scm, n_rdev_lost_error);
		host_status = DID_ERROR;
		break;

	case FW_ERR_RDEV_LOGO:
		CSIO_INC_STATS(scm, n_rdev_logo_error);
		host_status = DID_ERROR;
		break;

	case FW_ERR_RDEV_IMPL_LOGO:
		host_status = DID_ERROR;
		break;

	case FW_ERR_LINK_DOWN:
		CSIO_INC_STATS(scm, n_link_down_error);
		host_status = DID_ERROR;
		break;

	case FW_FCOE_NO_XCHG:
		CSIO_INC_STATS(scm, n_no_xchg_error);
		host_status = DID_ERROR;
		break;

	default:
		csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
			 req->wr_status, req, cmnd);
		CSIO_DB_ASSERT(0);

		CSIO_INC_STATS(scm, n_unknown_error);
		host_status = DID_ERROR;
		break;
	}

out:
	if (req->nsge > 0)
		scsi_dma_unmap(cmnd);

	cmnd->result = (((host_status) << 16) | scsi_status);
	cmnd->scsi_done(cmnd);

	/* Wake up waiting threads */
	csio_scsi_cmnd(req) = NULL;
	complete_all(&req->cmplobj);
}

/*
 * csio_scsi_cbfn - SCSI callback function.
 * @hw: HW module.
 * @req: IO request.
 *
 */
static void
csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	uint8_t scsi_status = SAM_STAT_GOOD;
	uint32_t host_status = DID_OK;

	if (likely(req->wr_status == FW_SUCCESS)) {
		if (req->nsge > 0) {
			scsi_dma_unmap(cmnd);
			if (req->dcopy)
				host_status = csio_scsi_copy_to_sgl(hw, req);
		}

		cmnd->result = (((host_status) << 16) | scsi_status);
		cmnd->scsi_done(cmnd);
		csio_scsi_cmnd(req) = NULL;
		CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
	} else {
		/* Error handling */
		csio_scsi_err_handler(hw, req);
	}
}
/**
 * csio_queuecommand - Entry point to kickstart an I/O request.
 * @host: The scsi_host pointer.
 * @cmnd: The I/O request from ML.
 *
 * This routine does the following:
 *	- Checks for HW and Rnode module readiness.
 *	- Gets a free ioreq structure (which is already initialized
 *	  to uninit during its allocation).
 *	- Maps SG elements.
 *	- Initializes ioreq members.
 *	- Kicks off the SCSI state machine for this IO.
 *	- Returns busy status on error.
 */
static int
csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
{
	struct csio_lnode *ln = shost_priv(host);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
	struct csio_ioreq *ioreq = NULL;
	unsigned long flags;
	int nsge = 0;
	int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
	int retval;
	int cpu;
	struct csio_scsi_qset *sqset;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	if (!blk_rq_cpu_valid(cmnd->request))
		cpu = smp_processor_id();
	else
		cpu = cmnd->request->cpu;

	sqset = &hw->sqset[ln->portid][cpu];

	nr = fc_remote_port_chkready(rport);
	if (nr) {
		cmnd->result = nr;
		CSIO_INC_STATS(scsim, n_rn_nr_error);
		goto err_done;
	}

	if (unlikely(!csio_is_hw_ready(hw))) {
		cmnd->result = (DID_REQUEUE << 16);
		CSIO_INC_STATS(scsim, n_hw_nr_error);
		goto err_done;
	}

	/* Get req->nsge, if there are SG elements to be mapped */
	nsge = scsi_dma_map(cmnd);
	if (unlikely(nsge < 0)) {
		CSIO_INC_STATS(scsim, n_dmamap_error);
		goto err;
	}

	/* Do we support so many mappings? */
	if (unlikely(nsge > scsim->max_sge)) {
		csio_warn(hw,
			  "More SGEs than can be supported."
			  " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
		CSIO_INC_STATS(scsim, n_unsupp_sge_error);
		goto err_dma_unmap;
	}

	/* Get a free ioreq structure - SM is already set to uninit */
	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
	if (!ioreq) {
		csio_err(hw, "Out of I/O request elements. Active #:%d\n",
			 scsim->stats.n_active);
		CSIO_INC_STATS(scsim, n_no_req_error);
		goto err_dma_unmap;
	}

	ioreq->nsge = nsge;
	ioreq->lnode = ln;
	ioreq->rnode = rn;
	ioreq->iq_idx = sqset->iq_idx;
	ioreq->eq_idx = sqset->eq_idx;
	ioreq->wr_status = 0;
	ioreq->drv_status = 0;
	csio_scsi_cmnd(ioreq) = (void *)cmnd;
	ioreq->tmo = 0;
	ioreq->datadir = cmnd->sc_data_direction;

	if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
		CSIO_INC_STATS(ln, n_output_requests);
		ln->stats.n_output_bytes += scsi_bufflen(cmnd);
	} else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
		CSIO_INC_STATS(ln, n_input_requests);
		ln->stats.n_input_bytes += scsi_bufflen(cmnd);
	} else
		CSIO_INC_STATS(ln, n_control_requests);

	/* Set cbfn */
	ioreq->io_cbfn = csio_scsi_cbfn;

	/* Needed during abort */
	cmnd->host_scribble = (unsigned char *)ioreq;
	cmnd->SCp.Message = 0;

	/* Kick off SCSI IO SM on the ioreq */
	spin_lock_irqsave(&hw->lock, flags);
	retval = csio_scsi_start_io(ioreq);
	spin_unlock_irqrestore(&hw->lock, flags);

	if (retval != 0) {
		csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
			 ioreq, retval);
		CSIO_INC_STATS(scsim, n_busy_error);
		goto err_put_req;
	}

	return 0;

err_put_req:
	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
err_dma_unmap:
	if (nsge > 0)
		scsi_dma_unmap(cmnd);
err:
	return rv;

err_done:
	cmnd->scsi_done(cmnd);
	return 0;
}
	struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];

	ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
	/*
	 * Use current processor queue for posting the abort/close, but retain
	 * the ingress queue ID of the original I/O being aborted/closed - we
	 * need the abort/close completion to be received on the same queue
	 * as the original I/O.
	 */
	ioreq->eq_idx = sqset->eq_idx;

	if (abort == SCSI_ABORT)
		rv = csio_scsi_abort(ioreq);
	else
		rv = csio_scsi_close(ioreq);

	return rv;
}

static int
csio_eh_abort_handler(struct scsi_cmnd *cmnd)
{
	struct csio_ioreq *ioreq;
	struct csio_lnode *ln = shost_priv(cmnd->device->host);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	int ready = 0, ret;
	unsigned long tmo = 0;
	int rv;
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);

	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;

	ioreq = (struct csio_ioreq *)cmnd->host_scribble;
	if (!ioreq)
		return SUCCESS;

	if (!rn)
		return FAILED;

	csio_dbg(hw,
		 "Request to abort ioreq:%p cmd:%p cdb:%08llx"
		 " ssni:0x%x lun:%llu iq:0x%x\n",
		 ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
		 cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));

	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
		CSIO_INC_STATS(scsim, n_abrt_race_comp);
		return SUCCESS;
	}

	ready = csio_is_lnode_ready(ln);
	tmo = CSIO_SCSI_ABRT_TMO_MS;

	spin_lock_irq(&hw->lock);
	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
	spin_unlock_irq(&hw->lock);

	if (rv != 0) {
		if (rv == -EINVAL) {
			/* Return success, if the abort/close request was
			 * issued on an already completed IO
			 */
			return SUCCESS;
		}
		if (ready)
			CSIO_INC_STATS(scsim, n_abrt_busy_error);
		else
			CSIO_INC_STATS(scsim, n_cls_busy_error);

		goto inval_scmnd;
	}

	/* Wait for completion */
	init_completion(&ioreq->cmplobj);
	wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));

	/* FW didn't respond to the abort within our timeout */
	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {

		csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
		CSIO_INC_STATS(scsim, n_abrt_timedout);

inval_scmnd:
		if (ioreq->nsge > 0)
			scsi_dma_unmap(cmnd);

		spin_lock_irq(&hw->lock);
		csio_scsi_cmnd(ioreq) = NULL;
		spin_unlock_irq(&hw->lock);

		cmnd->result = (DID_ERROR << 16);
		cmnd->scsi_done(cmnd);

		return FAILED;
	}

	/* FW successfully aborted the request */
	if (host_byte(cmnd->result) == DID_REQUEUE) {
		csio_info(hw,
			  "Aborted SCSI command to (%d:%llu) serial#:0x%lx\n",
			  cmnd->device->id, cmnd->device->lun,
			  cmnd->serial_number);
		return SUCCESS;
	} else {
		csio_info(hw,
			  "Failed to abort SCSI command, (%d:%llu) serial#:0x%lx\n",
			  cmnd->device->id, cmnd->device->lun,
			  cmnd->serial_number);
		return FAILED;
	}
}

/*
 * csio_tm_cbfn - TM callback function.
 * @hw: HW module.
 * @req: IO request.
 *
 * Cache the result in 'cmnd', since ioreq will be freed soon
 * after we return from here, and the waiting thread shouldn't trust
 * the ioreq contents.
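 *
 * Completion contract (a sketch of what the code below implements): the
 * LUN reset handler polls csio_scsi_cmnd(req) until this callback sets
 * it to NULL, then reads the cached status, roughly:
 *
 *	if (cmnd->SCp.Status != FW_SUCCESS)
 *		...the TMF is treated as failed...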
 */
static void
csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	struct csio_dma_buf *dma_buf;
	uint8_t flags = 0;
	struct fcp_resp_with_ext *fcp_resp;
	struct fcp_resp_rsp_info *rsp_info;

	csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
		 req, req->wr_status);

	/* Cache FW return status */
	cmnd->SCp.Status = req->wr_status;

	/* Special handling based on FCP response */

	/*
	 * FW returns this error if the FCP_RSP flags were set. FCP4 says
	 * FCP_RSP_LEN_VAL in flags shall be set for TM completions.
	 * So if a target were to set this bit, we expect the
	 * rsp_code to be set to FCP_TMF_CMPL for a successful TM
	 * completion. Any other rsp_code means the TM operation failed.
	 * If a target were to just ignore setting flags, we treat
	 * the TM operation as success, and FW returns FW_SUCCESS.
	 */
	if (req->wr_status == FW_SCSI_RSP_ERR) {
		dma_buf = &req->dma_buf;
		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
		rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);

		flags = fcp_resp->resp.fr_flags;

		/* Modify return status if flags indicate success */
		if (flags & FCP_RSP_LEN_VAL)
			if (rsp_info->rsp_code == FCP_TMF_CMPL)
				cmnd->SCp.Status = FW_SUCCESS;

		csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
	}

	/* Wake up the TM handler thread */
	csio_scsi_cmnd(req) = NULL;
}

static int
csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
{
	struct csio_lnode *ln = shost_priv(cmnd->device->host);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
	struct csio_ioreq *ioreq = NULL;
	struct csio_scsi_qset *sqset;
	unsigned long flags;
	int retval;
	int count, ret;
	LIST_HEAD(local_q);
	struct csio_scsi_level_data sld;

	if (!rn)
		goto fail;

	csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
		 cmnd->device->lun, rn->flowid, rn->scsi_id);

	if (!csio_is_lnode_ready(ln)) {
		csio_err(hw,
			 "LUN reset cannot be issued on non-ready"
			 " local node vnpi:0x%x (LUN:%llu)\n",
			 ln->vnp_flowid, cmnd->device->lun);
		goto fail;
	}

	/* Lnode is ready, now wait on rport node readiness */
	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;

	/*
	 * If we have blocked in the previous call, at this point, either the
	 * remote node has come back online, or the device loss timer has
	 * fired and the remote node is destroyed. Allow the LUN reset only
	 * for the former case, since LUN reset is a TMF I/O on the wire, and
	 * we need a valid session to issue it.
	 */
	if (fc_remote_port_chkready(rn->rport)) {
		csio_err(hw,
			 "LUN reset cannot be issued on non-ready"
			 " remote node ssni:0x%x (LUN:%llu)\n",
			 rn->flowid, cmnd->device->lun);
		goto fail;
	}

	/* Get a free ioreq structure - SM is already set to uninit */
	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);

	if (!ioreq) {
		csio_err(hw, "Out of IO request elements. Active #:%d\n",
			 scsim->stats.n_active);
		goto fail;
	}

	sqset = &hw->sqset[ln->portid][smp_processor_id()];
	ioreq->nsge = 0;
	ioreq->lnode = ln;
	ioreq->rnode = rn;
	ioreq->iq_idx = sqset->iq_idx;
	ioreq->eq_idx = sqset->eq_idx;

	csio_scsi_cmnd(ioreq) = cmnd;
	cmnd->host_scribble = (unsigned char *)ioreq;
	cmnd->SCp.Status = 0;

	cmnd->SCp.Message = FCP_TMF_LUN_RESET;
	ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;

	/*
	 * FW times the LUN reset for ioreq->tmo, so we have to wait a little
	 * longer (10s for now) than that to allow FW to return the timed
	 * out command.
	 */
	count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);

	/* Set cbfn */
	ioreq->io_cbfn = csio_tm_cbfn;

	/* Save off the ioreq info for later use */
	sld.level = CSIO_LEV_LUN;
	sld.lnode = ioreq->lnode;
	sld.rnode = ioreq->rnode;
	sld.oslun = cmnd->device->lun;

	spin_lock_irqsave(&hw->lock, flags);
	/* Kick off TM SM on the ioreq */
	retval = csio_scsi_start_tm(ioreq);
	spin_unlock_irqrestore(&hw->lock, flags);

	if (retval != 0) {
		csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
			 ioreq, retval);
		goto fail_ret_ioreq;
	}

	csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
		 count * (CSIO_SCSI_TM_POLL_MS / 1000));
	/* Wait for completion */
	while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
	       && count--)
		msleep(CSIO_SCSI_TM_POLL_MS);

	/* LUN reset timed out */
	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
		csio_err(hw, "LUN reset (%d:%llu) timed out\n",
			 cmnd->device->id, cmnd->device->lun);

		spin_lock_irq(&hw->lock);
		csio_scsi_drvcleanup(ioreq);
		list_del_init(&ioreq->sm.sm_list);
		spin_unlock_irq(&hw->lock);

		goto fail_ret_ioreq;
	}

	/* LUN reset returned, check cached status */
	if (cmnd->SCp.Status != FW_SUCCESS) {
		csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
			 cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
		goto fail;
	}

	/* LUN reset succeeded, start aborting affected I/Os */
	/*
	 * Since the host guarantees during LUN reset that there
	 * will not be any more I/Os to that LUN, until the LUN reset
	 * completes, we gather pending I/Os after the LUN reset.
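	 * Any I/Os still on the active queue for this LUN should already
	 * have been terminated at the target (a LUN reset aborts all
	 * outstanding tasks for that LUN); the abort pass below is there
	 * to reclaim the corresponding driver and firmware state.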
	 */
	spin_lock_irq(&hw->lock);
	csio_scsi_gather_active_ios(scsim, &sld, &local_q);

	retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
	spin_unlock_irq(&hw->lock);

	/* Aborts may have timed out */
	if (retval != 0) {
		csio_err(hw,
			 "Attempt to abort I/Os during LUN reset of %llu"
			 " returned %d\n", cmnd->device->lun, retval);
		/* Return I/Os back to active_q */
		spin_lock_irq(&hw->lock);
		list_splice_tail_init(&local_q, &scsim->active_q);
		spin_unlock_irq(&hw->lock);
		goto fail;
	}

	CSIO_INC_STATS(rn, n_lun_rst);

	csio_info(hw, "LUN reset occurred (%d:%llu)\n",
		  cmnd->device->id, cmnd->device->lun);

	return SUCCESS;

fail_ret_ioreq:
	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
fail:
	CSIO_INC_STATS(rn, n_lun_rst_fail);
	return FAILED;
}

static int
csio_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));

	return 0;
}

static int
csio_slave_configure(struct scsi_device *sdev)
{
	scsi_change_queue_depth(sdev, csio_lun_qdepth);
	return 0;
}

static void
csio_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

static int
csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct csio_lnode *ln = shost_priv(shost);
	int rv = 1;

	spin_lock_irq(shost->host_lock);
	if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
		goto out;

	rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
			    csio_delta_scan_tmo * HZ);
out:
	spin_unlock_irq(shost->host_lock);

	return rv;
}

struct scsi_host_template csio_fcoe_shost_template = {
	.module = THIS_MODULE,
	.name = CSIO_DRV_DESC,
	.proc_name = KBUILD_MODNAME,
	.queuecommand = csio_queuecommand,
	.eh_abort_handler = csio_eh_abort_handler,
	.eh_device_reset_handler = csio_eh_lun_reset_handler,
	.slave_alloc = csio_slave_alloc,
	.slave_configure = csio_slave_configure,
	.slave_destroy = csio_slave_destroy,
	.scan_finished = csio_scan_finished,
	.this_id = -1,
	.sg_tablesize = CSIO_SCSI_MAX_SGE,
	.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = csio_fcoe_lport_attrs,
	.max_sectors = CSIO_MAX_SECTOR_SIZE,
	.use_blk_tags = 1,
};

struct scsi_host_template csio_fcoe_shost_vport_template = {
	.module = THIS_MODULE,
	.name = CSIO_DRV_DESC,
	.proc_name = KBUILD_MODNAME,
	.queuecommand = csio_queuecommand,
	.eh_abort_handler = csio_eh_abort_handler,
	.eh_device_reset_handler = csio_eh_lun_reset_handler,
	.slave_alloc = csio_slave_alloc,
	.slave_configure = csio_slave_configure,
	.slave_destroy = csio_slave_destroy,
	.scan_finished = csio_scan_finished,
	.this_id = -1,
	.sg_tablesize = CSIO_SCSI_MAX_SGE,
	.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = csio_fcoe_vport_attrs,
	.max_sectors = CSIO_MAX_SECTOR_SIZE,
	.use_blk_tags = 1,
};

/*
 * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
 * @scm: SCSI Module
 * @hw: HW device.
 * @buf_size: buffer size
 * @num_buf: number of buffers.
 *
 * This routine allocates the DMA buffers required for SCSI data transfer,
 * used when the SGL buffers of a SCSI READ request posted by the SCSI
 * midlayer are not virtually contiguous.
 */
static int
csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
			 int buf_size, int num_buf)
{
	int n = 0;
	struct list_head *tmp;
	struct csio_dma_buf *ddp_desc = NULL;
	uint32_t unit_size = 0;

	if (!num_buf)
		return 0;

	if (!buf_size)
		return -EINVAL;

	INIT_LIST_HEAD(&scm->ddp_freelist);

	/* Align buf size to page size */
	buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
	/* Initialize dma descriptors */
	for (n = 0; n < num_buf; n++) {
		/* Set unit size to request size */
		unit_size = buf_size;
		ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
		if (!ddp_desc) {
			csio_err(hw,
				 "Failed to allocate ddp descriptors,"
				 " Num allocated = %d.\n",
				 scm->stats.n_free_ddp);
			goto no_mem;
		}

		/* Allocate DMA buffers for DDP */
		ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
						       &ddp_desc->paddr);
		if (!ddp_desc->vaddr) {
			csio_err(hw,
				 "SCSI response DMA buffer (ddp) allocation"
				 " failed!\n");
			kfree(ddp_desc);
			goto no_mem;
		}

		ddp_desc->len = unit_size;

		/* Add it to the scsi ddp freelist */
		list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
		CSIO_INC_STATS(scm, n_free_ddp);
	}

	return 0;
no_mem:
	/* Release dma descs back to freelist and free dma memory */
	list_for_each(tmp, &scm->ddp_freelist) {
		ddp_desc = (struct csio_dma_buf *) tmp;
		tmp = csio_list_prev(tmp);
		pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
				    ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		kfree(ddp_desc);
	}
	scm->stats.n_free_ddp = 0;

	return -ENOMEM;
}

/*
 * csio_scsi_free_ddp_bufs - Free DDP buffers of unaligned SGLs.
 * @scm: SCSI Module
 * @hw: HW device.
 *
 * This routine frees the DDP buffers.
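 *
 * (Mirror of csio_scsi_alloc_ddp_bufs() above: it walks ddp_freelist,
 * returning each descriptor's coherent DMA memory and then freeing the
 * descriptor itself. Nothing here reclaims in-use buffers, so it is
 * presumably safe to call only once all DDP I/O has quiesced.)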
 */
static void
csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
{
	struct list_head *tmp;
	struct csio_dma_buf *ddp_desc;

	/* Release dma descs back to freelist and free dma memory */
	list_for_each(tmp, &scm->ddp_freelist) {
		ddp_desc = (struct csio_dma_buf *) tmp;
		tmp = csio_list_prev(tmp);
		pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
				    ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		kfree(ddp_desc);
	}
	scm->stats.n_free_ddp = 0;
}

/**
 * csio_scsim_init - Initialize SCSI Module
 * @scm: SCSI Module
 * @hw: HW module
 *
 */
int
csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
{
	int i;
	struct csio_ioreq *ioreq;
	struct csio_dma_buf *dma_buf;

	INIT_LIST_HEAD(&scm->active_q);
	scm->hw = hw;

	scm->proto_cmd_len = sizeof(struct fcp_cmnd);
	scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
	scm->max_sge = CSIO_SCSI_MAX_SGE;

	spin_lock_init(&scm->freelist_lock);

	/* Pre-allocate ioreqs and initialize them */
	INIT_LIST_HEAD(&scm->ioreq_freelist);
	for (i = 0; i < csio_scsi_ioreqs; i++) {

		ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
		if (!ioreq) {
			csio_err(hw,
				 "I/O request element allocation failed,"
				 " Num allocated = %d.\n",
				 scm->stats.n_free_ioreq);

			goto free_ioreq;
		}

		/* Allocate DMA buffers for the response payload */
		dma_buf = &ioreq->dma_buf;
		dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL,
						&dma_buf->paddr);
		if (!dma_buf->vaddr) {
			csio_err(hw,
				 "SCSI response DMA buffer allocation"
				 " failed!\n");
			kfree(ioreq);
			goto free_ioreq;
		}

		dma_buf->len = scm->proto_rsp_len;

		/* Set state to uninit */
		csio_init_state(&ioreq->sm, csio_scsis_uninit);
		INIT_LIST_HEAD(&ioreq->gen_list);
		init_completion(&ioreq->cmplobj);

		list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
		CSIO_INC_STATS(scm, n_free_ioreq);
	}

	if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
		goto free_ioreq;

	return 0;

free_ioreq:
	/*
	 * Free up existing allocations, since an error
	 * from here means we are returning for good
	 */
	while (!list_empty(&scm->ioreq_freelist)) {
		struct csio_sm *tmp;

		tmp = list_first_entry(&scm->ioreq_freelist,
				       struct csio_sm, sm_list);
		list_del_init(&tmp->sm_list);
		ioreq = (struct csio_ioreq *)tmp;

		dma_buf = &ioreq->dma_buf;
		pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr,
			      dma_buf->paddr);

		kfree(ioreq);
	}

	scm->stats.n_free_ioreq = 0;

	return -ENOMEM;
}

/**
 * csio_scsim_exit - Uninitialize SCSI Module
 * @scm: SCSI Module
 *
 */
void
csio_scsim_exit(struct csio_scsim *scm)
{
	struct csio_ioreq *ioreq;
	struct csio_dma_buf *dma_buf;

	while (!list_empty(&scm->ioreq_freelist)) {
		struct csio_sm *tmp;

		tmp = list_first_entry(&scm->ioreq_freelist,
				       struct csio_sm, sm_list);
		list_del_init(&tmp->sm_list);
		ioreq = (struct csio_ioreq *)tmp;

		dma_buf = &ioreq->dma_buf;
		pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr,
			      dma_buf->paddr);

		kfree(ioreq);
	}

	scm->stats.n_free_ioreq = 0;

	csio_scsi_free_ddp_bufs(scm, scm->hw);
}