/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
 * Support: <fbsd-storage-driver.pdl@broadcom.com>
 *
 * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
 *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
 *
 * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
 */
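
/*
 * mpi3mr_cam.c: CAM (SCSI mid-layer) glue for the Broadcom MPI3MR
 * controller driver.  This file implements SIM registration
 * (mpi3mr_cam_attach), SCSI I/O dispatch and scatter/gather list
 * construction, timed-out I/O recovery via task management, and
 * bottom-half processing of firmware events (device add/remove and
 * SAS/PCIe topology changes).
 */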

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <machine/stdarg.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/smp_all.h>

#include <dev/nvme/nvme.h>
#include "mpi/mpi30_api.h"
#include "mpi3mr_cam.h"
#include "mpi3mr.h"
#include <sys/time.h>			/* XXX for pcpu.h */
#include <sys/pcpu.h>			/* XXX for PCPU_GET */

#define	smp_processor_id()  PCPU_GET(cpuid)

static void
mpi3mr_enqueue_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
static void
mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
void
mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc);
static void
mpi3mr_freeup_events(struct mpi3mr_softc *sc);

extern int
mpi3mr_register_events(struct mpi3mr_softc *sc);
extern void mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
    bus_addr_t dma_addr);

static U32 event_count;

static void
mpi3mr_prepare_sgls(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpi3mr_softc *sc;
	struct mpi3mr_cmd *cm;
	u_int i;
	bus_addr_t chain_dma;
	void *chain;
	U8 *sg_local;
	U32 chain_length;
	int sges_left;
	U32 sges_in_segment;
	U8 simple_sgl_flags;
	U8 simple_sgl_flags_last;
	U8 last_chain_sgl_flags;
	struct mpi3mr_chain *chain_req;
	Mpi3SCSIIORequest_t *scsiio_req;
	union ccb *ccb;

	cm = (struct mpi3mr_cmd *)arg;
	sc = cm->sc;
	scsiio_req = (Mpi3SCSIIORequest_t *)&cm->io_request;
	ccb = cm->ccb;

	if (error) {
		device_printf(sc->mpi3mr_dev, "%s: error=%d\n", __func__, error);
		if (error == EFBIG) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_TOO_BIG);
		} else {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		}
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	}

	if (cm->data_dir == MPI3MR_READ)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cm->data_dir == MPI3MR_WRITE)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREWRITE);

	KASSERT(nsegs <= MPI3MR_SG_DEPTH && nsegs > 0,
	    ("%s: bad SGE count: %d\n", device_get_nameunit(sc->mpi3mr_dev), nsegs));
	KASSERT(scsiio_req->DataLength != 0,
	    ("%s: Data segments (%d), but DataLength == 0\n",
	    device_get_nameunit(sc->mpi3mr_dev), nsegs));

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sg_local = (U8 *)&scsiio_req->SGL;

	sges_left = nsegs;

	sges_in_segment = (sc->facts.op_req_sz -
	    offsetof(Mpi3SCSIIORequest_t, SGL)) / sizeof(Mpi3SGESimple_t);
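
	/*
	 * sges_in_segment is how many simple SGEs fit in the request frame
	 * itself.  When the I/O needs more segments than that, the last
	 * in-frame slot becomes a LAST_CHAIN SGE pointing at a per-command
	 * chain buffer that holds the remaining simple SGEs.  Illustrative
	 * example (the sizes are an assumption, op_req_sz is controller
	 * reported): with a 512-byte request frame and 16-byte SGEs,
	 * roughly (512 - offsetof(SGL)) / 16 SGEs fit in the frame, so a
	 * 64-segment I/O would spill most of its SGEs into the chain
	 * buffer set up below.
	 */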
	i = 0;

	mpi3mr_dprint(sc, MPI3MR_TRACE, "SGE count: %d IO size: %d\n",
	    nsegs, scsiio_req->DataLength);

	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		sges_in_segment--;
		i++;
	}

	chain_req = &sc->chain_sgl_list[cm->hosttag];

	chain = chain_req->buf;
	chain_dma = chain_req->buf_phys;
	memset(chain_req->buf, 0, PAGE_SIZE);
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(Mpi3SGESimple_t);

	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, segs[i].ds_len,
			    segs[i].ds_addr);
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		i++;
	}

	/*
	 * Now that we've created the sgls, we send the request to the device.
	 * Unlike in Linux, dmaload isn't guaranteed to load every time, but
	 * this function is always called when the resources are available, so
	 * we can send the request to hardware here always. mpi3mr_map_request
	 * knows about this quirk and will only take evasive action when an
	 * error other than EINPROGRESS is returned from dmaload.
	 */
	mpi3mr_enqueue_request(sc, cm);

	return;
}

static void
mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
{
	u_int32_t retcode = 0;
	union ccb *ccb;

	ccb = cm->ccb;
	if (cm->data != NULL) {
		mtx_lock(&sc->io_lock);
		/* Map data buffer into bus space */
		retcode = bus_dmamap_load_ccb(sc->buffer_dmat, cm->dmamap,
		    ccb, mpi3mr_prepare_sgls, cm, 0);
		mtx_unlock(&sc->io_lock);
		if (retcode != 0 && retcode != EINPROGRESS) {
			device_printf(sc->mpi3mr_dev,
			    "bus_dmamap_load(): retcode = %d\n", retcode);
			/*
			 * Any other error means prepare_sgls wasn't called, and
			 * will never be called, so we have to mop up. This error
			 * should never happen, though.
			 */
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			mpi3mr_release_command(cm);
			xpt_done(ccb);
		}
	} else {
		/*
		 * No data, we enqueue it directly here.
		 */
		mpi3mr_enqueue_request(sc, cm);
	}
}

void
mpi3mr_unmap_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
{
	if (cmd->data != NULL) {
		if (cmd->data_dir == MPI3MR_READ)
			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap,
			    BUS_DMASYNC_POSTREAD);
		if (cmd->data_dir == MPI3MR_WRITE)
			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap,
			    BUS_DMASYNC_POSTWRITE);
		mtx_lock(&sc->io_lock);
		bus_dmamap_unload(sc->buffer_dmat, cmd->dmamap);
		mtx_unlock(&sc->io_lock);
	}
}
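
/*
 * A command with a data buffer flows through these helpers as:
 * mpi3mr_map_request() -> bus_dmamap_load_ccb() -> mpi3mr_prepare_sgls()
 * (possibly deferred) -> mpi3mr_enqueue_request(), and on completion
 * mpi3mr_unmap_request() syncs and unloads the DMA map.  The pre-/post-
 * sync pairs are what keep the buffer coherent on both sides of the
 * transfer.
 */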

/**
 * mpi3mr_allow_unmap_to_fw - Whether an unmap is allowed to fw
 * @sc: Adapter instance reference
 * @ccb: SCSI Command reference
 *
 * The controller hardware cannot handle certain unmap commands
 * for NVMe drives. This routine checks for those and, when the
 * command is disallowed, completes it with the proper status and
 * sense data.
 *
 * Return: TRUE for an allowed unmap, FALSE otherwise.
 */
static bool mpi3mr_allow_unmap_to_fw(struct mpi3mr_softc *sc,
    union ccb *ccb)
{
	struct ccb_scsiio *csio;
	uint16_t param_list_len, block_desc_len, trunc_param_len = 0;

	csio = &ccb->csio;
	param_list_len = (uint16_t)((scsiio_cdb_ptr(csio)[7] << 8) |
	    scsiio_cdb_ptr(csio)[8]);

	switch (pci_get_revid(sc->mpi3mr_dev)) {
	case SAS4116_CHIP_REV_A0:
		if (!param_list_len) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s: CDB received with zero parameter length\n",
			    __func__);
			mpi3mr_print_cdb(ccb);
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return false;
		}

		if (param_list_len < 24) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s: CDB received with invalid param_list_len: %d\n",
			    __func__, param_list_len);
			mpi3mr_print_cdb(ccb);
			scsi_set_sense_data(&ccb->csio.sense_data,
			    /*sense_format*/ SSD_TYPE_FIXED,
			    /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x1A,
			    /*ascq*/ 0x00,
			    /*extra args*/ SSD_ELEM_NONE);
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			ccb->ccb_h.status =
			    CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			/* Complete the CCB, like the other error paths here */
			xpt_done(ccb);
			return false;
		}

		if (param_list_len != csio->dxfer_len) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s: CDB received with param_list_len: %d bufflen: %d\n",
			    __func__, param_list_len, csio->dxfer_len);
			mpi3mr_print_cdb(ccb);
			scsi_set_sense_data(&ccb->csio.sense_data,
			    /*sense_format*/ SSD_TYPE_FIXED,
			    /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x1A,
			    /*ascq*/ 0x00,
			    /*extra args*/ SSD_ELEM_NONE);
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			ccb->ccb_h.status =
			    CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			xpt_done(ccb);
			return false;
		}

		block_desc_len = (uint16_t)((csio->data_ptr[2] << 8) |
		    csio->data_ptr[3]);

		if (block_desc_len < 16) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s: Invalid descriptor length in param list: %d\n",
			    __func__, block_desc_len);
			mpi3mr_print_cdb(ccb);
			scsi_set_sense_data(&ccb->csio.sense_data,
			    /*sense_format*/ SSD_TYPE_FIXED,
			    /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26,
			    /*ascq*/ 0x00,
			    /*extra args*/ SSD_ELEM_NONE);
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			ccb->ccb_h.status =
			    CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			xpt_done(ccb);
			return false;
		}

		if (param_list_len > (block_desc_len + 8)) {
			mpi3mr_print_cdb(ccb);
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "%s: Truncating param_list_len(%d) to block_desc_len+8(%d)\n",
			    __func__, param_list_len, (block_desc_len + 8));
			param_list_len = block_desc_len + 8;
			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) & 0xFF;
			scsiio_cdb_ptr(csio)[8] = param_list_len & 0xFF;
			mpi3mr_print_cdb(ccb);
		}
		break;

	case SAS4116_CHIP_REV_B0:
		if ((param_list_len > 24) && ((param_list_len - 8) & 0xF)) {
			trunc_param_len = param_list_len -
			    ((param_list_len - 8) & 0xF);
			mpi3mr_print_cdb(ccb);
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "%s: Truncating param_list_len from (%d) to (%d)\n",
			    __func__, param_list_len, trunc_param_len);
			scsiio_cdb_ptr(csio)[7] = (trunc_param_len >> 8) & 0xFF;
			scsiio_cdb_ptr(csio)[8] = trunc_param_len & 0xFF;
			mpi3mr_print_cdb(ccb);
		}
		break;
	}

	return true;
}
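
/*
 * Worked example of the B0 truncation above (the values are only
 * illustrative): an UNMAP with param_list_len = 42 has
 * (42 - 8) & 0xF = 2 trailing bytes beyond the last whole 16-byte
 * block descriptor, so the length is truncated to 40 and bytes 7-8
 * of the CDB are rewritten before the command goes to the firmware.
 */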

/**
 * mpi3mr_tm_response_name - get TM response as a string
 * @resp_code: TM response code
 *
 * Convert a known task management response code to a readable
 * string.
 *
 * Return: response code string.
 */
static const char *
mpi3mr_tm_response_name(U8 resp_code)
{
	const char *desc;

	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
		desc = "invalid LUN";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
		desc = "overlapped tag attempted";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
		desc = "task management request denied by NVMe device";
		break;
	default:
		desc = "unknown";
		break;
	}

	return desc;
}

void mpi3mr_poll_pend_io_completions(struct mpi3mr_softc *sc)
{
	int i;
	int num_of_reply_queues = sc->num_queues;
	struct mpi3mr_irq_context *irq_ctx;

	for (i = 0; i < num_of_reply_queues; i++) {
		irq_ctx = &sc->irq_ctx[i];
		mpi3mr_complete_io_cmd(sc, irq_ctx);
	}
}

void
trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type,
    U32 reset_reason)
{
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "Another reset is in progress, no need to trigger the reset\n");
		return;
	}
	sc->reset.type = reset_type;
	sc->reset.reason = reset_reason;

	return;
}
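
/*
 * Note that trigger_reset_from_watchdog() only records the requested
 * reset type and reason in sc->reset; the watchdog/OCR context (see
 * the "through the OCR thread context" comment in
 * mpi3mr_scsiio_timeout() below) is what actually performs the soft
 * reset.
 */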

/**
 * mpi3mr_issue_tm - Issue Task Management request
 * @sc: Adapter instance reference
 * @cmd: Timed out command reference
 * @tm_type: Task Management type
 * @timeout: TM timeout value in seconds
 *
 * Issues a Task Management Request to the controller for the
 * target, LUN and command referenced by @cmd, waits for its
 * completion and checks the TM response. Recovers from a timed
 * out TM by requesting a controller reset.
 *
 * Return: 0 on success, non-zero on errors
 */
static int
mpi3mr_issue_tm(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd,
    U8 tm_type, unsigned long timeout)
{
	int retval = 0;
	MPI3_SCSI_TASK_MGMT_REQUEST tm_req;
	MPI3_SCSI_TASK_MGMT_REPLY *tm_reply = NULL;
	struct mpi3mr_drvr_cmd *drv_cmd = NULL;
	struct mpi3mr_target *tgtdev = NULL;
	struct mpi3mr_op_req_queue *op_req_q = NULL;
	union ccb *ccb;
	U8 resp_code;

	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "Controller is in unrecoverable state!! TM not required\n");
		return retval;
	}
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "controller reset in progress!! TM not required\n");
		return retval;
	}

	if (!cmd->ccb) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "SCSIIO command timed-out with NULL ccb\n");
		return retval;
	}
	ccb = cmd->ccb;

	tgtdev = cmd->targ;
	if (tgtdev == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "Device does not exist for target ID:0x%x, "
		    "TM is not required\n", ccb->ccb_h.target_id);
		return retval;
	}
	if (tgtdev->dev_removed == 1) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "Device(0x%x) is removed, TM is not required\n",
		    ccb->ccb_h.target_id);
		return retval;
	}

	drv_cmd = &sc->host_tm_cmds;
	mtx_lock(&drv_cmd->lock);

	memset(&tm_req, 0, sizeof(tm_req));
	tm_req.DevHandle = htole16(tgtdev->dev_handle);
	tm_req.TaskType = tm_type;
	tm_req.HostTag = htole16(MPI3MR_HOSTTAG_TMS);
	int_to_lun(ccb->ccb_h.target_lun, tm_req.LUN);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;

	/* ccb was verified non-NULL above */
	if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
		op_req_q = &sc->op_req_q[cmd->req_qidx];
		tm_req.TaskHostTag = htole16(cmd->hosttag);
		tm_req.TaskRequestQueueID = htole16(op_req_q->qid);
	}

	if (tgtdev)
		mpi3mr_atomic_inc(&tgtdev->block_io);

	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
		    tgtdev->dev_spec.pcie_inf.abort_to)
			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET) &&
		    tgtdev->dev_spec.pcie_inf.reset_to)
			timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	sc->tm_chan = (void *)&drv_cmd;

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
	    "posting task management request: type(%d), handle(0x%04x)\n",
	    tm_type, tgtdev->dev_handle);

	init_completion(&drv_cmd->completion);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "posting task management request failed\n");
		retval = -1;
		goto out_unlock;
	}
	wait_for_completion_timeout_tm(&drv_cmd->completion, timeout, sc);

	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "task management request timed out after %ld seconds\n",
			    timeout);
			if (sc->mpi3mr_debug & MPI3MR_DEBUG_TM) {
				mpi3mr_dprint(sc, MPI3MR_INFO, "tm_request dump\n");
				mpi3mr_hexdump(&tm_req, sizeof(tm_req), 8);
			}
			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET,
			    MPI3MR_RESET_FROM_TM_TIMEOUT);
			retval = ETIMEDOUT;
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}
	tm_reply = (MPI3_SCSI_TASK_MGMT_REPLY *)drv_cmd->reply;

	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		resp_code = tm_reply->ResponseData & MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "task management request to handle(0x%04x) failed with "
		    "ioc_status(0x%04x) log_info(0x%08x)\n",
		    tgtdev->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
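
	/*
	 * Map the TM response code to a pass/fail verdict.  SUCCEEDED and
	 * COMPLETE are always success; IO_QUEUED_ON_IOC (task queued but
	 * never sent to the target) is only acceptable for QUERY TASK,
	 * where it simply reports the task's state.
	 */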
	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
	    "task management request type(%d) completed for handle(0x%04x) "
	    "with ioc_status(0x%04x), log_info(0x%08x), termination_count(%u), "
	    "response:%s(0x%x)\n", tm_type, tgtdev->dev_handle,
	    drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
	    tm_reply->TerminationCount, mpi3mr_tm_response_name(resp_code),
	    resp_code);

	if (retval)
		goto out_unlock;
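
	/*
	 * A successful TM reply is not taken at face value.  Drain the
	 * reply queues (twice, around re-enabling interrupts, so nothing
	 * completed in the window is missed), then cross-check driver
	 * state below: for ABORT_TASK the timed-out command must have
	 * left MPI3MR_CMD_STATE_IN_TM, and for TARGET_RESET the target
	 * must have no outstanding I/Os.
	 */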
	mpi3mr_disable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);
	mpi3mr_enable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);

	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		if (cmd->state == MPI3MR_CMD_STATE_IN_TM) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s: task abort returned success from firmware but "
			    "corresponding CCB (%p) was not terminated, "
			    "marking task abort failed!\n", sc->name, cmd->ccb);
			retval = -1;
		}
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		if (mpi3mr_atomic_read(&tgtdev->outstanding)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s: target reset returned success from firmware but "
			    "IOs are still pending on the target (%p), "
			    "marking target reset failed!\n",
			    sc->name, tgtdev);
			retval = -1;
		}
		break;
	default:
		break;
	}

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&drv_cmd->lock);
	if (tgtdev && mpi3mr_atomic_read(&tgtdev->block_io) > 0)
		mpi3mr_atomic_dec(&tgtdev->block_io);

	return retval;
}

/**
 * mpi3mr_task_abort - Abort error handling callback
 * @cmd: Timed out command reference
 *
 * Issue Abort Task Management if the command is in LLD scope
 * and verify that it is aborted successfully, returning status
 * accordingly.
 *
 * Return: 0 on a successful abort of the SCSI command, non-zero otherwise
 */
static int mpi3mr_task_abort(struct mpi3mr_cmd *cmd)
{
	int retval = 0;
	struct mpi3mr_softc *sc;
	union ccb *ccb;

	sc = cmd->sc;

	if (!cmd->ccb) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "SCSIIO command timed-out with NULL ccb\n");
		return retval;
	}
	ccb = cmd->ccb;

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "attempting abort task for ccb(%p)\n", ccb);

	mpi3mr_print_cdb(ccb);

	if (cmd->state != MPI3MR_CMD_STATE_BUSY) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "%s: ccb is not in driver scope, abort task is not required\n",
		    sc->name);
		return retval;
	}
	cmd->state = MPI3MR_CMD_STATE_IN_TM;

	retval = mpi3mr_issue_tm(sc, cmd,
	    MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK, MPI3MR_ABORTTM_TIMEOUT);

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "abort task is %s for ccb(%p)\n",
	    ((retval == 0) ? "SUCCESS" : "FAILED"), ccb);

	return retval;
}

/**
 * mpi3mr_target_reset - Target reset error handling callback
 * @cmd: Timed out command reference
 *
 * Issue Target reset Task Management and verify that the SCSI
 * commands are terminated successfully, returning status
 * accordingly.
 *
 * Return: 0 on successful termination of the SCSI commands,
 * non-zero otherwise
 */
static int mpi3mr_target_reset(struct mpi3mr_cmd *cmd)
{
	int retval = 0;
	struct mpi3mr_softc *sc;
	struct mpi3mr_target *target;

	sc = cmd->sc;

	target = cmd->targ;
	if (target == NULL) {
		mpi3mr_dprint(sc, MPI3MR_XINFO,
		    "Device does not exist for target:0x%p, "
		    "target reset is not required\n", target);
		return retval;
	}

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "attempting target reset on target(%d)\n", target->per_id);

	/* The message below only makes sense when nothing is outstanding */
	if (!mpi3mr_atomic_read(&target->outstanding)) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "no outstanding IOs on the target(%d),"
		    " target reset not required.\n", target->per_id);
		return retval;
	}

	retval = mpi3mr_issue_tm(sc, cmd,
	    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, MPI3MR_RESETTM_TIMEOUT);

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "target reset is %s for target(%d)\n",
	    ((retval == 0) ? "SUCCESS" : "FAILED"), target->per_id);

	return retval;
}

/**
 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
 * @sc: Adapter instance reference
 *
 * Calculate the pending I/Os for the controller and return.
 *
 * Return: Number of pending I/Os
 */
static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_softc *sc)
{
	U16 i, pend_ios = 0;

	for (i = 0; i < sc->num_queues; i++)
		pend_ios += mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
	return pend_ios;
}

/**
 * mpi3mr_wait_for_host_io - block for I/Os to complete
 * @sc: Adapter instance reference
 * @timeout: time out in seconds
 *
 * Waits for pending I/Os for the given adapter to complete or
 * for the timeout to expire.
 *
 * Return: 0 if all pending I/Os completed, -1 otherwise
 */
static int mpi3mr_wait_for_host_io(struct mpi3mr_softc *sc, U32 timeout)
{
	enum mpi3mr_iocstate iocstate;
	int i;

	iocstate = mpi3mr_get_iocstate(sc);
	if (iocstate != MRIOC_STATE_READY) {
		mpi3mr_dprint(sc, MPI3MR_XINFO,
		    "%s :Controller is in NON-READY state! Proceed with Reset\n",
		    __func__);
		return -1;
	}

	if (!mpi3mr_get_fw_pending_ios(sc))
		return 0;

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "%s :Waiting for %d seconds prior to reset for %d pending I/Os to complete\n",
	    __func__, timeout, mpi3mr_get_fw_pending_ios(sc));

	for (i = 0; i < timeout; i++) {
		if (!mpi3mr_get_fw_pending_ios(sc)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "%s :All pending I/Os got completed while waiting! Reset not required\n",
			    __func__);
			return 0;
		}
		iocstate = mpi3mr_get_iocstate(sc);
		if (iocstate != MRIOC_STATE_READY) {
			mpi3mr_dprint(sc, MPI3MR_XINFO,
			    "%s :Controller state became NON-READY while waiting! "
			    "Don't wait further, proceed with Reset\n", __func__);
			return -1;
		}
		DELAY(1000 * 1000);
	}

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "%s :Pending I/Os after wait exhaust is %d! Proceed with Reset\n",
	    __func__, mpi3mr_get_fw_pending_ios(sc));

	return -1;
}
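
/*
 * Recovery for a timed-out SCSI I/O escalates in three steps: abort
 * the task, then reset the target, then request a controller soft
 * reset.  Virtual disks skip the TM steps entirely and go straight to
 * the controller reset once outstanding I/Os have been given a chance
 * to drain.
 */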
static void
mpi3mr_scsiio_timeout(void *data)
{
	int retval = 0;
	struct mpi3mr_softc *sc;
	struct mpi3mr_cmd *cmd;
	struct mpi3mr_target *targ_dev = NULL;

	if (!data)
		return;

	cmd = (struct mpi3mr_cmd *)data;
	sc = cmd->sc;

	if (cmd->ccb == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "SCSIIO command timed-out with NULL ccb\n");
		return;
	}

	/*
	 * TMs are not supported for I/O timeouts on VD/LD, so directly issue
	 * a controller reset, with a max timeout of 180sec for outstanding
	 * I/Os to complete.
	 */
	targ_dev = cmd->targ;
	if (targ_dev && (targ_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)) {
		if (mpi3mr_wait_for_host_io(sc, MPI3MR_RAID_ERRREC_RESET_TIMEOUT))
			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET,
			    MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
		return;
	}

	/* Issue task abort to recover the timed out IO */
	retval = mpi3mr_task_abort(cmd);
	if (!retval || (retval == ETIMEDOUT))
		return;

	/*
	 * task abort has failed to recover the timed out IO,
	 * try with the target reset
	 */
	retval = mpi3mr_target_reset(cmd);
	if (!retval || (retval == ETIMEDOUT))
		return;

	/*
	 * task abort and target reset have failed. So issue a controller
	 * reset (soft reset) through the OCR thread context
	 */
	trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET,
	    MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);

	return;
}

void int_to_lun(unsigned int lun, U8 *req_lun)
{
	int i;

	/* The MPI3 LUN field is 8 bytes; clear all of it, not just byte 0 */
	memset(req_lun, 0, 8);

	for (i = 0; i < sizeof(lun); i += 2) {
		req_lun[i] = (lun >> 8) & 0xFF;
		req_lun[i+1] = lun & 0xFF;
		lun = lun >> 16;
	}
}
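
/*
 * Example of the encoding above: lun = 5 produces the 8-byte LUN field
 * { 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, i.e. big-endian
 * 16-bit words per addressing level, matching SAM peripheral-device
 * addressing for small LUN numbers.
 */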
static U16 get_req_queue_index(struct mpi3mr_softc *sc)
{
	U16 i = 0, reply_q_index = 0, reply_q_pend_ios = 0;

	reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[0].pend_ios);
	for (i = 0; i < sc->num_queues; i++) {
		if (reply_q_pend_ios > mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios)) {
			reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
			reply_q_index = i;
		}
	}

	return reply_q_index;
}
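
/*
 * The submit path below picks its operational queue with
 * get_req_queue_index(), a simple least-loaded policy: scan every
 * reply queue's pend_ios counter and choose the queue with the fewest
 * in-flight commands for the next submission.
 */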
static void
mpi3mr_action_scsiio(struct mpi3mr_cam_softc *cam_sc, union ccb *ccb)
{
	Mpi3SCSIIORequest_t *req = NULL;
	struct ccb_scsiio *csio;
	struct mpi3mr_softc *sc;
	struct mpi3mr_target *targ;
	struct mpi3mr_cmd *cm;
	uint8_t scsi_opcode, queue_idx;
	uint32_t mpi_control;

	sc = cam_sc->sc;
	mtx_assert(&sc->mpi3mr_mtx, MA_OWNED);

	if (sc->unrecoverable) {
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < cam_sc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	    csio->ccb_h.target_id));

	scsi_opcode = scsiio_cdb_ptr(csio)[0];

	if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) &&
	    !((scsi_opcode == SYNCHRONIZE_CACHE) ||
	    (scsi_opcode == START_STOP_UNIT))) {
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		xpt_done(ccb);
		return;
	}

	targ = mpi3mr_find_target_by_per_id(cam_sc, csio->ccb_h.target_id);
	if (targ == NULL) {
		mpi3mr_dprint(sc, MPI3MR_XINFO,
		    "Device with target ID: 0x%x does not exist\n",
		    csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ->is_hidden) {
		mpi3mr_dprint(sc, MPI3MR_XINFO,
		    "Device with target ID: 0x%x is hidden\n",
		    csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ->dev_removed == 1) {
		mpi3mr_dprint(sc, MPI3MR_XINFO,
		    "Device with target ID: 0x%x is removed\n",
		    csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ->dev_handle == 0x0) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s NULL handle for target 0x%x\n",
		    __func__, csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (mpi3mr_atomic_read(&targ->block_io) ||
	    (sc->reset_in_progress == 1) || (sc->prepare_for_reset == 1)) {
		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s target is busy target_id: 0x%x\n",
		    __func__, csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}

	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer. Check for
	 * this here and complete the command without error.
	 */
	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume. In that case don't tell CAM
	 * that the volume has timed out. We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPI3MRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpi3mr_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((scsi_opcode == UNMAP) &&
	    (pci_get_device(sc->mpi3mr_dev) == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    (targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
	    (mpi3mr_allow_unmap_to_fw(sc, ccb) == false))
		return;

	cm = mpi3mr_get_command(sc);
	if (cm == NULL || (sc->mpi3mr_flags & MPI3MR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpi3mr_release_command(cm);
		}
		if ((cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(cam_sc->sim, 1);
			cam_sc->flags |= MPI3MRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}

	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
		cm->data_dir = MPI3MR_READ;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
		cm->data_dir = MPI3MR_WRITE;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
		break;
	}

	if (csio->cdb_len > 16)
		mpi_control |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	req = (Mpi3SCSIIORequest_t *)&cm->io_request;
	bzero(req, sizeof(*req));
	req->Function = MPI3_FUNCTION_SCSI_IO;
	req->HostTag = cm->hosttag;
	req->DataLength = htole32(csio->dxfer_len);
	req->DevHandle = htole16(targ->dev_handle);

	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction. SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
		break;
	}

	req->Flags = htole32(mpi_control);

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0], csio->cdb_len);
	}

	cm->length = csio->dxfer_len;
	cm->targ = targ;
	int_to_lun(csio->ccb_h.target_lun, req->LUN);
	cm->ccb = ccb;
	csio->ccb_h.qos.sim_data = sbinuptime();
	queue_idx = get_req_queue_index(sc);
	cm->req_qidx = queue_idx;

	mpi3mr_dprint(sc, MPI3MR_TRACE,
	    "[QID:%d]: func: %s line:%d CDB: 0x%x targetid: %x SMID: 0x%x\n",
	    (queue_idx + 1), __func__, __LINE__, scsi_opcode,
	    csio->ccb_h.target_id, cm->hosttag);

	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mpi3mr_dev,
		    "%s: physical addresses not supported\n", __func__);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		device_printf(sc->mpi3mr_dev,
		    "%s: scatter gather is not supported\n", __func__);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	case CAM_DATA_VADDR:
	case CAM_DATA_BIO:
		if (csio->dxfer_len > (MPI3MR_SG_DEPTH * MPI3MR_4K_PGSZ)) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_TOO_BIG);
			mpi3mr_release_command(cm);
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		/* cm->length was already set from csio->dxfer_len above */
		if (cm->length)
			cm->data = csio->data_ptr;
		break;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	}

	/* Prepare SGEs and queue to hardware */
	mpi3mr_map_request(sc, cm);
}

static void
mpi3mr_enqueue_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
{
	static int ratelimit;
	struct mpi3mr_op_req_queue *opreqq = &sc->op_req_q[cm->req_qidx];
	struct mpi3mr_throttle_group_info *tg = NULL;
	uint32_t data_len_blks = 0;
	uint32_t tracked_io_sz = 0;
	uint32_t ioc_pend_data_len = 0, tg_pend_data_len = 0;
	struct mpi3mr_target *targ = cm->targ;
	union ccb *ccb = cm->ccb;
	Mpi3SCSIIORequest_t *req = (Mpi3SCSIIORequest_t *)&cm->io_request;
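
	/*
	 * I/O throttling: the transfer size is expressed in 512-byte
	 * blocks (dxfer_len >> 9; e.g. a 1 MiB request is 2048 blocks).
	 * Large I/Os are accumulated in per-IOC and, for VDs, per-
	 * throttle-group pending counters; once a high watermark is
	 * crossed, the divert flag is set so subsequent I/Os are marked
	 * MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE for firmware-managed
	 * throttling.
	 */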
	if (sc->iot_enable) {
		data_len_blks = ccb->csio.dxfer_len >> 9;

		if ((data_len_blks >= sc->io_throttle_data_length) &&
		    targ->io_throttle_enabled) {
			tracked_io_sz = data_len_blks;
			tg = targ->throttle_group;
			if (tg) {
				mpi3mr_atomic_add(&sc->pend_large_data_sz,
				    data_len_blks);
				mpi3mr_atomic_add(&tg->pend_large_data_sz,
				    data_len_blks);

				ioc_pend_data_len = mpi3mr_atomic_read(
				    &sc->pend_large_data_sz);
				tg_pend_data_len = mpi3mr_atomic_read(
				    &tg->pend_large_data_sz);

				if ((ratelimit++ % 1000) == 0) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "large vd_io persist_id(%d), handle(0x%04x), data_len(%d), "
					    "ioc_pending(%d), tg_pending(%d), ioc_high(%d), tg_high(%d)\n",
					    targ->per_id, targ->dev_handle,
					    data_len_blks, ioc_pend_data_len,
					    tg_pend_data_len, sc->io_throttle_high,
					    tg->high);
				}

				if (!tg->io_divert && ((ioc_pend_data_len >=
				    sc->io_throttle_high) ||
				    (tg_pend_data_len >= tg->high))) {
					tg->io_divert = 1;
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "VD: Setting divert flag for tg_id(%d), persist_id(%d)\n",
					    tg->id, targ->per_id);
					if (sc->mpi3mr_debug & MPI3MR_IOT)
						mpi3mr_print_cdb(ccb);
					mpi3mr_set_io_divert_for_all_vd_in_tg(sc,
					    tg, 1);
				}
			} else {
				mpi3mr_atomic_add(&sc->pend_large_data_sz,
				    data_len_blks);
				ioc_pend_data_len = mpi3mr_atomic_read(
				    &sc->pend_large_data_sz);
				if ((ratelimit++ % 1000) == 0) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "large pd_io persist_id(%d), handle(0x%04x), data_len(%d), "
					    "ioc_pending(%d), ioc_high(%d)\n",
					    targ->per_id, targ->dev_handle,
					    data_len_blks, ioc_pend_data_len,
					    sc->io_throttle_high);
				}

				if (ioc_pend_data_len >= sc->io_throttle_high) {
					targ->io_divert = 1;
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "PD: Setting divert flag for persist_id(%d)\n",
					    targ->per_id);
					if (sc->mpi3mr_debug & MPI3MR_IOT)
						mpi3mr_print_cdb(ccb);
				}
			}
		}

		if (targ->io_divert) {
			req->MsgFlags |= MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
			req->Flags = htole32(le32toh(req->Flags) |
			    MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING);
		}
	}

	if (mpi3mr_submit_io(sc, opreqq, (U8 *)&cm->io_request)) {
		if (tracked_io_sz) {
			mpi3mr_atomic_sub(&sc->pend_large_data_sz, tracked_io_sz);
			if (tg)
				mpi3mr_atomic_sub(&tg->pend_large_data_sz,
				    tracked_io_sz);
		}
		mpi3mr_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
	} else {
		callout_reset_sbt(&cm->callout, mstosbt(ccb->ccb_h.timeout), 0,
		    mpi3mr_scsiio_timeout, cm, 0);
		cm->callout_owner = true;
		mpi3mr_atomic_inc(&sc->fw_outstanding);
		mpi3mr_atomic_inc(&targ->outstanding);
		if (mpi3mr_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
			sc->io_cmds_highwater++;
	}

	return;
}

static void
mpi3mr_cam_poll(struct cam_sim *sim)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_irq_context *irq_ctx;
	struct mpi3mr_softc *sc;
	int i;

	cam_sc = cam_sim_softc(sim);
	sc = cam_sc->sc;

	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "func: %s line: %d is called\n",
	    __func__, __LINE__);

	for (i = 0; i < sc->num_queues; i++) {
		irq_ctx = sc->irq_ctx + i;
		if (irq_ctx->op_reply_q->qid) {
			mpi3mr_complete_io_cmd(sc, irq_ctx);
		}
	}
}
static void
mpi3mr_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_target *targ;

	cam_sc = cam_sim_softc(sim);

	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE,
	    "ccb func_code 0x%x target id: 0x%x\n",
	    ccb->ccb_h.func_code, ccb->ccb_h.target_id);

	mtx_assert(&cam_sc->sc->mpi3mr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = cam_sc->maxtargets - 1;
		cpi->max_lun = 0;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = cam_sc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Broadcom", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		targ = mpi3mr_find_target_by_per_id(cam_sc, ccb->ccb_h.target_id);

		if (targ && (targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
		    ((targ->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)) {
			cpi->maxio = targ->dev_spec.pcie_inf.mdts;
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
			    "PCI device target_id: %u max io size: %u\n",
			    ccb->ccb_h.target_id, cpi->maxio);
		} else {
			cpi->maxio = PAGE_SIZE * (MPI3MR_SG_DEPTH - 1);
		}
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_sas *sas;
		struct ccb_trans_settings_scsi *scsi;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < cam_sc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = mpi3mr_find_target_by_per_id(cam_sc, cts->ccb_h.target_id);

		if (targ == NULL) {
			mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE,
			    "Device with target ID: 0x%x does not exist\n",
			    cts->ccb_h.target_id);
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		if ((targ->dev_handle == 0x0) || (targ->dev_removed == 1)) {
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
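
		/*
		 * targ->link_rate holds the MPI3 negotiated logical link
		 * rate; values 0x08-0x0b correspond to SAS 1.5/3/6/12 Gb/s,
		 * reported to CAM below in Kb/s units.
		 */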
		switch (targ->link_rate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action "
		    "XPT_RESET_DEV\n");
		/* Complete the CCB rather than leaking it on an early return */
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action faking success "
		    "for abort or reset\n");
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpi3mr_action_scsiio(cam_sc, ccb);
		return;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);
}

void
mpi3mr_startup_increment(struct mpi3mr_cam_softc *cam_sc)
{
	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
		if (cam_sc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
			    "%s freezing simq\n", __func__);
			xpt_hold_boot();
		}
		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
		    cam_sc->startup_refcount);
	}
}

void
mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc)
{
	if (cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) {
		cam_sc->flags &= ~MPI3MRSAS_QUEUE_FROZEN;
		xpt_release_simq(cam_sc->sim, 1);
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "Unfreezing SIM queue\n");
	}
}
void
mpi3mr_rescan_target(struct mpi3mr_softc *sc, struct mpi3mr_target *targ)
{
	struct mpi3mr_cam_softc *cam_sc = sc->cam_sc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	pathid = cam_sim_path(cam_sc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		targetid = targ->per_id;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s target id 0x%x\n", __func__, targetid);
	xpt_rescan(ccb);
}

void
mpi3mr_startup_decrement(struct mpi3mr_cam_softc *cam_sc)
{
	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
		if (--cam_sc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
			    "%s releasing simq\n", __func__);
			cam_sc->flags &= ~MPI3MRSAS_IN_STARTUP;
			xpt_release_simq(cam_sc->sim, 1);
			xpt_release_boot();
		}
		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
		    cam_sc->startup_refcount);
	}
}

static void
mpi3mr_fw_event_free(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
{
	if (!fw_event)
		return;

	if (fw_event->event_data != NULL) {
		free(fw_event->event_data, M_MPI3MR);
		fw_event->event_data = NULL;
	}

	free(fw_event, M_MPI3MR);
	fw_event = NULL;
}

static void
mpi3mr_freeup_events(struct mpi3mr_softc *sc)
{
	struct mpi3mr_fw_event_work *fw_event = NULL;

	/*
	 * Use fwevt_lock here; the event queue is manipulated under that
	 * same lock in mpi3mr_firmware_event_work().
	 */
	mtx_lock(&sc->fwevt_lock);
	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
		mpi3mr_fw_event_free(sc, fw_event);
	}
	mtx_unlock(&sc->fwevt_lock);
}
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_softc *sc,
    Mpi3EventDataSasTopologyChangeList_t *event_data)
{
	int i;
	U16 handle;
	U8 reason_code, phy_number;
	const char *status_str = NULL;
	U8 link_rate, prev_link_rate;

	switch (event_data->ExpStatus) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}

	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) "
	    "start_phy(%02d), num_entries(%d)\n", __func__,
	    le16toh(event_data->ExpanderDevHandle),
	    le16toh(event_data->EnclosureHandle),
	    event_data->StartPhyNum, event_data->NumEntries);
	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16toh(event_data->PhyEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		phy_number = event_data->StartPhyNum + i;
		reason_code = event_data->PhyEntry[i].Status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link rate change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
		prev_link_rate = event_data->PhyEntry[i].LinkRate & 0xF;
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s:"
		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
		    phy_number, handle, status_str, link_rate, prev_link_rate);
	}
}

static void
mpi3mr_process_sastopochg_evt(struct mpi3mr_softc *sc,
    struct mpi3mr_fw_event_work *fwevt)
{
	Mpi3EventDataSasTopologyChangeList_t *event_data =
	    (Mpi3EventDataSasTopologyChangeList_t *)fwevt->event_data;
	int i;
	U16 handle;
	U8 reason_code, link_rate;
	struct mpi3mr_target *target = NULL;

	mpi3mr_sastopochg_evt_debug(sc, event_data);

	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16toh(event_data->PhyEntry[i].AttachedDevHandle);
		link_rate = event_data->PhyEntry[i].LinkRate >> 4;

		if (!handle)
			continue;
		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);

		if (!target)
			continue;

		target->link_rate = link_rate;
		reason_code = event_data->PhyEntry[i].Status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (target->exposed_to_os)
				mpi3mr_remove_device_from_os(sc,
				    target->dev_handle);
			mpi3mr_remove_device_from_list(sc, target, false);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			break;
		default:
			break;
		}
	}

	/*
	 * refcount was incremented for this event in
	 * mpi3mr_evt_handler. Decrement it here because the event has
	 * been processed.
	 */
	mpi3mr_startup_decrement(sc->cam_sc);
	return;
}
static inline void
mpi3mr_logdata_evt_bh(struct mpi3mr_softc *sc,
    struct mpi3mr_fw_event_work *fwevt)
{
	mpi3mr_app_save_logdata(sc, fwevt->event_data,
	    fwevt->event_data_size);
}

static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_softc *sc,
    Mpi3EventDataPcieTopologyChangeList_t *event_data)
{
	int i;
	U16 handle;
	U16 reason_code;
	U8 port_number;
	const char *status_str = NULL;
	U8 link_rate, prev_link_rate;

	switch (event_data->SwitchStatus) {
	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :pcie topology change: (%s)\n",
	    __func__, status_str);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x), "
	    "start_port(%02d), num_entries(%d)\n", __func__,
	    le16toh(event_data->SwitchDevHandle),
	    le16toh(event_data->EnclosureHandle),
	    event_data->StartPortNum, event_data->NumEntries);
	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16toh(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		port_number = event_data->StartPortNum + i;
		reason_code = event_data->PortEntry[i].PortStatus;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link rate change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->PortEntry[i].CurrentPortInfo &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "%s :\tport(%02d), attached_handle(0x%04x): %s:"
		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
		    port_number, handle, status_str, link_rate, prev_link_rate);
	}
}

static void mpi3mr_process_pcietopochg_evt(struct mpi3mr_softc *sc,
    struct mpi3mr_fw_event_work *fwevt)
{
	Mpi3EventDataPcieTopologyChangeList_t *event_data =
	    (Mpi3EventDataPcieTopologyChangeList_t *)fwevt->event_data;
	int i;
	U16 handle;
	U8 reason_code, link_rate;
	struct mpi3mr_target *target = NULL;

	mpi3mr_pcietopochg_evt_debug(sc, event_data);

	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16toh(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
		if (!target)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		target->link_rate = link_rate;

		reason_code = event_data->PortEntry[i].PortStatus;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (target->exposed_to_os)
				mpi3mr_remove_device_from_os(sc,
				    target->dev_handle);
			mpi3mr_remove_device_from_list(sc, target, false);
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			break;
		default:
			break;
		}
	}

	/*
	 * refcount was incremented for this event in
	 * mpi3mr_evt_handler. Decrement it here because the event has
	 * been processed.
	 */
	mpi3mr_startup_decrement(sc->cam_sc);
	return;
}
void mpi3mr_add_device(struct mpi3mr_softc *sc, U16 per_id)
{
	struct mpi3mr_target *target;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "Adding device(persistent id: 0x%x)\n", per_id);

	mpi3mr_startup_increment(sc->cam_sc);
	target = mpi3mr_find_target_by_per_id(sc->cam_sc, per_id);

	if (!target) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "Not available in driver's internal target list, "
		    "persistent_id: %d\n", per_id);
		goto out;
	}

	if (target->is_hidden) {
		mpi3mr_dprint(sc, MPI3MR_EVENT,
		    "Target is hidden, persistent_id: %d\n", per_id);
		goto out;
	}

	if (!target->exposed_to_os && !sc->reset_in_progress) {
		mpi3mr_rescan_target(sc, target);
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "Added device persistent_id: %d dev_handle: %d\n",
		    per_id, target->dev_handle);
		target->exposed_to_os = 1;
	}

out:
	mpi3mr_startup_decrement(sc->cam_sc);
}

int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
{
	U32 i = 0;
	int retval = 0;
	struct mpi3mr_target *target;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "Removing Device (dev_handle: %d)\n", handle);

	target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);

	if (!target) {
		/* Don't dereference the missing target in the message */
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "Device (dev_handle: %d) is already removed from driver's list\n",
		    handle);
		mpi3mr_rescan_target(sc, NULL);
		retval = -1;
		goto out;
	}

	target->flags |= MPI3MRSAS_TARGET_INREMOVAL;

	while (mpi3mr_atomic_read(&target->outstanding) && (i < 30)) {
		i++;
		if (!(i % 2)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "[%2d] waiting for outstanding commands to complete on target: %d\n",
			    i, target->per_id);
		}
		DELAY(1000 * 1000);
	}

	if (target->exposed_to_os && !sc->reset_in_progress) {
		mpi3mr_rescan_target(sc, target);
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "Removed device(persistent_id: %d dev_handle: %d)\n",
		    target->per_id, handle);
		target->exposed_to_os = 0;
	}

	target->flags &= ~MPI3MRSAS_TARGET_INREMOVAL;
out:
	return retval;
}

void mpi3mr_remove_device_from_list(struct mpi3mr_softc *sc,
    struct mpi3mr_target *target, bool must_delete)
{
	mtx_lock_spin(&sc->target_lock);
	if ((target->state == MPI3MR_DEV_REMOVE_HS_STARTED) ||
	    (must_delete == true)) {
		TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
		target->state = MPI3MR_DEV_DELETED;
	}
	mtx_unlock_spin(&sc->target_lock);

	if (target->state == MPI3MR_DEV_DELETED) {
		free(target, M_MPI3MR);
		target = NULL;
	}

	return;
}

/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @sc: Adapter instance reference
 * @fwevt: Firmware event
 *
 * Process Device Status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_softc *sc,
    struct mpi3mr_fw_event_work *fwevt)
{
	U16 dev_handle = 0;
	U8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_target *tgtdev = NULL;
	Mpi3EventDataDeviceStatusChange_t *evtdata =
	    (Mpi3EventDataDeviceStatusChange_t *)fwevt->event_data;

	dev_handle = le16toh(evtdata->DevHandle);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->ReasonCode);
	switch (evtdata->ReasonCode) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Unhandled reason code(0x%x)\n",
		    __func__, evtdata->ReasonCode);
		break;
	}

	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
	if (!tgtdev)
		return;

	if (uhide) {
		if (!tgtdev->exposed_to_os)
			mpi3mr_add_device(sc, tgtdev->per_id);
	}

	if (delete)
		mpi3mr_remove_device_from_os(sc, dev_handle);

	if (cleanup)
		mpi3mr_remove_device_from_list(sc, tgtdev, false);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @sc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_softc *sc,
    Mpi3DevicePage0_t *dev_pg0)
{
	struct mpi3mr_target *tgtdev = NULL;
	U16 dev_handle = 0, perst_id = 0;

	perst_id = le16toh(dev_pg0->PersistentID);
	dev_handle = le16toh(dev_pg0->DevHandle);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
	    __func__, dev_handle, perst_id);
	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
	if (!tgtdev)
		return;

	mpi3mr_update_device(sc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->exposed_to_os)
		mpi3mr_add_device(sc, perst_id);

	if (tgtdev->is_hidden && tgtdev->exposed_to_os)
		mpi3mr_remove_device_from_os(sc, tgtdev->dev_handle);
}
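
/*
 * Firmware events are queued by mpi3mr_evt_handler and processed here
 * in taskqueue (bottom-half) context.  The worker drops fwevt_lock
 * while each event is handled so new events can continue to be queued,
 * and events whose descriptors request it (send_ack) are acknowledged
 * back to the firmware once processing is done.
 */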
static void
mpi3mr_fw_work(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
{
	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
		goto out;

	if (!fw_event->process_event)
		goto evt_ack;

	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Working on Event: [%x]\n",
	    event_count++, __func__, fw_event->event);

	switch (fw_event->event) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		Mpi3DevicePage0_t *dev_pg0 =
		    (Mpi3DevicePage0_t *)fw_event->event_data;
		mpi3mr_add_device(sc, le16toh(dev_pg0->PersistentID));
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		mpi3mr_devinfochg_evt_bh(sc,
		    (Mpi3DevicePage0_t *)fw_event->event_data);
		break;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		mpi3mr_devstatuschg_evt_bh(sc, fw_event);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		mpi3mr_process_sastopochg_evt(sc, fw_event);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		mpi3mr_process_pcietopochg_evt(sc, fw_event);
		break;
	}
	case MPI3_EVENT_LOG_DATA:
	{
		mpi3mr_logdata_evt_bh(sc, fw_event);
		break;
	}
	default:
		mpi3mr_dprint(sc, MPI3MR_TRACE, "Unhandled event 0x%0X\n",
		    fw_event->event);
		break;
	}

evt_ack:
	if (fw_event->send_ack) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Process event ACK for event 0x%0X\n",
		    fw_event->event);
		mpi3mr_process_event_ack(sc, fw_event->event,
		    fw_event->event_context);
	}

out:
	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Event Free: [%x]\n",
	    event_count, __func__, fw_event->event);

	mpi3mr_fw_event_free(sc, fw_event);
}
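
/**
 * mpi3mr_firmware_event_work - Firmware event worker
 * @arg: Adapter softc (passed through the taskqueue)
 * @pending: Taskqueue pending count (unused)
 *
 * Taskqueue worker which drains the firmware event queue.  The
 * fwevt lock is dropped around mpi3mr_fw_work() so that new events
 * can be queued while an event is being processed.
 */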
void
mpi3mr_firmware_event_work(void *arg, int pending)
{
	struct mpi3mr_fw_event_work *fw_event;
	struct mpi3mr_softc *sc;

	sc = (struct mpi3mr_softc *)arg;

	mtx_lock(&sc->fwevt_lock);
	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
		mtx_unlock(&sc->fwevt_lock);
		mpi3mr_fw_work(sc, fw_event);
		mtx_lock(&sc->fwevt_lock);
	}
	mtx_unlock(&sc->fwevt_lock);
}

/**
 * mpi3mr_cam_attach - CAM layer registration
 * @sc: Adapter instance reference
 *
 * This function does simq allocation, SIM and xpt_bus registration,
 * event taskqueue initialization and async event handler
 * registration.
 *
 * Return: 0 on success and proper error codes on failure
 */
int
mpi3mr_cam_attach(struct mpi3mr_softc *sc)
{
	struct mpi3mr_cam_softc *cam_sc;
	cam_status status;
	int unit, error = 0, reqs;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "Starting CAM Attach\n");

	/* malloc(9) with M_WAITOK cannot fail, so no NULL check is needed. */
	cam_sc = malloc(sizeof(struct mpi3mr_cam_softc), M_MPI3MR,
	    M_WAITOK | M_ZERO);

	cam_sc->maxtargets = sc->facts.max_perids + 1;

	TAILQ_INIT(&cam_sc->tgt_list);

	sc->cam_sc = cam_sc;
	cam_sc->sc = sc;

	reqs = sc->max_host_ios;

	if ((cam_sc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpi3mr_dev);
	cam_sc->sim = cam_sim_alloc(mpi3mr_cam_action, mpi3mr_cam_poll, "mpi3mr",
	    cam_sc, unit, &sc->mpi3mr_mtx, reqs, reqs, cam_sc->devq);
	if (cam_sc->sim == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&cam_sc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&cam_sc->ev_task, 0, mpi3mr_firmware_event_work, sc);
	cam_sc->ev_tq = taskqueue_create("mpi3mr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &cam_sc->ev_tq);
	taskqueue_start_threads(&cam_sc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpi3mr_dev));

	mtx_lock(&sc->mpi3mr_mtx);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(cam_sc->sim, sc->mpi3mr_dev, 0)) != 0) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "Error 0x%x registering SCSI bus\n", error);
		mtx_unlock(&sc->mpi3mr_mtx);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	cam_sc->flags |= MPI3MRSAS_IN_STARTUP | MPI3MRSAS_IN_DISCOVERY;
	sc->cam_sc->startup_refcount = 0;
	mpi3mr_startup_increment(cam_sc);

	callout_init(&cam_sc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&cam_sc->path, /*periph*/NULL,
	    cam_sim_path(sc->cam_sc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "Error 0x%x creating sim path\n", status);
		cam_sc->path = NULL;
		mpi3mr_dprint(sc, MPI3MR_INFO, "EEDP capabilities disabled.\n");
	}

	mtx_unlock(&sc->mpi3mr_mtx);

	error = mpi3mr_register_events(sc);

out:
	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s Exiting CAM attach, error: 0x%x\n",
	    __func__, error);
	return (error);
}
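
/**
 * mpi3mr_cam_detach - CAM layer deregistration
 * @sc: Adapter instance reference
 *
 * Tear down everything mpi3mr_cam_attach() set up: drain and free
 * the event taskqueue, deregister the SIM and bus from CAM, free the
 * SIM queue, and release all targets on the driver's internal target
 * list.
 *
 * Return: 0 always.
 */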
int
mpi3mr_cam_detach(struct mpi3mr_softc *sc)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_target *target;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Starting CAM detach\n", __func__);
	if (sc->cam_sc == NULL)
		return (0);

	cam_sc = sc->cam_sc;

	mpi3mr_freeup_events(sc);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * not held so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (cam_sc->ev_tq != NULL)
		taskqueue_free(cam_sc->ev_tq);

	mtx_lock(&sc->mpi3mr_mtx);

	while (cam_sc->startup_refcount != 0)
		mpi3mr_startup_decrement(cam_sc);

	/* Deregister our async handler */
	if (cam_sc->path != NULL) {
		xpt_free_path(cam_sc->path);
		cam_sc->path = NULL;
	}

	if (cam_sc->flags & MPI3MRSAS_IN_STARTUP)
		xpt_release_simq(cam_sc->sim, 1);

	if (cam_sc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(cam_sc->sim));
		cam_sim_free(cam_sc->sim, FALSE);
	}

	mtx_unlock(&sc->mpi3mr_mtx);

	if (cam_sc->devq != NULL)
		cam_simq_free(cam_sc->devq);

	/*
	 * Free every target left on the driver's internal target list.
	 * The spin lock is dropped before each free() and the list is
	 * rescanned from the head afterwards.
	 */
get_target:
	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
		TAILQ_REMOVE(&cam_sc->tgt_list, target, tgt_next);
		mtx_unlock_spin(&sc->target_lock);
		goto out_tgt_free;
	}
	mtx_unlock_spin(&sc->target_lock);
out_tgt_free:
	if (target) {
		free(target, M_MPI3MR);
		target = NULL;
		goto get_target;
	}

	free(cam_sc, M_MPI3MR);
	sc->cam_sc = NULL;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Exiting CAM detach\n", __func__);
	return (0);
}