/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "dev/mrsas/mrsas.h"

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>

#include <sys/time.h>			/* XXX for pcpu.h */
#include <sys/pcpu.h>			/* XXX for PCPU_GET */

#define	smp_processor_id()	PCPU_GET(cpuid)

/*
 * Function prototypes
 */
int	mrsas_cam_attach(struct mrsas_softc *sc);
int	mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb);
int	mrsas_bus_scan(struct mrsas_softc *sc);
int	mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
int
mrsas_map_request(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd, union ccb *ccb);
int
mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb);
int
mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb);
int
mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible);
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, u_int32_t device_id,
    MRSAS_RAID_SCSI_IO_REQUEST * io_request);
void	mrsas_xpt_freeze(struct mrsas_softc *sc);
void	mrsas_xpt_release(struct mrsas_softc *sc);
void	mrsas_cam_detach(struct mrsas_softc *sc);
void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
void	mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
void	mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
    u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size);
static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
static void mrsas_cam_poll(struct cam_sim *sim);
static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
static void mrsas_scsiio_timeout(void *data);
static int mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t id, u_int32_t bus_id);
static void mrsas_tm_response_code(struct mrsas_softc *sc,
    MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply);
static int mrsas_issue_tm(struct mrsas_softc *sc,
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc);
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static int32_t
mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
    union ccb *ccb);
struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
MRSAS_REQUEST_DESCRIPTOR_UNION *
	mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);

extern int mrsas_reset_targets(struct mrsas_softc *sc);
extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
extern u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_isr(void *arg);
extern void mrsas_aen_handler(struct mrsas_softc *sc);
extern u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc,
    struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context,
    MR_DRV_RAID_MAP_ALL * map);
extern u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
extern u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_disable_intr(struct mrsas_softc *sc);
extern void mrsas_enable_intr(struct mrsas_softc *sc);
void	mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd);

/*
 * mrsas_cam_attach:	Main entry to CAM subsystem
 * input:		Adapter instance soft state
 *
 * This function is called from mrsas_attach() during initialization to perform
 * SIM allocations and XPT bus registration. If the kernel version is 7.4 or
 * earlier, it also initiates a bus scan.
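 *
 * Note: a single devq, sized by sc->max_scsi_cmds, is allocated here and
 * shared by both SIMs; it is therefore freed only once on teardown.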
 */
int
mrsas_cam_attach(struct mrsas_softc *sc)
{
	struct cam_devq *devq;
	int mrsas_cam_depth;

	mrsas_cam_depth = sc->max_scsi_cmds;

	if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
		device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
		return (ENOMEM);
	}
	/*
	 * Create SIM for bus 0 and register, also create path
	 */
	sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
	    mrsas_cam_depth, devq);
	if (sc->sim_0 == NULL) {
		cam_simq_free(devq);
		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
		return (ENXIO);
	}
	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
	sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mrsas_dev));
	mtx_lock(&sc->sim_lock);
	if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) {
		cam_sim_free(sc->sim_0, TRUE);	/* passing true frees the devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0, TRUE);	/* passing true will free the
						 * devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_lock);

	/*
	 * Create SIM for bus 1 and register, also create path
	 */
	sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
	    mrsas_cam_depth, devq);
	if (sc->sim_1 == NULL) {
		cam_simq_free(devq);
		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
		return (ENXIO);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) {
		cam_sim_free(sc->sim_1, TRUE);	/* passing true frees the devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
	    CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		cam_sim_free(sc->sim_1, TRUE);
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_lock);

#if (__FreeBSD_version <= 704000)
	if (mrsas_bus_scan(sc)) {
		device_printf(sc->mrsas_dev, "Error in bus scan.\n");
		return (1);
	}
#endif
	return (0);
}

/*
 * mrsas_cam_detach:	De-allocates and tears down CAM
 * input:		Adapter instance soft state
 *
 * De-registers and frees the paths and SIMs.
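 * Because the two SIMs share one devq, sim_0 is freed with FALSE (keep
 * the devq) and sim_1 with TRUE (free the shared devq exactly once).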
 */
void
mrsas_cam_detach(struct mrsas_softc *sc)
{
	if (sc->ev_tq != NULL)
		taskqueue_free(sc->ev_tq);
	mtx_lock(&sc->sim_lock);
	if (sc->path_0)
		xpt_free_path(sc->path_0);
	if (sc->sim_0) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0, FALSE);
	}
	if (sc->path_1)
		xpt_free_path(sc->path_1);
	if (sc->sim_1) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		cam_sim_free(sc->sim_1, TRUE);
	}
	mtx_unlock(&sc->sim_lock);
}

/*
 * mrsas_action:	SIM callback entry point
 * input:		pointer to SIM
 *			pointer to CAM Control Block
 *
 * This function processes CAM subsystem requests. The type of request is stored
 * in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary because
 * ccb->cpi.maxio is not supported for FreeBSD version 7.4 or earlier.
 */
static void
mrsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;

	/*
	 * Check if the system is going down or the adapter is in an
	 * unrecoverable critical error.
	 */
	if (sc->remove_in_progress ||
	    (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		{
			device_id = ccb_h->target_id;

			/*
			 * bus 0 is LD, bus 1 is for system-PD
			 */
			if (cam_sim_bus(sim) == 1 &&
			    sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
			} else {
				if (mrsas_startio(sc, sim, ccb)) {
					ccb->ccb_h.status |= CAM_REQ_INVALID;
					xpt_done(ccb);
				}
			}
			break;
		}
	case XPT_ABORT:
		{
			ccb->ccb_h.status = CAM_UA_ABORT;
			xpt_done(ccb);
			break;
		}
	case XPT_RESET_BUS:
		{
			xpt_done(ccb);
			break;
		}
	case XPT_GET_TRAN_SETTINGS:
		{
			ccb->cts.protocol = PROTO_SCSI;
			ccb->cts.protocol_version = SCSI_REV_2;
			ccb->cts.transport = XPORT_SPI;
			ccb->cts.transport_version = 2;
			ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
			ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
			ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
			ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	case XPT_SET_TRAN_SETTINGS:
		{
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			xpt_done(ccb);
			break;
		}
	case XPT_CALC_GEOMETRY:
		{
			cam_calc_geometry(&ccb->ccg, 1);
			xpt_done(ccb);
			break;
		}
	case XPT_PATH_INQ:
		{
			ccb->cpi.version_num = 1;
			ccb->cpi.hba_inquiry = 0;
			ccb->cpi.target_sprt = 0;
#if (__FreeBSD_version >= 902001)
			ccb->cpi.hba_misc = PIM_UNMAPPED;
#else
			ccb->cpi.hba_misc = 0;
#endif
			ccb->cpi.hba_eng_cnt = 0;
			ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
			ccb->cpi.unit_number = cam_sim_unit(sim);
			ccb->cpi.bus_id = cam_sim_bus(sim);
			ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
			ccb->cpi.base_transfer_speed = 150000;
			strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
			strlcpy(ccb->cpi.hba_vid, "AVAGO", HBA_IDLEN);
			strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
			ccb->cpi.transport = XPORT_SPI;
			ccb->cpi.transport_version = 2;
			ccb->cpi.protocol = PROTO_SCSI;
			ccb->cpi.protocol_version = SCSI_REV_2;
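			/*
			 * Advertise a per-bus target limit; the two SIMs
			 * registered in mrsas_cam_attach() serve different
			 * target spaces.
			 */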
			if (ccb->cpi.bus_id == 0)
				ccb->cpi.max_target = MRSAS_MAX_PD - 1;
			else
				ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
#if (__FreeBSD_version > 704000)
			ccb->cpi.maxio = sc->max_num_sge * MRSAS_PAGE_SIZE;
#endif
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	default:
		{
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
	}
}

/*
 * mrsas_scsiio_timeout:	Callback function for IO timed out
 * input:			mpt command context
 *
 * This function will execute after the timeout value provided by the ccb
 * header from the CAM layer, if the timer expires. The driver runs the timer
 * for all DCDB and LDIO commands coming from the CAM layer. This function is
 * the callback for IO timeout and it runs in no-sleep context. Set
 * do_timedout_reset in the Adapter context so that OCR/Kill adapter is
 * executed from ocr_thread context.
 */
static void
mrsas_scsiio_timeout(void *data)
{
	struct mrsas_mpt_cmd *cmd;
	struct mrsas_softc *sc;
	u_int32_t target_id;

	if (!data)
		return;

	cmd = (struct mrsas_mpt_cmd *)data;
	sc = cmd->sc;

	if (cmd->ccb_ptr == NULL) {
		printf("command timeout with NULL ccb\n");
		return;
	}

	/*
	 * Below callout is dummy entry so that it will be cancelled from
	 * mrsas_cmd_done(). Now Controller will go to OCR/Kill Adapter based
	 * on OCR enable/disable property of Controller from ocr_thread
	 * context.
	 */
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif

	if (cmd->ccb_ptr->cpi.bus_id == 0)
		target_id = cmd->ccb_ptr->ccb_h.target_id;
	else
		target_id = (cmd->ccb_ptr->ccb_h.target_id + (MRSAS_MAX_PD - 1));

	/* Save the cmd to be processed for TM, if it is not there in the array */
	if (sc->target_reset_pool[target_id] == NULL) {
		sc->target_reset_pool[target_id] = cmd;
		mrsas_atomic_inc(&sc->target_reset_outstanding);
	}

	return;
}

/*
 * mrsas_startio:	SCSI IO entry point
 * input:		Adapter instance soft state
 *			pointer to SIM
 *			pointer to CAM Control Block
 *
 * This function is the SCSI IO entry point and it initiates IO processing. It
 * copies the IO and, depending on whether the IO is a read/write, a
 * non-read/write, or a system-PD command, calls mrsas_build_ldio_rw(),
 * mrsas_build_ldio_nonrw(), or mrsas_build_syspdio(). It returns 0 if the
 * command is sent to firmware successfully, otherwise it returns 1.
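 *
 * For RAID 1/10 fast-path write capable IOs, it may additionally consume a
 * peer MPT command and fire two request descriptors (see below).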
 */
static int32_t
mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct mrsas_mpt_cmd *cmd, *r1_cmd = NULL;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u_int8_t cmd_type;

	if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE &&
	    (!sc->fw_sync_cache_support)) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return (0);
	}
	ccb_h->status |= CAM_SIM_QUEUED;
	cmd = mrsas_get_mpt_cmd(sc);

	if (!cmd) {
		ccb_h->status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return (0);
	}

	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if (ccb_h->flags & CAM_DIR_IN)
			cmd->flags |= MRSAS_DIR_IN;
		if (ccb_h->flags & CAM_DIR_OUT)
			cmd->flags |= MRSAS_DIR_OUT;
	} else
		cmd->flags = MRSAS_DIR_NONE;	/* no data */

	/* For FreeBSD 9.2 and higher */
#if (__FreeBSD_version >= 902001)
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	case CAM_DATA_SG:
		device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		goto done;
	case CAM_DATA_VADDR:
		if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_TOO_BIG;
			goto done;
		}
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	case CAM_DATA_BIO:
		if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_TOO_BIG;
			goto done;
		}
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto done;
	}
#else
	if (!(ccb_h->flags & CAM_DATA_PHYS)) {	/* Virtual data address */
		if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
			if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
				mrsas_release_mpt_cmd(cmd);
				ccb_h->status = CAM_REQ_TOO_BIG;
				goto done;
			}
			cmd->length = csio->dxfer_len;
			if (cmd->length)
				cmd->data = csio->data_ptr;
		} else {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_INVALID;
			goto done;
		}
	} else {			/* Data addresses are physical. */
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	}
#endif
	/* save ccb ptr */
	cmd->ccb_ptr = ccb;

	req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
		return (FAIL);
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
	cmd->request_desc = req_desc;

	if (ccb_h->flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
	mtx_lock(&sc->raidmap_lock);

	/* Check for IO type READ-WRITE targeted for Logical Volume */
	cmd_type = mrsas_find_io_type(sim, ccb);
	switch (cmd_type) {
	case READ_WRITE_LDIO:
		/* Build READ-WRITE IO for Logical Volume */
		if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			mrsas_release_mpt_cmd(cmd);
			return (1);
		}
		break;
	case NON_READ_WRITE_LDIO:
		/* Build NON READ-WRITE IO for Logical Volume */
		if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			mrsas_release_mpt_cmd(cmd);
			return (1);
		}
		break;
	case READ_WRITE_SYSPDIO:
	case NON_READ_WRITE_SYSPDIO:
		if (sc->secure_jbod_support &&
		    (cmd_type == NON_READ_WRITE_SYSPDIO)) {
			/* Build NON-RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				mrsas_release_mpt_cmd(cmd);
				return (1);
			}
		} else {
			/* Build RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				mrsas_release_mpt_cmd(cmd);
				return (1);
			}
		}
	}
	mtx_unlock(&sc->raidmap_lock);

	if (cmd->flags == MRSAS_DIR_IN)	/* from device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
	else if (cmd->flags == MRSAS_DIR_OUT)	/* to device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;

	cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
	cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
	cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = cmd->index;

	/*
	 * Start timer for IO timeout. Default timeout value is 180 seconds.
	 */
	cmd->callout_owner = true;
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif

	if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	/*
	 * If the IO is RAID 1/10 fast-path write capable, try to get a
	 * second command from the pool and construct it.
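	 * The peer command is built by mrsas_prepare_secondRaid1_IO() and
	 * shares this command's ccb pointer.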
	 * FW has confirmed that the LBA values of the two PDs that make up a
	 * single R1/10 LD are always the same.
	 */
	/*
	 * driver side count always should be less than max_fw_cmds to get
	 * new command
	 */
	if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
		mrsas_atomic_inc(&sc->fw_outstanding);
		mrsas_prepare_secondRaid1_IO(sc, cmd);
		mrsas_fire_cmd(sc, req_desc->addr.u.low,
		    req_desc->addr.u.high);
		r1_cmd = cmd->peer_cmd;
		mrsas_fire_cmd(sc, r1_cmd->request_desc->addr.u.low,
		    r1_cmd->request_desc->addr.u.high);
	} else {
		mrsas_fire_cmd(sc, req_desc->addr.u.low,
		    req_desc->addr.u.high);
	}

	return (0);

done:
	xpt_done(ccb);
	return (0);
}

/*
 * mrsas_find_io_type:	Determines the IO type of a command
 * input:		pointer to SIM
 *			pointer to CAM Control Block
 *
 * This function classifies the IO by its CDB opcode and the SIM bus: it
 * returns the read/write LDIO or system-PD class for read/write CDBs, and
 * the corresponding non-read/write class for all other CDBs.
 */
int
mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb)
{
	struct ccb_scsiio *csio = &(ccb->csio);

	switch (csio->cdb_io.cdb_bytes[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		return (cam_sim_bus(sim) ?
		    READ_WRITE_SYSPDIO : READ_WRITE_LDIO);
	default:
		return (cam_sim_bus(sim) ?
		    NON_READ_WRITE_SYSPDIO : NON_READ_WRITE_LDIO);
	}
}

/*
 * mrsas_get_mpt_cmd:	Get a cmd from free command pool
 * input:		Adapter instance soft state
 *
 * This function removes an MPT command from the command free list and
 * initializes it.
 */
struct mrsas_mpt_cmd *
mrsas_get_mpt_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mpt_cmd *cmd = NULL;

	mtx_lock(&sc->mpt_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
	} else {
		goto out;
	}

	memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	cmd->data = NULL;
	cmd->length = 0;
	cmd->flags = 0;
	cmd->error_code = 0;
	cmd->load_balance = 0;
	cmd->ccb_ptr = NULL;
out:
	mtx_unlock(&sc->mpt_cmd_pool_lock);
	return cmd;
}

/*
 * mrsas_release_mpt_cmd:	Return a cmd to free command pool
 * input:			Command packet for return to free command pool
 *
 * This function returns an MPT command to the free command list.
 */
void
mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
{
	struct mrsas_softc *sc = cmd->sc;

	mtx_lock(&sc->mpt_cmd_pool_lock);
	cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
	cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
	cmd->peer_cmd = NULL;
	cmd->cmd_completed = 0;
	memset((uint8_t *)cmd->io_request, 0,
	    sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
	TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	mtx_unlock(&sc->mpt_cmd_pool_lock);

	return;
}

/*
 * mrsas_get_request_desc:	Get request descriptor from array
 * input:			Adapter instance soft state
 *				SMID index
 *
 * This function returns a pointer to the request descriptor.
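 * Descriptors live in one contiguous array indexed by (SMID - 1), which is
 * why callers pass (cmd->index - 1).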
 */
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
{
	u_int8_t *p;

	KASSERT(index < sc->max_fw_cmds, ("req_desc is out of range"));
	p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;

	return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
}

/*
 * mrsas_prepare_secondRaid1_IO:	Prepare the second IO of a RAID 1
 *					fast-path write
 * input:				Adapter instance soft state
 *					Pointer to command packet
 */
void
mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
	struct mrsas_mpt_cmd *r1_cmd;

	r1_cmd = cmd->peer_cmd;
	req_desc = cmd->request_desc;

	/*
	 * copy the io request frame as well as 8 SGEs data for r1
	 * command
	 */
	memcpy(r1_cmd->io_request, cmd->io_request,
	    (sizeof(MRSAS_RAID_SCSI_IO_REQUEST)));
	memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
	    (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION)));

	/* sense buffer is different for r1 command */
	r1_cmd->io_request->SenseBufferLowAddress = r1_cmd->sense_phys_addr;
	r1_cmd->ccb_ptr = cmd->ccb_ptr;

	req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1);
	req_desc2->addr.Words = 0;
	r1_cmd->request_desc = req_desc2;
	req_desc2->SCSIIO.SMID = r1_cmd->index;
	req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
	r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
	r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
	r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
	cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
	    r1_cmd->index;
	r1_cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
	    cmd->index;
	/*
	 * MSIxIndex of both commands request descriptors
	 * should be same
	 */
	r1_cmd->request_desc->SCSIIO.MSIxIndex = cmd->request_desc->SCSIIO.MSIxIndex;
	/* span arm is different for r1 cmd */
	r1_cmd->io_request->RaidContext.raid_context_g35.spanArm =
	    cmd->io_request->RaidContext.raid_context_g35.spanArm + 1;

}

/*
 * mrsas_build_ldio_rw:	Builds an LDIO command
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *			Pointer to CCB
 *
 * This function builds the LDIO command packet. It returns 0 if the command is
 * built successfully, otherwise it returns a 1.
 */
int
mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	u_int32_t device_id;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;

	device_id = ccb_h->target_id;

	io_request = cmd->io_request;
	io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
	io_request->RaidContext.raid_context.status = 0;
	io_request->RaidContext.raid_context.exStatus = 0;

	/* just the cdb len, other flags zero, and ORed-in later for FP */
	io_request->IoFlags = csio->cdb_len;

	if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
		device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");

	io_request->DataLength = cmd->length;

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (cmd->sge_count > sc->max_num_sge) {
			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
			    "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
			return (FAIL);
		}
		if (sc->is_ventura)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE stores the lower 8 bits of sge_count;
			 * numSGEExt stores the upper 8 bits.
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
		}

	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (FAIL);
	}
	return (0);
}

/* stream detection on read and write IOs */
static void
mrsas_stream_detect(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    struct IO_REQUEST_INFO *io_info)
{
	u_int32_t device_id = io_info->ldTgtId;
	LD_STREAM_DETECT *current_ld_SD = sc->streamDetectByLD[device_id];
	u_int32_t *track_stream = &current_ld_SD->mruBitMap;
	u_int32_t streamNum, shiftedValues, unshiftedValues;
	u_int32_t indexValueMask, shiftedValuesMask;
	int i;
	boolean_t isReadAhead = false;
	STREAM_DETECT *current_SD;

	/* find possible stream */
	for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
		streamNum = (*track_stream >> (i * BITS_PER_INDEX_STREAM)) &
		    STREAM_MASK;
		current_SD = &current_ld_SD->streamTrack[streamNum];
		/*
		 * if we found a stream, update the raid context and
		 * also update the mruBitMap
		 */
		if (current_SD->nextSeqLBA &&
		    io_info->ldStartBlock >= current_SD->nextSeqLBA &&
		    (io_info->ldStartBlock <= (current_SD->nextSeqLBA + 32)) &&
		    (current_SD->isRead == io_info->isRead)) {
			if (io_info->ldStartBlock != current_SD->nextSeqLBA &&
			    (!io_info->isRead || !isReadAhead)) {
				/*
				 * Once the API is available we need to change this.
				 * At this point we are not allowing any gap.
				 */
				continue;
			}
			cmd->io_request->RaidContext.raid_context_g35.streamDetected = TRUE;
			current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
			/*
			 * update the mruBitMap LRU
			 */
			shiftedValuesMask = (1 << i * BITS_PER_INDEX_STREAM) - 1;
			shiftedValues = ((*track_stream & shiftedValuesMask) <<
			    BITS_PER_INDEX_STREAM);
			indexValueMask = STREAM_MASK << i * BITS_PER_INDEX_STREAM;
			unshiftedValues = (*track_stream) &
			    (~(shiftedValuesMask | indexValueMask));
			*track_stream =
			    (unshiftedValues | shiftedValues | streamNum);
			return;
		}
	}
	/*
	 * if we did not find any stream, create a new one from the least
	 * recently used
	 */
	streamNum = (*track_stream >>
	    ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
	current_SD = &current_ld_SD->streamTrack[streamNum];
	current_SD->isRead = io_info->isRead;
	current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
	*track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | streamNum);
	return;
}

/*
 * mrsas_setup_io:	Set up data including Fast Path I/O
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *			Pointer to CCB
 *
 * This function decodes the CDB, builds the RAID context, and sets up either
 * a fast-path or a firmware-path IO for the LD. It returns 0 if the command
 * is built successfully, otherwise it returns a 1.
 */
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, u_int32_t device_id,
    MRSAS_RAID_SCSI_IO_REQUEST * io_request)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	struct IO_REQUEST_INFO io_info;
	MR_DRV_RAID_MAP_ALL *map_ptr;
	struct mrsas_mpt_cmd *r1_cmd = NULL;

	MR_LD_RAID *raid;
	u_int8_t fp_possible;
	u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld;
	u_int32_t datalength = 0;

	io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;

	start_lba_lo = 0;
	start_lba_hi = 0;
	fp_possible = 0;

	/*
	 * READ_6 (0x08) or WRITE_6 (0x0A) cdb
	 */
	if (csio->cdb_len == 6) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[3];
		start_lba_lo &= 0x1FFFFF;
	}
	/*
	 * READ_10 (0x28) or WRITE_10 (0x2A) cdb
	 */
	else if (csio->cdb_len == 10) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	/*
	 * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
	 */
	else if (csio->cdb_len == 12) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	/*
	 * READ_16 (0x88) or WRITE_16 (0x8A) cdb
	 */
	else if (csio->cdb_len == 16) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
		start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = datalength;
	io_info.ldTgtId = device_id;
	io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;

	io_request->DataLength = cmd->length;

	switch (ccb_h->flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		io_info.isRead = 1;
		break;
	case CAM_DIR_OUT:
		io_info.isRead = 0;
		break;
	case CAM_DIR_NONE:
	default:
		mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
		break;
	}

	map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr);

	ld = MR_TargetIdToLdGet(device_id, map_ptr);
	if ((ld >= MAX_LOGICAL_DRIVES_EXT) || (!sc->fast_path_io)) {
		io_request->RaidContext.raid_context.regLockFlags = 0;
		fp_possible = 0;
	} else {
		if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext.raid_context, map_ptr))
			fp_possible = io_info.fpOkForIo;
	}

	raid = MR_LdRaidGet(ld, map_ptr);
	/* Store the TM capability value in cmd */
	cmd->tmCapable = raid->capability.tmCapable;

	cmd->request_desc->SCSIIO.MSIxIndex =
	    sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;

	if (sc->is_ventura) {
		if (sc->streamDetectByLD) {
			mtx_lock(&sc->stream_lock);
			mrsas_stream_detect(sc, cmd, &io_info);
			mtx_unlock(&sc->stream_lock);
			/*
			 * In Ventura, if a stream is detected for a read and
			 * it is read-ahead capable, make this IO an LDIO.
			 */
			if (io_request->RaidContext.raid_context_g35.streamDetected &&
			    io_info.isRead && io_info.raCapable)
				fp_possible = FALSE;
		}

		/*
		 * Set the raid 1/10 fast-path write capable bit in io_info.
		 * Note - reset peer_cmd and r1_alt_dev_handle if fp_possible
		 * is disabled after this point. Try not to add more checks
		 * for fp_possible toggling after this.
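		 * (When the peer command is allocated below, cmd->peer_cmd
		 * and r1_cmd->peer_cmd are cross-linked.)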
		 */
		if (fp_possible &&
		    (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) &&
		    (raid->level == 1) && !io_info.isRead) {
			r1_cmd = mrsas_get_mpt_cmd(sc);
			if (!r1_cmd) {
				fp_possible = FALSE;
				printf("Avago debug fp disable from %s %d \n",
				    __func__, __LINE__);
			} else {
				cmd->peer_cmd = r1_cmd;
				r1_cmd->peer_cmd = cmd;
			}
		}
	}

	if (fp_possible) {
		mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
		    start_lba_lo, ld_block_size);
		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if (sc->mrsas_gen3_ctrl) {
			if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
				    (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
				    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context.nseg = 0x1;
			io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
			io_request->RaidContext.raid_context.regLockFlags |=
			    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
			    MR_RL_FLAGS_SEQ_NUM_ENABLE);
		} else if (sc->is_ventura) {
			io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context_g35.nseg = 0x1;
			io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
			io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
			if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) {
				io_request->RaidContext.raid_context_g35.RAIDFlags =
				    (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
				    << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
			}
		}
		if ((sc->load_balance_info[device_id].loadBalanceFlag) &&
		    (io_info.isRead)) {
			io_info.devHandle =
			    mrsas_get_updated_dev_handle(sc,
			    &sc->load_balance_info[device_id], &io_info);
			cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
			cmd->pd_r1_lb = io_info.pd_after_lb;
			if (sc->is_ventura)
				io_request->RaidContext.raid_context_g35.spanArm = io_info.span_arm;
			else
				io_request->RaidContext.raid_context.spanArm = io_info.span_arm;
		} else
			cmd->load_balance = 0;

		if (sc->is_ventura)
			cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
		else
			cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;

		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
		io_request->DevHandle = io_info.devHandle;
	} else {
		/* Not FP IO */
		io_request->RaidContext.raid_context.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if (sc->mrsas_gen3_ctrl) {
			if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
				    (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
				    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context.regLockFlags |=
			    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
			    MR_RL_FLAGS_SEQ_NUM_ENABLE);
			io_request->RaidContext.raid_context.nseg = 0x1;
		} else if (sc->is_ventura) {
			io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
			io_request->RaidContext.raid_context_g35.nseg = 0x1;
		}
		io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
		io_request->DevHandle = device_id;
	}
	return (0);
}

/*
 * mrsas_build_ldio_nonrw:	Builds a non-read/write LDIO command
 * input:			Adapter instance soft state
 *				Pointer to command packet
 *				Pointer to CCB
 *
 * This function builds the LDIO command packet. It returns 0 if the command is
 * built successfully, otherwise it returns a 1.
 */
int
mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id, ld;
	MR_DRV_RAID_MAP_ALL *map_ptr;
	MR_LD_RAID *raid;
	RAID_CONTEXT *pRAID_Context;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;

	io_request = cmd->io_request;
	device_id = ccb_h->target_id;

	map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	ld = MR_TargetIdToLdGet(device_id, map_ptr);
	raid = MR_LdRaidGet(ld, map_ptr);
	/* get RAID_Context pointer */
	pRAID_Context = &io_request->RaidContext.raid_context;
	/* Store the TM capability value in cmd */
	cmd->tmCapable = raid->capability.tmCapable;

	/* FW path for LD Non-RW (SCSI management commands) */
	io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
	io_request->DevHandle = device_id;
	cmd->request_desc->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
	io_request->LUN[1] = ccb_h->target_lun & 0xF;
	io_request->DataLength = cmd->length;

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (cmd->sge_count > sc->max_num_sge) {
			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
			    "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
			return (1);
		}
		if (sc->is_ventura)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE stores the lower 8 bits of sge_count;
			 * numSGEExt stores the upper 8 bits.
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
		}
	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (1);
	}
	return (0);
}

/*
 * mrsas_build_syspdio:	Builds a DCDB command
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *			Pointer to CCB
 *
 * This function builds the DCDB inquiry command. It returns 0 if the command
 * is built successfully, otherwise it returns a 1.
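 *
 * fp_possible is supplied by the caller: mrsas_startio() passes 0 for
 * non-RW commands on secure-JBOD capable controllers and 1 otherwise.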
 */
int
mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;
	MR_DRV_RAID_MAP_ALL *local_map_ptr;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
	RAID_CONTEXT *pRAID_Context;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;

	io_request = cmd->io_request;
	/* get RAID_Context pointer */
	pRAID_Context = &io_request->RaidContext.raid_context;
	device_id = ccb_h->target_id;
	local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	io_request->RaidContext.raid_context.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
	    << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
	io_request->RaidContext.raid_context.regLockFlags = 0;
	io_request->RaidContext.raid_context.regLockRowLBA = 0;
	io_request->RaidContext.raid_context.regLockLength = 0;

	/* If FW supports PD sequence number */
	if (sc->use_seqnum_jbod_fp &&
	    sc->pd_list[device_id].driveType == 0x00) {
		/* Use the drive's sequence number */
		pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
		cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable;
		/* More than 256 PD/JBOD support for Ventura */
		if (sc->support_morethan256jbod)
			io_request->RaidContext.raid_context.VirtualDiskTgtId =
			    pd_sync->seq[device_id].pdTargetId;
		else
			io_request->RaidContext.raid_context.VirtualDiskTgtId =
			    device_id + 255;
		io_request->RaidContext.raid_context.configSeqNum = pd_sync->seq[device_id].seqNum;
		io_request->DevHandle = pd_sync->seq[device_id].devHandle;
		if (sc->is_ventura)
			io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
		else
			io_request->RaidContext.raid_context.regLockFlags |=
			    (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
		/*
		 * raid_context.Type = MPI2_TYPE_CUDA is valid only
		 * if FW supports the JBOD sequence number
		 */
		io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
		io_request->RaidContext.raid_context.nseg = 0x1;
	} else if (sc->fast_path_io) {
		/* Use the LD RAID map */
		io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
		io_request->RaidContext.raid_context.configSeqNum = 0;
		local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
		io_request->DevHandle =
		    local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
	} else {
		/* Want to send all IO via FW path */
		io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
		io_request->RaidContext.raid_context.configSeqNum = 0;
		io_request->DevHandle = MR_DEVHANDLE_INVALID;
	}

	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
	cmd->request_desc->SCSIIO.MSIxIndex =
	    sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
	if (!fp_possible) {
		/* system pd firmware path */
		io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		io_request->RaidContext.raid_context.timeoutValue =
		    local_map_ptr->raidMap.fpPdIoTimeoutSec;
		io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
	} else {
		/* system pd fast path */
		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
		io_request->RaidContext.raid_context.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec;

		/*
		 * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
		 * because the NON-RW cmds will now go via the FW queue
		 * and not the exception queue
		 */
		if (sc->mrsas_gen3_ctrl || sc->is_ventura)
			io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;

		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	}

	io_request->LUN[1] = ccb_h->target_lun & 0xF;
	io_request->DataLength = cmd->length;

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (cmd->sge_count > sc->max_num_sge) {
			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
			    "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
			return (1);
		}
		if (sc->is_ventura)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE stores the lower 8 bits of sge_count;
			 * numSGEExt stores the upper 8 bits.
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
		}
	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (1);
	}
	return (0);
}

/*
 * mrsas_map_request:	Map and load data
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *
 * For data from OS, map and load the data buffer into bus space. The SG list
 * is built in the callback. If the bus dmamap load is not successful,
 * cmd->error_code will contain the error code and a 1 is returned.
 */
int
mrsas_map_request(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd, union ccb *ccb)
{
	u_int32_t retcode = 0;
	struct cam_sim *sim;

	sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);

	if (cmd->data != NULL) {
		/* Map data buffer into bus space */
		mtx_lock(&sc->io_lock);
#if (__FreeBSD_version >= 902001)
		retcode = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb,
		    mrsas_data_load_cb, cmd, 0);
#else
		retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data,
		    cmd->length, mrsas_data_load_cb, cmd, BUS_DMA_NOWAIT);
#endif
		mtx_unlock(&sc->io_lock);
		if (retcode)
			device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
		if (retcode == EINPROGRESS) {
			device_printf(sc->mrsas_dev, "request load in progress\n");
			mrsas_freeze_simq(cmd, sim);
		}
	}
	if (cmd->error_code)
		return (1);
	return (retcode);
}

/*
 * mrsas_unmap_request:	Unmap and unload data
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *
 * This function unmaps and unloads data from OS.
 */
void
mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	if (cmd->data != NULL) {
		if (cmd->flags & MRSAS_DIR_IN)
			bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
		if (cmd->flags & MRSAS_DIR_OUT)
			bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
		mtx_lock(&sc->io_lock);
		bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
		mtx_unlock(&sc->io_lock);
	}
}

/*
 * mrsas_data_load_cb:	Callback entry point
 * input:		Pointer to command packet as argument
 *			Pointer to segment
 *			Number of segments
 *			Error
 *
 * This is the callback function of the bus dma map load. It builds the SG
 * list.
 */
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
	struct mrsas_softc *sc = cmd->sc;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
	pMpi25IeeeSgeChain64_t sgl_ptr;
	int i = 0, sg_processed = 0;

	if (error) {
		cmd->error_code = error;
		device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
		if (error == EFBIG) {
			cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
			return;
		}
	}
	if (cmd->flags & MRSAS_DIR_IN)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cmd->flags & MRSAS_DIR_OUT)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREWRITE);
	if (nseg > sc->max_num_sge) {
		device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
		return;
	}
	io_request = cmd->io_request;
	sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	if (nseg != 0) {
		for (i = 0; i < nseg; i++) {
			sgl_ptr->Address = segs[i].ds_addr;
			sgl_ptr->Length = segs[i].ds_len;
			sgl_ptr->Flags = 0;
			if (sc->mrsas_gen3_ctrl || sc->is_ventura) {
				if (i == nseg - 1)
					sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
			}
			sgl_ptr++;
			sg_processed = i + 1;
			if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
			    (nseg > sc->max_sge_in_main_msg)) {
				pMpi25IeeeSgeChain64_t sg_chain;

				if (sc->mrsas_gen3_ctrl || sc->is_ventura) {
					if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
					    != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
						cmd->io_request->ChainOffset = sc->chain_offset_io_request;
					else
						cmd->io_request->ChainOffset = 0;
				} else
					cmd->io_request->ChainOffset = sc->chain_offset_io_request;
				sg_chain = sgl_ptr;
				if (sc->mrsas_gen3_ctrl || sc->is_ventura)
					sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
				else
					sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
				sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
				sg_chain->Address = cmd->chain_frame_phys_addr;
				sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
			}
		}
	}
	cmd->sge_count = nseg;
}

/*
 * mrsas_freeze_simq:	Freeze SIM queue
 * input:		Pointer to command packet
 *			Pointer to SIM
 *
 * This function freezes the sim queue.
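 * CAM_RELEASE_SIMQ asks CAM to release the queue again once this ccb
 * completes, and CAM_REQUEUE_REQ requeues the request at that point.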
 */
static void
mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
{
	union ccb *ccb = (union ccb *)(cmd->ccb_ptr);

	xpt_freeze_simq(sim, 1);
	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	ccb->ccb_h.status |= CAM_REQUEUE_REQ;
}

void
mrsas_xpt_freeze(struct mrsas_softc *sc)
{
	xpt_freeze_simq(sc->sim_0, 1);
	xpt_freeze_simq(sc->sim_1, 1);
}

void
mrsas_xpt_release(struct mrsas_softc *sc)
{
	xpt_release_simq(sc->sim_0, 1);
	xpt_release_simq(sc->sim_1, 1);
}

/*
 * mrsas_cmd_done:	Perform remaining command completion
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *
 * This function calls unmap request and releases the MPT command.
 */
void
mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	mrsas_unmap_request(sc, cmd);

	mtx_lock(&sc->sim_lock);
	if (cmd->callout_owner) {
		callout_stop(&cmd->cm_callout);
		cmd->callout_owner = false;
	}
	xpt_done(cmd->ccb_ptr);
	cmd->ccb_ptr = NULL;
	mtx_unlock(&sc->sim_lock);
	mrsas_release_mpt_cmd(cmd);
}

/*
 * mrsas_cam_poll:	Polling entry point
 * input:		Pointer to SIM
 *
 * This function polls for command completions on all configured MSI-x
 * queues.
 */
static void
mrsas_cam_poll(struct cam_sim *sim)
{
	int i;
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);

	if (sc->msix_vectors != 0) {
		for (i = 0; i < sc->msix_vectors; i++) {
			mrsas_complete_cmd(sc, i);
		}
	} else {
		mrsas_complete_cmd(sc, 0);
	}
}

/*
 * mrsas_bus_scan:	Perform bus scan
 * input:		Adapter instance soft state
 *
 * This mrsas_bus_scan function is needed for FreeBSD 7.x. It should not be
 * called in FreeBSD 8.x and later versions, where the bus scan is automatic.
 */
int
mrsas_bus_scan(struct mrsas_softc *sc)
{
	union ccb *ccb_0;
	union ccb *ccb_1;

	if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
		return (ENOMEM);
	}
	if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
		xpt_free_ccb(ccb_0);
		return (ENOMEM);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb_0);
		xpt_free_ccb(ccb_1);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb_0);
		xpt_free_ccb(ccb_1);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb_0);
	xpt_rescan(ccb_1);

	return (0);
}

/*
 * mrsas_bus_scan_sim:	Perform bus scan per SIM
 * input:		Adapter instance soft state
 *			Pointer to SIM
 *
 * This function will be called from the event handler on LD creation/deletion
 * and JBOD on/off.
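 * The allocated ccb is handed off to xpt_rescan(), which owns it from that
 * point on.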
 */
int
mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
{
	union ccb *ccb;

	if ((ccb = xpt_alloc_ccb()) == NULL) {
		return (ENOMEM);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb);

	return (0);
}

/*
 * mrsas_track_scsiio:	Track IOs for a given target in the mpt_cmd_list
 * input:		Adapter instance soft state
 *			Target ID of the target
 *			Bus ID of the target
 *
 * This function checks for any pending IO in the whole mpt_cmd_list pool
 * with the bus_id and target_id passed in arguments. If some IO is found,
 * that means the target reset did not complete successfully.
 *
 * Returns FAIL if IOs are pending to the target device, else returns SUCCESS
 */
static int
mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t tgt_id, u_int32_t bus_id)
{
	int i;
	struct mrsas_mpt_cmd *mpt_cmd = NULL;

	for (i = 0; i < sc->max_fw_cmds; i++) {
		mpt_cmd = sc->mpt_cmd_list[i];

		/*
		 * Check if the target_id and bus_id are the same as those of
		 * the timed-out IO
		 */
		if (mpt_cmd->ccb_ptr) {
			/* bus_id = 1 denotes a VD */
			if (bus_id == 1)
				tgt_id = (mpt_cmd->ccb_ptr->ccb_h.target_id - (MRSAS_MAX_PD - 1));

			if (mpt_cmd->ccb_ptr->cpi.bus_id == bus_id &&
			    mpt_cmd->ccb_ptr->ccb_h.target_id == tgt_id) {
				device_printf(sc->mrsas_dev,
				    "IO commands pending to target id %d\n", tgt_id);
				return FAIL;
			}
		}
	}

	return SUCCESS;
}

#if TM_DEBUG
/*
 * mrsas_tm_response_code:	Prints TM response code received from FW
 * input:			Adapter instance soft state
 *				MPI reply returned from firmware
 *
 * Returns nothing.
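 * Compiled only when TM_DEBUG is enabled.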
#if TM_DEBUG
/*
 * mrsas_tm_response_code: Print the TM response code received from FW
 * input: Adapter instance soft state
 *        MPI reply returned by firmware
 *
 * Returns nothing.
 */
static void
mrsas_tm_response_code(struct mrsas_softc *sc,
    MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
{
	char *desc;

	switch (mpi_reply->ResponseCode) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	device_printf(sc->mrsas_dev, "response_code(%01x): %s\n",
	    mpi_reply->ResponseCode, desc);
	device_printf(sc->mrsas_dev,
	    "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo\n"
	    "0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
	    mpi_reply->TerminationCount, mpi_reply->DevHandle,
	    mpi_reply->Function, mpi_reply->TaskType,
	    mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
}
#endif

/*
 * mrsas_issue_tm: Fires the TM command to FW and waits for its completion
 * input: Adapter instance soft state
 *        Request descriptor compiled by mrsas_reset_targets
 *
 * Returns FAIL if the TM command timed out in FW, else SUCCESS.
 */
static int
mrsas_issue_tm(struct mrsas_softc *sc,
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
	int sleep_stat;

	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
	sleep_stat = msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
	    "tm_sleep", 50 * hz);

	if (sleep_stat == EWOULDBLOCK) {
		device_printf(sc->mrsas_dev, "tm cmd TIMEDOUT\n");
		return FAIL;
	}

	return SUCCESS;
}
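
/*
 * mrsas_issue_tm() relies on standard msleep() semantics: sc->sim_lock is
 * held on entry, is atomically dropped while the thread sleeps on
 * sc->ocr_chan, and is reacquired before msleep() returns. A return value
 * of EWOULDBLOCK means the 50-second timeout (50 * hz ticks) expired with
 * no wakeup; otherwise the interrupt/completion path is assumed to have
 * issued wakeup(&sc->ocr_chan) once firmware posted the TM reply.
 */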
1782 * 1783 * Returns SUCCESS or FAIL 1784 */ 1785 int mrsas_reset_targets(struct mrsas_softc *sc) 1786 { 1787 struct mrsas_mpt_cmd *tm_mpt_cmd = NULL; 1788 struct mrsas_mpt_cmd *tgt_mpt_cmd = NULL; 1789 MR_TASK_MANAGE_REQUEST *mr_request; 1790 MPI2_SCSI_TASK_MANAGE_REQUEST *tm_mpi_request; 1791 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 1792 int retCode = FAIL, count, i, outstanding; 1793 u_int32_t MSIxIndex, bus_id; 1794 target_id_t tgt_id; 1795 #if TM_DEBUG 1796 MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply; 1797 #endif 1798 1799 outstanding = mrsas_atomic_read(&sc->fw_outstanding); 1800 1801 if (!outstanding) { 1802 device_printf(sc->mrsas_dev, "NO IOs pending...\n"); 1803 mrsas_atomic_set(&sc->target_reset_outstanding, 0); 1804 retCode = SUCCESS; 1805 goto return_status; 1806 } else if (sc->adprecovery != MRSAS_HBA_OPERATIONAL) { 1807 device_printf(sc->mrsas_dev, "Controller is not operational\n"); 1808 goto return_status; 1809 } else { 1810 /* Some more error checks will be added in future */ 1811 } 1812 1813 /* Get an mpt frame and an index to fire the TM cmd */ 1814 tm_mpt_cmd = mrsas_get_mpt_cmd(sc); 1815 if (!tm_mpt_cmd) { 1816 retCode = FAIL; 1817 goto return_status; 1818 } 1819 1820 req_desc = mrsas_get_request_desc(sc, (tm_mpt_cmd->index) - 1); 1821 if (!req_desc) { 1822 device_printf(sc->mrsas_dev, "Cannot get request_descriptor for tm.\n"); 1823 retCode = FAIL; 1824 goto release_mpt; 1825 } 1826 memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION)); 1827 1828 req_desc->HighPriority.SMID = tm_mpt_cmd->index; 1829 req_desc->HighPriority.RequestFlags = 1830 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 1831 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1832 req_desc->HighPriority.MSIxIndex = 0; 1833 req_desc->HighPriority.LMID = 0; 1834 req_desc->HighPriority.Reserved1 = 0; 1835 tm_mpt_cmd->request_desc = req_desc; 1836 1837 mr_request = (MR_TASK_MANAGE_REQUEST *) tm_mpt_cmd->io_request; 1838 memset(mr_request, 0, sizeof(MR_TASK_MANAGE_REQUEST)); 1839 1840 tm_mpi_request = (MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest; 1841 tm_mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1842 tm_mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 1843 tm_mpi_request->TaskMID = 0; /* smid task */ 1844 tm_mpi_request->LUN[1] = 0; 1845 1846 /* Traverse the tm_mpt pool to get valid entries */ 1847 for (i = 0 ; i < MRSAS_MAX_TM_TARGETS; i++) { 1848 if(!sc->target_reset_pool[i]) { 1849 continue; 1850 } else { 1851 tgt_mpt_cmd = sc->target_reset_pool[i]; 1852 } 1853 1854 tgt_id = i; 1855 1856 /* See if the target is tm capable or NOT */ 1857 if (!tgt_mpt_cmd->tmCapable) { 1858 device_printf(sc->mrsas_dev, "Task management NOT SUPPORTED for " 1859 "CAM target:%d\n", tgt_id); 1860 1861 retCode = FAIL; 1862 goto release_mpt; 1863 } 1864 1865 tm_mpi_request->DevHandle = tgt_mpt_cmd->io_request->DevHandle; 1866 1867 if (i < (MRSAS_MAX_PD - 1)) { 1868 mr_request->uTmReqReply.tmReqFlags.isTMForPD = 1; 1869 bus_id = 0; 1870 } else { 1871 mr_request->uTmReqReply.tmReqFlags.isTMForLD = 1; 1872 bus_id = 1; 1873 } 1874 1875 device_printf(sc->mrsas_dev, "TM will be fired for " 1876 "CAM target:%d and bus_id %d\n", tgt_id, bus_id); 1877 1878 sc->ocr_chan = (void *)&tm_mpt_cmd; 1879 retCode = mrsas_issue_tm(sc, req_desc); 1880 if (retCode == FAIL) 1881 goto release_mpt; 1882 1883 #if TM_DEBUG 1884 mpi_reply = 1885 (MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->uTmReqReply.TMReply; 1886 mrsas_tm_response_code(sc, mpi_reply); 1887 #endif 1888 mrsas_atomic_dec(&sc->target_reset_outstanding); 1889 
		sc->target_reset_pool[i] = NULL;

		/* Check for pending cmds in the mpt_cmd_pool with the tgt_id */
		mrsas_disable_intr(sc);
		/*
		 * Wait for one second so that a parallel ISR invoking the
		 * same mrsas_complete_cmd() can finish.
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_wakeup",
		    1 * hz);
		count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
		mtx_unlock(&sc->sim_lock);
		for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
			mrsas_complete_cmd(sc, MSIxIndex);
		mtx_lock(&sc->sim_lock);
		retCode = mrsas_track_scsiio(sc, tgt_id, bus_id);
		mrsas_enable_intr(sc);

		if (retCode == FAIL)
			goto release_mpt;
	}

	device_printf(sc->mrsas_dev, "Number of targets outstanding "
	    "after reset: %d\n", mrsas_atomic_read(&sc->target_reset_outstanding));

release_mpt:
	mrsas_release_mpt_cmd(tm_mpt_cmd);
return_status:
	device_printf(sc->mrsas_dev, "target reset %s!!\n",
	    (retCode == SUCCESS) ? "SUCCESS" : "FAIL");

	return retCode;
}
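
/*
 * Overall target-reset flow, as implemented above:
 *
 * 1. A timed-out SCSI I/O is parked in sc->target_reset_pool (the timeout
 *    handling is expected to be driven by mrsas_scsiio_timeout(), declared
 *    earlier in this file).
 * 2. mrsas_reset_targets() builds one high-priority TM frame and reuses it
 *    for each pooled target, issuing
 *    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET through mrsas_issue_tm().
 * 3. After each TM completes, interrupts are disabled, the reply queues are
 *    drained manually, and mrsas_track_scsiio() confirms that no I/O to the
 *    target is still outstanding before the next pool entry is processed.
 */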