/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "dev/mrsas/mrsas.h"

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>

#include <sys/time.h>		/* XXX for pcpu.h */
#include <sys/pcpu.h>		/* XXX for PCPU_GET */

#define	smp_processor_id()	PCPU_GET(cpuid)

/*
 * Function prototypes
 */
int	mrsas_cam_attach(struct mrsas_softc *sc);
int	mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb);
int	mrsas_bus_scan(struct mrsas_softc *sc);
int	mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
int
mrsas_map_request(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd, union ccb *ccb);
int
mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb);
int
mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb);
int
mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible);
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, u_int32_t device_id,
    MRSAS_RAID_SCSI_IO_REQUEST * io_request);
void	mrsas_xpt_freeze(struct mrsas_softc *sc);
void	mrsas_xpt_release(struct mrsas_softc *sc);
void	mrsas_cam_detach(struct mrsas_softc *sc);
void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
void	mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
void	mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
    u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size);
static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
static void mrsas_cam_poll(struct cam_sim *sim);
static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
static void mrsas_scsiio_timeout(void *data);
static int mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t id, u_int32_t bus_id);
static void mrsas_tm_response_code(struct mrsas_softc *sc,
    MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply);
static int mrsas_issue_tm(struct mrsas_softc *sc,
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc);
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static int32_t
mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
    union ccb *ccb);

static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
    bus_dma_segment_t *segs, int nsegs);
static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd,
    bus_dma_segment_t *segs, int nseg);
static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd,
    bus_dma_segment_t *segs, int nseg);

struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
MRSAS_REQUEST_DESCRIPTOR_UNION *
	mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);

extern int mrsas_reset_targets(struct mrsas_softc *sc);
extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
extern u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_isr(void *arg);
extern void mrsas_aen_handler(struct mrsas_softc *sc);
extern u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc,
    struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context,
    MR_DRV_RAID_MAP_ALL * map);
extern u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
extern u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_disable_intr(struct mrsas_softc *sc);
extern void mrsas_enable_intr(struct mrsas_softc *sc);
void	mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
	    struct mrsas_mpt_cmd *cmd);

/*
 * mrsas_cam_attach:	Main entry to CAM subsystem
 * input:		Adapter instance soft state
 *
 * This function is called from mrsas_attach() during initialization to
 * perform SIM allocations and XPT bus registration.  On FreeBSD 7.4 and
 * earlier it also initiates a bus scan.
 */
int
mrsas_cam_attach(struct mrsas_softc *sc)
{
	struct cam_devq *devq;
	int mrsas_cam_depth;

	mrsas_cam_depth = sc->max_scsi_cmds;

	if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
		device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
		return (ENOMEM);
	}
	/*
	 * Create SIM for bus 0 and register, also create path
	 */
	sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
	    mrsas_cam_depth, devq);
	if (sc->sim_0 == NULL) {
		cam_simq_free(devq);
		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
		return (ENXIO);
	}
	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
	sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mrsas_dev));
	mtx_lock(&sc->sim_lock);
	if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) {
		cam_sim_free(sc->sim_0, TRUE);	/* passing true frees the devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0, TRUE);	/* passing true will free the
						 * devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_lock);

	/*
	 * Create SIM for bus 1 and register, also create path
	 */
	sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
	    mrsas_cam_depth, devq);
	if (sc->sim_1 == NULL) {
		cam_simq_free(devq);
		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
		return (ENXIO);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) {
		cam_sim_free(sc->sim_1, TRUE);	/* passing true frees the devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
	    CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		cam_sim_free(sc->sim_1, TRUE);
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_lock);

	return (0);
}

/*
 * mrsas_cam_detach:	De-allocates and tears down the CAM resources
 * input:		Adapter instance soft state
 *
 * De-registers and frees the paths and SIMs.
 */
void
mrsas_cam_detach(struct mrsas_softc *sc)
{
	if (sc->ev_tq != NULL)
		taskqueue_free(sc->ev_tq);
	mtx_lock(&sc->sim_lock);
	if (sc->path_0)
		xpt_free_path(sc->path_0);
	if (sc->sim_0) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0, FALSE);
	}
	if (sc->path_1)
		xpt_free_path(sc->path_1);
	if (sc->sim_1) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		cam_sim_free(sc->sim_1, TRUE);
	}
	mtx_unlock(&sc->sim_lock);
}
/*
 * mrsas_action:	SIM callback entry point
 * input:		pointer to SIM
 *			pointer to CAM Control Block
 *
 * This function processes CAM subsystem requests.  The type of request is
 * stored in ccb->ccb_h.func_code.  Note that ccb->cpi.maxio is not supported
 * on FreeBSD 7.4 and earlier.
 */
static void
mrsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;

	/*
	 * Check if the system is going down or the adapter is in an
	 * unrecoverable critical error state.
	 */
	if (sc->remove_in_progress ||
	    (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		{
			device_id = ccb_h->target_id;

			/*
			 * bus 0 is LD, bus 1 is for system-PD
			 */
			if (cam_sim_bus(sim) == 1 &&
			    sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
			} else {
				if (mrsas_startio(sc, sim, ccb)) {
					ccb->ccb_h.status |= CAM_REQ_INVALID;
					xpt_done(ccb);
				}
			}
			break;
		}
	case XPT_ABORT:
		{
			ccb->ccb_h.status = CAM_UA_ABORT;
			xpt_done(ccb);
			break;
		}
	case XPT_RESET_BUS:
		{
			xpt_done(ccb);
			break;
		}
	case XPT_GET_TRAN_SETTINGS:
		{
			ccb->cts.protocol = PROTO_SCSI;
			ccb->cts.protocol_version = SCSI_REV_2;
			ccb->cts.transport = XPORT_SPI;
			ccb->cts.transport_version = 2;
			ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
			ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
			ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
			ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	case XPT_SET_TRAN_SETTINGS:
		{
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			xpt_done(ccb);
			break;
		}
	case XPT_CALC_GEOMETRY:
		{
			cam_calc_geometry(&ccb->ccg, 1);
			xpt_done(ccb);
			break;
		}
	case XPT_PATH_INQ:
		{
			ccb->cpi.version_num = 1;
			ccb->cpi.hba_inquiry = 0;
			ccb->cpi.target_sprt = 0;
			ccb->cpi.hba_misc = PIM_UNMAPPED;
			ccb->cpi.hba_eng_cnt = 0;
			ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
			ccb->cpi.unit_number = cam_sim_unit(sim);
			ccb->cpi.bus_id = cam_sim_bus(sim);
			ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
			ccb->cpi.base_transfer_speed = 150000;
			strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
			strlcpy(ccb->cpi.hba_vid, "AVAGO", HBA_IDLEN);
			strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
			ccb->cpi.transport = XPORT_SPI;
			ccb->cpi.transport_version = 2;
			ccb->cpi.protocol = PROTO_SCSI;
			ccb->cpi.protocol_version = SCSI_REV_2;
			if (ccb->cpi.bus_id == 0)
				ccb->cpi.max_target = MRSAS_MAX_PD - 1;
			else
				ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
			ccb->cpi.maxio = sc->max_sectors_per_req * 512;
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	default:
		{
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
	}
}
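/*
 * Illustrative note (value hypothetical): the XPT_PATH_INQ handler above
 * reports the maximum I/O size as max_sectors_per_req * 512.  If the
 * firmware reported, say, max_sectors_per_req = 128, CAM would see
 * cpi.maxio = 65536 bytes (64 KiB) and size transfers accordingly.
 */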
/*
 * mrsas_scsiio_timeout:	Callback function for timed-out IO
 * input:			mpt command context
 *
 * This callback runs if the timer armed with the timeout value provided by
 * the CCB header expires.  The driver runs this timer for all DCMD and LDIO
 * commands coming from the CAM layer.  It executes in a no-sleep context.
 * It sets do_timedout_reset in the adapter context so that OCR/kill-adapter
 * is executed from the ocr_thread context.
 */
static void
mrsas_scsiio_timeout(void *data)
{
	struct mrsas_mpt_cmd *cmd;
	struct mrsas_softc *sc;
	u_int32_t target_id;

	if (!data)
		return;

	cmd = (struct mrsas_mpt_cmd *)data;
	sc = cmd->sc;

	if (cmd->ccb_ptr == NULL) {
		printf("command timeout with NULL ccb\n");
		return;
	}

	/*
	 * The callout reset below is a placeholder entry so that it can
	 * still be cancelled from mrsas_cmd_done().  The controller will go
	 * to OCR/kill-adapter from the ocr_thread context, based on the
	 * controller's OCR enable/disable property.
	 */
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
	    mrsas_scsiio_timeout, cmd, 0);

	if (cmd->ccb_ptr->cpi.bus_id == 0)
		target_id = cmd->ccb_ptr->ccb_h.target_id;
	else
		target_id = (cmd->ccb_ptr->ccb_h.target_id + (MRSAS_MAX_PD - 1));

	/* Save the cmd to be processed for TM, if it is not already in the array */
	if (sc->target_reset_pool[target_id] == NULL) {
		sc->target_reset_pool[target_id] = cmd;
		mrsas_atomic_inc(&sc->target_reset_outstanding);
	}

	return;
}
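/*
 * Worked example of the target_reset_pool indexing above (assuming
 * MRSAS_MAX_PD is 256): LDs (bus 0) use their target id directly, while
 * system PDs (bus 1) are offset past the LD range, so a timed-out IO to
 * bus 1 target 3 is recorded at index 3 + (256 - 1) = 258.  LD and
 * system-PD entries therefore never collide in the shared array.
 */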
/*
 * mrsas_startio:	SCSI IO entry point
 * input:		Adapter instance soft state
 *			pointer to CAM Control Block
 *
 * This function is the SCSI IO entry point and it initiates IO processing.
 * Depending on whether the IO targets a logical volume or a system PD, and
 * on whether it is read/write, it calls mrsas_build_ldio_rw(),
 * mrsas_build_ldio_nonrw(), or mrsas_build_syspdio().  It returns 0 if the
 * command is sent to firmware successfully, otherwise it returns 1.
 */
static int32_t
mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct mrsas_mpt_cmd *cmd, *r1_cmd = NULL;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u_int8_t cmd_type;

	if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE &&
	    (!sc->fw_sync_cache_support)) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return (0);
	}
	ccb_h->status |= CAM_SIM_QUEUED;

	if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) {
		ccb_h->status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		mrsas_atomic_dec(&sc->fw_outstanding);
		return (0);
	}

	cmd = mrsas_get_mpt_cmd(sc);

	if (!cmd) {
		ccb_h->status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		mrsas_atomic_dec(&sc->fw_outstanding);
		return (0);
	}

	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if (ccb_h->flags & CAM_DIR_IN)
			cmd->flags |= MRSAS_DIR_IN;
		if (ccb_h->flags & CAM_DIR_OUT)
			cmd->flags |= MRSAS_DIR_OUT;
	} else
		cmd->flags = MRSAS_DIR_NONE;	/* no data */

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	case CAM_DATA_SG:
		device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		goto done;
	case CAM_DATA_VADDR:
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	case CAM_DATA_BIO:
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto done;
	}

	/* save ccb ptr */
	cmd->ccb_ptr = ccb;

	req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
		return (FAIL);
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
	cmd->request_desc = req_desc;

	if (ccb_h->flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
	mtx_lock(&sc->raidmap_lock);

	/* Check for IO type READ-WRITE targeted for Logical Volume */
	cmd_type = mrsas_find_io_type(sim, ccb);
	switch (cmd_type) {
	case READ_WRITE_LDIO:
		/* Build READ-WRITE IO for Logical Volume */
		if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			mrsas_release_mpt_cmd(cmd);
			return (1);
		}
		break;
	case NON_READ_WRITE_LDIO:
		/* Build NON READ-WRITE IO for Logical Volume */
		if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			mrsas_release_mpt_cmd(cmd);
			return (1);
		}
		break;
	case READ_WRITE_SYSPDIO:
	case NON_READ_WRITE_SYSPDIO:
		if (sc->secure_jbod_support &&
		    (cmd_type == NON_READ_WRITE_SYSPDIO)) {
			/* Build NON-RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				mrsas_release_mpt_cmd(cmd);
				return (1);
			}
		} else {
			/* Build RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				mrsas_release_mpt_cmd(cmd);
				return (1);
			}
		}
	}
	mtx_unlock(&sc->raidmap_lock);

	if (cmd->flags == MRSAS_DIR_IN)	/* from device */
		cmd->io_request->Control |= htole32(MPI2_SCSIIO_CONTROL_READ);
	else if (cmd->flags == MRSAS_DIR_OUT)	/* to device */
		cmd->io_request->Control |= htole32(MPI2_SCSIIO_CONTROL_WRITE);

	cmd->io_request->SGLFlags = htole16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
	cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	cmd->io_request->SenseBufferLowAddress = htole32(cmd->sense_phys_addr & 0xFFFFFFFF);
	cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = htole16(cmd->index);

	/*
	 * Start timer for IO timeout.  The timeout value is 180 seconds,
	 * matching the callout armed below.
	 */
	cmd->callout_owner = true;
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
	    mrsas_scsiio_timeout, cmd, 0);

	if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	/*
	 * If the LD is RAID 1/10 and fast-path write capable, try to get a
	 * second command from the pool and construct it as the peer write.
	 * FW has confirmed that the LBA values of the two PDs backing a
	 * single R1/10 LD are always the same.
	 */
	/*
	 * The driver-side count must stay below max_fw_cmds for a new
	 * command to be obtained.
	 */
	if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
		mrsas_prepare_secondRaid1_IO(sc, cmd);
		mrsas_fire_cmd(sc, req_desc->addr.u.low,
		    req_desc->addr.u.high);
		r1_cmd = cmd->peer_cmd;
		mrsas_fire_cmd(sc, r1_cmd->request_desc->addr.u.low,
		    r1_cmd->request_desc->addr.u.high);
	} else {
		mrsas_fire_cmd(sc, req_desc->addr.u.low,
		    req_desc->addr.u.high);
	}

	return (0);

done:
	xpt_done(ccb);
	mrsas_atomic_dec(&sc->fw_outstanding);
	return (0);
}
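/*
 * Note on the SMID/descriptor pairing used above: the firmware identifies
 * each frame by SMID (req_desc->SCSIIO.SMID = cmd->index), while the request
 * descriptor array is indexed from zero, hence the
 * mrsas_get_request_desc(sc, cmd->index - 1) lookup; SMID 0 appears to be
 * reserved.
 */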
/*
 * mrsas_find_io_type:	Determines if IO is read/write or inquiry
 * input:		pointer to CAM Control Block
 *
 * This function determines the IO type of the CDB: it returns
 * READ_WRITE_LDIO or READ_WRITE_SYSPDIO for read/write commands, and
 * NON_READ_WRITE_LDIO or NON_READ_WRITE_SYSPDIO for everything else,
 * depending on which bus the request arrived on.
 */
int
mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb)
{
	struct ccb_scsiio *csio = &(ccb->csio);

	switch (csio->cdb_io.cdb_bytes[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		return (cam_sim_bus(sim) ?
		    READ_WRITE_SYSPDIO : READ_WRITE_LDIO);
	default:
		return (cam_sim_bus(sim) ?
		    NON_READ_WRITE_SYSPDIO : NON_READ_WRITE_LDIO);
	}
}
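/*
 * Usage sketch for mrsas_find_io_type() (illustrative only; "sim0" and
 * "sim1" stand for the bus 0 and bus 1 SIMs registered in
 * mrsas_cam_attach(), and "cmd_type" is a local u_int8_t):
 */
#if 0
	ccb->csio.cdb_io.cdb_bytes[0] = READ_10;	/* 0x28 */
	cmd_type = mrsas_find_io_type(sim0, ccb);	/* READ_WRITE_LDIO */

	ccb->csio.cdb_io.cdb_bytes[0] = INQUIRY;	/* 0x12 */
	cmd_type = mrsas_find_io_type(sim1, ccb);	/* NON_READ_WRITE_SYSPDIO */
#endif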
/*
 * mrsas_get_mpt_cmd:	Get a cmd from free command pool
 * input:		Adapter instance soft state
 *
 * This function removes an MPT command from the command free list and
 * initializes it.
 */
struct mrsas_mpt_cmd *
mrsas_get_mpt_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mpt_cmd *cmd = NULL;

	mtx_lock(&sc->mpt_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
	} else {
		goto out;
	}

	memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	cmd->data = NULL;
	cmd->length = 0;
	cmd->flags = 0;
	cmd->error_code = 0;
	cmd->load_balance = 0;
	cmd->ccb_ptr = NULL;
out:
	mtx_unlock(&sc->mpt_cmd_pool_lock);
	return cmd;
}

/*
 * mrsas_release_mpt_cmd:	Return a cmd to free command pool
 * input:			Command packet for return to free command pool
 *
 * This function returns an MPT command to the free command list.
 */
void
mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
{
	struct mrsas_softc *sc = cmd->sc;

	mtx_lock(&sc->mpt_cmd_pool_lock);
	cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
	cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
	cmd->peer_cmd = NULL;
	cmd->cmd_completed = 0;
	memset((uint8_t *)cmd->io_request, 0,
	    sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
	TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	mtx_unlock(&sc->mpt_cmd_pool_lock);

	return;
}

/*
 * mrsas_get_request_desc:	Get request descriptor from array
 * input:			Adapter instance soft state
 *				SMID index
 *
 * This function returns a pointer to the request descriptor.
 */
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
{
	u_int8_t *p;

	KASSERT(index < sc->max_fw_cmds, ("req_desc is out of range"));
	p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;

	return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
}
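/*
 * Layout sketch for the descriptor array indexed above (one slot of
 * sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) bytes per outstanding command):
 *
 *	sc->req_desc: [ desc 0 | desc 1 | ... | desc max_fw_cmds-1 ]
 *
 * SMID n (1-based) maps to slot n - 1.
 */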
/*
 * mrsas_prepare_secondRaid1_IO
 * It prepares the raid 1 second IO
 */
void
mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
	struct mrsas_mpt_cmd *r1_cmd;

	r1_cmd = cmd->peer_cmd;
	req_desc = cmd->request_desc;

	/*
	 * copy the io request frame as well as 8 SGEs data for r1
	 * command
	 */
	memcpy(r1_cmd->io_request, cmd->io_request,
	    (sizeof(MRSAS_RAID_SCSI_IO_REQUEST)));
	memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
	    (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION)));

	/* sense buffer is different for r1 command */
	r1_cmd->io_request->SenseBufferLowAddress = htole32(r1_cmd->sense_phys_addr & 0xFFFFFFFF);
	r1_cmd->ccb_ptr = cmd->ccb_ptr;

	req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1);
	req_desc2->addr.Words = 0;
	r1_cmd->request_desc = req_desc2;
	req_desc2->SCSIIO.SMID = r1_cmd->index;
	req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
	r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
	r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
	r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
	cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
	    r1_cmd->index;
	r1_cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
	    cmd->index;
	/*
	 * MSIxIndex of both commands request descriptors
	 * should be same
	 */
	r1_cmd->request_desc->SCSIIO.MSIxIndex = cmd->request_desc->SCSIIO.MSIxIndex;
	/* span arm is different for r1 cmd */
	r1_cmd->io_request->RaidContext.raid_context_g35.spanArm =
	    cmd->io_request->RaidContext.raid_context_g35.spanArm + 1;
}
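/*
 * Illustrative pairing produced by mrsas_prepare_secondRaid1_IO() (SMID
 * values hypothetical): if the primary write is SMID 5 and its peer is
 * SMID 9, each frame's raid_context_g35.smid.peerSMID names the other (9
 * in the primary, 5 in the peer), the peer's DevHandle becomes the
 * primary's r1_alt_dev_handle so it targets the mirror arm, and the peer's
 * spanArm is the primary's spanArm + 1.  Completion handling can then
 * match the two halves of the RAID 1/10 write through the cross-linked
 * SMIDs.
 */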
/*
 * mrsas_build_ldio_rw:	Builds an LDIO command
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *			Pointer to CCB
 *
 * This function builds the LDIO command packet.  It returns 0 if the command
 * is built successfully, otherwise it returns a 1.
 */
int
mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	u_int32_t device_id;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;

	device_id = ccb_h->target_id;

	io_request = cmd->io_request;
	io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
	io_request->RaidContext.raid_context.status = 0;
	io_request->RaidContext.raid_context.exStatus = 0;

	/* just the cdb len, other flags zero, and ORed-in later for FP */
	io_request->IoFlags = htole16(csio->cdb_len);

	if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
		device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");

	io_request->DataLength = htole32(cmd->length);

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE stores the lower 8 bits of sge_count;
			 * numSGEExt stores the upper 8 bits.
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
		}
	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (FAIL);
	}
	return (0);
}

/* stream detection on read and write IOs */
static void
mrsas_stream_detect(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    struct IO_REQUEST_INFO *io_info)
{
	u_int32_t device_id = io_info->ldTgtId;
	LD_STREAM_DETECT *current_ld_SD = sc->streamDetectByLD[device_id];
	u_int32_t *track_stream = &current_ld_SD->mruBitMap;
	u_int32_t streamNum, shiftedValues, unshiftedValues;
	u_int32_t indexValueMask, shiftedValuesMask;
	int i;
	boolean_t isReadAhead = false;
	STREAM_DETECT *current_SD;

	/* find possible stream */
	for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
		streamNum = (*track_stream >> (i * BITS_PER_INDEX_STREAM)) &
		    STREAM_MASK;
		current_SD = &current_ld_SD->streamTrack[streamNum];
		/*
		 * if we found a stream, update the raid context and
		 * also update the mruBitMap
		 */
		if (current_SD->nextSeqLBA &&
		    io_info->ldStartBlock >= current_SD->nextSeqLBA &&
		    (io_info->ldStartBlock <= (current_SD->nextSeqLBA + 32)) &&
		    (current_SD->isRead == io_info->isRead)) {
			if (io_info->ldStartBlock != current_SD->nextSeqLBA &&
			    (!io_info->isRead || !isReadAhead)) {
				/*
				 * Once the API is available we need to
				 * change this.  At this point we are not
				 * allowing any gap.
				 */
				continue;
			}
			cmd->io_request->RaidContext.raid_context_g35.streamDetected = TRUE;
			current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
			/*
			 * update the mruBitMap LRU
			 */
			shiftedValuesMask = (1 << i * BITS_PER_INDEX_STREAM) - 1;
			shiftedValues = ((*track_stream & shiftedValuesMask) <<
			    BITS_PER_INDEX_STREAM);
			indexValueMask = STREAM_MASK << i * BITS_PER_INDEX_STREAM;
			unshiftedValues = (*track_stream) &
			    (~(shiftedValuesMask | indexValueMask));
			*track_stream =
			    (unshiftedValues | shiftedValues | streamNum);
			return;
		}
	}
	/*
	 * if we did not find any stream, create a new one from the least
	 * recently used
	 */
	streamNum = (*track_stream >>
	    ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
	current_SD = &current_ld_SD->streamTrack[streamNum];
	current_SD->isRead = io_info->isRead;
	current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
	*track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | streamNum);
	return;
}
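/*
 * Worked example of the mruBitMap update above, assuming 4-bit stream
 * slots (BITS_PER_INDEX_STREAM = 4, STREAM_MASK = 0xf) with the most
 * recently used stream in the low nibble: for *track_stream = 0x76543210,
 * a hit at i = 2 gives streamNum = 2, shiftedValuesMask = 0xff,
 * shiftedValues = 0x10 << 4 = 0x100, indexValueMask = 0xf00, and
 * unshiftedValues = 0x76543000, so the new map is 0x76543102: stream 2
 * moves to the MRU nibble and streams 0 and 1 each shift up one slot.
 */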
/*
 * mrsas_setup_io:	Set up data including Fast Path I/O
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *			Pointer to CCB
 *
 * This function sets up the data and Fast Path eligibility for the IO
 * request.  It returns 0 if the IO is set up successfully, otherwise it
 * returns a 1.
 */
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, u_int32_t device_id,
    MRSAS_RAID_SCSI_IO_REQUEST * io_request)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	struct IO_REQUEST_INFO io_info;
	MR_DRV_RAID_MAP_ALL *map_ptr;
	struct mrsas_mpt_cmd *r1_cmd = NULL;

	MR_LD_RAID *raid;
	u_int8_t fp_possible;
	u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld;
	u_int32_t datalength = 0;

	io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);

	start_lba_lo = 0;
	start_lba_hi = 0;
	fp_possible = 0;

	/*
	 * READ_6 (0x08) or WRITE_6 (0x0A) cdb
	 */
	if (csio->cdb_len == 6) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[3];
		start_lba_lo &= 0x1FFFFF;
	}
	/*
	 * READ_10 (0x28) or WRITE_10 (0x2A) cdb
	 */
	else if (csio->cdb_len == 10) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	/*
	 * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
	 */
	else if (csio->cdb_len == 12) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	/*
	 * READ_16 (0x88) or WRITE_16 (0x8A) cdb
	 */
	else if (csio->cdb_len == 16) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
		start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
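	/*
	 * Worked example for the READ_10 path above: for CDB bytes
	 * 28 00 00 12 d6 87 00 00 08 00, cdb_bytes[2..5] give
	 * start_lba_lo = 0x0012d687 and cdb_bytes[7..8] give
	 * datalength = 8 blocks; start_lba_hi stays 0 since READ_10
	 * carries only a 32-bit LBA.
	 */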
1033 */ 1034 if (fp_possible && 1035 (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) && 1036 (raid->level == 1) && !io_info.isRead) { 1037 if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) { 1038 fp_possible = FALSE; 1039 mrsas_atomic_dec(&sc->fw_outstanding); 1040 } else { 1041 r1_cmd = mrsas_get_mpt_cmd(sc); 1042 if (!r1_cmd) { 1043 fp_possible = FALSE; 1044 mrsas_atomic_dec(&sc->fw_outstanding); 1045 } 1046 else { 1047 cmd->peer_cmd = r1_cmd; 1048 r1_cmd->peer_cmd = cmd; 1049 } 1050 } 1051 } 1052 } 1053 1054 if (fp_possible) { 1055 mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr, 1056 start_lba_lo, ld_block_size); 1057 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1058 cmd->request_desc->SCSIIO.RequestFlags = 1059 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 1060 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1061 if (sc->mrsas_gen3_ctrl) { 1062 if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED) 1063 cmd->request_desc->SCSIIO.RequestFlags = 1064 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 1065 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1066 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA; 1067 io_request->RaidContext.raid_context.nseg = 0x1; 1068 io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 1069 io_request->RaidContext.raid_context.regLockFlags |= 1070 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 1071 MR_RL_FLAGS_SEQ_NUM_ENABLE); 1072 } else if (sc->is_ventura || sc->is_aero) { 1073 io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA; 1074 io_request->RaidContext.raid_context_g35.nseg = 0x1; 1075 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1; 1076 io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 1077 if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) { 1078 io_request->RaidContext.raid_context_g35.RAIDFlags = 1079 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS 1080 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); 1081 } 1082 } 1083 if ((sc->load_balance_info[device_id].loadBalanceFlag) && 1084 (io_info.isRead)) { 1085 io_info.devHandle = 1086 mrsas_get_updated_dev_handle(sc, 1087 &sc->load_balance_info[device_id], &io_info); 1088 cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG; 1089 cmd->pd_r1_lb = io_info.pd_after_lb; 1090 if (sc->is_ventura || sc->is_aero) 1091 io_request->RaidContext.raid_context_g35.spanArm = io_info.span_arm; 1092 else 1093 io_request->RaidContext.raid_context.spanArm = io_info.span_arm; 1094 } else 1095 cmd->load_balance = 0; 1096 1097 if (sc->is_ventura || sc->is_aero) 1098 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; 1099 else 1100 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 1101 1102 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 1103 io_request->DevHandle = io_info.devHandle; 1104 cmd->pdInterface = io_info.pdInterface; 1105 } else { 1106 /* Not FP IO */ 1107 io_request->RaidContext.raid_context.timeoutValue = htole16(map_ptr->raidMap.fpPdIoTimeoutSec); 1108 cmd->request_desc->SCSIIO.RequestFlags = 1109 (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO << 1110 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1111 if (sc->mrsas_gen3_ctrl) { 1112 if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED) 1113 cmd->request_desc->SCSIIO.RequestFlags = 1114 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 1115 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1116 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA; 1117 io_request->RaidContext.raid_context.regLockFlags |= 1118 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | 1119 
	if (fp_possible) {
		mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
		    start_lba_lo, ld_block_size);
		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if (sc->mrsas_gen3_ctrl) {
			if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
				    (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
				    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context.nseg = 0x1;
			io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
			io_request->RaidContext.raid_context.regLockFlags |=
			    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
			    MR_RL_FLAGS_SEQ_NUM_ENABLE);
		} else if (sc->is_ventura || sc->is_aero) {
			io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context_g35.nseg = 0x1;
			io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
			io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
			if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) {
				io_request->RaidContext.raid_context_g35.RAIDFlags =
				    (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
				    << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
			}
		}
		if ((sc->load_balance_info[device_id].loadBalanceFlag) &&
		    (io_info.isRead)) {
			io_info.devHandle =
			    mrsas_get_updated_dev_handle(sc,
			    &sc->load_balance_info[device_id], &io_info);
			cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
			cmd->pd_r1_lb = io_info.pd_after_lb;
			if (sc->is_ventura || sc->is_aero)
				io_request->RaidContext.raid_context_g35.spanArm = io_info.span_arm;
			else
				io_request->RaidContext.raid_context.spanArm = io_info.span_arm;
		} else
			cmd->load_balance = 0;

		if (sc->is_ventura || sc->is_aero)
			cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
		else
			cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;

		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
		io_request->DevHandle = io_info.devHandle;
		cmd->pdInterface = io_info.pdInterface;
	} else {
		/* Not FP IO */
		io_request->RaidContext.raid_context.timeoutValue = htole16(map_ptr->raidMap.fpPdIoTimeoutSec);
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if (sc->mrsas_gen3_ctrl) {
			if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
				    (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
				    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context.regLockFlags |=
			    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
			    MR_RL_FLAGS_SEQ_NUM_ENABLE);
			io_request->RaidContext.raid_context.nseg = 0x1;
		} else if (sc->is_ventura || sc->is_aero) {
			io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
			io_request->RaidContext.raid_context_g35.nseg = 0x1;
		}
		io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
		io_request->DevHandle = htole16(device_id);
	}
	return (0);
}

/*
 * mrsas_build_ldio_nonrw:	Builds an LDIO command
 * input:			Adapter instance soft state
 *				Pointer to command packet
 *				Pointer to CCB
 *
 * This function builds the LDIO command packet.  It returns 0 if the command
 * is built successfully, otherwise it returns a 1.
 */
int
mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id, ld;
	MR_DRV_RAID_MAP_ALL *map_ptr;
	MR_LD_RAID *raid;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;

	io_request = cmd->io_request;
	device_id = ccb_h->target_id;

	map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	ld = MR_TargetIdToLdGet(device_id, map_ptr);
	raid = MR_LdRaidGet(ld, map_ptr);
	/* Store the TM capability value in cmd */
	cmd->tmCapable = raid->capability.tmCapable;

	/* FW path for LD Non-RW (SCSI management commands) */
	io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
	io_request->DevHandle = device_id;
	cmd->request_desc->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
	io_request->LUN[1] = ccb_h->target_lun & 0xF;
	io_request->DataLength = cmd->length;

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE stores the lower 8 bits of sge_count;
			 * numSGEExt stores the upper 8 bits.
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
		}
	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (1);
	}
	return (0);
}
/*
 * mrsas_build_syspdio:	Builds a DCDB command
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *			Pointer to CCB
 *
 * This function builds the DCDB inquiry command.  It returns 0 if the command
 * is built successfully, otherwise it returns a 1.
 */
int
mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;
	MR_DRV_RAID_MAP_ALL *local_map_ptr;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;

	io_request = cmd->io_request;
	device_id = ccb_h->target_id;
	local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	io_request->RaidContext.raid_context.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
	    << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
	io_request->RaidContext.raid_context.regLockFlags = 0;
	io_request->RaidContext.raid_context.regLockRowLBA = 0;
	io_request->RaidContext.raid_context.regLockLength = 0;

	cmd->pdInterface = sc->target_list[device_id].interface_type;

	/* If FW supports PD sequence number */
	if (sc->use_seqnum_jbod_fp &&
	    sc->pd_list[device_id].driveType == 0x00) {
		//printf("Using Drv seq num\n");
		pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
		cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable;
		/* More than 256 PD/JBOD support for Ventura */
		if (sc->support_morethan256jbod)
			io_request->RaidContext.raid_context.VirtualDiskTgtId =
			    pd_sync->seq[device_id].pdTargetId;
		else
			io_request->RaidContext.raid_context.VirtualDiskTgtId =
			    htole16(device_id + 255);
		io_request->RaidContext.raid_context.configSeqNum = pd_sync->seq[device_id].seqNum;
		io_request->DevHandle = pd_sync->seq[device_id].devHandle;
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
		else
			io_request->RaidContext.raid_context.regLockFlags |=
			    (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
		/*
		 * raid_context.Type = MPI2_TYPE_CUDA is valid only if FW
		 * supports JBOD sequence numbers.
		 */
		io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
		io_request->RaidContext.raid_context.nseg = 0x1;
	} else if (sc->fast_path_io) {
		//printf("Using LD RAID map\n");
		io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
		io_request->RaidContext.raid_context.configSeqNum = 0;
		local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
		io_request->DevHandle =
		    local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
	} else {
		//printf("Using FW PATH\n");
		/* Want to send all IO via FW path */
		io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
		io_request->RaidContext.raid_context.configSeqNum = 0;
		io_request->DevHandle = MR_DEVHANDLE_INVALID;
	}

	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
	cmd->request_desc->SCSIIO.MSIxIndex =
	    sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;

	if (!fp_possible) {
		/* system pd firmware path */
		io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		io_request->RaidContext.raid_context.timeoutValue =
		    htole16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
		io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
	} else {
		/* system pd fast path */
		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
		io_request->RaidContext.raid_context.timeoutValue = htole16(local_map_ptr->raidMap.fpPdIoTimeoutSec);

		/*
		 * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
		 * because the NON RW cmds will now go via the FW queue and
		 * not the exception queue.
		 */
		if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
			io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);

		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	}

	io_request->LUN[1] = ccb_h->target_lun & 0xF;
	io_request->DataLength = htole32(cmd->length);

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE stores the lower 8 bits of sge_count;
			 * numSGEExt stores the upper 8 bits.
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
		}
	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (1);
	}
	return (0);
}
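/*
 * Illustrative mapping for the sequence-number path above: on controllers
 * without the more-than-256-JBOD feature, a system PD at device_id 3 is
 * addressed as VirtualDiskTgtId 3 + 255 = 258, keeping JBOD target ids
 * clear of the LD target id range; Ventura-class firmware instead reports
 * the target id directly in pd_sync->seq[device_id].pdTargetId.
 */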
/*
 * mrsas_is_prp_possible:	Tells whether PRPs should be built or not
 * sc:				Adapter instance soft state
 * cmd:				MPT command frame pointer
 * nsegs:			Number of OS SGEs
 *
 * This function checks whether the IO is qualified to build PRPs.
 * return:	true if PRPs should be built
 *		false if IEEE SGLs should be built
 */
static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
    bus_dma_segment_t *segs, int nsegs)
{
	struct mrsas_softc *sc = cmd->sc;
	int i;
	u_int32_t data_length = 0;
	bool build_prp = false;
	u_int32_t mr_nvme_pg_size;

	mr_nvme_pg_size = max(sc->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE);
	data_length = cmd->length;

	if (data_length > (mr_nvme_pg_size * 5))
		build_prp = true;
	else if ((data_length > (mr_nvme_pg_size * 4)) &&
	    (data_length <= (mr_nvme_pg_size * 5))) {
		/* check if 1st SG entry size is < residual beyond 4 pages */
		if ((segs[0].ds_len) < (data_length - (mr_nvme_pg_size * 4)))
			build_prp = true;
	}

	/* check for SGE holes here */
	for (i = 0; i < nsegs; i++) {
		/* check for mid SGEs */
		if ((i != 0) && (i != (nsegs - 1))) {
			if ((segs[i].ds_addr % mr_nvme_pg_size) ||
			    (segs[i].ds_len % mr_nvme_pg_size)) {
				build_prp = false;
				mrsas_atomic_inc(&sc->sge_holes);
				break;
			}
		}

		/* check for first SGE */
		if ((nsegs > 1) && (i == 0)) {
			if ((segs[i].ds_addr + segs[i].ds_len) % mr_nvme_pg_size) {
				build_prp = false;
				mrsas_atomic_inc(&sc->sge_holes);
				break;
			}
		}

		/* check for last SGE */
		if ((nsegs > 1) && (i == (nsegs - 1))) {
			if (segs[i].ds_addr % mr_nvme_pg_size) {
				build_prp = false;
				mrsas_atomic_inc(&sc->sge_holes);
				break;
			}
		}
	}

	return build_prp;
}
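/*
 * Worked example for mrsas_is_prp_possible(), assuming a 4 KiB NVMe page
 * (mr_nvme_pg_size = 4096): a 24 KiB transfer (> 5 * 4096) is a PRP
 * candidate outright; an 18 KiB transfer qualifies only if the first SGE
 * is shorter than 18432 - 16384 = 2048 bytes.  The hole checks then
 * reject the IO for PRPs if any middle SGE is not page-aligned in both
 * address and length, if the first SGE does not end on a page boundary,
 * or if the last SGE does not start on one.
 */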
/*
 * mrsas_map_request:	Map and load data
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *
 * For data from OS, map and load the data buffer into bus space.  The SG
 * list is built in the callback.  If the bus dmamap load is not successful,
 * cmd->error_code will contain the error code and a 1 is returned.
 */
int
mrsas_map_request(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd, union ccb *ccb)
{
	u_int32_t retcode = 0;
	struct cam_sim *sim;

	sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);

	if (cmd->data != NULL) {
		/* Map data buffer into bus space */
		mtx_lock(&sc->io_lock);
		retcode = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb,
		    mrsas_data_load_cb, cmd, 0);
		mtx_unlock(&sc->io_lock);
		if (retcode)
			device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
		if (retcode == EINPROGRESS) {
			device_printf(sc->mrsas_dev, "request load in progress\n");
			mrsas_freeze_simq(cmd, sim);
		}
	}
	if (cmd->error_code)
		return (1);
	return (retcode);
}

/*
 * mrsas_unmap_request:	Unmap and unload data
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *
 * This function unmaps and unloads data from OS.
 */
void
mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	if (cmd->data != NULL) {
		if (cmd->flags & MRSAS_DIR_IN)
			bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
		if (cmd->flags & MRSAS_DIR_OUT)
			bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
		mtx_lock(&sc->io_lock);
		bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
		mtx_unlock(&sc->io_lock);
	}
}
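/*
 * Note on the EINPROGRESS case in mrsas_map_request() above:
 * bus_dmamap_load_ccb() may defer the mapping when DMA resources are
 * scarce and invoke mrsas_data_load_cb() later, so the SIM queue is
 * frozen and the CCB marked for requeue rather than being failed.
 */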
/**
 * mrsas_build_ieee_sgl -	Prepare IEEE SGLs
 * @sc:				Adapter soft state
 * @segs:			OS SGEs pointers
 * @nseg:			Number of OS SGEs
 * @cmd:			Fusion command frame
 * return:			void
 */
static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
{
	struct mrsas_softc *sc = cmd->sc;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
	pMpi25IeeeSgeChain64_t sgl_ptr;
	int i = 0, sg_processed = 0;

	io_request = cmd->io_request;
	sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	if (nseg != 0) {
		for (i = 0; i < nseg; i++) {
			sgl_ptr->Address = htole64(segs[i].ds_addr);
			sgl_ptr->Length = htole32(segs[i].ds_len);
			sgl_ptr->Flags = 0;
			if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
				if (i == nseg - 1)
					sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
			}
			sgl_ptr++;
			sg_processed = i + 1;
			if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
			    (nseg > sc->max_sge_in_main_msg)) {
				pMpi25IeeeSgeChain64_t sg_chain;

				if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
					if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
					    != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
						cmd->io_request->ChainOffset = sc->chain_offset_io_request;
					else
						cmd->io_request->ChainOffset = 0;
				} else
					cmd->io_request->ChainOffset = sc->chain_offset_io_request;
				sg_chain = sgl_ptr;
				if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
					sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
				else
					sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
				sg_chain->Length = htole32((sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed)));
				sg_chain->Address = htole64(cmd->chain_frame_phys_addr);
				sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
			}
		}
	}
}
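/*
 * Worked example for the chain logic above, assuming (hypothetically)
 * max_sge_in_main_msg = 8: with nseg = 11, SGEs 0..6 land in the main
 * frame, slot 7 is written as a chain element pointing at
 * cmd->chain_frame_phys_addr, and the remaining 4 SGEs go to the chain
 * frame; the chain Length then covers sizeof(MPI2_SGE_IO_UNION) * 4
 * bytes.
 */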
/**
 * mrsas_build_prp_nvme -	Prepare PRPs (Physical Region Pages), the SGL
 *				format specific to NVMe drives
 * @sc:				Adapter soft state
 * @segs:			OS SGEs pointers
 * @nseg:			Number of OS SGEs
 * @cmd:			Fusion command frame
 * return:			void
 */
static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
{
	struct mrsas_softc *sc = cmd->sc;
	int sge_len, offset, num_prp_in_chain = 0;
	pMpi25IeeeSgeChain64_t main_chain_element, ptr_first_sgl, sgl_ptr;
	u_int64_t *ptr_sgl;
	bus_addr_t ptr_sgl_phys;
	u_int64_t sge_addr;
	u_int32_t page_mask, page_mask_result, i = 0;
	u_int32_t first_prp_len;
	int data_len = cmd->length;
	u_int32_t mr_nvme_pg_size = max(sc->nvme_page_size,
	    MR_DEFAULT_NVME_PAGE_SIZE);

	sgl_ptr = (pMpi25IeeeSgeChain64_t) &cmd->io_request->SGL;
	/*
	 * NVMe has a very convoluted PRP format.  One PRP is required for
	 * each page or partial page.  We need to split up OS SG entries if
	 * they are longer than one page or cross a page boundary.  We also
	 * have to insert a PRP list pointer entry as the last entry in each
	 * physical page of the PRP list.
	 *
	 * NOTE: The first PRP "entry" is actually placed in the first SGL
	 * entry in the main message in IEEE 64 format.  The 2nd entry in
	 * the main message is the chain element, and the rest of the PRP
	 * entries are built in the contiguous PCIe buffer.
	 */
	page_mask = mr_nvme_pg_size - 1;
	ptr_sgl = (u_int64_t *) cmd->chain_frame;
	ptr_sgl_phys = cmd->chain_frame_phys_addr;
	memset(ptr_sgl, 0, sc->max_chain_frame_sz);

	/* Build chain frame element which holds all PRPs except first */
	main_chain_element = (pMpi25IeeeSgeChain64_t)
	    ((u_int8_t *)sgl_ptr + sizeof(MPI25_IEEE_SGE_CHAIN64));

	main_chain_element->Address = cmd->chain_frame_phys_addr;
	main_chain_element->NextChainOffset = 0;
	main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    IEEE_SGE_FLAGS_SYSTEM_ADDR |
	    MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;

	/* Build first PRP; the SGE need not be page aligned */
	ptr_first_sgl = sgl_ptr;
	sge_addr = segs[i].ds_addr;
	sge_len = segs[i].ds_len;
	i++;

	offset = (u_int32_t) (sge_addr & page_mask);
	first_prp_len = mr_nvme_pg_size - offset;

	ptr_first_sgl->Address = sge_addr;
	ptr_first_sgl->Length = first_prp_len;

	data_len -= first_prp_len;

	if (sge_len > first_prp_len) {
		sge_addr += first_prp_len;
		sge_len -= first_prp_len;
	} else if (sge_len == first_prp_len) {
		sge_addr = segs[i].ds_addr;
		sge_len = segs[i].ds_len;
		i++;
	}

	for (;;) {
		offset = (u_int32_t) (sge_addr & page_mask);

		/* Put PRP pointer due to page boundary */
		page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
		if (!page_mask_result) {
			device_printf(sc->mrsas_dev, "BRCM: Put prp pointer as we are at page boundary"
			    " ptr_sgl: 0x%p\n", ptr_sgl);
			/*
			 * Each PRP entry is 8 bytes, so advance the
			 * physical cursor in step with the virtual one.
			 */
			ptr_sgl_phys += sizeof(u_int64_t);
			*ptr_sgl = (uintptr_t)ptr_sgl_phys;
			ptr_sgl++;
			num_prp_in_chain++;
		}

		*ptr_sgl = sge_addr;
		ptr_sgl++;
		ptr_sgl_phys += sizeof(u_int64_t);
		num_prp_in_chain++;

		sge_addr += mr_nvme_pg_size;
		sge_len -= mr_nvme_pg_size;
		data_len -= mr_nvme_pg_size;

		if (data_len <= 0)
			break;

		if (sge_len > 0)
			continue;

		sge_addr = segs[i].ds_addr;
		sge_len = segs[i].ds_len;
		i++;
	}

	main_chain_element->Length = num_prp_in_chain * sizeof(u_int64_t);
	mrsas_atomic_inc(&sc->prp_count);
}
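/*
 * Worked example for the first PRP entry above, assuming a 4 KiB NVMe
 * page: a first SGE at bus address 0x1000200 has offset = 0x200, so
 * first_prp_len = 4096 - 0x200 = 3584 bytes; every subsequent PRP entry
 * then describes one full, page-aligned 4 KiB chunk.
 */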
/*
 * mrsas_data_load_cb:	Callback entry point to build SGLs
 * input:		Pointer to command packet as argument
 *			Pointer to segments
 *			Number of segments
 *			Error code
 *
 * This is the callback function of the bus dma map load.  It builds the SG
 * list.
 */
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
	struct mrsas_softc *sc = cmd->sc;
	boolean_t build_prp = false;

	if (error) {
		cmd->error_code = error;
		device_printf(sc->mrsas_dev, "mrsas_data_load_cb_prp: error=%d\n", error);
		if (error == EFBIG) {
			cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
			return;
		}
	}
	if (cmd->flags & MRSAS_DIR_IN)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cmd->flags & MRSAS_DIR_OUT)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREWRITE);

	/* Check whether PRPs or IEEE SGLs should be built */
	if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
	    (cmd->pdInterface == NVME_PD))
		build_prp = mrsas_is_prp_possible(cmd, segs, nseg);

	if (build_prp == true)
		mrsas_build_prp_nvme(cmd, segs, nseg);
	else
		mrsas_build_ieee_sgl(cmd, segs, nseg);

	cmd->sge_count = nseg;
}

/*
 * mrsas_freeze_simq:	Freeze SIM queue
 * input:		Pointer to command packet
 *			Pointer to SIM
 *
 * This function freezes the sim queue.
 */
static void
mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
{
	union ccb *ccb = (union ccb *)(cmd->ccb_ptr);

	xpt_freeze_simq(sim, 1);
	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	ccb->ccb_h.status |= CAM_REQUEUE_REQ;
}

void
mrsas_xpt_freeze(struct mrsas_softc *sc)
{
	xpt_freeze_simq(sc->sim_0, 1);
	xpt_freeze_simq(sc->sim_1, 1);
}

void
mrsas_xpt_release(struct mrsas_softc *sc)
{
	xpt_release_simq(sc->sim_0, 1);
	xpt_release_simq(sc->sim_1, 1);
}

/*
 * mrsas_cmd_done:	Perform remaining command completion
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *
 * This function calls unmap request and releases the MPT command.
 */
void
mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	mrsas_unmap_request(sc, cmd);

	mtx_lock(&sc->sim_lock);
	if (cmd->callout_owner) {
		callout_stop(&cmd->cm_callout);
		cmd->callout_owner = false;
	}
	xpt_done(cmd->ccb_ptr);
	cmd->ccb_ptr = NULL;
	mtx_unlock(&sc->sim_lock);
	mrsas_release_mpt_cmd(cmd);
}
/*
 * mrsas_freeze_simq:	Freeze SIM queue
 * input:	Pointer to command packet
 *		Pointer to SIM
 *
 * This function freezes the SIM queue and marks the CCB for requeue.
 */
static void
mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
{
	union ccb *ccb = (union ccb *)(cmd->ccb_ptr);

	xpt_freeze_simq(sim, 1);
	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	ccb->ccb_h.status |= CAM_REQUEUE_REQ;
}

void
mrsas_xpt_freeze(struct mrsas_softc *sc)
{
	xpt_freeze_simq(sc->sim_0, 1);
	xpt_freeze_simq(sc->sim_1, 1);
}

void
mrsas_xpt_release(struct mrsas_softc *sc)
{
	xpt_release_simq(sc->sim_0, 1);
	xpt_release_simq(sc->sim_1, 1);
}

/*
 * mrsas_cmd_done:	Perform remaining command completion
 * input:	Adapter instance soft state
 *		Pointer to command packet
 *
 * This function unmaps the request and releases the MPT command.
 */
void
mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	mrsas_unmap_request(sc, cmd);

	mtx_lock(&sc->sim_lock);
	if (cmd->callout_owner) {
		callout_stop(&cmd->cm_callout);
		cmd->callout_owner = false;
	}
	xpt_done(cmd->ccb_ptr);
	cmd->ccb_ptr = NULL;
	mtx_unlock(&sc->sim_lock);
	mrsas_release_mpt_cmd(cmd);
}

/*
 * mrsas_cam_poll:	Polling entry point
 * input:	Pointer to SIM
 *
 * This function drains completed commands from every reply queue: one per
 * MSI-x vector, or queue 0 when MSI-x is disabled.
 */
static void
mrsas_cam_poll(struct cam_sim *sim)
{
	int i;
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);

	if (sc->msix_vectors != 0) {
		for (i = 0; i < sc->msix_vectors; i++)
			mrsas_complete_cmd(sc, i);
	} else {
		mrsas_complete_cmd(sc, 0);
	}
}

/*
 * mrsas_bus_scan:	Perform bus scan
 * input:	Adapter instance soft state
 *
 * This function is needed on FreeBSD 7.x, where the driver must initiate the
 * bus scan itself.  It should not be called on FreeBSD 8.x and later, where
 * the bus scan is automatic.
 */
int
mrsas_bus_scan(struct mrsas_softc *sc)
{
	union ccb *ccb_0;
	union ccb *ccb_1;

	if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
		return (ENOMEM);
	}
	if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
		xpt_free_ccb(ccb_0);
		return (ENOMEM);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb_0);
		xpt_free_ccb(ccb_1);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb_0);
		xpt_free_ccb(ccb_1);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb_0);
	xpt_rescan(ccb_1);

	return (0);
}

/*
 * mrsas_bus_scan_sim:	Perform bus scan per SIM
 * input:	Adapter instance soft state
 *		Pointer to SIM
 *
 * This function is called from the event handler on LD creation/deletion
 * and JBOD on/off.
 */
int
mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
{
	union ccb *ccb;

	if ((ccb = xpt_alloc_ccb()) == NULL) {
		return (ENOMEM);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb);

	return (0);
}
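
/*
 * Variation on the pattern above (illustrative sketch only, guarded out;
 * the function name is hypothetical): rescanning a single target rather
 * than the whole SIM, by passing a concrete target id instead of the
 * wildcards.  Note that xpt_rescan() takes ownership of the CCB, so the
 * caller must not free it once the rescan has been queued.
 */
#if 0
static int
example_rescan_one_target(struct mrsas_softc *sc, struct cam_sim *sim,
    target_id_t tgt_id)
{
	union ccb *ccb;

	if ((ccb = xpt_alloc_ccb()) == NULL)
		return (ENOMEM);
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
	    tgt_id, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb);
	return (0);
}
#endif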
/*
 * mrsas_track_scsiio:	Track IOs for a given target in the mpt_cmd_list
 * input:	Adapter instance soft state
 *		Target ID of the target
 *		Bus ID of the target
 *
 * This function checks the whole mpt_cmd_list pool for any pending IO with
 * the bus_id and target_id passed in as arguments.  If such an IO is found,
 * the target reset did not complete successfully.
 *
 * Returns FAIL if IOs are still pending to the target device, else SUCCESS.
 */
static int
mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t tgt_id, u_int32_t bus_id)
{
	int i;
	struct mrsas_mpt_cmd *mpt_cmd = NULL;

	for (i = 0; i < sc->max_fw_cmds; i++) {
		mpt_cmd = sc->mpt_cmd_list[i];

		/*
		 * Check whether the target_id and bus_id are the same as
		 * those of the timed-out IO.
		 */
		if (mpt_cmd->ccb_ptr) {
			/* bus_id = 1 denotes a VD */
			if (bus_id == 1)
				tgt_id =
				    (mpt_cmd->ccb_ptr->ccb_h.target_id - (MRSAS_MAX_PD - 1));

			if (mpt_cmd->ccb_ptr->cpi.bus_id == bus_id &&
			    mpt_cmd->ccb_ptr->ccb_h.target_id == tgt_id) {
				device_printf(sc->mrsas_dev,
				    "IO commands pending to target id %d\n",
				    tgt_id);
				return FAIL;
			}
		}
	}

	return SUCCESS;
}

#if TM_DEBUG
/*
 * mrsas_tm_response_code:	Prints the TM response code received from FW
 * input:	Adapter instance soft state
 *		MPI reply returned by the firmware
 *
 * Returns nothing.
 */
static void
mrsas_tm_response_code(struct mrsas_softc *sc,
    MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
{
	char *desc;

	switch (mpi_reply->ResponseCode) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	device_printf(sc->mrsas_dev, "response_code(%01x): %s\n",
	    mpi_reply->ResponseCode, desc);
	device_printf(sc->mrsas_dev,
	    "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo\n"
	    "0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
	    mpi_reply->TerminationCount, mpi_reply->DevHandle,
	    mpi_reply->Function, mpi_reply->TaskType,
	    mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
}
#endif

/*
 * mrsas_issue_tm:	Fires the TM command to FW and waits for its completion
 * input:	Adapter instance soft state
 *		Request descriptor compiled by mrsas_reset_targets
 *
 * Returns FAIL if the TM command timed out in the FW, else SUCCESS.
 */
static int
mrsas_issue_tm(struct mrsas_softc *sc,
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
	int sleep_stat;

	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
	sleep_stat = msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "tm_sleep",
	    50 * hz);

	if (sleep_stat == EWOULDBLOCK) {
		device_printf(sc->mrsas_dev, "tm cmd TIMEDOUT\n");
		return FAIL;
	}

	return SUCCESS;
}
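
/*
 * Counterpart of the msleep() above (illustrative sketch only, guarded
 * out; the function name is hypothetical).  The completion path is
 * expected to wake the sleeper on sc->ocr_chan once the firmware posts
 * the TM reply; otherwise msleep() returns EWOULDBLOCK after the bound
 * of 50 * hz ticks, i.e. roughly 50 seconds.
 */
#if 0
static void
example_tm_completion(struct mrsas_softc *sc)
{
	/* Unblocks the msleep() in mrsas_issue_tm(). */
	wakeup(&sc->ocr_chan);
}
#endif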
/*
 * mrsas_reset_targets:	Gathers info to fire a target reset command
 * input:	Adapter instance soft state
 *
 * This function compiles data for a target reset command to be fired at the
 * FW, then traverses the target_reset_pool looking for targets with
 * timed-out IOs.
 *
 * Returns SUCCESS or FAIL.
 */
int mrsas_reset_targets(struct mrsas_softc *sc)
{
	struct mrsas_mpt_cmd *tm_mpt_cmd = NULL;
	struct mrsas_mpt_cmd *tgt_mpt_cmd = NULL;
	MR_TASK_MANAGE_REQUEST *mr_request;
	MPI2_SCSI_TASK_MANAGE_REQUEST *tm_mpi_request;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	int retCode = FAIL, count, i, outstanding;
	u_int32_t MSIxIndex, bus_id;
	target_id_t tgt_id;
#if TM_DEBUG
	MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
#endif

	outstanding = mrsas_atomic_read(&sc->fw_outstanding);

	if (!outstanding) {
		device_printf(sc->mrsas_dev, "NO IOs pending...\n");
		mrsas_atomic_set(&sc->target_reset_outstanding, 0);
		retCode = SUCCESS;
		goto return_status;
	} else if (sc->adprecovery != MRSAS_HBA_OPERATIONAL) {
		device_printf(sc->mrsas_dev, "Controller is not operational\n");
		goto return_status;
	} else {
		/* Some more error checks will be added in future */
	}

	/* Get an mpt frame and an index to fire the TM cmd */
	tm_mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!tm_mpt_cmd) {
		retCode = FAIL;
		goto return_status;
	}

	req_desc = mrsas_get_request_desc(sc, (tm_mpt_cmd->index) - 1);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor for tm.\n");
		retCode = FAIL;
		goto release_mpt;
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));

	req_desc->HighPriority.SMID = tm_mpt_cmd->index;
	req_desc->HighPriority.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
	    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->HighPriority.MSIxIndex = 0;
	req_desc->HighPriority.LMID = 0;
	req_desc->HighPriority.Reserved1 = 0;
	tm_mpt_cmd->request_desc = req_desc;

	mr_request = (MR_TASK_MANAGE_REQUEST *) tm_mpt_cmd->io_request;
	memset(mr_request, 0, sizeof(MR_TASK_MANAGE_REQUEST));

	tm_mpi_request = (MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
	tm_mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	tm_mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_mpi_request->TaskMID = 0;	/* smid task */
	tm_mpi_request->LUN[1] = 0;

	/* Traverse the target_reset_pool to get valid entries. */
	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++) {
		if (!sc->target_reset_pool[i]) {
			continue;
		} else {
			tgt_mpt_cmd = sc->target_reset_pool[i];
		}

		tgt_id = i;

		/* Check whether the target supports task management. */
		if (!tgt_mpt_cmd->tmCapable) {
			device_printf(sc->mrsas_dev, "Task management NOT SUPPORTED for "
			    "CAM target:%d\n", tgt_id);

			retCode = FAIL;
			goto release_mpt;
		}

		tm_mpi_request->DevHandle = tgt_mpt_cmd->io_request->DevHandle;

		if (i < (MRSAS_MAX_PD - 1)) {
			mr_request->uTmReqReply.tmReqFlags.isTMForPD = 1;
			bus_id = 0;
		} else {
			mr_request->uTmReqReply.tmReqFlags.isTMForLD = 1;
			bus_id = 1;
		}

		device_printf(sc->mrsas_dev, "TM will be fired for "
		    "CAM target:%d and bus_id %d\n", tgt_id, bus_id);

		sc->ocr_chan = (void *)&tm_mpt_cmd;
		retCode = mrsas_issue_tm(sc, req_desc);
		if (retCode == FAIL)
			goto release_mpt;

#if TM_DEBUG
		mpi_reply =
		    (MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->uTmReqReply.TMReply;
		mrsas_tm_response_code(sc, mpi_reply);
#endif
		mrsas_atomic_dec(&sc->target_reset_outstanding);
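		/*
		 * TM for this target has completed; retire its entry from
		 * the reset pool before verifying below that no IO is still
		 * outstanding for it.
		 */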
		sc->target_reset_pool[i] = NULL;

		/* Check for pending cmds in the mpt_cmd_pool with the tgt_id */
		mrsas_disable_intr(sc);
		/*
		 * Wait one second so that a parallel ISR invoking the same
		 * mrsas_complete_cmd() can finish.
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_wakeup",
		    1 * hz);
		count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
		mtx_unlock(&sc->sim_lock);
		for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
			mrsas_complete_cmd(sc, MSIxIndex);
		mtx_lock(&sc->sim_lock);
		retCode = mrsas_track_scsiio(sc, tgt_id, bus_id);
		mrsas_enable_intr(sc);

		if (retCode == FAIL)
			goto release_mpt;
	}

	device_printf(sc->mrsas_dev, "Number of targets outstanding "
	    "after reset: %d\n", mrsas_atomic_read(&sc->target_reset_outstanding));

release_mpt:
	mrsas_release_mpt_cmd(tm_mpt_cmd);
return_status:
	device_printf(sc->mrsas_dev, "target reset %s!!\n",
	    (retCode == SUCCESS) ? "SUCCESS" : "FAIL");

	return retCode;
}
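
/*
 * Usage sketch (illustrative only, guarded out; the function name is
 * hypothetical): how a timeout path might drive the reset machinery above.
 * mrsas_reset_targets() sleeps on sc->sim_lock internally, so the caller
 * must hold that mutex.  On FAIL the driver would typically escalate to a
 * full controller reset (OCR).
 */
#if 0
static void
example_handle_target_timeouts(struct mrsas_softc *sc)
{
	mtx_lock(&sc->sim_lock);
	if (mrsas_reset_targets(sc) == FAIL)
		device_printf(sc->mrsas_dev,
		    "target reset failed, escalating to OCR\n");
	mtx_unlock(&sc->sim_lock);
}
#endif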