/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

/*
 * CAM interface for the smartpqi driver.
 */

#include "smartpqi_includes.h"

/*
 * Set the CAM SIM properties of the smartpqi adapter.
 */
static void
update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
{
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
	    cam_sim_softc(sim);

	DBG_FUNC("IN\n");

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE | PI_WIDE_16;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
	cpi->hba_eng_cnt = 0;
	cpi->max_lun = PQI_MAX_MULTILUN;
	cpi->max_target = 1088;
	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
	cpi->initiator_id = 255;
	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);
	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC4;
	cpi->transport = XPORT_SPI;
	cpi->transport_version = 2;
	cpi->ccb_h.status = CAM_REQ_CMP;

	DBG_FUNC("OUT\n");
}

/*
 * Get the transport settings of the smartpqi adapter.
 */
static void
get_transport_settings(struct pqisrc_softstate *softs,
    struct ccb_trans_settings *cts)
{
	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas;
	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;

	DBG_FUNC("IN\n");

	cts->protocol = PROTO_SCSI;
	cts->protocol_version = SCSI_REV_SPC4;
	cts->transport = XPORT_SPI;
	cts->transport_version = 2;
	spi->valid = CTS_SPI_VALID_DISC;
	spi->flags = CTS_SPI_FLAGS_DISC_ENB;
	scsi->valid = CTS_SCSI_VALID_TQ;
	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
	sas->valid = CTS_SAS_VALID_SPEED;
	cts->ccb_h.status = CAM_REQ_CMP;

	DBG_FUNC("OUT\n");
}
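/*
 * Note: both update_sim_properties() and get_transport_settings() above
 * report a parallel SCSI (SPI) transport to CAM even though the controller
 * is SAS-based; the driver does not expose per-device transport details.
 */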
/*
 * Add the target to the CAM layer and rescan, when a new device is found.
 */
void
os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	union ccb *ccb;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
			DBG_ERR("rescan failed (can't allocate CCB)\n");
			return;
		}

		if (xpt_create_path(&ccb->ccb_h.path, NULL,
		    cam_sim_path(softs->os_specific.sim),
		    device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("rescan failed (can't create path)\n");
			xpt_free_ccb(ccb);
			return;
		}
		/* xpt_rescan() takes ownership of the CCB. */
		xpt_rescan(ccb);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Remove the device from the CAM layer when deleted or hot removed.
 */
void
os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	struct cam_path *tmppath;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
		    cam_sim_path(softs->os_specific.sim),
		    device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event\n");
			return;
		}
		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
		xpt_free_path(tmppath);
		pqisrc_free_device(softs, device);
		OS_SLEEP(10000);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Release the frozen simq.
 */
static void
pqi_release_camq(rcb_t *rcb)
{
	pqisrc_softstate_t *softs;
	struct ccb_scsiio *csio;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	softs = rcb->softs;

	DBG_FUNC("IN\n");

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		/*
		 * If CAM_RELEASE_SIMQ is already set on the CCB, release the
		 * queue now; otherwise flag the CCB so CAM releases the queue
		 * when the command completes.
		 */
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	DBG_FUNC("OUT\n");
}

/*
 * DMA-unmap the completed request.
 */
static void
pqi_unmap_request(void *arg)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	DBG_IO("IN rcb = %p\n", arg);

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
		return;

	if (rcb->bcount != 0) {
		if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			    rcb->cm_datamap, BUS_DMASYNC_POSTREAD);
		if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			    rcb->cm_datamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
		    rcb->cm_datamap);
	}
	rcb->cm_flags &= ~PQI_CMD_MAPPED;

	if (rcb->sgt && rcb->nseg)
		os_mem_free(rcb->softs, (void *)rcb->sgt,
		    rcb->nseg * sizeof(sgt_t));

	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_IO("OUT\n");
}

/*
 * Construct a meaningful LD name for the volume here.
 */
static void
smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
{
	struct scsi_inquiry_data *inq = NULL;
	uint8_t *cdb = NULL;
	pqi_scsi_dev_t *device = NULL;

	DBG_FUNC("IN\n");

	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
	    (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
	if (cdb[0] == INQUIRY &&
	    (cdb[1] & SI_EVPD) == 0 &&
	    (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
	    csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {

		inq = (struct scsi_inquiry_data *)csio->data_ptr;

		device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];
		/*
		 * Let the disks be probed and dealt with via CAM.  Only for
		 * logical volumes fall through and let the inquiry data be
		 * tweaked below.
		 */
		if (!device || !pqisrc_is_logical_device(device) ||
		    (device->devtype != DISK_DEVICE) ||
		    pqisrc_is_external_raid_device(device)) {
			return;
		}

		strncpy(inq->vendor, "MSCC", SID_VENDOR_SIZE);
		strncpy(inq->product,
		    pqisrc_raidlevel_to_string(device->raid_level),
		    SID_PRODUCT_SIZE);
		strncpy(inq->revision, device->volume_offline ? "OFF" : "OK",
		    SID_REVISION_SIZE);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Handle completion of a command - pass results back through the CCB.
 */
void
os_io_response_success(rcb_t *rcb)
{
	struct ccb_scsiio *csio;

	DBG_IO("IN rcb = %p\n", rcb);

	if (rcb == NULL)
		panic("rcb is null");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	rcb->status = REQUEST_SUCCESS;
	csio->ccb_h.status = CAM_REQ_CMP;

	smartpqi_fix_ld_inquiry(rcb->softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}

/*
 * Error response handling for RAID IO.
 */
void
os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	ASSERT(err_info != NULL);
	csio->scsi_status = err_info->status;
	csio->ccb_h.status = CAM_REQ_CMP_ERR;

	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
		/*
		 * Handle specific SCSI status values.
		 */
		switch (csio->scsi_status) {
		case PQI_RAID_STATUS_QUEUE_FULL:
			csio->ccb_h.status = CAM_REQ_CMP;
			DBG_ERR("Queue Full error\n");
			break;
		/* Check condition, sense data included. */
		case PQI_RAID_STATUS_CHECK_CONDITION:
		{
			uint16_t sense_data_len =
			    LE_16(err_info->sense_data_len);
			uint8_t *sense_data = NULL;

			if (sense_data_len)
				sense_data = err_info->data;
			memset(&csio->sense_data, 0, csio->sense_len);
			/* Copy at most sense_len bytes; report the rest as residual. */
			sense_data_len = (sense_data_len > csio->sense_len) ?
			    csio->sense_len : sense_data_len;
			if (sense_data)
				memcpy(&csio->sense_data, sense_data,
				    sense_data_len);
			if (csio->sense_len > sense_data_len)
				csio->sense_resid = csio->sense_len -
				    sense_data_len;
			else
				csio->sense_resid = 0;
			csio->ccb_h.status = CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID | CAM_REQ_CMP_ERR;
		}
		break;

		case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
		{
			uint32_t resid = 0;

			resid = rcb->bcount - err_info->data_out_transferred;
			csio->resid = resid;
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		default:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		}
	}

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}
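/*
 * Note: AIO below refers to the accelerated I/O path, where commands are
 * sent directly to the target device rather than through the controller's
 * RAID stack, so errors arrive in a separate aio_path_error_info_elem_t
 * format.
 */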
/*
 * Error response handling for AIO.
 */
void
os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	if (rcb == NULL)
		panic("rcb is null");

	rcb->status = REQUEST_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	switch (err_info->service_resp) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		csio->ccb_h.status = err_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch (err_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			csio->ccb_h.status = CAM_REQ_ABORTED;
			DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			csio->resid = LE_32(err_info->resd_count);
			break;
		case PQI_AIO_STATUS_OVERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			DBG_WARN_BTL(rcb->dvp, "AIO Path Disabled\n");
			/* Fall back to the RAID path and retry the command. */
			rcb->dvp->offload_enabled = false;
			csio->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		case PQI_AIO_STATUS_IO_NO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
		default:
			DBG_WARN_BTL(rcb->dvp, "IO Error/Invalid/No device\n");
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		csio->ccb_h.status = CAM_REQ_CMP;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
		DBG_WARN_BTL(rcb->dvp, "TMF rejected/Incorrect Lun\n");
		csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		break;
	default:
		DBG_WARN_BTL(rcb->dvp, "Scsi Status Error\n");
		csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		break;
	}

	if (err_info->data_pres == DATA_PRESENT_SENSE_DATA) {
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);

		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		if (sense_data_len)
			sense_data = err_info->data;
		DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n",
		    sense_data_len);
		memset(&csio->sense_data, 0, csio->sense_len);
		if (sense_data)
			memcpy(&csio->sense_data, sense_data,
			    ((sense_data_len > csio->sense_len) ?
			    csio->sense_len : sense_data_len));
		if (csio->sense_len > sense_data_len)
			csio->sense_resid = csio->sense_len - sense_data_len;
		else
			csio->sense_resid = 0;
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

	smartpqi_fix_ld_inquiry(softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}
/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if (error || nseg > softs->pqi_cap.max_sg_elem) {
		xpt_freeze_simq(softs->os_specific.sim, 1);
		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ |
		    CAM_RELEASE_SIMQ);
		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
		    error, nseg, softs->pqi_cap.max_sg_elem);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}

	/*
	 * Allocate one sgt_t per DMA segment (not rcb_t, which would
	 * over-allocate and disagree with the size freed in
	 * pqi_unmap_request()).
	 */
	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
	rcb->nseg = nseg;
	if (rcb->sgt == NULL) {
		/* Requeue rather than submitting the IO with no s/g table. */
		xpt_freeze_simq(softs->os_specific.sim, 1);
		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ |
		    CAM_RELEASE_SIMQ);
		DBG_ERR_BTL(rcb->dvp, "os_mem_alloc failed for sg table\n");
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}
	for (int i = 0; i < nseg; i++) {
		rcb->sgt[i].addr = segs[i].ds_addr;
		rcb->sgt[i].len = segs[i].ds_len;
		rcb->sgt[i].flags = 0;
	}

	if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
		    rcb->cm_datamap, BUS_DMASYNC_PREREAD);
	if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
		    rcb->cm_datamap, BUS_DMASYNC_PREWRITE);

	/* Call IO functions depending on pd or ld */
	rcb->status = REQUEST_PENDING;

	error = pqisrc_build_send_io(softs, rcb);

	if (error) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ |
		    CAM_RELEASE_SIMQ);
		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}
}

/*
 * DMA-map the request buffer.
 */
static int
pqi_map_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;
	int error = PQI_STATUS_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	DBG_FUNC("IN\n");

	/* Check that mapping is necessary. */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return (0);
	rcb->cm_flags |= PQI_CMD_MAPPED;
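	/*
	 * Data-bearing commands are handed to busdma; the s/g table is built
	 * and the request submitted from the pqi_request_map_helper()
	 * callback, which may run asynchronously (EINPROGRESS).
	 */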
	if (rcb->bcount) {
		error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
		    rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
		if (error != 0) {
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n",
			    error, rcb->bcount);
			return error;
		}
	} else {
		/*
		 * Set up the command to go to the controller.  If there are
		 * no data buffers associated with the command then it can
		 * bypass busdma.
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = REQUEST_PENDING;

		error = pqisrc_build_send_io(softs, rcb);
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}

/*
 * Clear the request control block.
 */
void
os_reset_rcb(rcb_t *rcb)
{
	rcb->error_info = NULL;
	rcb->req = NULL;
	rcb->status = -1;
	rcb->tag = INVALID_ELEM;
	rcb->dvp = NULL;
	rcb->cdbp = NULL;
	rcb->softs = NULL;
	rcb->cm_flags = 0;
	rcb->cm_data = NULL;
	rcb->bcount = 0;
	rcb->nseg = 0;
	rcb->sgt = NULL;
	rcb->cm_ccb = NULL;
	rcb->encrypt_enable = false;
	rcb->ioaccel_handle = 0;
	rcb->resp_qid = 0;
	rcb->req_pending = false;
}

/*
 * Callback function for the lun rescan.
 */
static void
smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
}

/*
 * Rescan the lun.
 */
static void
smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target, int lun)
{
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct cam_path *path = NULL;

	DBG_FUNC("IN\n");

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("unable to alloc CCB for lun rescan\n");
		return;
	}

	status = xpt_create_path(&path, NULL,
	    cam_sim_path(softs->os_specific.sim), target, lun);
	if (status != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP\n",
		    status);
		xpt_free_ccb(ccb);
		return;
	}

	bzero(ccb, sizeof(union ccb));
	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;

	xpt_action(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Rescan the lun under each target.
 */
void
smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
	int target = 0, lun = 0;

	DBG_FUNC("IN\n");

	for (target = 0; target < PQI_MAX_DEVICES; target++) {
		for (lun = 0; lun < PQI_MAX_MULTILUN; lun++) {
			if (softs->device_list[target][lun]) {
				smartpqi_lun_rescan(softs, target, lun);
			}
		}
	}

	DBG_FUNC("OUT\n");
}
/*
 * Set the mode of tagged command queueing for the current task.
 */
uint8_t
os_get_task_attr(rcb_t *rcb)
{
	union ccb *ccb = rcb->cm_ccb;
	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;

	switch (ccb->csio.tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
		break;
	case MSG_ORDERED_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
		break;
	case MSG_SIMPLE_Q_TAG:
	default:
		tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
		break;
	}
	return tag_action;
}

/*
 * Complete all outstanding commands.
 */
void
os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
	int tag = 0;

	DBG_FUNC("IN\n");

	for (tag = 1; tag < softs->max_outstanding_io; tag++) {
		rcb_t *prcb = &softs->rcb[tag];
		if (prcb->req_pending && prcb->cm_ccb) {
			prcb->req_pending = false;
			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
			xpt_done((union ccb *)prcb->cm_ccb);
			prcb->cm_ccb = NULL;
		}
	}

	DBG_FUNC("OUT\n");
}

/*
 * IO handling functionality entry point.
 */
static int
pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
	rcb_t *rcb;
	uint32_t tag, no_transfer = 0;
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
	    cam_sim_softc(sim);
	int32_t error = PQI_STATUS_FAILURE;
	pqi_scsi_dev_t *dvp;

	DBG_FUNC("IN\n");

	if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
		return PQI_STATUS_FAILURE;
	}

	dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	/* Check controller state. */
	if (IN_PQI_RESET(softs)) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET |
		    CAM_BUSY | CAM_REQ_INPROG;
		DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device state. */
	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
		DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device reset. */
	if (DEV_RESET(dvp)) {
		ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
		return error;
	}

	if (dvp->expose_device == false) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
		return error;
	}

	tag = pqisrc_get_tag(&softs->taglist);
	if (tag == INVALID_ELEM) {
		DBG_ERR("Get Tag failed\n");
		xpt_freeze_simq(softs->os_specific.sim, 1);
		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
		return PQI_STATUS_FAILURE;
	}

	DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);

	rcb = &softs->rcb[tag];
	os_reset_rcb(rcb);
	rcb->tag = tag;
	rcb->softs = softs;
	rcb->cmdlen = ccb->csio.cdb_len;
	ccb->ccb_h.sim_priv.entries[0].ptr = rcb;

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case CAM_DIR_OUT:
		rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
		break;
	case CAM_DIR_NONE:
		no_transfer = 1;
		break;
	default:
		DBG_ERR("Unknown Dir\n");
		break;
	}
	rcb->cm_ccb = ccb;
	rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
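	/*
	 * For data-bearing commands, record the CAM buffer and length so
	 * pqi_map_request() can hand them to busdma; CAM_DIR_NONE commands
	 * carry no payload.
	 */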
	if (!no_transfer) {
		rcb->cm_data = (void *)ccb->csio.data_ptr;
		rcb->bcount = ccb->csio.dxfer_len;
	} else {
		rcb->cm_data = NULL;
		rcb->bcount = 0;
	}
	/*
	 * Submit the request to the adapter.
	 *
	 * Note that this may fail if we're unable to map the request (and
	 * if we ever learn a transport layer other than simple, may fail
	 * if the adapter rejects the command).
	 */
	if ((error = pqi_map_request(rcb)) != 0) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		if (error == EINPROGRESS) {
			/* Deferred busdma mapping; the callback will submit. */
			DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
			error = 0;
		} else {
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			DBG_WARN("Requeue req error = %d target = %d\n", error,
			    ccb->ccb_h.target_id);
			pqi_unmap_request(rcb);
		}
	}

	DBG_FUNC("OUT error = %d\n", error);
	return error;
}

/*
 * Abort a task, task management functionality.
 */
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
	pqi_scsi_dev_t *devp = rcb->dvp;
	uint32_t abort_tag = rcb->tag;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;
	uint16_t qid;

	DBG_FUNC("IN\n");

	qid = (uint16_t)rcb->resp_qid;

	/*
	 * Issue the TMF from a fresh rcb, keeping the device and response
	 * queue of the request being aborted.
	 */
	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;
	rcb->resp_qid = qid;

	rval = pqisrc_send_tmf(softs, devp, rcb, abort_tag,
	    SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);

	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
		if (REQUEST_SUCCESS == rval) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		}
	}
	pqisrc_put_tag(&softs->taglist, abort_tag);
	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Abort a task set, task management functionality.
 */
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
	    SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);

	if (rval == PQI_STATUS_SUCCESS) {
		rval = rcb->status;
	}

	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Target reset task management functionality.
 */
static int
pqisrc_target_reset(pqisrc_softstate_t *softs, union ccb *ccb)
{
	pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	if (devp == NULL) {
		DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id);
		return (-1);
	}

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	devp->reset_in_progress = true;
	rval = pqisrc_send_tmf(softs, devp, rcb, 0,
	    SOP_TASK_MANAGEMENT_LUN_RESET);
	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
	}
	devp->reset_in_progress = false;
	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);
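	/*
	 * Collapse the firmware request status into the driver's
	 * success/failure return space for the CAM layer.
	 */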
	return ((rval == REQUEST_SUCCESS) ?
	    PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
}

/*
 * CAM entry point of the smartpqi module.
 */
static void
smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	DBG_FUNC("IN\n");

	switch (ccb_h->func_code) {
	case XPT_SCSI_IO:
	{
		if (!pqisrc_io_start(sim, ccb)) {
			/* The I/O path completes the CCB asynchronously. */
			return;
		}
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		cam_calc_geometry(ccg, /* extended */ 1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_PATH_INQ:
	{
		update_sim_properties(sim, &ccb->cpi);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
		get_transport_settings(softs, &ccb->cts);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_ABORT:
		if (pqisrc_scsi_abort_task(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
			DBG_ERR("Abort task failed on %d\n",
			    ccb->ccb_h.target_id);
			return;
		}
		break;
	case XPT_TERM_IO:
		if (pqisrc_scsi_abort_task_set(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			DBG_ERR("Abort task set failed on %d\n",
			    ccb->ccb_h.target_id);
			xpt_done(ccb);
			return;
		}
		break;
	case XPT_RESET_DEV:
		if (pqisrc_target_reset(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			DBG_ERR("Target reset failed on %d\n",
			    ccb->ccb_h.target_id);
			xpt_done(ccb);
			return;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		break;
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SET_TRAN_SETTINGS:
		/* Fall through to xpt_done() so the CCB is completed. */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	default:
		DBG_WARN("UNSUPPORTED FUNC CODE\n");
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

	DBG_FUNC("OUT\n");
}
/*
 * Poll for responses when interrupts are unavailable.
 * This also serves to support crash dumps.
 */
static void
smartpqi_poll(struct cam_sim *sim)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	int i;

	/* Queue 0 carries events; response queues start at index 1. */
	for (i = 1; i < softs->intr_count; i++)
		pqisrc_process_response_queue(softs, i);
}

/*
 * Adjust the queue depth of a device.
 */
void
smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
{
	struct ccb_relsim crs;

	DBG_INFO("IN\n");

	xpt_setup_ccb(&crs.ccb_h, path, 5);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = CAM_DEV_QFREEZE;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = queue_depth;
	xpt_action((union ccb *)&crs);
	if (crs.ccb_h.status != CAM_REQ_CMP) {
		printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
	}

	DBG_INFO("OUT\n");
}

/*
 * Async callback registered for setting the queue depth.
 */
static void
smartpqi_async(void *callback_arg, u_int32_t code,
    struct cam_path *path, void *arg)
{
	struct pqisrc_softstate *softs;

	softs = (struct pqisrc_softstate *)callback_arg;

	DBG_FUNC("IN\n");

	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL) {
			break;
		}
		uint32_t t_id = cgd->ccb_h.target_id;

		if (t_id <= (PQI_CTLR_INDEX - 1)) {
			if (softs != NULL) {
				pqi_scsi_dev_t *dvp =
				    softs->device_list[t_id][cgd->ccb_h.target_lun];
				if (dvp != NULL) {
					smartpqi_adjust_queue_depth(path,
					    dvp->queue_depth);
				}
			}
		}
		break;
	}
	default:
		break;
	}

	DBG_FUNC("OUT\n");
}

/*
 * Register the SIM with the CAM layer for the smartpqi driver.
 */
int
register_sim(struct pqisrc_softstate *softs, int card_index)
{
	int error = 0;
	int max_transactions;
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct ccb_setasync csa;
	struct cam_sim *sim;

	DBG_FUNC("IN\n");

	max_transactions = softs->max_io_for_scsi_ml;
	softs->os_specific.devq = cam_simq_alloc(max_transactions);
	if (softs->os_specific.devq == NULL) {
		DBG_ERR("cam_simq_alloc failed txns = %d\n",
		    max_transactions);
		return PQI_STATUS_FAILURE;
	}

	sim = cam_sim_alloc(smartpqi_cam_action,
	    smartpqi_poll, "smartpqi", softs,
	    card_index, &softs->os_specific.cam_lock,
	    1, max_transactions, softs->os_specific.devq);
	if (sim == NULL) {
		DBG_ERR("cam_sim_alloc failed txns = %d\n",
		    max_transactions);
		cam_simq_free(softs->os_specific.devq);
		return PQI_STATUS_FAILURE;
	}

	softs->os_specific.sim = sim;
	mtx_lock(&softs->os_specific.cam_lock);
	status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
	if (status != CAM_SUCCESS) {
		DBG_ERR("xpt_bus_register failed status=%d\n", status);
		cam_sim_free(softs->os_specific.sim, FALSE);
		cam_simq_free(softs->os_specific.devq);
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}

	softs->os_specific.sim_registered = TRUE;
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("xpt_alloc_ccb_nowait failed\n");
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}
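	/*
	 * Create a wildcard path on this bus so the async callback below
	 * receives AC_FOUND_DEVICE notifications for every target and lun.
	 */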
	if (xpt_create_path(&ccb->ccb_h.path, NULL,
	    cam_sim_path(softs->os_specific.sim),
	    CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path failed\n");
		xpt_free_ccb(ccb);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		cam_sim_free(softs->os_specific.sim, TRUE);
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}
	/*
	 * Register a callback to set the queue depth per target, which is
	 * derived from the FW.
	 */
	softs->os_specific.path = ccb->ccb_h.path;
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		DBG_ERR("Unable to register smartpqi_async handler: %d!\n",
		    csa.ccb_h.status);
	}

	mtx_unlock(&softs->os_specific.cam_lock);
	DBG_FUNC("OUT\n");
	return error;
}

/*
 * Deregister the smartpqi SIM from the CAM layer.
 */
void
deregister_sim(struct pqisrc_softstate *softs)
{
	struct ccb_setasync csa;

	DBG_FUNC("IN\n");

	if (softs->os_specific.mtx_init) {
		mtx_lock(&softs->os_specific.cam_lock);
	}

	/* Disable the async callback before tearing down the path. */
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	xpt_free_path(softs->os_specific.path);

	xpt_release_simq(softs->os_specific.sim, 0);

	xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
	softs->os_specific.sim_registered = FALSE;

	if (softs->os_specific.sim) {
		cam_sim_free(softs->os_specific.sim, FALSE);
		softs->os_specific.sim = NULL;
	}
	if (softs->os_specific.mtx_init) {
		mtx_unlock(&softs->os_specific.cam_lock);
	}
	if (softs->os_specific.devq != NULL) {
		cam_simq_free(softs->os_specific.devq);
	}
	if (softs->os_specific.mtx_init) {
		mtx_destroy(&softs->os_specific.cam_lock);
		softs->os_specific.mtx_init = FALSE;
	}

	mtx_destroy(&softs->os_specific.map_lock);

	DBG_FUNC("OUT\n");
}