/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */
/*
 * CAM interface for the smartpqi driver
 */

#include "smartpqi_includes.h"

/*
 * Set the CAM SIM properties of the smartpqi adapter.
 */
static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
{
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);

	DBG_FUNC("IN\n");

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE | PI_WIDE_16;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
	cpi->hba_eng_cnt = 0;
	cpi->max_lun = PQI_MAX_MULTILUN;
	cpi->max_target = 1088;
	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
	cpi->initiator_id = 255;
	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);
	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC4;
	cpi->transport = XPORT_SPI;
	cpi->transport_version = 2;
	cpi->ccb_h.status = CAM_REQ_CMP;

	DBG_FUNC("OUT\n");
}

/*
 * Get the transport settings of the smartpqi adapter.
 */
static void get_transport_settings(struct pqisrc_softstate *softs,
		struct ccb_trans_settings *cts)
{
	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas;
	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;

	DBG_FUNC("IN\n");

	cts->protocol = PROTO_SCSI;
	cts->protocol_version = SCSI_REV_SPC4;
	cts->transport = XPORT_SPI;
	cts->transport_version = 2;
	spi->valid = CTS_SPI_VALID_DISC;
	spi->flags = CTS_SPI_FLAGS_DISC_ENB;
	scsi->valid = CTS_SCSI_VALID_TQ;
	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
	sas->valid = CTS_SAS_VALID_SPEED;
	cts->ccb_h.status = CAM_REQ_CMP;

	DBG_FUNC("OUT\n");
}

/*
 * Add the target to the CAM layer and rescan, when a new device is found.
 */
void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
	union ccb *ccb;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
			DBG_ERR("rescan failed (can't allocate CCB)\n");
			return;
		}

		if (xpt_create_path(&ccb->ccb_h.path, NULL,
				cam_sim_path(softs->os_specific.sim),
				device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("rescan failed (can't create path)\n");
			xpt_free_ccb(ccb);
			return;
		}
		xpt_rescan(ccb);
	}

	DBG_FUNC("OUT\n");
}
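/*
 * Note: on success, xpt_rescan() takes ownership of the CCB allocated
 * above and frees it from its completion handler, which is why only the
 * error paths call xpt_free_ccb() here.
 */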
/*
 * Remove the device from the CAM layer when it is deleted or hot removed.
 */
void os_remove_device(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device) {
	struct cam_path *tmppath;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
				cam_sim_path(softs->os_specific.sim),
				device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event\n");
			return;
		}
		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
		xpt_free_path(tmppath);
		pqisrc_free_device(softs, device);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to release the frozen simq.
 */
static void pqi_release_camq(rcb_t *rcb)
{
	pqisrc_softstate_t *softs;
	struct ccb_scsiio *csio;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	softs = rcb->softs;

	DBG_FUNC("IN\n");

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to DMA-unmap the completed request.
 */
static void pqi_unmap_request(void *arg)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	DBG_IO("IN rcb = %p\n", arg);

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
		return;

	if (rcb->bcount != 0) {
		if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap);
	}
	rcb->cm_flags &= ~PQI_CMD_MAPPED;

	if (rcb->sgt && rcb->nseg)
		os_mem_free(rcb->softs, (void *)rcb->sgt,
			rcb->nseg * sizeof(sgt_t));

	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_IO("OUT\n");
}
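/*
 * The POSTREAD/POSTWRITE syncs above pair with the PREREAD/PREWRITE syncs
 * issued in pqi_request_map_helper() below; busdma needs both halves so
 * that bounce buffers are copied and CPU caches are flushed on platforms
 * that require it before the data is handed back to CAM.
 */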
/*
 * Construct a meaningful LD name for the volume here.
 */
static void
smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
{
	struct scsi_inquiry_data *inq = NULL;
	uint8_t *cdb = NULL;
	pqi_scsi_dev_t *device = NULL;

	DBG_FUNC("IN\n");

	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
		(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
	if (cdb[0] == INQUIRY &&
		(cdb[1] & SI_EVPD) == 0 &&
		(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
		csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {

		inq = (struct scsi_inquiry_data *)csio->data_ptr;

		device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];

		/*
		 * Let the disks be probed and dealt with via CAM. Only
		 * for LDs, let it fall through and the inquiry be tweaked.
		 */
		if (!device || !pqisrc_is_logical_device(device) ||
				(device->devtype != DISK_DEVICE) ||
				pqisrc_is_external_raid_device(device)) {
			return;
		}

		strncpy(inq->vendor, "MSCC",
			SID_VENDOR_SIZE);
		strncpy(inq->product,
			pqisrc_raidlevel_to_string(device->raid_level),
			SID_PRODUCT_SIZE);
		strncpy(inq->revision, device->volume_offline ? "OFF" : "OK",
			SID_REVISION_SIZE);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Handle completion of a command - pass the results back through the CCB.
 */
void
os_io_response_success(rcb_t *rcb)
{
	struct ccb_scsiio *csio;

	DBG_IO("IN rcb = %p\n", rcb);

	if (rcb == NULL)
		panic("rcb is null");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	rcb->status = REQUEST_SUCCESS;
	csio->ccb_h.status = CAM_REQ_CMP;

	smartpqi_fix_ld_inquiry(rcb->softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}

/*
 * Error response handling for RAID IO.
 */
void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	ASSERT(err_info != NULL);
	csio->scsi_status = err_info->status;
	csio->ccb_h.status = CAM_REQ_CMP_ERR;

	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
		/*
		 * Handle specific SCSI status values.
		 */
		switch (csio->scsi_status) {
		case PQI_RAID_STATUS_QUEUE_FULL:
			csio->ccb_h.status = CAM_REQ_CMP;
			DBG_ERR("Queue Full error\n");
			break;
		/* check condition, sense data included */
		case PQI_RAID_STATUS_CHECK_CONDITION:
		{
			uint16_t sense_data_len =
				LE_16(err_info->sense_data_len);
			uint8_t *sense_data = NULL;
			if (sense_data_len)
				sense_data = err_info->data;
			memset(&csio->sense_data, 0, csio->sense_len);
			sense_data_len = (sense_data_len > csio->sense_len) ?
						csio->sense_len :
						sense_data_len;
			if (sense_data)
				memcpy(&csio->sense_data, sense_data,
					sense_data_len);
			if (csio->sense_len > sense_data_len)
				csio->sense_resid = csio->sense_len
							- sense_data_len;
			else
				csio->sense_resid = 0;
			csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
						| CAM_AUTOSNS_VALID
						| CAM_REQ_CMP_ERR;
			break;
		}
		case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
		{
			uint32_t resid = 0;
			resid = rcb->bcount - err_info->data_out_transferred;
			csio->resid = resid;
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		default:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		}
	}

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}
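/*
 * A note on the CHECK CONDITION path above: CAM expects the SIM to copy
 * at most csio->sense_len bytes of sense data into csio->sense_data,
 * report any shortfall in csio->sense_resid, and set CAM_AUTOSNS_VALID so
 * that the peripheral driver does not issue its own REQUEST SENSE.
 */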
/*
 * Error response handling for AIO.
 */
void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	if (rcb == NULL)
		panic("rcb is null");

	rcb->status = REQUEST_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	switch (err_info->service_resp) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		csio->ccb_h.status = err_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch (err_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			csio->ccb_h.status = CAM_REQ_ABORTED;
			DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			csio->resid = LE_32(err_info->resd_count);
			break;
		case PQI_AIO_STATUS_OVERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			DBG_WARN_BTL(rcb->dvp, "AIO Path Disabled\n");
			rcb->dvp->offload_enabled = false;
			csio->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		case PQI_AIO_STATUS_IO_NO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
		default:
			DBG_WARN_BTL(rcb->dvp, "IO Error/Invalid/No device\n");
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		csio->ccb_h.status = CAM_REQ_CMP;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
		DBG_WARN_BTL(rcb->dvp, "TMF rejected/Incorrect LUN\n");
		csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		break;
	default:
		DBG_WARN_BTL(rcb->dvp, "SCSI Status Error\n");
		csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		break;
	}

	if (err_info->data_pres == DATA_PRESENT_SENSE_DATA) {
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);

		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		if (sense_data_len)
			sense_data = err_info->data;
		DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n",
			sense_data_len);
		memset(&csio->sense_data, 0, csio->sense_len);
		if (sense_data)
			memcpy(&csio->sense_data, sense_data,
				((sense_data_len > csio->sense_len) ?
				csio->sense_len : sense_data_len));
		if (csio->sense_len > sense_data_len)
			csio->sense_resid = csio->sense_len - sense_data_len;
		else
			csio->sense_resid = 0;
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

	smartpqi_fix_ld_inquiry(softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);
	DBG_IO("OUT\n");
}
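/*
 * When the firmware reports PQI_AIO_STATUS_AIO_PATH_DISABLED above, the
 * handler clears offload_enabled and completes the CCB with
 * CAM_REQUEUE_REQ; CAM then resubmits the command, which the driver
 * routes through the (slower) RAID path instead of the accelerated one.
 */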
/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if (error || nseg > softs->pqi_cap.max_sg_elem) {
		xpt_freeze_simq(softs->os_specific.sim, 1);
		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ |
						CAM_RELEASE_SIMQ);
		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
			error, nseg, softs->pqi_cap.max_sg_elem);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}

	/* One sgt_t entry per DMA segment; freed in pqi_unmap_request(). */
	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
	if (rcb->sgt == NULL) {
		xpt_freeze_simq(softs->os_specific.sim, 1);
		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ |
						CAM_RELEASE_SIMQ);
		DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}

	rcb->nseg = nseg;
	for (int i = 0; i < nseg; i++) {
		rcb->sgt[i].addr = segs[i].ds_addr;
		rcb->sgt[i].len = segs[i].ds_len;
		rcb->sgt[i].flags = 0;
	}

	if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREREAD);
	if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREWRITE);

	/* Call IO functions depending on pd or ld */
	rcb->status = REQUEST_PENDING;

	error = pqisrc_build_send_io(softs, rcb);

	if (error) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ |
						CAM_RELEASE_SIMQ);
		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}
}
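/*
 * bus_dmamap_load_ccb() may run pqi_request_map_helper() synchronously,
 * or it may return EINPROGRESS and defer the callback until mapping
 * resources become available; the caller (pqisrc_io_start()) therefore
 * treats EINPROGRESS as a deferral rather than a failure.
 */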
/*
 * Function to DMA-map the request buffer.
 */
static int pqi_map_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;
	int error = PQI_STATUS_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	DBG_FUNC("IN\n");

	/* Check that mapping is necessary */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return (0);
	rcb->cm_flags |= PQI_CMD_MAPPED;

	if (rcb->bcount) {
		error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
		if (error != 0) {
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n",
				error, rcb->bcount);
			return error;
		}
	} else {
		/*
		 * Set up the command to go to the controller. If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = REQUEST_PENDING;

		error = pqisrc_build_send_io(softs, rcb);
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}

/*
 * Function to clear the request control block.
 */
void os_reset_rcb(rcb_t *rcb)
{
	rcb->error_info = NULL;
	rcb->req = NULL;
	rcb->status = -1;
	rcb->tag = INVALID_ELEM;
	rcb->dvp = NULL;
	rcb->cdbp = NULL;
	rcb->softs = NULL;
	rcb->cm_flags = 0;
	rcb->cm_data = NULL;
	rcb->bcount = 0;
	rcb->nseg = 0;
	rcb->sgt = NULL;
	rcb->cm_ccb = NULL;
	rcb->encrypt_enable = false;
	rcb->ioaccel_handle = 0;
	rcb->resp_qid = 0;
	rcb->req_pending = false;
}

/*
 * Callback function for the LUN rescan.
 */
static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
}

/*
 * Function to rescan the LUN.
 */
static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
			int lun)
{
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct cam_path *path = NULL;

	DBG_FUNC("IN\n");

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("rescan failed (can't allocate CCB)\n");
		return;
	}
	status = xpt_create_path(&path, NULL,
		cam_sim_path(softs->os_specific.sim), target, lun);
	if (status != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP\n",
			status);
		xpt_free_ccb(ccb);
		return;
	}

	bzero(ccb, sizeof(union ccb));
	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;

	xpt_action(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Function to rescan the LUNs under each target.
 */
void smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
	int target = 0, lun = 0;

	DBG_FUNC("IN\n");

	for (target = 0; target < PQI_MAX_DEVICES; target++) {
		for (lun = 0; lun < PQI_MAX_MULTILUN; lun++) {
			if (softs->device_list[target][lun]) {
				smartpqi_lun_rescan(softs, target, lun);
			}
		}
	}

	DBG_FUNC("OUT\n");
}
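/*
 * CAM tag actions map one-for-one onto SOP task attributes:
 * MSG_SIMPLE_Q_TAG, MSG_HEAD_OF_Q_TAG and MSG_ORDERED_Q_TAG correspond to
 * the SIMPLE, HEAD OF QUEUE and ORDERED task attributes defined by SAM.
 */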
/*
 * Set the mode of tagged command queueing for the current task.
 */
uint8_t os_get_task_attr(rcb_t *rcb)
{
	union ccb *ccb = rcb->cm_ccb;
	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;

	switch (ccb->csio.tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
		break;
	case MSG_ORDERED_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
		break;
	case MSG_SIMPLE_Q_TAG:
	default:
		tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
		break;
	}
	return tag_action;
}

/*
 * Complete all outstanding commands when the device is gone.
 */
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
	int tag = 0;

	DBG_FUNC("IN\n");

	for (tag = 1; tag < softs->max_outstanding_io; tag++) {
		rcb_t *prcb = &softs->rcb[tag];
		if (prcb->req_pending && prcb->cm_ccb) {
			prcb->req_pending = false;
			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
			xpt_done((union ccb *)prcb->cm_ccb);
			prcb->cm_ccb = NULL;
		}
	}

	DBG_FUNC("OUT\n");
}
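/*
 * I/O submission path: pqisrc_io_start() below claims a tag from the
 * driver's tag list, fills in the request control block (rcb) backing the
 * CCB, and hands it to pqi_map_request(). Completion flows back through
 * os_io_response_success() or one of the error handlers above, each of
 * which finishes the CCB with xpt_done().
 */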
/*
 * IO handling functionality entry point.
 */
static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
	rcb_t *rcb;
	uint32_t tag, no_transfer = 0;
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);
	int32_t error = PQI_STATUS_FAILURE;
	pqi_scsi_dev_t *dvp;

	DBG_FUNC("IN\n");

	if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
		return PQI_STATUS_FAILURE;
	}

	dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	/* Check controller state */
	if (IN_PQI_RESET(softs)) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET
					| CAM_BUSY | CAM_REQ_INPROG;
		DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device state */
	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
		DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device reset */
	if (DEV_RESET(dvp)) {
		ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
		return error;
	}

	if (dvp->expose_device == false) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
		return error;
	}

	tag = pqisrc_get_tag(&softs->taglist);
	if (tag == INVALID_ELEM) {
		DBG_ERR("Get Tag failed\n");
		xpt_freeze_simq(softs->os_specific.sim, 1);
		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
		return PQI_STATUS_FAILURE;
	}

	DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);

	rcb = &softs->rcb[tag];
	os_reset_rcb(rcb);
	rcb->tag = tag;
	rcb->softs = softs;
	rcb->cmdlen = ccb->csio.cdb_len;
	ccb->ccb_h.sim_priv.entries[0].ptr = rcb;

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case CAM_DIR_OUT:
		rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
		break;
	case CAM_DIR_NONE:
		no_transfer = 1;
		break;
	default:
		DBG_ERR("Unknown data direction\n");
		break;
	}
	rcb->cm_ccb = ccb;
	rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];

	if (!no_transfer) {
		rcb->cm_data = (void *)ccb->csio.data_ptr;
		rcb->bcount = ccb->csio.dxfer_len;
	} else {
		rcb->cm_data = NULL;
		rcb->bcount = 0;
	}
	/*
	 * Submit the request to the adapter.
	 *
	 * Note that this may fail if we're unable to map the request (and
	 * if we ever learn a transport layer other than simple, may fail
	 * if the adapter rejects the command).
	 */
	if ((error = pqi_map_request(rcb)) != 0) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		if (error == EINPROGRESS) {
			DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
			error = 0;
		} else {
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			DBG_WARN("Requeue req error = %d target = %d\n", error,
				ccb->ccb_h.target_id);
			pqi_unmap_request(rcb);
		}
	}

	DBG_FUNC("OUT error = %d\n", error);
	return error;
}

/*
 * Abort a task - task management functionality.
 */
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
	uint32_t abort_tag = rcb->tag;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;
	uint16_t qid;

	DBG_FUNC("IN\n");

	qid = (uint16_t)rcb->resp_qid;

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;
	rcb->resp_qid = qid;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);

	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
		if (REQUEST_SUCCESS == rval) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		}
	}
	pqisrc_put_tag(&softs->taglist, abort_tag);
	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Abort a task set - task management functionality.
 */
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);

	if (rval == PQI_STATUS_SUCCESS) {
		rval = rcb->status;
	}

	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}
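/*
 * The task management requests above and the LUN reset below are
 * effectively synchronous: pqisrc_send_tmf() waits for the firmware's
 * response, after which rcb->status carries the outcome of the TMF.
 */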
/*
 * Target reset - task management functionality.
 */
static int
pqisrc_target_reset(pqisrc_softstate_t *softs, union ccb *ccb)
{
	pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	if (devp == NULL) {
		DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id);
		return (-1);
	}

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	devp->reset_in_progress = true;
	rval = pqisrc_send_tmf(softs, devp, rcb, 0,
		SOP_TASK_MANAGEMENT_LUN_RESET);
	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
	}
	devp->reset_in_progress = false;
	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return ((rval == REQUEST_SUCCESS) ?
			PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
}

/*
 * CAM entry point of the smartpqi module.
 */
static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	DBG_FUNC("IN\n");

	switch (ccb_h->func_code) {
	case XPT_SCSI_IO:
	{
		if (!pqisrc_io_start(sim, ccb)) {
			return;
		}
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		cam_calc_geometry(ccg, /* extended */ 1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_PATH_INQ:
	{
		update_sim_properties(sim, &ccb->cpi);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
		get_transport_settings(softs, &ccb->cts);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_ABORT:
		if (pqisrc_scsi_abort_task(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
			DBG_ERR("Abort task failed on %d\n",
				ccb->ccb_h.target_id);
			return;
		}
		break;
	case XPT_TERM_IO:
		if (pqisrc_scsi_abort_task_set(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			DBG_ERR("Abort task set failed on %d\n",
				ccb->ccb_h.target_id);
			xpt_done(ccb);
			return;
		}
		break;
	case XPT_RESET_DEV:
		if (pqisrc_target_reset(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			DBG_ERR("Target reset failed on %d\n",
				ccb->ccb_h.target_id);
			xpt_done(ccb);
			return;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		break;
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	default:
		DBG_WARN("UNSUPPORTED FUNC CODE\n");
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

	DBG_FUNC("OUT\n");
}
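/*
 * CCB completion convention in smartpqi_cam_action() above: every CCB is
 * returned to CAM exactly once via xpt_done(). Paths that complete the
 * CCB asynchronously (e.g. a successfully started XPT_SCSI_IO, finished
 * from the response-processing context) or that already called xpt_done()
 * return early; everything else falls through to the single xpt_done()
 * at the bottom of the switch.
 */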
/*
 * Function to poll for responses when interrupts are unavailable.
 * It also supports crash-dump processing.
 */
static void smartpqi_poll(struct cam_sim *sim)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	int i;

	for (i = 1; i < softs->intr_count; i++)
		pqisrc_process_response_queue(softs, i);
}

/*
 * Function to adjust the queue depth of a device.
 */
void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
{
	struct ccb_relsim crs;

	DBG_INFO("IN\n");

	xpt_setup_ccb(&crs.ccb_h, path, 5);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = CAM_DEV_QFREEZE;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = queue_depth;
	xpt_action((union ccb *)&crs);
	if (crs.ccb_h.status != CAM_REQ_CMP) {
		printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
	}

	DBG_INFO("OUT\n");
}

/*
 * Async callback; sets the queue depth when a new device is found.
 */
static void
smartpqi_async(void *callback_arg, u_int32_t code,
		struct cam_path *path, void *arg)
{
	struct pqisrc_softstate *softs;
	softs = (struct pqisrc_softstate *)callback_arg;

	DBG_FUNC("IN\n");

	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL) {
			break;
		}
		uint32_t t_id = cgd->ccb_h.target_id;

		if (t_id <= (PQI_CTLR_INDEX - 1)) {
			if (softs != NULL) {
				pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
				smartpqi_adjust_queue_depth(path,
						dvp->queue_depth);
			}
		}
		break;
	}
	default:
		break;
	}

	DBG_FUNC("OUT\n");
}
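/*
 * SIM registration order below matters: the transaction queue (devq) is
 * allocated first, the SIM is created on top of it and registered with
 * xpt_bus_register(), and the AC_FOUND_DEVICE async handler is hooked up
 * last so that queue depths can be adjusted as CAM discovers devices.
 */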
/*
 * Function to register the SIM with the CAM layer for the smartpqi driver.
 */
int register_sim(struct pqisrc_softstate *softs, int card_index)
{
	int error = 0;
	int max_transactions;
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct ccb_setasync csa;
	struct cam_sim *sim;

	DBG_FUNC("IN\n");

	max_transactions = softs->max_io_for_scsi_ml;
	softs->os_specific.devq = cam_simq_alloc(max_transactions);
	if (softs->os_specific.devq == NULL) {
		DBG_ERR("cam_simq_alloc failed txns = %d\n",
			max_transactions);
		return PQI_STATUS_FAILURE;
	}

	sim = cam_sim_alloc(smartpqi_cam_action,
		smartpqi_poll, "smartpqi", softs,
		card_index, &softs->os_specific.cam_lock,
		1, max_transactions, softs->os_specific.devq);
	if (sim == NULL) {
		DBG_ERR("cam_sim_alloc failed txns = %d\n",
			max_transactions);
		cam_simq_free(softs->os_specific.devq);
		return PQI_STATUS_FAILURE;
	}

	softs->os_specific.sim = sim;
	mtx_lock(&softs->os_specific.cam_lock);
	status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
	if (status != CAM_SUCCESS) {
		DBG_ERR("xpt_bus_register failed status=%d\n", status);
		cam_sim_free(softs->os_specific.sim, FALSE);
		cam_simq_free(softs->os_specific.devq);
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}

	softs->os_specific.sim_registered = TRUE;
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("xpt_alloc_ccb_nowait failed\n");
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			CAM_TARGET_WILDCARD,
			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path failed\n");
		xpt_free_ccb(ccb);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		cam_sim_free(softs->os_specific.sim, TRUE);
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}
	/*
	 * Callback to set the queue depth per target which is
	 * derived from the FW.
	 */
	softs->os_specific.path = ccb->ccb_h.path;
	/* The path has been saved; the wrapper CCB is no longer needed. */
	xpt_free_ccb(ccb);
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		DBG_ERR("Unable to register smartpqi_async handler: %d!\n",
			csa.ccb_h.status);
	}

	mtx_unlock(&softs->os_specific.cam_lock);
	DBG_INFO("OUT\n");
	return error;
}

/*
 * Function to deregister the smartpqi SIM from the CAM layer.
 */
void deregister_sim(struct pqisrc_softstate *softs)
{
	struct ccb_setasync csa;

	DBG_FUNC("IN\n");

	if (softs->os_specific.mtx_init) {
		mtx_lock(&softs->os_specific.cam_lock);
	}

	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	xpt_free_path(softs->os_specific.path);

	xpt_release_simq(softs->os_specific.sim, 0);

	xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
	softs->os_specific.sim_registered = FALSE;

	if (softs->os_specific.sim) {
		cam_sim_free(softs->os_specific.sim, FALSE);
		softs->os_specific.sim = NULL;
	}
	if (softs->os_specific.mtx_init) {
		mtx_unlock(&softs->os_specific.cam_lock);
	}
	if (softs->os_specific.devq != NULL) {
		cam_simq_free(softs->os_specific.devq);
	}
	if (softs->os_specific.mtx_init) {
		mtx_destroy(&softs->os_specific.cam_lock);
		softs->os_specific.mtx_init = FALSE;
	}

	mtx_destroy(&softs->os_specific.map_lock);

	DBG_FUNC("OUT\n");
}