/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <dev/isci/isci.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/isci/scil/intel_sas.h>

#include <dev/isci/scil/sci_util.h>

#include <dev/isci/scil/scif_io_request.h>
#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_remote_device.h>
#include <dev/isci/scil/scif_user_callback.h>

#include <dev/isci/scil/scic_io_request.h>
#include <dev/isci/scil/scic_user_callback.h>

/**
 * @brief This user callback will inform the user that an IO request has
 *        completed.
 *
 * @param[in] scif_controller This parameter specifies the controller on
 *            which the IO request is completing.
 * @param[in] remote_device This parameter specifies the remote device on
 *            which this request is completing.
 * @param[in] io_request This parameter specifies the IO request that has
 *            completed.
 * @param[in] completion_status This parameter specifies the results of
 *            the IO request operation.  SCI_IO_SUCCESS indicates
 *            successful completion.
 *
 * @return none
 */
void
scif_cb_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    SCI_IO_REQUEST_HANDLE_T io_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)sci_object_get_association(io_request);

	scif_controller_complete_io(scif_controller, remote_device, io_request);
	isci_io_request_complete(scif_controller, remote_device, isci_request,
	    completion_status);
}

void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller;
	struct ISCI_REMOTE_DEVICE *isci_remote_device;
	union ccb *ccb;
	BOOL complete_ccb;
	struct ccb_scsiio *csio;

	complete_ccb = TRUE;
	isci_controller =
	    (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
	isci_remote_device =
	    (struct ISCI_REMOTE_DEVICE *)sci_object_get_association(remote_device);

	ccb = isci_request->ccb;
	csio = &ccb->csio;
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;

	switch (completion_status) {
	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
			void *smp_response =
			    scif_io_request_get_response_iu_address(
				isci_request->sci_object);

			memcpy(ccb->smpio.smp_response, smp_response,
			    ccb->smpio.smp_response_len);
		}
		ccb->ccb_h.status |= CAM_REQ_CMP;
		break;

	case SCI_IO_SUCCESS_IO_DONE_EARLY:
		ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->csio.resid = ccb->csio.dxfer_len -
		    scif_io_request_get_number_of_bytes_transferred(
			isci_request->sci_object);
		break;

	case SCI_IO_FAILURE_RESPONSE_VALID:
	{
		SCI_SSP_RESPONSE_IU_T *response_buffer;
		uint32_t sense_length;
		int error_code, sense_key, asc, ascq;

		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
		    scif_io_request_get_response_iu_address(
			isci_request->sci_object);

		sense_length = sci_ssp_get_sense_data_length(
		    response_buffer->sense_data_length);

		sense_length = MIN(csio->sense_len, sense_length);

		memcpy(&csio->sense_data, response_buffer->data, sense_length);

		csio->sense_resid = csio->sense_len - sense_length;
		csio->scsi_status = response_buffer->status;
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		scsi_extract_sense(&csio->sense_data, &error_code, &sense_key,
		    &asc, &ascq);
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio),
		    csio->scsi_status, sense_key, asc, ascq);
		break;
	}

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		isci_remote_device_reset(isci_remote_device, NULL);
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(0, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x remote device reset required\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio));
		break;

	case SCI_IO_FAILURE_TERMINATED:
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(0, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio));
		break;

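	/*
	 * These two status values are treated as retryable: complete_ccb
	 * stays FALSE, so the CCB is placed on (or left on) the remote
	 * device's internal queue further below rather than being
	 * completed back to CAM.
	 */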
	case SCI_IO_FAILURE_INVALID_STATE:
	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
		complete_ccb = FALSE;
		break;

	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;

	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
	{
		struct ccb_relsim ccb_relsim;
		struct cam_path *path;

		xpt_create_path(&path, NULL,
		    cam_sim_path(isci_controller->sim),
		    isci_remote_device->index, 0);

		memset(&ccb_relsim, 0, sizeof(ccb_relsim));
		xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
		ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
		ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
		ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
		ccb_relsim.openings =
		    scif_remote_device_get_max_queue_depth(remote_device);
		xpt_action((union ccb *)&ccb_relsim);
		xpt_free_path(path);
		complete_ccb = FALSE;
	}
		break;

	case SCI_IO_FAILURE:
	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
	default:
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio),
		    completion_status);
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		break;
	}

	callout_stop(&isci_request->parent.timer);
	bus_dmamap_sync(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map);

	isci_request->ccb = NULL;

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_request);

	if (complete_ccb) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/* ccb will be completed with some type of non-success
			 * status.  So temporarily freeze the queue until the
			 * upper layers can act on the status.  The
			 * CAM_DEV_QFRZN flag will then release the queue
			 * after the status is acted upon.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
		}

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			TAILQ_REMOVE(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

			/*
			 * This CCB that was in the queue was completed, so
			 * set the in_progress pointer to NULL denoting that
			 * we can retry another CCB from the queue.  We only
			 * allow one CCB at a time from the queue to be
			 * in progress so that we can effectively maintain
			 * ordering.
			 */
			isci_remote_device->queued_ccb_in_progress = NULL;
		}

		if (isci_remote_device->frozen_lun_mask != 0) {
			isci_remote_device_release_device_queue(isci_remote_device);
		}

		xpt_done(ccb);

		if (isci_controller->is_frozen == TRUE) {
			isci_controller->is_frozen = FALSE;
			xpt_release_simq(isci_controller->sim, TRUE);
		}
	} else {
		isci_remote_device_freeze_lun_queue(isci_remote_device,
		    ccb->ccb_h.target_lun);

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			/*
			 * Do nothing, CCB is already on the device's queue.
			 * We leave it on the queue, to be retried again
			 * next time a CCB on this device completes, or we
			 * get a ready notification for this device.
			 */
			isci_log_message(1, "ISCI", "already queued %p %x\n",
			    ccb, scsiio_cdb_ptr(csio));

			isci_remote_device->queued_ccb_in_progress = NULL;

		} else {
			isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
			    scsiio_cdb_ptr(csio));
			ccb->ccb_h.status |= CAM_SIM_QUEUED;

			TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
		}
	}
}

/**
 * @brief This callback method asks the user to provide the physical
 *        address for the supplied virtual address when building an
 *        io request object.
 *
 * @param[in] controller This parameter is the core controller object
 *            handle.
 * @param[in] io_request This parameter is the io request object handle
 *            for which the physical address is being requested.
 * @param[in] virtual_address This parameter is the virtual address which
 *            is to be returned as a physical address.
 * @param[out] physical_address The physical address for the supplied virtual
 *             address.
 *
 * @return None.
 */
void
scic_cb_io_request_get_physical_address(SCI_CONTROLLER_HANDLE_T controller,
    SCI_IO_REQUEST_HANDLE_T io_request, void *virtual_address,
    SCI_PHYSICAL_ADDRESS *physical_address)
{
	SCI_IO_REQUEST_HANDLE_T scif_request =
	    sci_object_get_association(io_request);
	struct ISCI_REQUEST *isci_request =
	    sci_object_get_association(scif_request);

	if (isci_request != NULL) {
		/* isci_request is not NULL, meaning this is a request initiated
		 * by CAM or the isci layer (i.e. device reset for I/O
		 * timeout).  Therefore we can calculate the physical address
		 * based on the address we stored in the struct ISCI_REQUEST
		 * object.
		 */
		*physical_address = isci_request->physical_address +
		    (uintptr_t)virtual_address -
		    (uintptr_t)isci_request;
	} else {
		/* isci_request is NULL, meaning this is a request generated
		 * internally by SCIL (i.e. for SMP requests or NCQ error
		 * recovery).  Therefore we calculate the physical address
		 * based on the controller's uncached controller memory buffer,
		 * since we know that this is what SCIL uses for internal
		 * framework requests.
		 */
		SCI_CONTROLLER_HANDLE_T scif_controller =
		    (SCI_CONTROLLER_HANDLE_T)sci_object_get_association(controller);
		struct ISCI_CONTROLLER *isci_controller =
		    (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
		U64 virt_addr_offset = (uintptr_t)virtual_address -
		    (U64)isci_controller->uncached_controller_memory.virtual_address;

		*physical_address =
		    isci_controller->uncached_controller_memory.physical_address
		    + virt_addr_offset;
	}
}

/**
 * @brief This callback method asks the user to provide the address for
 *        the command descriptor block (CDB) associated with this IO request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the virtual address of the CDB.
 */
void *
scif_cb_io_request_get_cdb_address(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (scsiio_cdb_ptr(&isci_request->ccb->csio));
}

/**
 * @brief This callback method asks the user to provide the length of
 *        the command descriptor block (CDB) associated with this IO request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the length of the CDB.
 */
uint32_t
scif_cb_io_request_get_cdb_length(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.cdb_len);
}

/**
 * @brief This callback method asks the user to provide the Logical Unit (LUN)
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the LUN associated with this request.
 */
uint32_t
scif_cb_io_request_get_lun(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->ccb_h.target_lun);
}

/**
 * @brief This callback method asks the user to provide the task attribute
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the task attribute associated with this
 *         IO request.
 */
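/*
 * CAM tag actions (MSG_*_TAG) are translated to the corresponding SAS task
 * attributes in the switch below; untagged requests, and MSG_SIMPLE_Q_TAG,
 * both end up as SCI_SAS_SIMPLE_ATTRIBUTE.
 */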
uint32_t
scif_cb_io_request_get_task_attribute(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
	uint32_t task_attribute;

	if ((isci_request->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0)
		switch (isci_request->ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			task_attribute = SCI_SAS_HEAD_OF_QUEUE_ATTRIBUTE;
			break;

		case MSG_ORDERED_Q_TAG:
			task_attribute = SCI_SAS_ORDERED_ATTRIBUTE;
			break;

		case MSG_ACA_TASK:
			task_attribute = SCI_SAS_ACA_ATTRIBUTE;
			break;

		default:
			task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;
			break;
		}
	else
		task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;

	return (task_attribute);
}

/**
 * @brief This callback method asks the user to provide the command priority
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the command priority associated with this
 *         IO request.
 */
uint32_t
scif_cb_io_request_get_command_priority(void * scif_user_io_request)
{
	return (0);
}

/**
 * @brief This method simply returns the virtual address associated
 *        with the scsi_io and byte_offset supplied parameters.
 *
 * @note This callback is not utilized in the fast path.  The expectation
 *       is that this method is utilized for items such as SCSI to ATA
 *       translation for commands like INQUIRY, READ CAPACITY, etc.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] byte_offset This parameter specifies the offset into the data
 *            buffers pointed to by the SGL.  The byte offset starts at 0
 *            and continues until the last byte pointed to by the last SGL
 *            element.
 *
 * @return A virtual address pointer to the location specified by the
 *         parameters.
 */
uint8_t *
scif_cb_io_request_get_virtual_address_from_sgl(void * scif_user_io_request,
    uint32_t byte_offset)
{
	struct ISCI_IO_REQUEST *isci_request;
	union ccb *ccb;

	isci_request = scif_user_io_request;
	ccb = isci_request->ccb;

	/*
	 * This callback is only invoked for SCSI/ATA translation of
	 * PIO commands such as INQUIRY and READ_CAPACITY, to allow
	 * the driver to write the translated data directly into the
	 * data buffer.  It is never invoked for READ/WRITE commands.
	 * The driver currently assumes only READ/WRITE commands will
	 * be unmapped.
	 *
	 * As a safeguard against future changes to unmapped commands,
	 * add an explicit panic here should the DATA_MASK != VADDR.
	 * Otherwise, we would return some garbage pointer back to the
	 * caller which would result in a panic or more subtle data
	 * corruption later on.
	 */
	if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
		panic("%s: requesting pointer into unmapped ccb", __func__);

	return (ccb->csio.data_ptr + byte_offset);
}

/**
 * @brief This callback method asks the user to provide the number of
 *        bytes to be transferred as part of this request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the number of payload data bytes to be
 *         transferred for this IO request.
 */
uint32_t
scif_cb_io_request_get_transfer_length(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.dxfer_len);
}

/**
 * @brief This callback method asks the user to provide the data direction
 *        for this request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the value of SCI_IO_REQUEST_DATA_OUT,
 *         SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA.
 */
SCI_IO_REQUEST_DATA_DIRECTION
scif_cb_io_request_get_data_direction(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	switch (isci_request->ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		return (SCI_IO_REQUEST_DATA_IN);
	case CAM_DIR_OUT:
		return (SCI_IO_REQUEST_DATA_OUT);
	default:
		return (SCI_IO_REQUEST_NO_DATA);
	}
}

/**
 * @brief This callback method asks the user to provide the address
 *        to where the next Scatter-Gather Element is located.
 *
 * Details regarding usage:
 *   - Regarding the first SGE: the user should initialize an index,
 *     or a pointer, prior to construction of the request that will
 *     reference the very first scatter-gather element.  This is
 *     important since this method is called for every scatter-gather
 *     element, including the first element.
 *   - Regarding the last SGE: the user should return NULL from this
 *     method when this method is called and the SGL has exhausted
 *     all elements.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] current_sge_address This parameter specifies the address for
 *            the current SGE (i.e. the one that has just been processed).
 * @param[out] next_sge An address specifying the location for the next scatter
 *             gather element to be processed.
 *
 * @return None.
 */
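/*
 * Note: current_sge_address is not used below; the iteration state lives in
 * isci_request->current_sge_index, which is reset to zero when the request
 * is set up, and the busdma segment array built for the CCB serves as the
 * SGL.
 */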
void
scif_cb_io_request_get_next_sge(void * scif_user_io_request,
    void * current_sge_address, void ** next_sge)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	if (isci_request->current_sge_index == isci_request->num_segments)
		*next_sge = NULL;
	else {
		bus_dma_segment_t *sge =
		    &isci_request->sge[isci_request->current_sge_index];

		isci_request->current_sge_index++;
		*next_sge = sge;
	}
}

/**
 * @brief This callback method asks the user to provide the contents of the
 *        "address" field in the Scatter-Gather Element.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] sge_address This parameter specifies the address for the
 *            SGE from which to retrieve the address field.
 *
 * @return A physical address specifying the contents of the SGE's address
 *         field.
 */
SCI_PHYSICAL_ADDRESS
scif_cb_sge_get_address_field(void *scif_user_io_request, void *sge_address)
{
	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;

	return ((SCI_PHYSICAL_ADDRESS)sge->ds_addr);
}

/**
 * @brief This callback method asks the user to provide the contents of the
 *        "length" field in the Scatter-Gather Element.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] sge_address This parameter specifies the address for the
 *            SGE from which to retrieve the length field.
 *
 * @return This method returns the length field specified inside the SGE
 *         referenced by the sge_address parameter.
 */
uint32_t
scif_cb_sge_get_length_field(void *scif_user_io_request, void *sge_address)
{
	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;

	return ((uint32_t)sge->ds_len);
}

void
isci_request_construct(struct ISCI_REQUEST *request,
    SCI_CONTROLLER_HANDLE_T scif_controller_handle,
    bus_dma_tag_t io_buffer_dma_tag, bus_addr_t physical_address)
{

	request->controller_handle = scif_controller_handle;
	request->dma_tag = io_buffer_dma_tag;
	request->physical_address = physical_address;
	bus_dmamap_create(request->dma_tag, 0, &request->dma_map);
	callout_init(&request->timer, 1);
}

static void
isci_io_request_construct(void *arg, bus_dma_segment_t *seg, int nseg,
    int error)
{
	union ccb *ccb;
	struct ISCI_IO_REQUEST *io_request = (struct ISCI_IO_REQUEST *)arg;
	SCI_REMOTE_DEVICE_HANDLE_T *device = io_request->parent.remote_device_handle;
	SCI_STATUS status;

	io_request->num_segments = nseg;
	io_request->sge = seg;
	ccb = io_request->ccb;

	if (error != 0) {
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	status = scif_io_request_construct(
	    io_request->parent.controller_handle,
	    io_request->parent.remote_device_handle,
	    SCI_CONTROLLER_INVALID_IO_TAG, (void *)io_request,
	    (void *)((char*)io_request + sizeof(struct ISCI_IO_REQUEST)),
	    &io_request->sci_object);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	bus_dmamap_sync(io_request->parent.dma_tag, io_request->parent.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	status = (SCI_STATUS)scif_controller_start_io(
	    io_request->parent.controller_handle, device,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset_sbt(&io_request->parent.timer,
		    SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout,
		    io_request, 0);
}

void
isci_io_request_execute_scsi_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	struct ISCI_REMOTE_DEVICE *device =
	    controller->remote_device[target_id];
	int error;

	if (device == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->current_sge_index = 0;
	io_request->parent.remote_device_handle = device->sci_object;

	error = bus_dmamap_load_ccb(io_request->parent.dma_tag,
	    io_request->parent.dma_map, ccb,
	    isci_io_request_construct, io_request, 0x0);
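	/*
	 * bus_dmamap_load_ccb() may call isci_io_request_construct()
	 * synchronously; mapping failures other than a deferral are
	 * delivered through the callback's error argument and fail the
	 * CCB with CAM_REQ_INVALID there.
	 */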
	/* A resource shortage from BUSDMA will be automatically
	 * continued at a later point, pushing the CCB processing
	 * forward, which will in turn unfreeze the simq.
	 */
	if (error == EINPROGRESS) {
		xpt_freeze_simq(controller->sim, 1);
		ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
	}
}

void
isci_io_request_timeout(void *arg)
{
	struct ISCI_IO_REQUEST *request = (struct ISCI_IO_REQUEST *)arg;
	struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *)
	    sci_object_get_association(request->parent.remote_device_handle);
	struct ISCI_CONTROLLER *controller = remote_device->domain->controller;

	mtx_lock(&controller->lock);
	isci_remote_device_reset(remote_device, NULL);
	mtx_unlock(&controller->lock);
}

/**
 * @brief This callback method gets the size of and pointer to the buffer
 *        (if any) containing the request buffer for an SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 * @param[out] smp_request_buffer This parameter returns a pointer to the
 *             payload portion of the SMP request - i.e. everything after
 *             the SMP request header.
 *
 * @return Size of the request buffer in bytes.  This does *not* include
 *         the size of the SMP request header.
 */
static uint32_t
smp_io_request_cb_get_request_buffer(SCI_IO_REQUEST_HANDLE_T core_request,
    uint8_t ** smp_request_buffer)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));

	*smp_request_buffer = isci_request->ccb->smpio.smp_request +
	    sizeof(SMP_REQUEST_HEADER_T);

	return (isci_request->ccb->smpio.smp_request_len -
	    sizeof(SMP_REQUEST_HEADER_T));
}

/**
 * @brief This callback method gets the SMP function for an SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 *
 * @return SMP function for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_function(SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->function);
}

/**
 * @brief This callback method gets the SMP frame type for an SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 *
 * @return SMP frame type for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_frame_type(SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->smp_frame_type);
}

/**
 * @brief This callback method gets the allocated response length for an
 *        SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 *
 * @return Allocated response length for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_allocated_response_length(
    SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->allocated_response_length);
}

static SCI_STATUS
isci_smp_request_construct(struct ISCI_IO_REQUEST *request)
{
	SCI_STATUS status;
	SCIC_SMP_PASSTHRU_REQUEST_CALLBACKS_T callbacks;

	status = scif_request_construct(request->parent.controller_handle,
	    request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG,
	    (void *)request,
	    (void *)((char*)request + sizeof(struct ISCI_IO_REQUEST)),
	    &request->sci_object);

	if (status == SCI_SUCCESS) {
		callbacks.scic_cb_smp_passthru_get_request =
		    &smp_io_request_cb_get_request_buffer;
		callbacks.scic_cb_smp_passthru_get_function =
		    &smp_io_request_cb_get_function;
		callbacks.scic_cb_smp_passthru_get_frame_type =
		    &smp_io_request_cb_get_frame_type;
		callbacks.scic_cb_smp_passthru_get_allocated_response_length =
		    &smp_io_request_cb_get_allocated_response_length;

		/* create the smp passthrough part of the io request */
		status = scic_io_request_construct_smp_pass_through(
		    scif_io_request_get_scic_handle(request->sci_object),
		    &callbacks);
	}

	return (status);
}

void
isci_io_request_execute_smp_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	SCI_STATUS status;
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle;
	struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id];

	/* SMP commands are sent to an end device, because SMP devices are not
	 * exposed to the kernel.  It is our responsibility to use this method
	 * to get the SMP device that contains the specified end device.  If
	 * the device is direct-attached, the handle will come back NULL, and
	 * we'll just fail the SMP_IO with DEV_NOT_THERE.
	 */
	scif_remote_device_get_containing_device(end_device->sci_object,
	    &smp_device_handle);

	if (smp_device_handle == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(end_device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->parent.remote_device_handle = smp_device_handle;

	status = isci_smp_request_construct(io_request);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	status = (SCI_STATUS)scif_controller_start_io(
	    controller->scif_controller_handle, smp_device_handle,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset_sbt(&io_request->parent.timer,
		    SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout,
		    request, 0);
}