/*-
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/isci/isci.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/isci/scil/intel_sas.h>

#include <dev/isci/scil/sci_util.h>

#include <dev/isci/scil/scif_io_request.h>
#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_remote_device.h>
#include <dev/isci/scil/scif_user_callback.h>

#include <dev/isci/scil/scic_io_request.h>
#include <dev/isci/scil/scic_user_callback.h>

/**
 * @brief This user callback will inform the user that an IO request has
 *        completed.
 *
 * @param[in] scif_controller This parameter specifies the controller on
 *            which the IO request is completing.
 * @param[in] remote_device This parameter specifies the remote device on
 *            which this request is completing.
 * @param[in] io_request This parameter specifies the IO request that has
 *            completed.
 * @param[in] completion_status This parameter specifies the results of
 *            the IO request operation.  SCI_IO_SUCCESS indicates
 *            successful completion.
 *
 * @return none
 */
void
scif_cb_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    SCI_IO_REQUEST_HANDLE_T io_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)sci_object_get_association(io_request);

	scif_controller_complete_io(scif_controller, remote_device, io_request);
	isci_io_request_complete(scif_controller, remote_device, isci_request,
	    completion_status);
}

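/*
 * Complete an isci I/O request: translate the SCI completion status into
 * a CAM status, stop the timeout callout, tear down the DMA mapping, and
 * return the request to the controller's pool.  The CCB is then either
 * completed back to CAM or left on the remote device's internal queue so
 * it can be retried later.
 */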
void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller;
	struct ISCI_REMOTE_DEVICE *isci_remote_device;
	union ccb *ccb;
	BOOL complete_ccb;

	complete_ccb = TRUE;
	isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller);
	isci_remote_device =
	    (struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device);

	ccb = isci_request->ccb;

	ccb->ccb_h.status &= ~CAM_STATUS_MASK;

	switch (completion_status) {
	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
#if __FreeBSD_version >= 900026
		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
			void *smp_response =
			    scif_io_request_get_response_iu_address(
				isci_request->sci_object);

			memcpy(ccb->smpio.smp_response, smp_response,
			    ccb->smpio.smp_response_len);
		}
#endif
		ccb->ccb_h.status |= CAM_REQ_CMP;
		break;

	case SCI_IO_SUCCESS_IO_DONE_EARLY:
		ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->csio.resid = ccb->csio.dxfer_len -
		    scif_io_request_get_number_of_bytes_transferred(
			isci_request->sci_object);
		break;

	case SCI_IO_FAILURE_RESPONSE_VALID:
	{
		SCI_SSP_RESPONSE_IU_T * response_buffer;
		uint32_t sense_length;
		int error_code, sense_key, asc, ascq;
		struct ccb_scsiio *csio = &ccb->csio;

		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
		    scif_io_request_get_response_iu_address(
			isci_request->sci_object);

		sense_length = sci_ssp_get_sense_data_length(
		    response_buffer->sense_data_length);

		sense_length = MIN(csio->sense_len, sense_length);

		memcpy(&csio->sense_data, response_buffer->data, sense_length);

		csio->sense_resid = csio->sense_len - sense_length;
		csio->scsi_status = response_buffer->status;
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		scsi_extract_sense( &csio->sense_data, &error_code, &sense_key,
		    &asc, &ascq );
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
		    csio->scsi_status, sense_key, asc, ascq);
		break;
	}

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		isci_remote_device_reset(isci_remote_device, NULL);

		/* drop through */
	case SCI_IO_FAILURE_TERMINATED:
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
		break;

	case SCI_IO_FAILURE_INVALID_STATE:
	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
		complete_ccb = FALSE;
		break;

	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;

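	/*
	 * No NCQ tag was available for this device.  Rather than failing
	 * the CCB, ask CAM to reduce the number of openings to the queue
	 * depth reported by the framework, and leave the CCB on the
	 * device's internal queue so it can be retried later.
	 */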
	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
	{
		struct ccb_relsim ccb_relsim;
		struct cam_path *path;

		xpt_create_path(&path, NULL,
		    cam_sim_path(isci_controller->sim),
		    isci_remote_device->index, 0);

		xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
		ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
		ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
		ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
		ccb_relsim.openings =
		    scif_remote_device_get_max_queue_depth(remote_device);
		xpt_action((union ccb *)&ccb_relsim);
		xpt_free_path(path);
		complete_ccb = FALSE;
	}
	break;

	case SCI_IO_FAILURE:
	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
	default:
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
		    completion_status);
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		break;
	}

	callout_stop(&isci_request->parent.timer);
	bus_dmamap_sync(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map);

	isci_request->ccb = NULL;

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_request);

	if (complete_ccb) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/* ccb will be completed with some type of non-success
			 *  status.  So temporarily freeze the queue until the
			 *  upper layers can act on the status.  The
			 *  CAM_DEV_QFRZN flag will then release the queue
			 *  after the status is acted upon.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
		}

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			TAILQ_REMOVE(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

			/*
			 * This CCB that was in the queue was completed, so
			 * set the in_progress pointer to NULL denoting that
			 * we can retry another CCB from the queue.  We only
			 * allow one CCB at a time from the queue to be
			 * in progress so that we can effectively maintain
			 * ordering.
			 */
			isci_remote_device->queued_ccb_in_progress = NULL;
		}

		if (isci_remote_device->frozen_lun_mask != 0) {
			isci_remote_device_release_device_queue(isci_remote_device);
		}

		xpt_done(ccb);

		if (isci_controller->is_frozen == TRUE) {
			isci_controller->is_frozen = FALSE;
			xpt_release_simq(isci_controller->sim, TRUE);
		}
	} else {
		isci_remote_device_freeze_lun_queue(isci_remote_device,
		    ccb->ccb_h.target_lun);

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			/*
			 * Do nothing, CCB is already on the device's queue.
			 * We leave it on the queue, to be retried again
			 * next time a CCB on this device completes, or we
			 * get a ready notification for this device.
			 */
			isci_log_message(1, "ISCI", "already queued %p %x\n",
			    ccb, ccb->csio.cdb_io.cdb_bytes[0]);

			isci_remote_device->queued_ccb_in_progress = NULL;

		} else {
			isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
			    ccb->csio.cdb_io.cdb_bytes[0]);
			ccb->ccb_h.status |= CAM_SIM_QUEUED;

			TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
		}
	}
}

/**
 * @brief This callback method asks the user to provide the physical
 *        address for the supplied virtual address when building an
 *        io request object.
 *
 * @param[in] controller This parameter is the core controller object
 *            handle.
 * @param[in] io_request This parameter is the io request object handle
 *            for which the physical address is being requested.
 * @param[in] virtual_address This parameter is the virtual address which
 *            is to be returned as a physical address.
 * @param[out] physical_address The physical address for the supplied virtual
 *             address.
 *
 * @return None.
 */
void
scic_cb_io_request_get_physical_address(SCI_CONTROLLER_HANDLE_T controller,
    SCI_IO_REQUEST_HANDLE_T io_request, void *virtual_address,
    SCI_PHYSICAL_ADDRESS *physical_address)
{
	SCI_IO_REQUEST_HANDLE_T scif_request =
	    sci_object_get_association(io_request);
	struct ISCI_REQUEST *isci_request =
	    sci_object_get_association(scif_request);

	if(isci_request != NULL) {
		/* isci_request is not NULL, meaning this is a request initiated
		 *  by CAM or the isci layer (i.e. device reset for I/O
		 *  timeout).  Therefore we can calculate the physical address
		 *  based on the address we stored in the struct ISCI_REQUEST
		 *  object.
		 */
		*physical_address = isci_request->physical_address +
		    (uintptr_t)virtual_address -
		    (uintptr_t)isci_request;
	} else {
		/* isci_request is NULL, meaning this is a request generated
		 *  internally by SCIL (i.e. for SMP requests or NCQ error
		 *  recovery).  Therefore we calculate the physical address
		 *  based on the controller's uncached controller memory buffer,
		 *  since we know that this is what SCIL uses for internal
		 *  framework requests.
		 */
		SCI_CONTROLLER_HANDLE_T scif_controller =
		    (SCI_CONTROLLER_HANDLE_T) sci_object_get_association(controller);
		struct ISCI_CONTROLLER *isci_controller =
		    (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
		U64 virt_addr_offset = (uintptr_t)virtual_address -
		    (U64)isci_controller->uncached_controller_memory.virtual_address;

		*physical_address =
		    isci_controller->uncached_controller_memory.physical_address
		    + virt_addr_offset;
	}
}

/**
 * @brief This callback method asks the user to provide the address for
 *        the command descriptor block (CDB) associated with this IO request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the virtual address of the CDB.
 */
void *
scif_cb_io_request_get_cdb_address(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.cdb_io.cdb_bytes);
}

/**
 * @brief This callback method asks the user to provide the length of
 *        the command descriptor block (CDB) associated with this IO request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the length of the CDB.
 */
uint32_t
scif_cb_io_request_get_cdb_length(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.cdb_len);
}

/**
 * @brief This callback method asks the user to provide the Logical Unit (LUN)
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the LUN associated with this request.
 */
uint32_t
scif_cb_io_request_get_lun(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->ccb_h.target_lun);
}

/**
 * @brief This callback method asks the user to provide the task attribute
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the task attribute associated with this
 *         IO request.
 */
uint32_t
scif_cb_io_request_get_task_attribute(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
	uint32_t task_attribute;

	if((isci_request->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0)
		switch(isci_request->ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			task_attribute = SCI_SAS_HEAD_OF_QUEUE_ATTRIBUTE;
			break;

		case MSG_ORDERED_Q_TAG:
			task_attribute = SCI_SAS_ORDERED_ATTRIBUTE;
			break;

		case MSG_ACA_TASK:
			task_attribute = SCI_SAS_ACA_ATTRIBUTE;
			break;

		default:
			task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;
			break;
		}
	else
		task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;

	return (task_attribute);
}

/**
 * @brief This callback method asks the user to provide the command priority
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the command priority associated with this
 *         IO request.
 */
uint32_t
scif_cb_io_request_get_command_priority(void * scif_user_io_request)
{
	return (0);
}

/**
 * @brief This method simply returns the virtual address associated
 *        with the scsi_io and byte_offset supplied parameters.
 *
 * @note This callback is not utilized in the fast path.  The expectation
 *       is that this method is utilized for items such as SCSI to ATA
 *       translation for commands like INQUIRY, READ CAPACITY, etc.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] byte_offset This parameter specifies the offset into the data
 *            buffers pointed to by the SGL.  The byte offset starts at 0
 *            and continues until the last byte pointed to by the last SGL
 *            element.
 *
 * @return A virtual address pointer to the location specified by the
 *         parameters.
 */
uint8_t *
scif_cb_io_request_get_virtual_address_from_sgl(void * scif_user_io_request,
    uint32_t byte_offset)
{
	struct ISCI_IO_REQUEST *isci_request;
	union ccb *ccb;

	isci_request = scif_user_io_request;
	ccb = isci_request->ccb;

	/*
	 * This callback is only invoked for SCSI/ATA translation of
	 * PIO commands such as INQUIRY and READ_CAPACITY, to allow
	 * the driver to write the translated data directly into the
	 * data buffer.  It is never invoked for READ/WRITE commands.
	 * The driver currently assumes only READ/WRITE commands will
	 * be unmapped.
	 *
	 * As a safeguard against future changes to unmapped commands,
	 * add an explicit panic here should the DATA_MASK != VADDR.
	 * Otherwise, we would return some garbage pointer back to the
	 * caller which would result in a panic or more subtle data
	 * corruption later on.
	 */
	if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
		panic("%s: requesting pointer into unmapped ccb", __func__);

	return (ccb->csio.data_ptr + byte_offset);
}

/**
 * @brief This callback method asks the user to provide the number of
 *        bytes to be transferred as part of this request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the number of payload data bytes to be
 *         transferred for this IO request.
 */
uint32_t
scif_cb_io_request_get_transfer_length(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.dxfer_len);
}

/**
 * @brief This callback method asks the user to provide the data direction
 *        for this request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the value of SCI_IO_REQUEST_DATA_OUT,
 *         SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA.
 */
SCI_IO_REQUEST_DATA_DIRECTION
scif_cb_io_request_get_data_direction(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	switch (isci_request->ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		return (SCI_IO_REQUEST_DATA_IN);
	case CAM_DIR_OUT:
		return (SCI_IO_REQUEST_DATA_OUT);
	default:
		return (SCI_IO_REQUEST_NO_DATA);
	}
}

/**
 * @brief This callback method asks the user to provide the address
 *        to where the next Scatter-Gather Element is located.
 *
 * Details regarding usage:
 *   - Regarding the first SGE: the user should initialize an index,
 *     or a pointer, prior to construction of the request that will
 *     reference the very first scatter-gather element.  This is
 *     important since this method is called for every scatter-gather
 *     element, including the first element.
 *   - Regarding the last SGE: the user should return NULL from this
 *     method when this method is called and the SGL has exhausted
 *     all elements.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] current_sge_address This parameter specifies the address for
 *            the current SGE (i.e. the one that has just been processed).
 * @param[out] next_sge An address specifying the location for the next scatter
 *             gather element to be processed.
 *
 * @return None.
 */
void
scif_cb_io_request_get_next_sge(void * scif_user_io_request,
    void * current_sge_address, void ** next_sge)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	if (isci_request->current_sge_index == isci_request->num_segments)
		*next_sge = NULL;
	else {
		bus_dma_segment_t *sge =
		    &isci_request->sge[isci_request->current_sge_index];

		isci_request->current_sge_index++;
		*next_sge = sge;
	}
}

/**
 * @brief This callback method asks the user to provide the contents of the
 *        "address" field in the Scatter-Gather Element.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] sge_address This parameter specifies the address for the
 *            SGE from which to retrieve the address field.
 *
 * @return A physical address specifying the contents of the SGE's address
 *         field.
 */
SCI_PHYSICAL_ADDRESS
scif_cb_sge_get_address_field(void *scif_user_io_request, void *sge_address)
{
	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;

	return ((SCI_PHYSICAL_ADDRESS)sge->ds_addr);
}

/**
 * @brief This callback method asks the user to provide the contents of the
 *        "length" field in the Scatter-Gather Element.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] sge_address This parameter specifies the address for the
 *            SGE from which to retrieve the length field.
 *
 * @return This method returns the length field specified inside the SGE
 *         referenced by the sge_address parameter.
 */
uint32_t
scif_cb_sge_get_length_field(void *scif_user_io_request, void *sge_address)
{
	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;

	return ((uint32_t)sge->ds_len);
}

void
isci_request_construct(struct ISCI_REQUEST *request,
    SCI_CONTROLLER_HANDLE_T scif_controller_handle,
    bus_dma_tag_t io_buffer_dma_tag, bus_addr_t physical_address)
{

	request->controller_handle = scif_controller_handle;
	request->dma_tag = io_buffer_dma_tag;
	request->physical_address = physical_address;
	bus_dmamap_create(request->dma_tag, 0, &request->dma_map);
	callout_init(&request->timer, CALLOUT_MPSAFE);
}

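/*
 * Callback handed to bus_dmamap_load_ccb() when a SCSI I/O is executed.
 * Once busdma has mapped the CCB's data buffer, construct the SCIF I/O
 * request around the segment list and start it on the controller.  Any
 * construction or start failure is routed through the normal completion
 * path via isci_io_request_complete().
 */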
static void
isci_io_request_construct(void *arg, bus_dma_segment_t *seg, int nseg,
    int error)
{
	union ccb *ccb;
	struct ISCI_IO_REQUEST *io_request = (struct ISCI_IO_REQUEST *)arg;
	SCI_REMOTE_DEVICE_HANDLE_T *device = io_request->parent.remote_device_handle;
	SCI_STATUS status;

	io_request->num_segments = nseg;
	io_request->sge = seg;
	ccb = io_request->ccb;

	if (error != 0) {
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	status = scif_io_request_construct(
	    io_request->parent.controller_handle,
	    io_request->parent.remote_device_handle,
	    SCI_CONTROLLER_INVALID_IO_TAG, (void *)io_request,
	    (void *)((char*)io_request + sizeof(struct ISCI_IO_REQUEST)),
	    &io_request->sci_object);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	bus_dmamap_sync(io_request->parent.dma_tag, io_request->parent.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	status = (SCI_STATUS)scif_controller_start_io(
	    io_request->parent.controller_handle, device,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset(&io_request->parent.timer, ccb->ccb_h.timeout,
		    isci_io_request_timeout, io_request);
}

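/*
 * Execute a SCSI I/O CCB.  A free request is pulled from the controller's
 * pool and the CCB's data buffer is handed to busdma; construction and
 * start of the I/O then happen in the isci_io_request_construct()
 * callback once the mapping is complete.  If the pool is empty, the CCB
 * is requeued and the SIM queue is frozen until an outstanding request
 * completes.
 */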
void
isci_io_request_execute_scsi_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	struct ISCI_REMOTE_DEVICE *device =
	    controller->remote_device[target_id];
	int error;

	if (device == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->current_sge_index = 0;
	io_request->parent.remote_device_handle = device->sci_object;

	error = bus_dmamap_load_ccb(io_request->parent.dma_tag,
	    io_request->parent.dma_map, ccb,
	    isci_io_request_construct, io_request, 0x0);
	/* A resource shortage from BUSDMA will be automatically
	 * continued at a later point, pushing the CCB processing
	 * forward, which will in turn unfreeze the simq.
	 */
	if (error == EINPROGRESS) {
		xpt_freeze_simq(controller->sim, 1);
		ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
	}
}

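/*
 * Callout handler armed when an I/O is started with a finite CAM timeout.
 * If it fires, the I/O has not completed in time, so a reset of the
 * remote device is initiated under the controller lock.
 */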
void
isci_io_request_timeout(void *arg)
{
	struct ISCI_IO_REQUEST *request = (struct ISCI_IO_REQUEST *)arg;
	struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *)
	    sci_object_get_association(request->parent.remote_device_handle);
	struct ISCI_CONTROLLER *controller = remote_device->domain->controller;

	mtx_lock(&controller->lock);
	isci_remote_device_reset(remote_device, NULL);
	mtx_unlock(&controller->lock);
}

#if __FreeBSD_version >= 900026
/**
 * @brief This callback method gets the size of and pointer to the buffer
 *        (if any) containing the request buffer for an SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 * @param[out] smp_request_buffer This parameter returns a pointer to the
 *             payload portion of the SMP request - i.e. everything after
 *             the SMP request header.
 *
 * @return Size of the request buffer in bytes.  This does *not* include
 *         the size of the SMP request header.
 */
static uint32_t
smp_io_request_cb_get_request_buffer(SCI_IO_REQUEST_HANDLE_T core_request,
    uint8_t ** smp_request_buffer)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));

	*smp_request_buffer = isci_request->ccb->smpio.smp_request +
	    sizeof(SMP_REQUEST_HEADER_T);

	return (isci_request->ccb->smpio.smp_request_len -
	    sizeof(SMP_REQUEST_HEADER_T));
}

/**
 * @brief This callback method gets the SMP function for an SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 *
 * @return SMP function for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_function(SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->function);
}

/**
 * @brief This callback method gets the SMP frame type for an SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 *
 * @return SMP frame type for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_frame_type(SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->smp_frame_type);
}

/**
 * @brief This callback method gets the allocated response length for an
 *        SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 *
 * @return Allocated response length for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_allocated_response_length(
    SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->allocated_response_length);
}

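/*
 * Construct an SMP passthrough request.  The framework request is built
 * first, then the core request is configured with the callbacks above so
 * that SCIL can pull the SMP function, frame type, allocated response
 * length, and request payload directly out of the CCB.
 */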
static SCI_STATUS
isci_smp_request_construct(struct ISCI_IO_REQUEST *request)
{
	SCI_STATUS status;
	SCIC_SMP_PASSTHRU_REQUEST_CALLBACKS_T callbacks;

	status = scif_request_construct(request->parent.controller_handle,
	    request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG,
	    (void *)request,
	    (void *)((char*)request + sizeof(struct ISCI_IO_REQUEST)),
	    &request->sci_object);

	if (status == SCI_SUCCESS) {
		callbacks.scic_cb_smp_passthru_get_request =
		    &smp_io_request_cb_get_request_buffer;
		callbacks.scic_cb_smp_passthru_get_function =
		    &smp_io_request_cb_get_function;
		callbacks.scic_cb_smp_passthru_get_frame_type =
		    &smp_io_request_cb_get_frame_type;
		callbacks.scic_cb_smp_passthru_get_allocated_response_length =
		    &smp_io_request_cb_get_allocated_response_length;

		/* create the smp passthrough part of the io request */
		status = scic_io_request_construct_smp_pass_through(
		    scif_io_request_get_scic_handle(request->sci_object),
		    &callbacks);
	}

	return (status);
}

void
isci_io_request_execute_smp_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	SCI_STATUS status;
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle;
	struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id];

	/* SMP commands are sent to an end device, because SMP devices are not
	 *  exposed to the kernel.  It is our responsibility to use this method
	 *  to get the SMP device that contains the specified end device.  If
	 *  the device is direct-attached, the handle will come back NULL, and
	 *  we'll just fail the SMP_IO with DEV_NOT_THERE.
	 */
	scif_remote_device_get_containing_device(end_device->sci_object,
	    &smp_device_handle);

	if (smp_device_handle == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(end_device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->parent.remote_device_handle = smp_device_handle;

	status = isci_smp_request_construct(io_request);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	status = (SCI_STATUS) scif_controller_start_io(
	    controller->scif_controller_handle, smp_device_handle,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset(&io_request->parent.timer, ccb->ccb_h.timeout,
		    isci_io_request_timeout, request);
}
#endif