/*-
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/isci/isci.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/isci/scil/intel_sas.h>

#include <dev/isci/scil/sci_util.h>

#include <dev/isci/scil/scif_io_request.h>
#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_remote_device.h>
#include <dev/isci/scil/scif_user_callback.h>

#include <dev/isci/scil/scic_io_request.h>
#include <dev/isci/scil/scic_user_callback.h>

/**
 * @brief This user callback will inform the user that an IO request has
 *        completed.
 *
 * @param[in] controller This parameter specifies the controller on
 *            which the IO request is completing.
 * @param[in] remote_device This parameter specifies the remote device on
 *            which this request is completing.
 * @param[in] io_request This parameter specifies the IO request that has
 *            completed.
 * @param[in] completion_status This parameter specifies the results of
 *            the IO request operation.  SCI_IO_SUCCESS indicates
 *            successful completion.
 *
 * @return none
 */
void
scif_cb_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    SCI_IO_REQUEST_HANDLE_T io_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)sci_object_get_association(io_request);

	scif_controller_complete_io(scif_controller, remote_device, io_request);
	isci_io_request_complete(scif_controller, remote_device, isci_request,
	    completion_status);
}
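
/**
 * @brief This routine performs the driver-level completion of an IO
 *        request.  It translates the SCIL completion status into a CAM
 *        status, syncs and unloads the request's DMA map, returns the
 *        request to the controller's request pool, and either completes
 *        the CCB back to CAM or leaves it queued on the remote device for
 *        a later retry.
 *
 * @param[in] scif_controller The framework controller handle.
 * @param[in] remote_device The framework remote device handle on which
 *            the request was executing.
 * @param[in] isci_request The driver request object being completed.
 * @param[in] completion_status The SCIL completion status for the request.
 */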
void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller;
	struct ISCI_REMOTE_DEVICE *isci_remote_device;
	union ccb *ccb;
	BOOL complete_ccb;

	complete_ccb = TRUE;
	isci_controller = (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
	isci_remote_device =
	    (struct ISCI_REMOTE_DEVICE *)sci_object_get_association(remote_device);

	ccb = isci_request->ccb;

	ccb->ccb_h.status &= ~CAM_STATUS_MASK;

	switch (completion_status) {
	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
#if __FreeBSD_version >= 900026
		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
			void *smp_response =
			    scif_io_request_get_response_iu_address(
				isci_request->sci_object);

			memcpy(ccb->smpio.smp_response, smp_response,
			    ccb->smpio.smp_response_len);
		}
#endif
		ccb->ccb_h.status |= CAM_REQ_CMP;
		break;

	case SCI_IO_SUCCESS_IO_DONE_EARLY:
		ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->csio.resid = ccb->csio.dxfer_len -
		    scif_io_request_get_number_of_bytes_transferred(
			isci_request->sci_object);
		break;

	case SCI_IO_FAILURE_RESPONSE_VALID:
	{
		SCI_SSP_RESPONSE_IU_T *response_buffer;
		uint32_t sense_length;
		int error_code, sense_key, asc, ascq;
		struct ccb_scsiio *csio = &ccb->csio;

		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
		    scif_io_request_get_response_iu_address(
			isci_request->sci_object);

		sense_length = sci_ssp_get_sense_data_length(
		    response_buffer->sense_data_length);

		sense_length = MIN(csio->sense_len, sense_length);

		memcpy(&csio->sense_data, response_buffer->data, sense_length);

		csio->sense_resid = csio->sense_len - sense_length;
		csio->scsi_status = response_buffer->status;
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		scsi_extract_sense(&csio->sense_data, &error_code, &sense_key,
		    &asc, &ascq);
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
		    csio->scsi_status, sense_key, asc, ascq);
		break;
	}

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		isci_remote_device_reset(isci_remote_device, NULL);
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(0, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x remote device reset required\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
		break;

	case SCI_IO_FAILURE_TERMINATED:
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(0, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
		break;

	case SCI_IO_FAILURE_INVALID_STATE:
	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
		complete_ccb = FALSE;
		break;

	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;

	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
	{
		struct ccb_relsim ccb_relsim;
		struct cam_path *path;

		xpt_create_path(&path, NULL,
		    cam_sim_path(isci_controller->sim),
		    isci_remote_device->index, 0);

		xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
		ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
		ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
		ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
		ccb_relsim.openings =
		    scif_remote_device_get_max_queue_depth(remote_device);
		xpt_action((union ccb *)&ccb_relsim);
		xpt_free_path(path);
		complete_ccb = FALSE;
	}
		break;

	case SCI_IO_FAILURE:
	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
	default:
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
		    completion_status);
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		break;
	}

	callout_stop(&isci_request->parent.timer);
	bus_dmamap_sync(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map);

	isci_request->ccb = NULL;

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_request);

	if (complete_ccb) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/* ccb will be completed with some type of non-success
			 * status.  So temporarily freeze the queue until the
			 * upper layers can act on the status.  The
			 * CAM_DEV_QFRZN flag will then release the queue
			 * after the status is acted upon.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
		}

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			TAILQ_REMOVE(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

			/*
			 * This CCB that was in the queue was completed, so
			 * set the in_progress pointer to NULL denoting that
			 * we can retry another CCB from the queue.  We only
			 * allow one CCB at a time from the queue to be
			 * in progress so that we can effectively maintain
			 * ordering.
			 */
			isci_remote_device->queued_ccb_in_progress = NULL;
		}

		if (isci_remote_device->frozen_lun_mask != 0) {
			isci_remote_device_release_device_queue(isci_remote_device);
		}

		xpt_done(ccb);

		if (isci_controller->is_frozen == TRUE) {
			isci_controller->is_frozen = FALSE;
			xpt_release_simq(isci_controller->sim, TRUE);
		}
	} else {
		isci_remote_device_freeze_lun_queue(isci_remote_device,
		    ccb->ccb_h.target_lun);

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			/*
			 * Do nothing, CCB is already on the device's queue.
			 * We leave it on the queue, to be retried again
			 * next time a CCB on this device completes, or we
			 * get a ready notification for this device.
			 */
			isci_log_message(1, "ISCI", "already queued %p %x\n",
			    ccb, ccb->csio.cdb_io.cdb_bytes[0]);

			isci_remote_device->queued_ccb_in_progress = NULL;

		} else {
			isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
			    ccb->csio.cdb_io.cdb_bytes[0]);
			ccb->ccb_h.status |= CAM_SIM_QUEUED;

			TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
		}
	}
}

/**
 * @brief This callback method asks the user to provide the physical
 *        address for the supplied virtual address when building an
 *        io request object.
 *
 * @param[in] controller This parameter is the core controller object
 *            handle.
 * @param[in] io_request This parameter is the io request object handle
 *            for which the physical address is being requested.
 * @param[in] virtual_address This parameter is the virtual address which
 *            is to be returned as a physical address.
 * @param[out] physical_address The physical address for the supplied virtual
 *             address.
 *
 * @return None.
 */
void
scic_cb_io_request_get_physical_address(SCI_CONTROLLER_HANDLE_T controller,
    SCI_IO_REQUEST_HANDLE_T io_request, void *virtual_address,
    SCI_PHYSICAL_ADDRESS *physical_address)
{
	SCI_IO_REQUEST_HANDLE_T scif_request =
	    sci_object_get_association(io_request);
	struct ISCI_REQUEST *isci_request =
	    sci_object_get_association(scif_request);

	if (isci_request != NULL) {
		/* isci_request is not NULL, meaning this is a request initiated
		 * by CAM or the isci layer (i.e. device reset for I/O
		 * timeout).  Therefore we can calculate the physical address
		 * based on the address we stored in the struct ISCI_REQUEST
		 * object.
		 */
		*physical_address = isci_request->physical_address +
		    (uintptr_t)virtual_address -
		    (uintptr_t)isci_request;
	} else {
		/* isci_request is NULL, meaning this is a request generated
		 * internally by SCIL (i.e. for SMP requests or NCQ error
		 * recovery).  Therefore we calculate the physical address
		 * based on the controller's uncached controller memory buffer,
		 * since we know that this is what SCIL uses for internal
		 * framework requests.
		 */
		SCI_CONTROLLER_HANDLE_T scif_controller =
		    (SCI_CONTROLLER_HANDLE_T)sci_object_get_association(controller);
		struct ISCI_CONTROLLER *isci_controller =
		    (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
		U64 virt_addr_offset = (uintptr_t)virtual_address -
		    (U64)isci_controller->uncached_controller_memory.virtual_address;

		*physical_address =
		    isci_controller->uncached_controller_memory.physical_address
		    + virt_addr_offset;
	}
}

/**
 * @brief This callback method asks the user to provide the address for
 *        the command descriptor block (CDB) associated with this IO request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the virtual address of the CDB.
 */
void *
scif_cb_io_request_get_cdb_address(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.cdb_io.cdb_bytes);
}

/**
 * @brief This callback method asks the user to provide the length of
 *        the command descriptor block (CDB) associated with this IO request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the length of the CDB.
 */
uint32_t
scif_cb_io_request_get_cdb_length(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.cdb_len);
}

/**
 * @brief This callback method asks the user to provide the Logical Unit (LUN)
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the LUN associated with this request.
 */
uint32_t
scif_cb_io_request_get_lun(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->ccb_h.target_lun);
}

/**
 * @brief This callback method asks the user to provide the task attribute
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the task attribute associated with this
 *         IO request.
 */
uint32_t
scif_cb_io_request_get_task_attribute(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
	uint32_t task_attribute;

	if ((isci_request->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0)
		switch (isci_request->ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			task_attribute = SCI_SAS_HEAD_OF_QUEUE_ATTRIBUTE;
			break;

		case MSG_ORDERED_Q_TAG:
			task_attribute = SCI_SAS_ORDERED_ATTRIBUTE;
			break;

		case MSG_ACA_TASK:
			task_attribute = SCI_SAS_ACA_ATTRIBUTE;
			break;

		default:
			task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;
			break;
		}
	else
		task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;

	return (task_attribute);
}

/**
 * @brief This callback method asks the user to provide the command priority
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the command priority associated with this
 *         IO request.
 */
uint32_t
scif_cb_io_request_get_command_priority(void * scif_user_io_request)
{
	return (0);
}

/**
 * @brief This method simply returns the virtual address associated
 *        with the scsi_io and byte_offset supplied parameters.
 *
 * @note This callback is not utilized in the fast path.  The expectation
 *       is that this method is utilized for items such as SCSI to ATA
 *       translation for commands like INQUIRY, READ CAPACITY, etc.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] byte_offset This parameter specifies the offset into the data
 *            buffers pointed to by the SGL.  The byte offset starts at 0
 *            and continues until the last byte pointed to by the last SGL
 *            element.
 *
 * @return A virtual address pointer to the location specified by the
 *         parameters.
 */
uint8_t *
scif_cb_io_request_get_virtual_address_from_sgl(void * scif_user_io_request,
    uint32_t byte_offset)
{
	struct ISCI_IO_REQUEST *isci_request;
	union ccb *ccb;

	isci_request = scif_user_io_request;
	ccb = isci_request->ccb;

	/*
	 * This callback is only invoked for SCSI/ATA translation of
	 * PIO commands such as INQUIRY and READ_CAPACITY, to allow
	 * the driver to write the translated data directly into the
	 * data buffer.  It is never invoked for READ/WRITE commands.
	 * The driver currently assumes only READ/WRITE commands will
	 * be unmapped.
	 *
	 * As a safeguard against future changes to unmapped commands,
	 * add an explicit panic here should the DATA_MASK != VADDR.
	 * Otherwise, we would return some garbage pointer back to the
	 * caller which would result in a panic or more subtle data
	 * corruption later on.
	 */
	if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
		panic("%s: requesting pointer into unmapped ccb", __func__);

	return (ccb->csio.data_ptr + byte_offset);
}

/**
 * @brief This callback method asks the user to provide the number of
 *        bytes to be transferred as part of this request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the number of payload data bytes to be
 *         transferred for this IO request.
 */
uint32_t
scif_cb_io_request_get_transfer_length(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.dxfer_len);
}

/**
 * @brief This callback method asks the user to provide the data direction
 *        for this request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the value of SCI_IO_REQUEST_DATA_OUT,
 *         SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA.
 */
SCI_IO_REQUEST_DATA_DIRECTION
scif_cb_io_request_get_data_direction(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	switch (isci_request->ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		return (SCI_IO_REQUEST_DATA_IN);
	case CAM_DIR_OUT:
		return (SCI_IO_REQUEST_DATA_OUT);
	default:
		return (SCI_IO_REQUEST_NO_DATA);
	}
}

/**
 * @brief This callback method asks the user to provide the address
 *        to where the next Scatter-Gather Element is located.
 *
 * Details regarding usage:
 *   - Regarding the first SGE: the user should initialize an index,
 *     or a pointer, prior to construction of the request that will
 *     reference the very first scatter-gather element.  This is
 *     important since this method is called for every scatter-gather
 *     element, including the first element.
 *   - Regarding the last SGE: the user should return NULL from this
 *     method when this method is called and the SGL has exhausted
 *     all elements.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] current_sge_address This parameter specifies the address for
 *            the current SGE (i.e. the one that has just been processed).
 * @param[out] next_sge An address specifying the location for the next scatter
 *             gather element to be processed.
 *
 * @return None.
 */
void
scif_cb_io_request_get_next_sge(void * scif_user_io_request,
    void * current_sge_address, void ** next_sge)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	if (isci_request->current_sge_index == isci_request->num_segments)
		*next_sge = NULL;
	else {
		bus_dma_segment_t *sge =
		    &isci_request->sge[isci_request->current_sge_index];

		isci_request->current_sge_index++;
		*next_sge = sge;
	}
}

/**
 * @brief This callback method asks the user to provide the contents of the
 *        "address" field in the Scatter-Gather Element.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] sge_address This parameter specifies the address for the
 *            SGE from which to retrieve the address field.
 *
 * @return A physical address specifying the contents of the SGE's address
 *         field.
 */
SCI_PHYSICAL_ADDRESS
scif_cb_sge_get_address_field(void *scif_user_io_request, void *sge_address)
{
	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;

	return ((SCI_PHYSICAL_ADDRESS)sge->ds_addr);
}

/**
 * @brief This callback method asks the user to provide the contents of the
 *        "length" field in the Scatter-Gather Element.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] sge_address This parameter specifies the address for the
 *            SGE from which to retrieve the length field.
 *
 * @return This method returns the length field specified inside the SGE
 *         referenced by the sge_address parameter.
 */
uint32_t
scif_cb_sge_get_length_field(void *scif_user_io_request, void *sge_address)
{
	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;

	return ((uint32_t)sge->ds_len);
}
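
/**
 * @brief This routine initializes a driver request object:  it records the
 *        controller handle, the DMA tag used for I/O buffers, and the
 *        physical address of the request memory, creates the DMA map used
 *        for data buffers, and initializes the request timeout callout.
 *
 * @param[in] request The driver request object to initialize.
 * @param[in] scif_controller_handle The framework controller handle.
 * @param[in] io_buffer_dma_tag The DMA tag used to map I/O buffers.
 * @param[in] physical_address The physical address of the request memory.
 */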
void
isci_request_construct(struct ISCI_REQUEST *request,
    SCI_CONTROLLER_HANDLE_T scif_controller_handle,
    bus_dma_tag_t io_buffer_dma_tag, bus_addr_t physical_address)
{

	request->controller_handle = scif_controller_handle;
	request->dma_tag = io_buffer_dma_tag;
	request->physical_address = physical_address;
	bus_dmamap_create(request->dma_tag, 0, &request->dma_map);
	callout_init(&request->timer, 1);
}

/*
 * busdma callback invoked once the CCB's data buffer has been mapped.
 * Constructs the SCIL I/O request over the mapped segments, starts the
 * I/O on the controller, and arms the request timeout callout.
 */
static void
isci_io_request_construct(void *arg, bus_dma_segment_t *seg, int nseg,
    int error)
{
	union ccb *ccb;
	struct ISCI_IO_REQUEST *io_request = (struct ISCI_IO_REQUEST *)arg;
	SCI_REMOTE_DEVICE_HANDLE_T *device = io_request->parent.remote_device_handle;
	SCI_STATUS status;

	io_request->num_segments = nseg;
	io_request->sge = seg;
	ccb = io_request->ccb;

	if (error != 0) {
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	status = scif_io_request_construct(
	    io_request->parent.controller_handle,
	    io_request->parent.remote_device_handle,
	    SCI_CONTROLLER_INVALID_IO_TAG, (void *)io_request,
	    (void *)((char *)io_request + sizeof(struct ISCI_IO_REQUEST)),
	    &io_request->sci_object);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	bus_dmamap_sync(io_request->parent.dma_tag, io_request->parent.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	status = (SCI_STATUS)scif_controller_start_io(
	    io_request->parent.controller_handle, device,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset_sbt(&io_request->parent.timer,
		    SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout,
		    io_request, 0);
}
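
/**
 * @brief This routine starts execution of a CAM SCSI I/O CCB.  It looks up
 *        the remote device for the CCB's target, allocates a request object
 *        from the controller's request pool, and maps the CCB's data
 *        buffer, which in turn triggers construction and start of the I/O
 *        in the busdma callback above.  If no device is present, the CCB is
 *        failed with CAM_DEV_NOT_THERE; if the request pool is empty, the
 *        CCB is requeued and the SIM queue is frozen.
 *
 * @param[in] ccb The CAM SCSI I/O CCB to execute.
 * @param[in] controller The controller on which to execute the CCB.
 */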
void
isci_io_request_execute_scsi_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	struct ISCI_REMOTE_DEVICE *device =
	    controller->remote_device[target_id];
	int error;

	if (device == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->current_sge_index = 0;
	io_request->parent.remote_device_handle = device->sci_object;

	error = bus_dmamap_load_ccb(io_request->parent.dma_tag,
	    io_request->parent.dma_map, ccb,
	    isci_io_request_construct, io_request, 0x0);

	/* A request that hits a BUSDMA resource shortage (EINPROGRESS) will
	 * be continued automatically at a later point, pushing the CCB
	 * processing forward, which will in turn unfreeze the simq.
	 */
	if (error == EINPROGRESS) {
		xpt_freeze_simq(controller->sim, 1);
		ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
	}
}

/*
 * Callout handler invoked when a started request exceeds its CCB timeout.
 * Resets the remote device under the controller lock.
 */
void
isci_io_request_timeout(void *arg)
{
	struct ISCI_IO_REQUEST *request = (struct ISCI_IO_REQUEST *)arg;
	struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *)
	    sci_object_get_association(request->parent.remote_device_handle);
	struct ISCI_CONTROLLER *controller = remote_device->domain->controller;

	mtx_lock(&controller->lock);
	isci_remote_device_reset(remote_device, NULL);
	mtx_unlock(&controller->lock);
}

#if __FreeBSD_version >= 900026
/**
 * @brief This callback method gets the size of and pointer to the buffer
 *        (if any) containing the request buffer for an SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 * @param[out] smp_request_buffer This parameter returns a pointer to the
 *             payload portion of the SMP request - i.e. everything after
 *             the SMP request header.
 *
 * @return Size of the request buffer in bytes.  This does *not* include
 *         the size of the SMP request header.
 */
static uint32_t
smp_io_request_cb_get_request_buffer(SCI_IO_REQUEST_HANDLE_T core_request,
    uint8_t ** smp_request_buffer)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));

	*smp_request_buffer = isci_request->ccb->smpio.smp_request +
	    sizeof(SMP_REQUEST_HEADER_T);

	return (isci_request->ccb->smpio.smp_request_len -
	    sizeof(SMP_REQUEST_HEADER_T));
}

/**
 * @brief This callback method gets the SMP function for an SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 *
 * @return SMP function for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_function(SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->function);
}

/**
 * @brief This callback method gets the SMP frame type for an SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 *
 * @return SMP frame type for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_frame_type(SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->smp_frame_type);
}

/**
 * @brief This callback method gets the allocated response length for an
 *        SMP request.
 *
 * @param[in] core_request This parameter specifies the SCI core's request
 *            object associated with the SMP request.
 *
 * @return Allocated response length for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_allocated_response_length(
    SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->allocated_response_length);
}
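
/**
 * @brief This routine constructs the SCIL framework request for an SMP
 *        passthrough I/O and, on success, constructs the core SMP
 *        passthrough request using the callbacks defined above to fetch
 *        the request buffer, function, frame type, and allocated response
 *        length from the CCB.
 *
 * @param[in] request The driver request object to construct.
 *
 * @return The SCI_STATUS returned by the framework and core construction
 *         routines.
 */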
static SCI_STATUS
isci_smp_request_construct(struct ISCI_IO_REQUEST *request)
{
	SCI_STATUS status;
	SCIC_SMP_PASSTHRU_REQUEST_CALLBACKS_T callbacks;

	status = scif_request_construct(request->parent.controller_handle,
	    request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG,
	    (void *)request,
	    (void *)((char *)request + sizeof(struct ISCI_IO_REQUEST)),
	    &request->sci_object);

	if (status == SCI_SUCCESS) {
		callbacks.scic_cb_smp_passthru_get_request =
		    &smp_io_request_cb_get_request_buffer;
		callbacks.scic_cb_smp_passthru_get_function =
		    &smp_io_request_cb_get_function;
		callbacks.scic_cb_smp_passthru_get_frame_type =
		    &smp_io_request_cb_get_frame_type;
		callbacks.scic_cb_smp_passthru_get_allocated_response_length =
		    &smp_io_request_cb_get_allocated_response_length;

		/* create the smp passthrough part of the io request */
		status = scic_io_request_construct_smp_pass_through(
		    scif_io_request_get_scic_handle(request->sci_object),
		    &callbacks);
	}

	return (status);
}
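
/**
 * @brief This routine starts execution of a CAM SMP I/O CCB.  The CCB is
 *        addressed to an end device, so the containing SMP device (i.e.
 *        the expander) is looked up first; direct-attached targets have no
 *        containing device and the CCB is failed with CAM_DEV_NOT_THERE.
 *        A request object is then allocated from the controller's pool,
 *        constructed as an SMP passthrough request, and started on the
 *        controller.
 *
 * @param[in] ccb The CAM SMP I/O CCB to execute.
 * @param[in] controller The controller on which to execute the CCB.
 */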
void
isci_io_request_execute_smp_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	SCI_STATUS status;
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle;
	struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id];

	/* SMP commands are sent to an end device, because SMP devices are not
	 * exposed to the kernel.  It is our responsibility to use this method
	 * to get the SMP device that contains the specified end device.  If
	 * the device is direct-attached, the handle will come back NULL, and
	 * we'll just fail the SMP_IO with DEV_NOT_THERE.
	 */
	scif_remote_device_get_containing_device(end_device->sci_object,
	    &smp_device_handle);

	if (smp_device_handle == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(end_device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->parent.remote_device_handle = smp_device_handle;

	status = isci_smp_request_construct(io_request);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	status = (SCI_STATUS)scif_controller_start_io(
	    controller->scif_controller_handle, smp_device_handle,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset_sbt(&io_request->parent.timer,
		    SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout,
		    request, 0);
}
#endif