1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <dev/isci/isci.h> 35 36 #include <sys/conf.h> 37 #include <sys/malloc.h> 38 39 #include <cam/cam_periph.h> 40 #include <cam/cam_xpt_periph.h> 41 42 #include <dev/isci/scil/sci_memory_descriptor_list.h> 43 #include <dev/isci/scil/sci_memory_descriptor_list_decorator.h> 44 45 #include <dev/isci/scil/scif_controller.h> 46 #include <dev/isci/scil/scif_library.h> 47 #include <dev/isci/scil/scif_io_request.h> 48 #include <dev/isci/scil/scif_task_request.h> 49 #include <dev/isci/scil/scif_remote_device.h> 50 #include <dev/isci/scil/scif_domain.h> 51 #include <dev/isci/scil/scif_user_callback.h> 52 53 void isci_action(struct cam_sim *sim, union ccb *ccb); 54 void isci_poll(struct cam_sim *sim); 55 56 #define ccb_sim_ptr sim_priv.entries[0].ptr 57 58 /** 59 * @brief This user callback will inform the user that the controller has 60 * had a serious unexpected error. The user should not the error, 61 * disable interrupts, and wait for current ongoing processing to 62 * complete. Subsequently, the user should reset the controller. 63 * 64 * @param[in] controller This parameter specifies the controller that had 65 * an error. 66 * 67 * @return none 68 */ 69 void scif_cb_controller_error(SCI_CONTROLLER_HANDLE_T controller, 70 SCI_CONTROLLER_ERROR error) 71 { 72 73 isci_log_message(0, "ISCI", "scif_cb_controller_error: 0x%x\n", 74 error); 75 } 76 77 /** 78 * @brief This user callback will inform the user that the controller has 79 * finished the start process. 80 * 81 * @param[in] controller This parameter specifies the controller that was 82 * started. 83 * @param[in] completion_status This parameter specifies the results of 84 * the start operation. SCI_SUCCESS indicates successful 85 * completion. 86 * 87 * @return none 88 */ 89 void scif_cb_controller_start_complete(SCI_CONTROLLER_HANDLE_T controller, 90 SCI_STATUS completion_status) 91 { 92 uint32_t index; 93 struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *) 94 sci_object_get_association(controller); 95 96 isci_controller->is_started = TRUE; 97 98 /* Set bits for all domains. We will clear them one-by-one once 99 * the domains complete discovery, or return error when calling 100 * scif_domain_discover. Once all bits are clear, we will register 101 * the controller with CAM. 102 */ 103 isci_controller->initial_discovery_mask = (1 << SCI_MAX_DOMAINS) - 1; 104 105 for(index = 0; index < SCI_MAX_DOMAINS; index++) { 106 SCI_STATUS status; 107 SCI_DOMAIN_HANDLE_T domain = 108 isci_controller->domain[index].sci_object; 109 110 status = scif_domain_discover( 111 domain, 112 scif_domain_get_suggested_discover_timeout(domain), 113 DEVICE_TIMEOUT 114 ); 115 116 if (status != SCI_SUCCESS) 117 { 118 isci_controller_domain_discovery_complete( 119 isci_controller, &isci_controller->domain[index]); 120 } 121 } 122 } 123 124 /** 125 * @brief This user callback will inform the user that the controller has 126 * finished the stop process. Note, after user calls 127 * scif_controller_stop(), before user receives this controller stop 128 * complete callback, user should not expect any callback from 129 * framework, such like scif_cb_domain_change_notification(). 130 * 131 * @param[in] controller This parameter specifies the controller that was 132 * stopped. 133 * @param[in] completion_status This parameter specifies the results of 134 * the stop operation. SCI_SUCCESS indicates successful 135 * completion. 136 * 137 * @return none 138 */ 139 void scif_cb_controller_stop_complete(SCI_CONTROLLER_HANDLE_T controller, 140 SCI_STATUS completion_status) 141 { 142 struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *) 143 sci_object_get_association(controller); 144 145 isci_controller->is_started = FALSE; 146 } 147 148 /** 149 * @brief This method will be invoked to allocate memory dynamically. 150 * 151 * @param[in] controller This parameter represents the controller 152 * object for which to allocate memory. 153 * @param[out] mde This parameter represents the memory descriptor to 154 * be filled in by the user that will reference the newly 155 * allocated memory. 156 * 157 * @return none 158 */ 159 void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller, 160 SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde) 161 { 162 163 } 164 165 /** 166 * @brief This method will be invoked to allocate memory dynamically. 167 * 168 * @param[in] controller This parameter represents the controller 169 * object for which to allocate memory. 170 * @param[out] mde This parameter represents the memory descriptor to 171 * be filled in by the user that will reference the newly 172 * allocated memory. 173 * 174 * @return none 175 */ 176 void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller, 177 SCI_PHYSICAL_MEMORY_DESCRIPTOR_T * mde) 178 { 179 180 } 181 182 void isci_controller_construct(struct ISCI_CONTROLLER *controller, 183 struct isci_softc *isci) 184 { 185 SCI_CONTROLLER_HANDLE_T scif_controller_handle; 186 187 scif_library_allocate_controller(isci->sci_library_handle, 188 &scif_controller_handle); 189 190 scif_controller_construct(isci->sci_library_handle, 191 scif_controller_handle, NULL); 192 193 controller->isci = isci; 194 controller->scif_controller_handle = scif_controller_handle; 195 196 /* This allows us to later use 197 * sci_object_get_association(scif_controller_handle) 198 * inside of a callback routine to get our struct ISCI_CONTROLLER object 199 */ 200 sci_object_set_association(scif_controller_handle, (void *)controller); 201 202 controller->is_started = FALSE; 203 controller->is_frozen = FALSE; 204 controller->sim = NULL; 205 controller->initial_discovery_mask = 0; 206 207 sci_fast_list_init(&controller->pending_device_reset_list); 208 209 mtx_init(&controller->lock, "isci", NULL, MTX_DEF); 210 211 uint32_t domain_index; 212 213 for(domain_index = 0; domain_index < SCI_MAX_DOMAINS; domain_index++) { 214 isci_domain_construct( &controller->domain[domain_index], 215 domain_index, controller); 216 } 217 218 controller->timer_memory = malloc( 219 sizeof(struct ISCI_TIMER) * SCI_MAX_TIMERS, M_ISCI, 220 M_NOWAIT | M_ZERO); 221 222 sci_pool_initialize(controller->timer_pool); 223 224 struct ISCI_TIMER *timer = (struct ISCI_TIMER *) 225 controller->timer_memory; 226 227 for ( int i = 0; i < SCI_MAX_TIMERS; i++ ) { 228 sci_pool_put(controller->timer_pool, timer++); 229 } 230 } 231 232 SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller) 233 { 234 SCIC_USER_PARAMETERS_T scic_user_parameters; 235 SCI_CONTROLLER_HANDLE_T scic_controller_handle; 236 unsigned long tunable; 237 int i; 238 239 scic_controller_handle = 240 scif_controller_get_scic_handle(controller->scif_controller_handle); 241 242 if (controller->isci->oem_parameters_found == TRUE) 243 { 244 scic_oem_parameters_set( 245 scic_controller_handle, 246 &controller->oem_parameters, 247 (uint8_t)(controller->oem_parameters_version)); 248 } 249 250 scic_user_parameters_get(scic_controller_handle, &scic_user_parameters); 251 252 if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable)) 253 scic_user_parameters.sds1.no_outbound_task_timeout = 254 (uint8_t)tunable; 255 256 if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable)) 257 scic_user_parameters.sds1.ssp_max_occupancy_timeout = 258 (uint16_t)tunable; 259 260 if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable)) 261 scic_user_parameters.sds1.stp_max_occupancy_timeout = 262 (uint16_t)tunable; 263 264 if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable)) 265 scic_user_parameters.sds1.ssp_inactivity_timeout = 266 (uint16_t)tunable; 267 268 if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable)) 269 scic_user_parameters.sds1.stp_inactivity_timeout = 270 (uint16_t)tunable; 271 272 if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable)) 273 for (i = 0; i < SCI_MAX_PHYS; i++) 274 scic_user_parameters.sds1.phys[i].max_speed_generation = 275 (uint8_t)tunable; 276 277 scic_user_parameters_set(scic_controller_handle, &scic_user_parameters); 278 279 /* Scheduler bug in SCU requires SCIL to reserve some task contexts as a 280 * a workaround - one per domain. 281 */ 282 controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS; 283 284 if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth", 285 &controller->queue_depth)) { 286 controller->queue_depth = max(1, min(controller->queue_depth, 287 SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS)); 288 } 289 290 /* Reserve one request so that we can ensure we have one available TC 291 * to do internal device resets. 292 */ 293 controller->sim_queue_depth = controller->queue_depth - 1; 294 295 /* Although we save one TC to do internal device resets, it is possible 296 * we could end up using several TCs for simultaneous device resets 297 * while at the same time having CAM fill our controller queue. To 298 * simulate this condition, and how our driver handles it, we can set 299 * this io_shortage parameter, which will tell CAM that we have a 300 * large queue depth than we really do. 301 */ 302 uint32_t io_shortage = 0; 303 TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage); 304 controller->sim_queue_depth += io_shortage; 305 306 /* Attach to CAM using xpt_bus_register now, then immediately freeze 307 * the simq. It will get released later when initial domain discovery 308 * is complete. 309 */ 310 controller->has_been_scanned = FALSE; 311 mtx_lock(&controller->lock); 312 isci_controller_attach_to_cam(controller); 313 xpt_freeze_simq(controller->sim, 1); 314 mtx_unlock(&controller->lock); 315 316 return (scif_controller_initialize(controller->scif_controller_handle)); 317 } 318 319 int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller) 320 { 321 int error; 322 device_t device = controller->isci->device; 323 uint32_t max_segment_size = isci_io_request_get_max_io_size(); 324 uint32_t status = 0; 325 struct ISCI_MEMORY *uncached_controller_memory = 326 &controller->uncached_controller_memory; 327 struct ISCI_MEMORY *cached_controller_memory = 328 &controller->cached_controller_memory; 329 struct ISCI_MEMORY *request_memory = 330 &controller->request_memory; 331 POINTER_UINT virtual_address; 332 bus_addr_t physical_address; 333 334 controller->mdl = sci_controller_get_memory_descriptor_list_handle( 335 controller->scif_controller_handle); 336 337 uncached_controller_memory->size = sci_mdl_decorator_get_memory_size( 338 controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS); 339 340 error = isci_allocate_dma_buffer(device, uncached_controller_memory); 341 342 if (error != 0) 343 return (error); 344 345 sci_mdl_decorator_assign_memory( controller->mdl, 346 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS, 347 uncached_controller_memory->virtual_address, 348 uncached_controller_memory->physical_address); 349 350 cached_controller_memory->size = sci_mdl_decorator_get_memory_size( 351 controller->mdl, 352 SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS 353 ); 354 355 error = isci_allocate_dma_buffer(device, cached_controller_memory); 356 357 if (error != 0) 358 return (error); 359 360 sci_mdl_decorator_assign_memory(controller->mdl, 361 SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS, 362 cached_controller_memory->virtual_address, 363 cached_controller_memory->physical_address); 364 365 request_memory->size = 366 controller->queue_depth * isci_io_request_get_object_size(); 367 368 error = isci_allocate_dma_buffer(device, request_memory); 369 370 if (error != 0) 371 return (error); 372 373 /* For STP PIO testing, we want to ensure we can force multiple SGLs 374 * since this has been a problem area in SCIL. This tunable parameter 375 * will allow us to force DMA segments to a smaller size, ensuring 376 * that even if a physically contiguous buffer is attached to this 377 * I/O, the DMA subsystem will pass us multiple segments in our DMA 378 * load callback. 379 */ 380 TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size); 381 382 /* Create DMA tag for our I/O requests. Then we can create DMA maps based off 383 * of this tag and store them in each of our ISCI_IO_REQUEST objects. This 384 * will enable better performance than creating the DMA maps everytime we get 385 * an I/O. 386 */ 387 status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0, 388 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 389 isci_io_request_get_max_io_size(), 390 SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL, 391 &controller->buffer_dma_tag); 392 393 sci_pool_initialize(controller->request_pool); 394 395 virtual_address = request_memory->virtual_address; 396 physical_address = request_memory->physical_address; 397 398 for (int i = 0; i < controller->queue_depth; i++) { 399 struct ISCI_REQUEST *request = 400 (struct ISCI_REQUEST *)virtual_address; 401 402 isci_request_construct(request, 403 controller->scif_controller_handle, 404 controller->buffer_dma_tag, physical_address); 405 406 sci_pool_put(controller->request_pool, request); 407 408 virtual_address += isci_request_get_object_size(); 409 physical_address += isci_request_get_object_size(); 410 } 411 412 uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) + 413 scif_remote_device_get_object_size(); 414 415 controller->remote_device_memory = (uint8_t *) malloc( 416 remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI, 417 M_NOWAIT | M_ZERO); 418 419 sci_pool_initialize(controller->remote_device_pool); 420 421 uint8_t *remote_device_memory_ptr = controller->remote_device_memory; 422 423 for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { 424 struct ISCI_REMOTE_DEVICE *remote_device = 425 (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr; 426 427 controller->remote_device[i] = NULL; 428 remote_device->index = i; 429 remote_device->is_resetting = FALSE; 430 remote_device->frozen_lun_mask = 0; 431 sci_fast_list_element_init(remote_device, 432 &remote_device->pending_device_reset_element); 433 434 /* 435 * For the first SCI_MAX_DOMAINS device objects, do not put 436 * them in the pool, rather assign them to each domain. This 437 * ensures that any device attached directly to port "i" will 438 * always get CAM target id "i". 439 */ 440 if (i < SCI_MAX_DOMAINS) 441 controller->domain[i].da_remote_device = remote_device; 442 else 443 sci_pool_put(controller->remote_device_pool, 444 remote_device); 445 remote_device_memory_ptr += remote_device_size; 446 } 447 448 return (0); 449 } 450 451 void isci_controller_start(void *controller_handle) 452 { 453 struct ISCI_CONTROLLER *controller = 454 (struct ISCI_CONTROLLER *)controller_handle; 455 SCI_CONTROLLER_HANDLE_T scif_controller_handle = 456 controller->scif_controller_handle; 457 458 scif_controller_start(scif_controller_handle, 459 scif_controller_get_suggested_start_timeout(scif_controller_handle)); 460 461 scic_controller_enable_interrupts( 462 scif_controller_get_scic_handle(controller->scif_controller_handle)); 463 } 464 465 void isci_controller_domain_discovery_complete( 466 struct ISCI_CONTROLLER *isci_controller, struct ISCI_DOMAIN *isci_domain) 467 { 468 if (!isci_controller->has_been_scanned) 469 { 470 /* Controller has not been scanned yet. We'll clear 471 * the discovery bit for this domain, then check if all bits 472 * are now clear. That would indicate that all domains are 473 * done with discovery and we can then proceed with initial 474 * scan. 475 */ 476 477 isci_controller->initial_discovery_mask &= 478 ~(1 << isci_domain->index); 479 480 if (isci_controller->initial_discovery_mask == 0) { 481 struct isci_softc *driver = isci_controller->isci; 482 uint8_t next_index = isci_controller->index + 1; 483 484 isci_controller->has_been_scanned = TRUE; 485 486 /* Unfreeze simq to allow initial scan to proceed. */ 487 xpt_release_simq(isci_controller->sim, TRUE); 488 489 #if __FreeBSD_version < 800000 490 /* When driver is loaded after boot, we need to 491 * explicitly rescan here for versions <8.0, because 492 * CAM only automatically scans new buses at boot 493 * time. 494 */ 495 union ccb *ccb = xpt_alloc_ccb_nowait(); 496 497 xpt_create_path(&ccb->ccb_h.path, xpt_periph, 498 cam_sim_path(isci_controller->sim), 499 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 500 501 xpt_rescan(ccb); 502 #endif 503 504 if (next_index < driver->controller_count) { 505 /* There are more controllers that need to 506 * start. So start the next one. 507 */ 508 isci_controller_start( 509 &driver->controllers[next_index]); 510 } 511 else 512 { 513 /* All controllers have been started and completed discovery. 514 * Disestablish the config hook while will signal to the 515 * kernel during boot that it is safe to try to find and 516 * mount the root partition. 517 */ 518 config_intrhook_disestablish( 519 &driver->config_hook); 520 } 521 } 522 } 523 } 524 525 int isci_controller_attach_to_cam(struct ISCI_CONTROLLER *controller) 526 { 527 struct isci_softc *isci = controller->isci; 528 device_t parent = device_get_parent(isci->device); 529 int unit = device_get_unit(isci->device); 530 struct cam_devq *isci_devq = cam_simq_alloc(controller->sim_queue_depth); 531 532 if(isci_devq == NULL) { 533 isci_log_message(0, "ISCI", "isci_devq is NULL \n"); 534 return (-1); 535 } 536 537 controller->sim = cam_sim_alloc(isci_action, isci_poll, "isci", 538 controller, unit, &controller->lock, controller->sim_queue_depth, 539 controller->sim_queue_depth, isci_devq); 540 541 if(controller->sim == NULL) { 542 isci_log_message(0, "ISCI", "cam_sim_alloc... fails\n"); 543 cam_simq_free(isci_devq); 544 return (-1); 545 } 546 547 if(xpt_bus_register(controller->sim, parent, controller->index) 548 != CAM_SUCCESS) { 549 isci_log_message(0, "ISCI", "xpt_bus_register...fails \n"); 550 cam_sim_free(controller->sim, TRUE); 551 mtx_unlock(&controller->lock); 552 return (-1); 553 } 554 555 if(xpt_create_path(&controller->path, NULL, 556 cam_sim_path(controller->sim), CAM_TARGET_WILDCARD, 557 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 558 isci_log_message(0, "ISCI", "xpt_create_path....fails\n"); 559 xpt_bus_deregister(cam_sim_path(controller->sim)); 560 cam_sim_free(controller->sim, TRUE); 561 mtx_unlock(&controller->lock); 562 return (-1); 563 } 564 565 return (0); 566 } 567 568 void isci_poll(struct cam_sim *sim) 569 { 570 struct ISCI_CONTROLLER *controller = 571 (struct ISCI_CONTROLLER *)cam_sim_softc(sim); 572 573 isci_interrupt_poll_handler(controller); 574 } 575 576 void isci_action(struct cam_sim *sim, union ccb *ccb) 577 { 578 struct ISCI_CONTROLLER *controller = 579 (struct ISCI_CONTROLLER *)cam_sim_softc(sim); 580 581 switch ( ccb->ccb_h.func_code ) { 582 case XPT_PATH_INQ: 583 { 584 struct ccb_pathinq *cpi = &ccb->cpi; 585 int bus = cam_sim_bus(sim); 586 ccb->ccb_h.ccb_sim_ptr = sim; 587 cpi->version_num = 1; 588 cpi->hba_inquiry = PI_TAG_ABLE; 589 cpi->target_sprt = 0; 590 cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN; 591 cpi->hba_eng_cnt = 0; 592 cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1; 593 cpi->max_lun = ISCI_MAX_LUN; 594 #if __FreeBSD_version >= 800102 595 cpi->maxio = isci_io_request_get_max_io_size(); 596 #endif 597 cpi->unit_number = cam_sim_unit(sim); 598 cpi->bus_id = bus; 599 cpi->initiator_id = SCI_MAX_REMOTE_DEVICES; 600 cpi->base_transfer_speed = 300000; 601 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 602 strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN); 603 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 604 cpi->transport = XPORT_SAS; 605 cpi->transport_version = 0; 606 cpi->protocol = PROTO_SCSI; 607 cpi->protocol_version = SCSI_REV_SPC2; 608 cpi->ccb_h.status = CAM_REQ_CMP; 609 xpt_done(ccb); 610 } 611 break; 612 case XPT_GET_TRAN_SETTINGS: 613 { 614 struct ccb_trans_settings *general_settings = &ccb->cts; 615 struct ccb_trans_settings_sas *sas_settings = 616 &general_settings->xport_specific.sas; 617 struct ccb_trans_settings_scsi *scsi_settings = 618 &general_settings->proto_specific.scsi; 619 struct ISCI_REMOTE_DEVICE *remote_device; 620 621 remote_device = controller->remote_device[ccb->ccb_h.target_id]; 622 623 if (remote_device == NULL) { 624 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 625 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 626 ccb->ccb_h.status |= CAM_DEV_NOT_THERE; 627 xpt_done(ccb); 628 break; 629 } 630 631 general_settings->protocol = PROTO_SCSI; 632 general_settings->transport = XPORT_SAS; 633 general_settings->protocol_version = SCSI_REV_SPC2; 634 general_settings->transport_version = 0; 635 scsi_settings->valid = CTS_SCSI_VALID_TQ; 636 scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB; 637 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 638 ccb->ccb_h.status |= CAM_REQ_CMP; 639 640 sas_settings->bitrate = 641 isci_remote_device_get_bitrate(remote_device); 642 643 if (sas_settings->bitrate != 0) 644 sas_settings->valid = CTS_SAS_VALID_SPEED; 645 646 xpt_done(ccb); 647 } 648 break; 649 case XPT_SCSI_IO: 650 isci_io_request_execute_scsi_io(ccb, controller); 651 break; 652 #if __FreeBSD_version >= 900026 653 case XPT_SMP_IO: 654 isci_io_request_execute_smp_io(ccb, controller); 655 break; 656 #endif 657 case XPT_SET_TRAN_SETTINGS: 658 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 659 ccb->ccb_h.status |= CAM_REQ_CMP; 660 xpt_done(ccb); 661 break; 662 case XPT_CALC_GEOMETRY: 663 cam_calc_geometry(&ccb->ccg, /*extended*/1); 664 xpt_done(ccb); 665 break; 666 case XPT_RESET_DEV: 667 { 668 struct ISCI_REMOTE_DEVICE *remote_device = 669 controller->remote_device[ccb->ccb_h.target_id]; 670 671 if (remote_device != NULL) 672 isci_remote_device_reset(remote_device, ccb); 673 else { 674 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 675 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 676 ccb->ccb_h.status |= CAM_DEV_NOT_THERE; 677 xpt_done(ccb); 678 } 679 } 680 break; 681 case XPT_RESET_BUS: 682 ccb->ccb_h.status = CAM_REQ_CMP; 683 xpt_done(ccb); 684 break; 685 default: 686 isci_log_message(0, "ISCI", "Unhandled func_code 0x%x\n", 687 ccb->ccb_h.func_code); 688 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 689 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 690 ccb->ccb_h.status |= CAM_REQ_INVALID; 691 xpt_done(ccb); 692 break; 693 } 694 } 695 696