Lines Matching +full:wait +full:- +full:retry +full:- +full:us
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
6 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
64 * disable interrupts, and wait for current ongoing processing to
99 isci_controller->is_started = TRUE; in scif_cb_controller_start_complete()
101 /* Set bits for all domains. We will clear them one-by-one once in scif_cb_controller_start_complete()
106 isci_controller->initial_discovery_mask = (1 << SCI_MAX_DOMAINS) - 1; in scif_cb_controller_start_complete()
111 isci_controller->domain[index].sci_object; in scif_cb_controller_start_complete()
122 isci_controller, &isci_controller->domain[index]); in scif_cb_controller_start_complete()
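The matched lines at 99-122 seed a per-domain bitmask before discovery starts; the lines at 574-584 further down clear one bit per completed domain and only release the SIM queue once the mask reaches zero. A minimal standalone sketch of that set/clear/test pattern (hypothetical names such as MAX_DOMAINS and domain_discovery_complete, not the driver's API):

#include <stdint.h>
#include <stdio.h>

#define MAX_DOMAINS 4			/* stand-in for SCI_MAX_DOMAINS */

static uint32_t initial_discovery_mask;

static void
start_discovery(void)
{
	/* One bit per domain; every domain starts out pending. */
	initial_discovery_mask = (1u << MAX_DOMAINS) - 1;
}

static void
domain_discovery_complete(int index)
{
	initial_discovery_mask &= ~(1u << index);

	if (initial_discovery_mask == 0)
		printf("all domains discovered; release the SIM queue and scan\n");
}

int
main(void)
{
	start_discovery();
	for (int i = 0; i < MAX_DOMAINS; i++)
		domain_discovery_complete(i);
	return (0);
}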
148 isci_controller->is_started = FALSE; in scif_cb_controller_stop_complete()
183 if (!sci_pool_empty(isci_controller->unmap_buffer_pool)) { in scif_cb_controller_allocate_memory()
184 sci_pool_get(isci_controller->unmap_buffer_pool, in scif_cb_controller_allocate_memory()
185 mde->virtual_address); in scif_cb_controller_allocate_memory()
187 mde->virtual_address = contigmalloc(PAGE_SIZE, in scif_cb_controller_allocate_memory()
189 mde->constant_memory_alignment, 0); in scif_cb_controller_allocate_memory()
191 if (mde->virtual_address != NULL) in scif_cb_controller_allocate_memory()
192 bus_dmamap_load(isci_controller->buffer_dma_tag, in scif_cb_controller_allocate_memory()
193 NULL, mde->virtual_address, PAGE_SIZE, in scif_cb_controller_allocate_memory()
194 isci_single_map, &mde->physical_address, in scif_cb_controller_allocate_memory()
220 sci_pool_put(isci_controller->unmap_buffer_pool, in scif_cb_controller_free_memory()
221 mde->virtual_address); in scif_cb_controller_free_memory()
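Lines 183-194 and 220-221 show the allocate/free callbacks treating the page-sized unmap buffers as a small pool: allocation first tries to reuse a pooled buffer and only falls back to contigmalloc() plus a DMA map load, while free simply returns the buffer to the pool. A simplified userland model of that reuse pattern, with a fixed-size stack standing in for sci_pool and malloc()/free() standing in for the contigmalloc(9)/busdma calls:

#include <stdlib.h>

#define POOL_SLOTS 8			/* arbitrary pool size for the model */
#define BUF_SIZE   4096			/* stand-in for PAGE_SIZE */

static void *pool[POOL_SLOTS];
static int   pool_count;

/* Allocate a buffer, preferring one previously released to the pool. */
static void *
buffer_alloc(void)
{
	if (pool_count > 0)
		return (pool[--pool_count]);
	return (malloc(BUF_SIZE));	/* driver: contigmalloc(9) + bus_dmamap_load() */
}

/* Release a buffer back to the pool; overflow to the system only in this model. */
static void
buffer_free(void *buf)
{
	if (pool_count < POOL_SLOTS)
		pool[pool_count++] = buf;
	else
		free(buf);
}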
229 scif_library_allocate_controller(isci->sci_library_handle, in isci_controller_construct()
232 scif_controller_construct(isci->sci_library_handle, in isci_controller_construct()
235 controller->isci = isci; in isci_controller_construct()
236 controller->scif_controller_handle = scif_controller_handle; in isci_controller_construct()
238 /* This allows us to later use in isci_controller_construct()
244 controller->is_started = FALSE; in isci_controller_construct()
245 controller->is_frozen = FALSE; in isci_controller_construct()
246 controller->release_queued_ccbs = FALSE; in isci_controller_construct()
247 controller->sim = NULL; in isci_controller_construct()
248 controller->initial_discovery_mask = 0; in isci_controller_construct()
250 sci_fast_list_init(&controller->pending_device_reset_list); in isci_controller_construct()
252 mtx_init(&controller->lock, "isci", NULL, MTX_DEF); in isci_controller_construct()
257 isci_domain_construct( &controller->domain[domain_index], in isci_controller_construct()
261 controller->timer_memory = malloc( in isci_controller_construct()
265 sci_pool_initialize(controller->timer_pool); in isci_controller_construct()
268 controller->timer_memory; in isci_controller_construct()
271 sci_pool_put(controller->timer_pool, timer++); in isci_controller_construct()
274 sci_pool_initialize(controller->unmap_buffer_pool); in isci_controller_construct()
282 phy->led_fault = onoff; in isci_led_fault_func()
283 scic_sgpio_update_led_state(phy->handle, 1 << phy->index, in isci_led_fault_func()
284 phy->led_fault, phy->led_locate, 0); in isci_led_fault_func()
292 phy->led_locate = onoff; in isci_led_locate_func()
293 scic_sgpio_update_led_state(phy->handle, 1 << phy->index, in isci_led_locate_func()
294 phy->led_fault, phy->led_locate, 0); in isci_led_locate_func()
308 scif_controller_get_scic_handle(controller->scif_controller_handle); in isci_controller_initialize()
310 if (controller->isci->oem_parameters_found == TRUE) in isci_controller_initialize()
314 &controller->oem_parameters, in isci_controller_initialize()
315 (uint8_t)(controller->oem_parameters_version)); in isci_controller_initialize()
348 * a workaround - one per domain. in isci_controller_initialize()
350 controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS; in isci_controller_initialize()
353 &controller->queue_depth)) { in isci_controller_initialize()
354 controller->queue_depth = max(1, min(controller->queue_depth, in isci_controller_initialize()
355 SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS)); in isci_controller_initialize()
361 controller->sim_queue_depth = controller->queue_depth - 1; in isci_controller_initialize()
372 controller->sim_queue_depth += io_shortage; in isci_controller_initialize()
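For scale, the sizing at lines 350-361 works out as follows under illustrative values (assuming, for the sake of the example only, that SCI_MAX_IO_REQUESTS is 256 and SCI_MAX_DOMAINS is 4): the default queue depth is 256 - 4 = 252, holding one internal request in reserve per domain as the workaround comment at line 348 describes; if the loader tunable fetched at line 353 is set, it is clamped into the range [1, 252]; and the CAM SIM queue depth starts one below the final value, at 251, before the io_shortage adjustment at line 372.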
376 controller->fail_on_task_timeout = fail_on_timeout; in isci_controller_initialize()
382 controller->has_been_scanned = FALSE; in isci_controller_initialize()
383 mtx_lock(&controller->lock); in isci_controller_initialize()
385 xpt_freeze_simq(controller->sim, 1); in isci_controller_initialize()
386 mtx_unlock(&controller->lock); in isci_controller_initialize()
389 controller->phys[i].handle = scic_controller_handle; in isci_controller_initialize()
390 controller->phys[i].index = i; in isci_controller_initialize()
393 controller->phys[i].led_fault = 0; in isci_controller_initialize()
394 sprintf(led_name, "isci.bus%d.port%d.fault", controller->index, i); in isci_controller_initialize()
395 controller->phys[i].cdev_fault = led_create(isci_led_fault_func, in isci_controller_initialize()
396 &controller->phys[i], led_name); in isci_controller_initialize()
399 controller->phys[i].led_locate = 0; in isci_controller_initialize()
400 sprintf(led_name, "isci.bus%d.port%d.locate", controller->index, i); in isci_controller_initialize()
401 controller->phys[i].cdev_locate = led_create(isci_led_locate_func, in isci_controller_initialize()
402 &controller->phys[i], led_name); in isci_controller_initialize()
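Lines 282-294 and 389-402 together show the led(4) wiring: each phy gets a fault and a locate LED registered with led_create(), and the registered callback both caches the requested state and pushes it to the hardware through scic_sgpio_update_led_state(). A minimal sketch of that shape against the led(4) KPI; the demo_phy structure, the device name, and the omitted SGPIO call are stand-ins, not the driver's definitions:

#include <sys/param.h>
#include <sys/systm.h>
#include <dev/led/led.h>

struct demo_phy {
	int		index;
	int		led_fault;	/* last state requested for the fault LED */
	struct cdev	*cdev_fault;
};

/* led(4) callback: cache the state, then push it out to the hardware. */
static void
demo_led_fault_func(void *priv, int onoff)
{
	struct demo_phy *phy = priv;

	phy->led_fault = onoff;
	/* Real driver: scic_sgpio_update_led_state(handle, 1 << phy->index, ...). */
}

static void
demo_led_attach(struct demo_phy *phy, int bus)
{
	char name[64];

	snprintf(name, sizeof(name), "demo.bus%d.port%d.fault", bus, phy->index);
	phy->cdev_fault = led_create(demo_led_fault_func, phy, name);
	/* led_destroy(phy->cdev_fault) undoes this at detach time. */
}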
405 return (scif_controller_initialize(controller->scif_controller_handle)); in isci_controller_initialize()
411 device_t device = controller->isci->device; in isci_controller_allocate_memory()
414 &controller->uncached_controller_memory; in isci_controller_allocate_memory()
416 &controller->cached_controller_memory; in isci_controller_allocate_memory()
418 &controller->request_memory; in isci_controller_allocate_memory()
422 controller->mdl = sci_controller_get_memory_descriptor_list_handle( in isci_controller_allocate_memory()
423 controller->scif_controller_handle); in isci_controller_allocate_memory()
425 uncached_controller_memory->size = sci_mdl_decorator_get_memory_size( in isci_controller_allocate_memory()
426 controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS); in isci_controller_allocate_memory()
434 sci_mdl_decorator_assign_memory( controller->mdl, in isci_controller_allocate_memory()
436 uncached_controller_memory->virtual_address, in isci_controller_allocate_memory()
437 uncached_controller_memory->physical_address); in isci_controller_allocate_memory()
439 cached_controller_memory->size = sci_mdl_decorator_get_memory_size( in isci_controller_allocate_memory()
440 controller->mdl, in isci_controller_allocate_memory()
450 sci_mdl_decorator_assign_memory(controller->mdl, in isci_controller_allocate_memory()
452 cached_controller_memory->virtual_address, in isci_controller_allocate_memory()
453 cached_controller_memory->physical_address); in isci_controller_allocate_memory()
455 request_memory->size = in isci_controller_allocate_memory()
456 controller->queue_depth * isci_io_request_get_object_size(); in isci_controller_allocate_memory()
465 * will allow us to force DMA segments to a smaller size, ensuring in isci_controller_allocate_memory()
467 * I/O, the DMA subsystem will pass us multiple segments in our DMA in isci_controller_allocate_memory()
481 busdma_lock_mutex, &controller->lock, in isci_controller_allocate_memory()
482 &controller->buffer_dma_tag); in isci_controller_allocate_memory()
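The fragment at lines 465-482 is the tail of a bus_dma_tag_create(9) call, and the comment above it explains why the tag exists: capping the segment size forces busdma to hand the driver multiple S/G segments for large I/O rather than one oversized one. A hedged reconstruction of what such a call looks like; the numeric limits below are placeholders, not the driver's actual values, and only the lock function and lock argument are taken from line 481:

	bus_dma_tag_t buffer_dma_tag;
	int error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(device),	/* parent: inherit platform limits   */
	    1, 0,			/* alignment, boundary               */
	    BUS_SPACE_MAXADDR,		/* lowaddr                           */
	    BUS_SPACE_MAXADDR,		/* highaddr                          */
	    NULL, NULL,			/* filter, filterarg (unused)        */
	    128 * 1024,			/* maxsize: largest single mapping   */
	    32,				/* nsegments: S/G entries accepted   */
	    PAGE_SIZE,			/* maxsegsz: force smaller segments  */
	    0,				/* flags                             */
	    busdma_lock_mutex,		/* lockfunc (line 481)               */
	    &controller->lock,		/* lockfuncarg (line 481)            */
	    &buffer_dma_tag);
	if (error != 0)
		return (error);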
487 sci_pool_initialize(controller->request_pool); in isci_controller_allocate_memory()
489 virtual_address = request_memory->virtual_address; in isci_controller_allocate_memory()
490 physical_address = request_memory->physical_address; in isci_controller_allocate_memory()
492 for (int i = 0; i < controller->queue_depth; i++) { in isci_controller_allocate_memory()
497 controller->scif_controller_handle, in isci_controller_allocate_memory()
498 controller->buffer_dma_tag, physical_address); in isci_controller_allocate_memory()
500 sci_pool_put(controller->request_pool, request); in isci_controller_allocate_memory()
509 controller->remote_device_memory = (uint8_t *) malloc( in isci_controller_allocate_memory()
513 sci_pool_initialize(controller->remote_device_pool); in isci_controller_allocate_memory()
515 uint8_t *remote_device_memory_ptr = controller->remote_device_memory; in isci_controller_allocate_memory()
521 controller->remote_device[i] = NULL; in isci_controller_allocate_memory()
522 remote_device->index = i; in isci_controller_allocate_memory()
523 remote_device->is_resetting = FALSE; in isci_controller_allocate_memory()
524 remote_device->frozen_lun_mask = 0; in isci_controller_allocate_memory()
526 &remote_device->pending_device_reset_element); in isci_controller_allocate_memory()
527 TAILQ_INIT(&remote_device->queued_ccbs); in isci_controller_allocate_memory()
528 remote_device->release_queued_ccb = FALSE; in isci_controller_allocate_memory()
529 remote_device->queued_ccb_in_progress = NULL; in isci_controller_allocate_memory()
538 controller->domain[i].da_remote_device = remote_device; in isci_controller_allocate_memory()
540 sci_pool_put(controller->remote_device_pool, in isci_controller_allocate_memory()
553 controller->scif_controller_handle; in isci_controller_start()
559 scif_controller_get_scic_handle(controller->scif_controller_handle)); in isci_controller_start()
565 if (!isci_controller->has_been_scanned) in isci_controller_domain_discovery_complete()
574 isci_controller->initial_discovery_mask &= in isci_controller_domain_discovery_complete()
575 ~(1 << isci_domain->index); in isci_controller_domain_discovery_complete()
577 if (isci_controller->initial_discovery_mask == 0) { in isci_controller_domain_discovery_complete()
578 struct isci_softc *driver = isci_controller->isci; in isci_controller_domain_discovery_complete()
579 uint8_t next_index = isci_controller->index + 1; in isci_controller_domain_discovery_complete()
581 isci_controller->has_been_scanned = TRUE; in isci_controller_domain_discovery_complete()
584 xpt_release_simq(isci_controller->sim, TRUE); in isci_controller_domain_discovery_complete()
586 if (next_index < driver->controller_count) { in isci_controller_domain_discovery_complete()
591 &driver->controllers[next_index]); in isci_controller_domain_discovery_complete()
601 &driver->config_hook); in isci_controller_domain_discovery_complete()
609 struct isci_softc *isci = controller->isci; in isci_controller_attach_to_cam()
610 device_t parent = device_get_parent(isci->device); in isci_controller_attach_to_cam()
611 int unit = device_get_unit(isci->device); in isci_controller_attach_to_cam()
612 struct cam_devq *isci_devq = cam_simq_alloc(controller->sim_queue_depth); in isci_controller_attach_to_cam()
616 return (-1); in isci_controller_attach_to_cam()
619 controller->sim = cam_sim_alloc(isci_action, isci_poll, "isci", in isci_controller_attach_to_cam()
620 controller, unit, &controller->lock, controller->sim_queue_depth, in isci_controller_attach_to_cam()
621 controller->sim_queue_depth, isci_devq); in isci_controller_attach_to_cam()
623 if(controller->sim == NULL) { in isci_controller_attach_to_cam()
626 return (-1); in isci_controller_attach_to_cam()
629 if(xpt_bus_register(controller->sim, parent, controller->index) in isci_controller_attach_to_cam()
632 cam_sim_free(controller->sim, TRUE); in isci_controller_attach_to_cam()
633 mtx_unlock(&controller->lock); in isci_controller_attach_to_cam()
634 return (-1); in isci_controller_attach_to_cam()
637 if(xpt_create_path(&controller->path, NULL, in isci_controller_attach_to_cam()
638 cam_sim_path(controller->sim), CAM_TARGET_WILDCARD, in isci_controller_attach_to_cam()
641 xpt_bus_deregister(cam_sim_path(controller->sim)); in isci_controller_attach_to_cam()
642 cam_sim_free(controller->sim, TRUE); in isci_controller_attach_to_cam()
643 mtx_unlock(&controller->lock); in isci_controller_attach_to_cam()
644 return (-1); in isci_controller_attach_to_cam()
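Lines 609-644 trace the standard CAM attach sequence, and each error path unwinds in reverse order of acquisition. A condensed restatement of that ordering with generic local names; note that cam_sim_free() with free_devq set to TRUE also releases the devq, which is why the later error paths do not call cam_simq_free() separately:

	devq = cam_simq_alloc(queue_depth);
	if (devq == NULL)
		return (-1);

	sim = cam_sim_alloc(isci_action, isci_poll, "isci", controller, unit,
	    &controller->lock, queue_depth, queue_depth, devq);
	if (sim == NULL) {
		cam_simq_free(devq);		/* nothing else acquired yet */
		return (-1);
	}

	mtx_lock(&controller->lock);
	if (xpt_bus_register(sim, parent, bus) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);	/* also frees the devq */
		mtx_unlock(&controller->lock);
		return (-1);
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		mtx_unlock(&controller->lock);
		return (-1);
	}
	mtx_unlock(&controller->lock);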
663 switch ( ccb->ccb_h.func_code ) { in isci_action()
666 struct ccb_pathinq *cpi = &ccb->cpi; in isci_action()
668 ccb->ccb_h.ccb_sim_ptr = sim; in isci_action()
669 cpi->version_num = 1; in isci_action()
670 cpi->hba_inquiry = PI_TAG_ABLE; in isci_action()
671 cpi->target_sprt = 0; in isci_action()
672 cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN | in isci_action()
674 cpi->hba_eng_cnt = 0; in isci_action()
675 cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1; in isci_action()
676 cpi->max_lun = ISCI_MAX_LUN; in isci_action()
677 cpi->maxio = isci_io_request_get_max_io_size(); in isci_action()
678 cpi->unit_number = cam_sim_unit(sim); in isci_action()
679 cpi->bus_id = bus; in isci_action()
680 cpi->initiator_id = SCI_MAX_REMOTE_DEVICES; in isci_action()
681 cpi->base_transfer_speed = 300000; in isci_action()
682 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); in isci_action()
683 strlcpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN); in isci_action()
684 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); in isci_action()
685 cpi->transport = XPORT_SAS; in isci_action()
686 cpi->transport_version = 0; in isci_action()
687 cpi->protocol = PROTO_SCSI; in isci_action()
688 cpi->protocol_version = SCSI_REV_SPC2; in isci_action()
689 cpi->ccb_h.status = CAM_REQ_CMP; in isci_action()
695 struct ccb_trans_settings *general_settings = &ccb->cts; in isci_action()
697 &general_settings->xport_specific.sas; in isci_action()
699 &general_settings->proto_specific.scsi; in isci_action()
702 remote_device = controller->remote_device[ccb->ccb_h.target_id]; in isci_action()
705 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; in isci_action()
706 ccb->ccb_h.status &= ~CAM_STATUS_MASK; in isci_action()
707 ccb->ccb_h.status |= CAM_DEV_NOT_THERE; in isci_action()
712 general_settings->protocol = PROTO_SCSI; in isci_action()
713 general_settings->transport = XPORT_SAS; in isci_action()
714 general_settings->protocol_version = SCSI_REV_SPC2; in isci_action()
715 general_settings->transport_version = 0; in isci_action()
716 scsi_settings->valid = CTS_SCSI_VALID_TQ; in isci_action()
717 scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB; in isci_action()
718 ccb->ccb_h.status &= ~CAM_STATUS_MASK; in isci_action()
719 ccb->ccb_h.status |= CAM_REQ_CMP; in isci_action()
721 sas_settings->bitrate = in isci_action()
724 if (sas_settings->bitrate != 0) in isci_action()
725 sas_settings->valid = CTS_SAS_VALID_SPEED; in isci_action()
731 if (ccb->ccb_h.flags & CAM_CDB_PHYS) { in isci_action()
732 ccb->ccb_h.status = CAM_REQ_INVALID; in isci_action()
742 ccb->ccb_h.status &= ~CAM_STATUS_MASK; in isci_action()
743 ccb->ccb_h.status |= CAM_REQ_CMP; in isci_action()
747 cam_calc_geometry(&ccb->ccg, /*extended*/1); in isci_action()
753 controller->remote_device[ccb->ccb_h.target_id]; in isci_action()
758 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; in isci_action()
759 ccb->ccb_h.status &= ~CAM_STATUS_MASK; in isci_action()
760 ccb->ccb_h.status |= CAM_DEV_NOT_THERE; in isci_action()
766 ccb->ccb_h.status = CAM_REQ_CMP; in isci_action()
771 ccb->ccb_h.func_code); in isci_action()
772 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; in isci_action()
773 ccb->ccb_h.status &= ~CAM_STATUS_MASK; in isci_action()
774 ccb->ccb_h.status |= CAM_REQ_INVALID; in isci_action()
781 * Unfortunately, SCIL doesn't cleanly handle retry conditions.
784 * tagged/non-tagged ATA commands, or running out of NCQ slots), we queue
786 * a ready notification, we will retry the first I/O on the queue.
788 * the context of the completion handler, so we need to retry these I/O after
799 KASSERT(mtx_owned(&controller->lock), ("controller lock not owned")); in isci_controller_release_queued_ccbs()
801 controller->release_queued_ccbs = FALSE; in isci_controller_release_queued_ccbs()
806 dev = controller->remote_device[dev_idx]; in isci_controller_release_queued_ccbs()
808 dev->release_queued_ccb == TRUE && in isci_controller_release_queued_ccbs()
809 dev->queued_ccb_in_progress == NULL) { in isci_controller_release_queued_ccbs()
810 dev->release_queued_ccb = FALSE; in isci_controller_release_queued_ccbs()
811 ccb_h = TAILQ_FIRST(&dev->queued_ccbs); in isci_controller_release_queued_ccbs()
816 ptr = scsiio_cdb_ptr(&((union ccb *)ccb_h)->csio); in isci_controller_release_queued_ccbs()
819 dev->queued_ccb_in_progress = (union ccb *)ccb_h; in isci_controller_release_queued_ccbs()
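The comment at lines 781-788 explains the workaround: CCBs that SCIL refuses in certain invalid-state cases are parked on a per-device queue, and once the device posts a ready notification (and we are back outside the completion handler) the first queued CCB is retried. A self-contained model of that drain step using sys/queue.h; the request and device_ctx types and the resubmit step are hypothetical stand-ins for the driver's CCB handling, and the model dequeues immediately, whereas the driver tracks the in-flight CCB via queued_ccb_in_progress:

#include <sys/queue.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
	int			id;
	TAILQ_ENTRY(request)	link;
};

struct device_ctx {
	bool			release_queued;	/* set by the ready notification   */
	struct request		*in_progress;	/* a retry is already outstanding? */
	TAILQ_HEAD(, request)	queued;
};

/* Runs outside the completion handler, with the controller lock held. */
static void
release_queued_requests(struct device_ctx *dev)
{
	struct request *req;

	if (!dev->release_queued || dev->in_progress != NULL)
		return;

	dev->release_queued = false;
	req = TAILQ_FIRST(&dev->queued);
	if (req != NULL) {
		TAILQ_REMOVE(&dev->queued, req, link);
		dev->in_progress = req;
		printf("resubmitting request %d\n", req->id); /* driver re-executes the CCB here */
	}
}

int
main(void)
{
	struct device_ctx dev = { .release_queued = true, .in_progress = NULL };
	struct request r = { .id = 1 };

	TAILQ_INIT(&dev.queued);
	TAILQ_INSERT_TAIL(&dev.queued, &r, link);
	release_queued_requests(&dev);
	return (0);
}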