xref: /freebsd/sys/dev/isci/isci_controller.c (revision b7c60aadbbd5c846a250c05791fe7406d6d78bf4)
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *   * Redistributions of source code must retain the above copyright
12  *     notice, this list of conditions and the following disclaimer.
13  *   * Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in
15  *     the documentation and/or other materials provided with the
16  *     distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <dev/isci/isci.h>
35 
36 #include <sys/conf.h>
37 #include <sys/malloc.h>
38 
39 #include <dev/isci/scil/sci_memory_descriptor_list.h>
40 #include <dev/isci/scil/sci_memory_descriptor_list_decorator.h>
41 
42 #include <dev/isci/scil/scif_controller.h>
43 #include <dev/isci/scil/scif_library.h>
44 #include <dev/isci/scil/scif_io_request.h>
45 #include <dev/isci/scil/scif_task_request.h>
46 #include <dev/isci/scil/scif_remote_device.h>
47 #include <dev/isci/scil/scif_domain.h>
48 #include <dev/isci/scil/scif_user_callback.h>
49 
50 void isci_action(struct cam_sim *sim, union ccb *ccb);
51 void isci_poll(struct cam_sim *sim);
52 
53 #define ccb_sim_ptr sim_priv.entries[0].ptr
54 
55 /**
56  * @brief This user callback will inform the user that the controller has
 *        had a serious unexpected error.  The user should note the error,
58  *        disable interrupts, and wait for current ongoing processing to
59  *        complete.  Subsequently, the user should reset the controller.
60  *
61  * @param[in]  controller This parameter specifies the controller that had
62  *                        an error.
63  *
64  * @return none
65  */
void scif_cb_controller_error(SCI_CONTROLLER_HANDLE_T controller,
    SCI_CONTROLLER_ERROR error)
{

	/* Fatal controller errors are only logged here; the recovery steps
	 *  described in the callback contract (disable interrupts, reset the
	 *  controller) are not implemented in this revision.
	 */
	isci_log_message(0, "ISCI", "scif_cb_controller_error: 0x%x\n",
	    error);
}
73 
74 /**
75  * @brief This user callback will inform the user that the controller has
76  *        finished the start process.
77  *
78  * @param[in]  controller This parameter specifies the controller that was
79  *             started.
80  * @param[in]  completion_status This parameter specifies the results of
81  *             the start operation.  SCI_SUCCESS indicates successful
82  *             completion.
83  *
84  * @return none
85  */
86 void scif_cb_controller_start_complete(SCI_CONTROLLER_HANDLE_T controller,
87     SCI_STATUS completion_status)
88 {
89 	uint32_t index;
90 	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
91 	    sci_object_get_association(controller);
92 
93 	isci_controller->is_started = TRUE;
94 
95 	/* Set bits for all domains.  We will clear them one-by-one once
96 	 *  the domains complete discovery, or return error when calling
97 	 *  scif_domain_discover.  Once all bits are clear, we will register
98 	 *  the controller with CAM.
99 	 */
100 	isci_controller->initial_discovery_mask = (1 << SCI_MAX_DOMAINS) - 1;
101 
102 	for(index = 0; index < SCI_MAX_DOMAINS; index++) {
103 		SCI_STATUS status;
104 		SCI_DOMAIN_HANDLE_T domain =
105 		    isci_controller->domain[index].sci_object;
106 
107 		status = scif_domain_discover(
108 			domain,
109 			scif_domain_get_suggested_discover_timeout(domain),
110 			DEVICE_TIMEOUT
111 		);
112 
113 		if (status != SCI_SUCCESS)
114 		{
115 			isci_controller_domain_discovery_complete(
116 			    isci_controller, &isci_controller->domain[index]);
117 		}
118 	}
119 }
120 
121 /**
122  * @brief This user callback will inform the user that the controller has
123  *        finished the stop process. Note, after user calls
124  *        scif_controller_stop(), before user receives this controller stop
125  *        complete callback, user should not expect any callback from
126  *        framework, such like scif_cb_domain_change_notification().
127  *
128  * @param[in]  controller This parameter specifies the controller that was
129  *             stopped.
130  * @param[in]  completion_status This parameter specifies the results of
131  *             the stop operation.  SCI_SUCCESS indicates successful
132  *             completion.
133  *
134  * @return none
135  */
136 void scif_cb_controller_stop_complete(SCI_CONTROLLER_HANDLE_T controller,
137     SCI_STATUS completion_status)
138 {
139 	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
140 	    sci_object_get_association(controller);
141 
142 	isci_controller->is_started = FALSE;
143 }
144 
145 /**
146  * @brief This method will be invoked to allocate memory dynamically.
147  *
148  * @param[in]  controller This parameter represents the controller
149  *             object for which to allocate memory.
150  * @param[out] mde This parameter represents the memory descriptor to
151  *             be filled in by the user that will reference the newly
152  *             allocated memory.
153  *
154  * @return none
155  */
void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{

	/* Intentionally empty: this driver assigns all MDL-described memory
	 *  up front in isci_controller_allocate_memory(), so no dynamic
	 *  allocation is performed on SCIL's behalf here.
	 */
}
161 
162 /**
 * @brief This method will be invoked to free memory that was dynamically
 *        allocated for the controller.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which memory was allocated.
 * @param[in]  mde This parameter represents the memory descriptor that
 *             references the memory to be freed.
170  *
171  * @return none
172  */
void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T * mde)
{

	/* Intentionally empty: controller memory is owned and released by
	 *  the driver, not handed back through this callback.
	 */
}
178 
/**
 * @brief Construct the driver-level controller object: allocate and
 *        construct the SCIF controller, link the two objects, initialize
 *        bookkeeping state, the per-controller lock, the domains, and the
 *        pool of driver timers.
 *
 * @param[in] controller The driver controller object to initialize.
 * @param[in] isci       The softc this controller belongs to.
 *
 * @return none
 */
void isci_controller_construct(struct ISCI_CONTROLLER *controller,
    struct isci_softc *isci)
{
	SCI_CONTROLLER_HANDLE_T scif_controller_handle;

	scif_library_allocate_controller(isci->sci_library_handle,
	    &scif_controller_handle);

	scif_controller_construct(isci->sci_library_handle,
	    scif_controller_handle, NULL);

	controller->isci = isci;
	controller->scif_controller_handle = scif_controller_handle;

	/* This allows us to later use
	 *  sci_object_get_association(scif_controller_handle)
	 * inside of a callback routine to get our struct ISCI_CONTROLLER object
	 */
	sci_object_set_association(scif_controller_handle, (void *)controller);

	controller->is_started = FALSE;
	controller->is_frozen = FALSE;
	controller->sim = NULL;
	controller->initial_discovery_mask = 0;

	sci_fast_list_init(&controller->pending_device_reset_list);

	mtx_init(&controller->lock, "isci", NULL, MTX_DEF);

	uint32_t domain_index;

	for(domain_index = 0; domain_index < SCI_MAX_DOMAINS; domain_index++) {
		isci_domain_construct( &controller->domain[domain_index],
		    domain_index, controller);
	}

	/* NOTE(review): this M_NOWAIT allocation is not checked for NULL;
	 *  the pool-seeding loop below would dereference a NULL
	 *  timer_memory on failure.  Confirm whether M_WAITOK is acceptable
	 *  in this (attach-time) context, or add an explicit check.
	 */
	controller->timer_memory = malloc(
	    sizeof(struct ISCI_TIMER) * SCI_MAX_TIMERS, M_ISCI,
	    M_NOWAIT | M_ZERO);

	sci_pool_initialize(controller->timer_pool);

	struct ISCI_TIMER *timer = (struct ISCI_TIMER *)
	    controller->timer_memory;

	/* Seed the timer pool with every preallocated timer object. */
	for ( int i = 0; i < SCI_MAX_TIMERS; i++ ) {
		sci_pool_put(controller->timer_pool, timer++);
	}
}
228 
/**
 * @brief Initialize the controller: push OEM parameters (if found during
 *        probe) and tunable-overridable user parameters into the core,
 *        size the request queues, and run the framework initialize step.
 *
 * @param[in] controller The controller to initialize.
 *
 * @return Status of scif_controller_initialize().
 */
SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller)
{
	SCIC_USER_PARAMETERS_T scic_user_parameters;
	SCI_CONTROLLER_HANDLE_T scic_controller_handle;
	unsigned long tunable;
	int i;

	scic_controller_handle =
	    scif_controller_get_scic_handle(controller->scif_controller_handle);

	/* Apply OEM parameters only if the probe located a valid set. */
	if (controller->isci->oem_parameters_found == TRUE)
	{
		scic_oem_parameters_set(
		    scic_controller_handle,
		    &controller->oem_parameters,
		    (uint8_t)(controller->oem_parameters_version));
	}

	/* Start from the core's current user parameters, then override any
	 *  value the administrator supplied via loader tunables.
	 */
	scic_user_parameters_get(scic_controller_handle, &scic_user_parameters);

	if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable))
		scic_user_parameters.sds1.no_outbound_task_timeout =
		    (uint8_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable))
		scic_user_parameters.sds1.ssp_max_occupancy_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable))
		scic_user_parameters.sds1.stp_max_occupancy_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable))
		scic_user_parameters.sds1.ssp_inactivity_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable))
		scic_user_parameters.sds1.stp_inactivity_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable))
		for (i = 0; i < SCI_MAX_PHYS; i++)
			scic_user_parameters.sds1.phys[i].max_speed_generation =
			    (uint8_t)tunable;

	scic_user_parameters_set(scic_controller_handle, &scic_user_parameters);

	/* Scheduler bug in SCU requires SCIL to reserve some task contexts
	 *  as a workaround - one per domain.
	 */
	controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS;

	/* A tunable may shrink the queue depth; clamp it to [1, max]. */
	if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth",
	    &controller->queue_depth)) {
		controller->queue_depth = max(1, min(controller->queue_depth,
		    SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS));
	}

	/* Reserve one request so that we can ensure we have one available TC
	 *  to do internal device resets.
	 */
	controller->sim_queue_depth = controller->queue_depth - 1;

	/* Although we save one TC to do internal device resets, it is possible
	 *  we could end up using several TCs for simultaneous device resets
	 *  while at the same time having CAM fill our controller queue.  To
	 *  simulate this condition, and how our driver handles it, we can set
	 *  this io_shortage parameter, which will tell CAM that we have a
	 *  larger queue depth than we really do.
	 */
	uint32_t io_shortage = 0;
	TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage);
	controller->sim_queue_depth += io_shortage;

	return (scif_controller_initialize(controller->scif_controller_handle));
}
305 
306 int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
307 {
308 	int error;
309 	device_t device =  controller->isci->device;
310 	uint32_t max_segment_size = isci_io_request_get_max_io_size();
311 	uint32_t status = 0;
312 	struct ISCI_MEMORY *uncached_controller_memory =
313 	    &controller->uncached_controller_memory;
314 	struct ISCI_MEMORY *cached_controller_memory =
315 	    &controller->cached_controller_memory;
316 	struct ISCI_MEMORY *request_memory =
317 	    &controller->request_memory;
318 	POINTER_UINT virtual_address;
319 	bus_addr_t physical_address;
320 
321 	controller->mdl = sci_controller_get_memory_descriptor_list_handle(
322 	    controller->scif_controller_handle);
323 
324 	uncached_controller_memory->size = sci_mdl_decorator_get_memory_size(
325 	    controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS);
326 
327 	error = isci_allocate_dma_buffer(device, uncached_controller_memory);
328 
329 	if (error != 0)
330 	    return (error);
331 
332 	sci_mdl_decorator_assign_memory( controller->mdl,
333 	    SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
334 	    uncached_controller_memory->virtual_address,
335 	    uncached_controller_memory->physical_address);
336 
337 	cached_controller_memory->size = sci_mdl_decorator_get_memory_size(
338 	    controller->mdl,
339 	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
340 	);
341 
342 	error = isci_allocate_dma_buffer(device, cached_controller_memory);
343 
344 	if (error != 0)
345 	    return (error);
346 
347 	sci_mdl_decorator_assign_memory(controller->mdl,
348 	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
349 	    cached_controller_memory->virtual_address,
350 	    cached_controller_memory->physical_address);
351 
352 	request_memory->size =
353 	    controller->queue_depth * isci_io_request_get_object_size();
354 
355 	error = isci_allocate_dma_buffer(device, request_memory);
356 
357 	if (error != 0)
358 	    return (error);
359 
360 	/* For STP PIO testing, we want to ensure we can force multiple SGLs
361 	 *  since this has been a problem area in SCIL.  This tunable parameter
362 	 *  will allow us to force DMA segments to a smaller size, ensuring
363 	 *  that even if a physically contiguous buffer is attached to this
364 	 *  I/O, the DMA subsystem will pass us multiple segments in our DMA
365 	 *  load callback.
366 	 */
367 	TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size);
368 
369 	/* Create DMA tag for our I/O requests.  Then we can create DMA maps based off
370 	 *  of this tag and store them in each of our ISCI_IO_REQUEST objects.  This
371 	 *  will enable better performance than creating the DMA maps everytime we get
372 	 *  an I/O.
373 	 */
374 	status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0,
375 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
376 	    isci_io_request_get_max_io_size(),
377 	    SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL,
378 	    &controller->buffer_dma_tag);
379 
380 	sci_pool_initialize(controller->request_pool);
381 
382 	virtual_address = request_memory->virtual_address;
383 	physical_address = request_memory->physical_address;
384 
385 	for (int i = 0; i < controller->queue_depth; i++) {
386 		struct ISCI_REQUEST *request =
387 		    (struct ISCI_REQUEST *)virtual_address;
388 
389 		isci_request_construct(request,
390 		    controller->scif_controller_handle,
391 		    controller->buffer_dma_tag, physical_address);
392 
393 		sci_pool_put(controller->request_pool, request);
394 
395 		virtual_address += isci_request_get_object_size();
396 		physical_address += isci_request_get_object_size();
397 	}
398 
399 	uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) +
400 	    scif_remote_device_get_object_size();
401 
402 	controller->remote_device_memory = (uint8_t *) malloc(
403 	    remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI,
404 	    M_NOWAIT | M_ZERO);
405 
406 	sci_pool_initialize(controller->remote_device_pool);
407 
408 	uint8_t *remote_device_memory_ptr = controller->remote_device_memory;
409 
410 	for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
411 		struct ISCI_REMOTE_DEVICE *remote_device =
412 		    (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr;
413 
414 		controller->remote_device[i] = NULL;
415 		remote_device->index = i;
416 		remote_device->is_resetting = FALSE;
417 		remote_device->frozen_lun_mask = 0;
418 		sci_fast_list_element_init(remote_device,
419 		    &remote_device->pending_device_reset_element);
420 		sci_pool_put(controller->remote_device_pool, remote_device);
421 		remote_device_memory_ptr += remote_device_size;
422 	}
423 
424 	return (0);
425 }
426 
427 void isci_controller_start(void *controller_handle)
428 {
429 	struct ISCI_CONTROLLER *controller =
430 	    (struct ISCI_CONTROLLER *)controller_handle;
431 	SCI_CONTROLLER_HANDLE_T scif_controller_handle =
432 	    controller->scif_controller_handle;
433 
434 	scif_controller_start(scif_controller_handle,
435 	    scif_controller_get_suggested_start_timeout(scif_controller_handle));
436 
437 	scic_controller_enable_interrupts(
438 	    scif_controller_get_scic_handle(controller->scif_controller_handle));
439 }
440 
/**
 * @brief Note that a domain has finished its initial discovery.  When the
 *        last domain of a controller completes, attach the controller to
 *        CAM and either start the next controller or, if this was the last
 *        one, release the boot-time config hook.
 *
 * @param[in] isci_controller The controller owning the domain.
 * @param[in] isci_domain     The domain that completed discovery.
 *
 * @return none
 */
void isci_controller_domain_discovery_complete(
    struct ISCI_CONTROLLER *isci_controller, struct ISCI_DOMAIN *isci_domain)
{
	if (isci_controller->sim == NULL)
	{
		/* Controller has not been attached to CAM yet.  We'll clear
		 *  the discovery bit for this domain, then check if all bits
		 *  are now clear.  That would indicate that all domains are
		 *  done with discovery and we can then attach the controller
		 *  to CAM.
		 */

		isci_controller->initial_discovery_mask &=
		    ~(1 << isci_domain->index);

		if (isci_controller->initial_discovery_mask == 0) {
			struct isci_softc *driver = isci_controller->isci;
			uint8_t next_index = isci_controller->index + 1;

			isci_controller_attach_to_cam(isci_controller);

			if (next_index < driver->controller_count) {
				/*  There are more controllers that need to
				 *   start.  So start the next one.
				 */
				isci_controller_start(
				    &driver->controllers[next_index]);
			}
			else
			{
				/* All controllers have been started and completed discovery.
				 *  Disestablish the config hook, which will signal to the
				 *  kernel during boot that it is safe to try to find and
				 *  mount the root partition.
				 */
				config_intrhook_disestablish(
				    &driver->config_hook);
			}
		}
	}
}
482 
/**
 * @brief Register this controller with CAM: allocate the device queue and
 *        SIM, register the bus, and create a wildcard path for it.
 *
 * @param[in] controller The controller to attach.
 *
 * @return 0 on success, -1 on any CAM setup failure.
 */
int isci_controller_attach_to_cam(struct ISCI_CONTROLLER *controller)
{
	struct isci_softc *isci = controller->isci;
	device_t parent = device_get_parent(isci->device);
	int unit = device_get_unit(isci->device);
	struct cam_devq *isci_devq = cam_simq_alloc(controller->sim_queue_depth);

	if(isci_devq == NULL) {
		isci_log_message(0, "ISCI", "isci_devq is NULL \n");
		return (-1);
	}

	controller->sim = cam_sim_alloc(isci_action, isci_poll, "isci",
	    controller, unit, &controller->lock, controller->sim_queue_depth,
	    controller->sim_queue_depth, isci_devq);

	if(controller->sim == NULL) {
		isci_log_message(0, "ISCI", "cam_sim_alloc... fails\n");
		cam_simq_free(isci_devq);
		return (-1);
	}

	/* NOTE(review): the two error paths below call mtx_unlock() but no
	 *  mtx_lock() appears in this function, and the two earlier error
	 *  returns above do not unlock.  Presumably the caller holds
	 *  controller->lock -- confirm, and make the unlock behavior
	 *  consistent across all return paths.
	 */
	if(xpt_bus_register(controller->sim, parent, controller->index)
	    != CAM_SUCCESS) {
		isci_log_message(0, "ISCI", "xpt_bus_register...fails \n");
		cam_sim_free(controller->sim, TRUE);
		mtx_unlock(&controller->lock);
		return (-1);
	}

	if(xpt_create_path(&controller->path, NULL,
	    cam_sim_path(controller->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		isci_log_message(0, "ISCI", "xpt_create_path....fails\n");
		xpt_bus_deregister(cam_sim_path(controller->sim));
		cam_sim_free(controller->sim, TRUE);
		mtx_unlock(&controller->lock);
		return (-1);
	}

	return (0);
}
525 
void isci_poll(struct cam_sim *sim)
{
	/* Polled-mode entry point from CAM: run the interrupt handler
	 *  directly against this SIM's controller.
	 */
	isci_interrupt_poll_handler(
	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim));
}
533 
/**
 * @brief CAM action entry point -- dispatch a CCB from the transport layer
 *        to the appropriate handler for its function code.
 *
 * @param[in] sim The SIM this request arrived on.
 * @param[in] ccb The CCB to process.
 *
 * @return none
 */
void isci_action(struct cam_sim *sim, union ccb *ccb)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

	switch ( ccb->ccb_h.func_code ) {
	case XPT_PATH_INQ:
		{
			/* Report the HBA's capabilities and identity. */
			struct ccb_pathinq *cpi = &ccb->cpi;
			int bus = cam_sim_bus(sim);
			ccb->ccb_h.ccb_sim_ptr = sim;
			cpi->version_num = 1;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->target_sprt = 0;
			cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN;
			cpi->hba_eng_cnt = 0;
			cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1;
			cpi->max_lun = ISCI_MAX_LUN;
#if __FreeBSD_version >= 704100
			cpi->maxio = isci_io_request_get_max_io_size();
#endif
			cpi->unit_number = cam_sim_unit(sim);
			cpi->bus_id = bus;
			/* Initiator id one past max_target keeps it out of
			 *  the scannable target range.
			 */
			cpi->initiator_id = SCI_MAX_REMOTE_DEVICES;
			cpi->base_transfer_speed = 300000;
			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
			strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
			strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
			cpi->transport = XPORT_SAS;
			cpi->transport_version = 0;
			cpi->protocol = PROTO_SCSI;
			cpi->protocol_version = SCSI_REV_SPC2;
			cpi->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
		}
		break;
	case XPT_GET_TRAN_SETTINGS:
		{
			struct ccb_trans_settings *general_settings = &ccb->cts;
			struct ccb_trans_settings_sas *sas_settings =
			    &general_settings->xport_specific.sas;
			struct ccb_trans_settings_scsi *scsi_settings =
			    &general_settings->proto_specific.scsi;
			struct ISCI_REMOTE_DEVICE *remote_device;

			/* target_id is bounded by the max_target reported in
			 *  XPT_PATH_INQ above; a NULL slot means no device.
			 */
			remote_device = controller->remote_device[ccb->ccb_h.target_id];

			if (remote_device == NULL) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
				break;
			}

			general_settings->protocol = PROTO_SCSI;
			general_settings->transport = XPORT_SAS;
			general_settings->protocol_version = SCSI_REV_SPC2;
			general_settings->transport_version = 0;
			scsi_settings->valid = CTS_SCSI_VALID_TQ;
			scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB;
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;

			sas_settings->bitrate =
			    isci_remote_device_get_bitrate(remote_device);

			/* Only advertise a link speed if one is known. */
			if (sas_settings->bitrate != 0)
				sas_settings->valid = CTS_SAS_VALID_SPEED;

			xpt_done(ccb);
		}
		break;
	case XPT_SCSI_IO:
		isci_io_request_execute_scsi_io(ccb, controller);
		break;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		isci_io_request_execute_smp_io(ccb, controller);
		break;
#endif
	case XPT_SET_TRAN_SETTINGS:
		/* No settable transport parameters; report success. */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		xpt_done(ccb);
		break;
	case XPT_RESET_DEV:
		{
			struct ISCI_REMOTE_DEVICE *remote_device =
			    controller->remote_device[ccb->ccb_h.target_id];

			if (remote_device != NULL)
				isci_remote_device_reset(remote_device, ccb);
			else {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
			}
		}
		break;
	case XPT_RESET_BUS:
		/* No bus-level reset on SAS; complete immediately. */
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	default:
		isci_log_message(0, "ISCI", "Unhandled func_code 0x%x\n",
		    ccb->ccb_h.func_code);
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
653 
654