/* xref: /freebsd/sys/dev/isci/isci_io_request.c (revision b4e38a41f584ad4391c04b8cfec81f46176b18b0) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/isci/isci.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/isci/scil/intel_sas.h>

#include <dev/isci/scil/sci_util.h>

#include <dev/isci/scil/scif_io_request.h>
#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_remote_device.h>
#include <dev/isci/scil/scif_user_callback.h>

#include <dev/isci/scil/scic_io_request.h>
#include <dev/isci/scil/scic_user_callback.h>

/**
 * @brief This user callback will inform the user that an IO request has
 *        completed.
 *
 * @param[in]  scif_controller This parameter specifies the controller on
 *             which the IO request is completing.
 * @param[in]  remote_device This parameter specifies the remote device on
 *             which this request is completing.
 * @param[in]  io_request This parameter specifies the IO request that has
 *             completed.
 * @param[in]  completion_status This parameter specifies the results of
 *             the IO request operation.  SCI_IO_SUCCESS indicates
 *             successful completion.
 *
 * @return none
 */
void
scif_cb_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    SCI_IO_REQUEST_HANDLE_T io_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)sci_object_get_association(io_request);

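	/*
	 * Notify the SCI framework that this IO is complete so it can
	 *  release its internal resources, then finish CAM-level processing
	 *  of the associated CCB.
	 */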
	scif_controller_complete_io(scif_controller, remote_device, io_request);
	isci_io_request_complete(scif_controller, remote_device, isci_request,
	    completion_status);
}

void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller;
	struct ISCI_REMOTE_DEVICE *isci_remote_device;
	union ccb *ccb;
	BOOL complete_ccb;
	struct ccb_scsiio *csio;

	complete_ccb = TRUE;
	isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller);
	isci_remote_device =
		(struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device);

	ccb = isci_request->ccb;
	csio = &ccb->csio;
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;

	switch (completion_status) {
	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
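		/*
		 * For SMP requests, copy the response frame out of the SCIL
		 *  request object and into the CCB's response buffer.
		 */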
		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
			void *smp_response =
			    scif_io_request_get_response_iu_address(
			        isci_request->sci_object);

			memcpy(ccb->smpio.smp_response, smp_response,
			    ccb->smpio.smp_response_len);
		}
		ccb->ccb_h.status |= CAM_REQ_CMP;
		break;

	case SCI_IO_SUCCESS_IO_DONE_EARLY:
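		/*
		 * The IO completed successfully but transferred fewer bytes
		 *  than requested; report the residual to CAM.
		 */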
		ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->csio.resid = ccb->csio.dxfer_len -
		    scif_io_request_get_number_of_bytes_transferred(
		        isci_request->sci_object);
		break;

	case SCI_IO_FAILURE_RESPONSE_VALID:
	{
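		/*
		 * The target returned a SCSI status with sense data in the
		 *  response IU.  Copy the sense data into the CCB, mark the
		 *  autosense data as valid, and hand the SCSI status back
		 *  to CAM.
		 */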
		SCI_SSP_RESPONSE_IU_T * response_buffer;
		uint32_t sense_length;
		int error_code, sense_key, asc, ascq;

		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
		    scif_io_request_get_response_iu_address(
		        isci_request->sci_object);

		sense_length = sci_ssp_get_sense_data_length(
		    response_buffer->sense_data_length);

		sense_length = MIN(csio->sense_len, sense_length);

		memcpy(&csio->sense_data, response_buffer->data, sense_length);

		csio->sense_resid = csio->sense_len - sense_length;
		csio->scsi_status = response_buffer->status;
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		scsi_extract_sense( &csio->sense_data, &error_code, &sense_key,
		    &asc, &ascq );
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0],
		    csio->scsi_status, sense_key, asc, ascq);
		break;
	}

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		isci_remote_device_reset(isci_remote_device, NULL);
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(0, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x remote device reset required\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0]);
		break;

	case SCI_IO_FAILURE_TERMINATED:
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(0, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0]);
		break;

	case SCI_IO_FAILURE_INVALID_STATE:
	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
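		/*
		 * The IO could not be started right now.  Do not complete
		 *  the CCB back to CAM; it will be placed on (or left on)
		 *  the device's internal queue and retried later.
		 */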
		complete_ccb = FALSE;
		break;

	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;

	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
		{
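			/*
			 * All NCQ tags are in use.  Adjust the number of
			 *  device openings to the device's maximum queue
			 *  depth via XPT_REL_SIMQ / RELSIM_ADJUST_OPENINGS,
			 *  and requeue this CCB internally.
			 */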
			struct ccb_relsim ccb_relsim;
			struct cam_path *path;

			xpt_create_path(&path, NULL,
			    cam_sim_path(isci_controller->sim),
			    isci_remote_device->index, 0);

			xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
			ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
			ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
			ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
			ccb_relsim.openings =
			    scif_remote_device_get_max_queue_depth(remote_device);
			xpt_action((union ccb *)&ccb_relsim);
			xpt_free_path(path);
			complete_ccb = FALSE;
		}
		break;

	case SCI_IO_FAILURE:
	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
	default:
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0],
		    completion_status);
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		break;
	}

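	/*
	 * The hardware is done with this request: cancel the timeout, sync
	 *  and unload the DMA map, and return the request object to the
	 *  controller's pool before completing or requeueing the CCB.
	 */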
	callout_stop(&isci_request->parent.timer);
	bus_dmamap_sync(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map);

	isci_request->ccb = NULL;

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_request);

	if (complete_ccb) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/* ccb will be completed with some type of non-success
			 *  status.  So temporarily freeze the queue until the
			 *  upper layers can act on the status.  The
			 *  CAM_DEV_QFRZN flag will then release the queue
			 *  after the status is acted upon.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
		}

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			TAILQ_REMOVE(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

			/*
			 * This CCB that was in the queue was completed, so
			 *  set the in_progress pointer to NULL denoting that
			 *  we can retry another CCB from the queue.  We only
			 *  allow one CCB at a time from the queue to be
			 *  in progress so that we can effectively maintain
			 *  ordering.
			 */
			isci_remote_device->queued_ccb_in_progress = NULL;
		}

		if (isci_remote_device->frozen_lun_mask != 0) {
			isci_remote_device_release_device_queue(isci_remote_device);
		}

		xpt_done(ccb);

		if (isci_controller->is_frozen == TRUE) {
			isci_controller->is_frozen = FALSE;
			xpt_release_simq(isci_controller->sim, TRUE);
		}
	} else {
		isci_remote_device_freeze_lun_queue(isci_remote_device,
		    ccb->ccb_h.target_lun);

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			/*
			 *  Do nothing, CCB is already on the device's queue.
			 *   We leave it on the queue, to be retried again
			 *   next time a CCB on this device completes, or we
			 *   get a ready notification for this device.
			 */
			isci_log_message(1, "ISCI", "already queued %p %x\n",
			    ccb, scsiio_cdb_ptr(csio)[0]);

			isci_remote_device->queued_ccb_in_progress = NULL;

		} else {
			isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
			    scsiio_cdb_ptr(csio)[0]);
			ccb->ccb_h.status |= CAM_SIM_QUEUED;

			TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
		}
	}
}

/**
 * @brief This callback method asks the user to provide the physical
 *        address for the supplied virtual address when building an
 *        io request object.
 *
 * @param[in] controller This parameter is the core controller object
 *            handle.
 * @param[in] io_request This parameter is the io request object handle
 *            for which the physical address is being requested.
 * @param[in] virtual_address This parameter is the virtual address which
 *            is to be returned as a physical address.
 * @param[out] physical_address The physical address for the supplied virtual
 *             address.
 *
 * @return None.
 */
void
scic_cb_io_request_get_physical_address(SCI_CONTROLLER_HANDLE_T	controller,
    SCI_IO_REQUEST_HANDLE_T io_request, void *virtual_address,
    SCI_PHYSICAL_ADDRESS *physical_address)
{
	SCI_IO_REQUEST_HANDLE_T scif_request =
	    sci_object_get_association(io_request);
	struct ISCI_REQUEST *isci_request =
	    sci_object_get_association(scif_request);

	if(isci_request != NULL) {
		/* isci_request is not NULL, meaning this is a request initiated
		 *  by CAM or the isci layer (i.e. device reset for I/O
		 *  timeout).  Therefore we can calculate the physical address
		 *  based on the address we stored in the struct ISCI_REQUEST
		 *  object.
		 */
		*physical_address = isci_request->physical_address +
		    (uintptr_t)virtual_address -
		    (uintptr_t)isci_request;
	} else {
		/* isci_request is NULL, meaning this is a request generated
		 *  internally by SCIL (i.e. for SMP requests or NCQ error
		 *  recovery).  Therefore we calculate the physical address
		 *  based on the controller's uncached controller memory buffer,
		 *  since we know that this is what SCIL uses for internal
		 *  framework requests.
		 */
		SCI_CONTROLLER_HANDLE_T scif_controller =
		    (SCI_CONTROLLER_HANDLE_T) sci_object_get_association(controller);
		struct ISCI_CONTROLLER *isci_controller =
		    (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
		U64 virt_addr_offset = (uintptr_t)virtual_address -
		    (U64)isci_controller->uncached_controller_memory.virtual_address;

		*physical_address =
		    isci_controller->uncached_controller_memory.physical_address
		    + virt_addr_offset;
	}
}

/**
 * @brief This callback method asks the user to provide the address for
 *        the command descriptor block (CDB) associated with this IO request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the virtual address of the CDB.
 */
void *
scif_cb_io_request_get_cdb_address(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (scsiio_cdb_ptr(&isci_request->ccb->csio));
}

/**
 * @brief This callback method asks the user to provide the length of
 *        the command descriptor block (CDB) associated with this IO request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the length of the CDB.
 */
uint32_t
scif_cb_io_request_get_cdb_length(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.cdb_len);
}

/**
 * @brief This callback method asks the user to provide the Logical Unit (LUN)
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the LUN associated with this request.
 */
uint32_t
scif_cb_io_request_get_lun(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->ccb_h.target_lun);
}

/**
 * @brief This callback method asks the user to provide the task attribute
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the task attribute associated with this
 *         IO request.
 */
uint32_t
scif_cb_io_request_get_task_attribute(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
	uint32_t task_attribute;

	if((isci_request->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0)
		switch(isci_request->ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			task_attribute = SCI_SAS_HEAD_OF_QUEUE_ATTRIBUTE;
			break;

		case MSG_ORDERED_Q_TAG:
			task_attribute = SCI_SAS_ORDERED_ATTRIBUTE;
			break;

		case MSG_ACA_TASK:
			task_attribute = SCI_SAS_ACA_ATTRIBUTE;
			break;

		default:
			task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;
			break;
		}
	else
		task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;

	return (task_attribute);
}

/**
 * @brief This callback method asks the user to provide the command priority
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the command priority associated with this
 *         IO request.
 */
uint32_t
scif_cb_io_request_get_command_priority(void * scif_user_io_request)
{
	return (0);
}

/**
 * @brief This method simply returns the virtual address associated
 *        with the scsi_io and byte_offset supplied parameters.
 *
 * @note This callback is not utilized in the fast path.  The expectation
 *       is that this method is utilized for items such as SCSI to ATA
 *       translation for commands like INQUIRY, READ CAPACITY, etc.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] byte_offset This parameter specifies the offset into the data
 *            buffers pointed to by the SGL.  The byte offset starts at 0
 *            and continues until the last byte pointed to by the last SGL
 *            element.
 *
 * @return A virtual address pointer to the location specified by the
 *         parameters.
 */
uint8_t *
scif_cb_io_request_get_virtual_address_from_sgl(void * scif_user_io_request,
    uint32_t byte_offset)
{
	struct ISCI_IO_REQUEST	*isci_request;
	union ccb		*ccb;

	isci_request = scif_user_io_request;
	ccb = isci_request->ccb;

	/*
	 * This callback is only invoked for SCSI/ATA translation of
	 *  PIO commands such as INQUIRY and READ_CAPACITY, to allow
	 *  the driver to write the translated data directly into the
	 *  data buffer.  It is never invoked for READ/WRITE commands.
	 *  The driver currently assumes only READ/WRITE commands will
	 *  be unmapped.
	 *
	 * As a safeguard against future changes to unmapped commands,
	 *  add an explicit panic here should the DATA_MASK != VADDR.
	 *  Otherwise, we would return some garbage pointer back to the
	 *  caller which would result in a panic or more subtle data
	 *  corruption later on.
	 */
	if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
		panic("%s: requesting pointer into unmapped ccb", __func__);

	return (ccb->csio.data_ptr + byte_offset);
}

/**
 * @brief This callback method asks the user to provide the number of
 *        bytes to be transferred as part of this request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the number of payload data bytes to be
 *         transferred for this IO request.
 */
uint32_t
scif_cb_io_request_get_transfer_length(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.dxfer_len);
}

/**
 * @brief This callback method asks the user to provide the data direction
 *        for this request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the value of SCI_IO_REQUEST_DATA_OUT,
 *         SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA.
 */
SCI_IO_REQUEST_DATA_DIRECTION
scif_cb_io_request_get_data_direction(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	switch (isci_request->ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		return (SCI_IO_REQUEST_DATA_IN);
	case CAM_DIR_OUT:
		return (SCI_IO_REQUEST_DATA_OUT);
	default:
		return (SCI_IO_REQUEST_NO_DATA);
	}
}

/**
 * @brief This callback method asks the user to provide the address
 *        to where the next Scatter-Gather Element is located.
 *
 * Details regarding usage:
 *   - Regarding the first SGE: the user should initialize an index,
 *     or a pointer, prior to construction of the request that will
 *     reference the very first scatter-gather element.  This is
 *     important since this method is called for every scatter-gather
 *     element, including the first element.
 *   - Regarding the last SGE: the user should return NULL from this
 *     method when this method is called and the SGL has exhausted
 *     all elements.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] current_sge_address This parameter specifies the address for
 *            the current SGE (i.e. the one that has just been processed).
 * @param[out] next_sge An address specifying the location for the next scatter
 *             gather element to be processed.
 *
 * @return None.
 */
void
scif_cb_io_request_get_next_sge(void * scif_user_io_request,
    void * current_sge_address, void ** next_sge)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

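	/*
	 * Hand back the next bus_dma segment from the request's S/G list,
	 *  or NULL once all of the segments have been consumed.
	 */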
	if (isci_request->current_sge_index == isci_request->num_segments)
		*next_sge = NULL;
	else {
		bus_dma_segment_t *sge =
		    &isci_request->sge[isci_request->current_sge_index];

		isci_request->current_sge_index++;
		*next_sge = sge;
	}
}

/**
 * @brief This callback method asks the user to provide the contents of the
 *        "address" field in the Scatter-Gather Element.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] sge_address This parameter specifies the address for the
 *            SGE from which to retrieve the address field.
 *
 * @return A physical address specifying the contents of the SGE's address
 *         field.
 */
SCI_PHYSICAL_ADDRESS
scif_cb_sge_get_address_field(void *scif_user_io_request, void *sge_address)
{
	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;

	return ((SCI_PHYSICAL_ADDRESS)sge->ds_addr);
}

/**
 * @brief This callback method asks the user to provide the contents of the
 *        "length" field in the Scatter-Gather Element.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] sge_address This parameter specifies the address for the
 *            SGE from which to retrieve the length field.
 *
 * @return This method returns the length field specified inside the SGE
 *         referenced by the sge_address parameter.
 */
uint32_t
scif_cb_sge_get_length_field(void *scif_user_io_request, void *sge_address)
{
	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;

	return ((uint32_t)sge->ds_len);
}

void
isci_request_construct(struct ISCI_REQUEST *request,
    SCI_CONTROLLER_HANDLE_T scif_controller_handle,
    bus_dma_tag_t io_buffer_dma_tag, bus_addr_t physical_address)
{

	request->controller_handle = scif_controller_handle;
	request->dma_tag = io_buffer_dma_tag;
	request->physical_address = physical_address;
	bus_dmamap_create(request->dma_tag, 0, &request->dma_map);
	callout_init(&request->timer, 1);
}

static void
isci_io_request_construct(void *arg, bus_dma_segment_t *seg, int nseg,
    int error)
{
	union ccb *ccb;
	struct ISCI_IO_REQUEST *io_request = (struct ISCI_IO_REQUEST *)arg;
	SCI_REMOTE_DEVICE_HANDLE_T *device = io_request->parent.remote_device_handle;
	SCI_STATUS status;

	io_request->num_segments = nseg;
	io_request->sge = seg;
	ccb = io_request->ccb;

	if (error != 0) {
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

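	/*
	 * The SCIF request object is constructed in the memory immediately
	 *  following this driver's ISCI_IO_REQUEST structure.
	 */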
	status = scif_io_request_construct(
	    io_request->parent.controller_handle,
	    io_request->parent.remote_device_handle,
	    SCI_CONTROLLER_INVALID_IO_TAG, (void *)io_request,
	    (void *)((char*)io_request + sizeof(struct ISCI_IO_REQUEST)),
	    &io_request->sci_object);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	bus_dmamap_sync(io_request->parent.dma_tag, io_request->parent.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	status = (SCI_STATUS)scif_controller_start_io(
	    io_request->parent.controller_handle, device,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

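	/*
	 * Arm the per-IO timeout unless CAM requested an infinite timeout.
	 *  ccb_h.timeout is expressed in milliseconds.
	 */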
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset_sbt(&io_request->parent.timer,
		    SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout,
		    io_request, 0);
}

void
isci_io_request_execute_scsi_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	struct ISCI_REMOTE_DEVICE *device =
	    controller->remote_device[target_id];
	int error;

	if (device == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

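	/*
	 * If no request objects are available, freeze the SIM queue and ask
	 *  CAM to requeue this CCB; the queue is released when an
	 *  outstanding request completes.
	 */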
	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->current_sge_index = 0;
	io_request->parent.remote_device_handle = device->sci_object;

	error = bus_dmamap_load_ccb(io_request->parent.dma_tag,
	    io_request->parent.dma_map, ccb,
	    isci_io_request_construct, io_request, 0x0);
	/* If BUSDMA hits a temporary resource shortage, it returns
	 *  EINPROGRESS and invokes the callback later, once resources
	 *  become available.  CCB processing then continues from the
	 *  callback, and CAM_RELEASE_SIMQ unfreezes the simq when the
	 *  CCB is completed.
	 */
	if (error == EINPROGRESS) {
		xpt_freeze_simq(controller->sim, 1);
		ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
	}
}

void
isci_io_request_timeout(void *arg)
{
	struct ISCI_IO_REQUEST *request = (struct ISCI_IO_REQUEST *)arg;
	struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *)
		sci_object_get_association(request->parent.remote_device_handle);
	struct ISCI_CONTROLLER *controller = remote_device->domain->controller;

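	/*
	 * Handle an IO timeout by resetting the remote device; the reset is
	 *  issued under the controller lock.
	 */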
	mtx_lock(&controller->lock);
	isci_remote_device_reset(remote_device, NULL);
	mtx_unlock(&controller->lock);
}

/**
 * @brief This callback method gets the size of and pointer to the buffer
 *         (if any) containing the request buffer for an SMP request.
 *
 * @param[in]  core_request This parameter specifies the SCI core's request
 *             object associated with the SMP request.
 * @param[out] smp_request_buffer This parameter returns a pointer to the
 *             payload portion of the SMP request - i.e. everything after
 *             the SMP request header.
 *
 * @return Size of the request buffer in bytes.  This does *not* include
 *          the size of the SMP request header.
 */
static uint32_t
smp_io_request_cb_get_request_buffer(SCI_IO_REQUEST_HANDLE_T core_request,
    uint8_t ** smp_request_buffer)
{
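	/*
	 * Walk up two association levels: core request -> framework (SCIF)
	 *  request -> driver (ISCI) request.
	 */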
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));

	*smp_request_buffer = isci_request->ccb->smpio.smp_request +
	    sizeof(SMP_REQUEST_HEADER_T);

	return (isci_request->ccb->smpio.smp_request_len -
	    sizeof(SMP_REQUEST_HEADER_T));
}

/**
 * @brief This callback method gets the SMP function for an SMP request.
 *
 * @param[in]  core_request This parameter specifies the SCI core's request
 *             object associated with the SMP request.
 *
 * @return SMP function for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_function(SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->function);
}

/**
 * @brief This callback method gets the SMP frame type for an SMP request.
 *
 * @param[in]  core_request This parameter specifies the SCI core's request
 *             object associated with the SMP request.
 *
 * @return SMP frame type for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_frame_type(SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->smp_frame_type);
}

/**
 * @brief This callback method gets the allocated response length for an SMP request.
 *
 * @param[in]  core_request This parameter specifies the SCI core's request
 *             object associated with the SMP request.
 *
 * @return Allocated response length for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_allocated_response_length(
    SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->allocated_response_length);
}

static SCI_STATUS
isci_smp_request_construct(struct ISCI_IO_REQUEST *request)
{
	SCI_STATUS status;
	SCIC_SMP_PASSTHRU_REQUEST_CALLBACKS_T callbacks;

	status = scif_request_construct(request->parent.controller_handle,
	    request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG,
	    (void *)request,
	    (void *)((char*)request + sizeof(struct ISCI_IO_REQUEST)),
	    &request->sci_object);

	if (status == SCI_SUCCESS) {
		callbacks.scic_cb_smp_passthru_get_request =
		    &smp_io_request_cb_get_request_buffer;
		callbacks.scic_cb_smp_passthru_get_function =
		    &smp_io_request_cb_get_function;
		callbacks.scic_cb_smp_passthru_get_frame_type =
		    &smp_io_request_cb_get_frame_type;
		callbacks.scic_cb_smp_passthru_get_allocated_response_length =
		    &smp_io_request_cb_get_allocated_response_length;

		/* create the smp passthrough part of the io request */
		status = scic_io_request_construct_smp_pass_through(
		    scif_io_request_get_scic_handle(request->sci_object),
		    &callbacks);
	}

	return (status);
}

void
isci_io_request_execute_smp_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	SCI_STATUS status;
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle;
	struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id];

	/* SMP commands are sent to an end device, because SMP devices are not
	 *  exposed to the kernel.  It is our responsibility to use this method
	 *  to get the SMP device that contains the specified end device.  If
	 *  the device is direct-attached, the handle will come back NULL, and
	 *  we'll just fail the SMP_IO with DEV_NOT_THERE.
	 */
	scif_remote_device_get_containing_device(end_device->sci_object,
	    &smp_device_handle);

	if (smp_device_handle == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(end_device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->parent.remote_device_handle = smp_device_handle;

	status = isci_smp_request_construct(io_request);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	status = (SCI_STATUS) scif_controller_start_io(
	    controller->scif_controller_handle, smp_device_handle,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset_sbt(&io_request->parent.timer,
		    SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout,
		    request, 0);
}