/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/isci/isci.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/isci/scil/intel_sas.h>

#include <dev/isci/scil/sci_util.h>

#include <dev/isci/scil/scif_io_request.h>
#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_remote_device.h>
#include <dev/isci/scil/scif_user_callback.h>

#include <dev/isci/scil/scic_io_request.h>
#include <dev/isci/scil/scic_user_callback.h>

/**
 * @brief This user callback will inform the user that an IO request has
 *        completed.
 *
 * @param[in]  scif_controller This parameter specifies the controller on
 *             which the IO request is completing.
 * @param[in]  remote_device This parameter specifies the remote device on
 *             which this request is completing.
 * @param[in]  io_request This parameter specifies the IO request that has
 *             completed.
 * @param[in]  completion_status This parameter specifies the results of
 *             the IO request operation.  SCI_IO_SUCCESS indicates
 *             successful completion.
 *
 * @return none
 */
void
scif_cb_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    SCI_IO_REQUEST_HANDLE_T io_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)sci_object_get_association(io_request);

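	/* Retire the request in the SCI framework first, then let the
	 *  driver finish the CCB and recycle the request object. */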
	scif_controller_complete_io(scif_controller, remote_device, io_request);
	isci_io_request_complete(scif_controller, remote_device, isci_request,
	    completion_status);
}

void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller;
	struct ISCI_REMOTE_DEVICE *isci_remote_device;
	union ccb *ccb;
	BOOL complete_ccb;
	struct ccb_scsiio *csio;

	complete_ccb = TRUE;
	isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller);
	isci_remote_device =
		(struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device);

	ccb = isci_request->ccb;
	csio = &ccb->csio;
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;

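	/*
	 * Translate the SCI completion status into a CAM status.  Most
	 *  failure cases complete the CCB with an error status; a few
	 *  (invalid state, resource shortages, NCQ tag exhaustion) clear
	 *  complete_ccb so the CCB is instead queued on the remote device
	 *  and retried later.
	 */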
	switch (completion_status) {
	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
#if __FreeBSD_version >= 900026
		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
			void *smp_response =
			    scif_io_request_get_response_iu_address(
			        isci_request->sci_object);

			memcpy(ccb->smpio.smp_response, smp_response,
			    ccb->smpio.smp_response_len);
		}
#endif
		ccb->ccb_h.status |= CAM_REQ_CMP;
		break;

	case SCI_IO_SUCCESS_IO_DONE_EARLY:
		ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->csio.resid = ccb->csio.dxfer_len -
		    scif_io_request_get_number_of_bytes_transferred(
		        isci_request->sci_object);
		break;

	case SCI_IO_FAILURE_RESPONSE_VALID:
	{
		SCI_SSP_RESPONSE_IU_T * response_buffer;
		uint32_t sense_length;
		int error_code, sense_key, asc, ascq;

		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
		    scif_io_request_get_response_iu_address(
		        isci_request->sci_object);

		sense_length = sci_ssp_get_sense_data_length(
		    response_buffer->sense_data_length);

		sense_length = MIN(csio->sense_len, sense_length);

		memcpy(&csio->sense_data, response_buffer->data, sense_length);

		csio->sense_resid = csio->sense_len - sense_length;
		csio->scsi_status = response_buffer->status;
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		scsi_extract_sense( &csio->sense_data, &error_code, &sense_key,
		    &asc, &ascq );
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0],
		    csio->scsi_status, sense_key, asc, ascq);
		break;
	}

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		isci_remote_device_reset(isci_remote_device, NULL);
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(0, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x remote device reset required\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0]);
		break;

	case SCI_IO_FAILURE_TERMINATED:
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(0, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0]);
		break;

	case SCI_IO_FAILURE_INVALID_STATE:
	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
		complete_ccb = FALSE;
		break;

	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;

	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
		{
			struct ccb_relsim ccb_relsim;
			struct cam_path *path;

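			/* The device has run out of NCQ tags.  Ask CAM to
			 *  reduce the number of openings to the device's
			 *  actual queue depth, then requeue this CCB
			 *  instead of completing it with an error. */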
			xpt_create_path(&path, NULL,
			    cam_sim_path(isci_controller->sim),
			    isci_remote_device->index, 0);

			xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
			ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
			ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
			ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
			ccb_relsim.openings =
			    scif_remote_device_get_max_queue_depth(remote_device);
			xpt_action((union ccb *)&ccb_relsim);
			xpt_free_path(path);
			complete_ccb = FALSE;
		}
		break;

	case SCI_IO_FAILURE:
	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
	default:
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0],
		    completion_status);
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		break;
	}

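	/* The hardware is finished with this request: stop its timeout
	 *  callout, sync and unload its DMA map, and return the request
	 *  to the controller's pool before disposing of the CCB. */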
	callout_stop(&isci_request->parent.timer);
	bus_dmamap_sync(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map);

	isci_request->ccb = NULL;

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_request);

	if (complete_ccb) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/* The CCB is being completed with a non-success
			 *  status, so temporarily freeze the device queue
			 *  until the upper layers can act on the status.
			 *  Setting CAM_DEV_QFRZN tells those layers to
			 *  release the queue once the status has been
			 *  handled.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
		}

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			TAILQ_REMOVE(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

			/*
			 * This CCB that was in the queue was completed, so
			 *  set the in_progress pointer to NULL denoting that
			 *  we can retry another CCB from the queue.  We only
			 *  allow one CCB at a time from the queue to be
			 *  in progress so that we can effectively maintain
			 *  ordering.
			 */
			isci_remote_device->queued_ccb_in_progress = NULL;
		}

		if (isci_remote_device->frozen_lun_mask != 0) {
			isci_remote_device_release_device_queue(isci_remote_device);
		}

		xpt_done(ccb);

		if (isci_controller->is_frozen == TRUE) {
			isci_controller->is_frozen = FALSE;
			xpt_release_simq(isci_controller->sim, TRUE);
		}
	} else {
		isci_remote_device_freeze_lun_queue(isci_remote_device,
		    ccb->ccb_h.target_lun);

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			/*
			 *  Do nothing, CCB is already on the device's queue.
			 *   We leave it on the queue, to be retried again
			 *   next time a CCB on this device completes, or we
			 *   get a ready notification for this device.
			 */
			isci_log_message(1, "ISCI", "already queued %p %x\n",
			    ccb, scsiio_cdb_ptr(csio)[0]);

			isci_remote_device->queued_ccb_in_progress = NULL;

		} else {
			isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
			    scsiio_cdb_ptr(csio)[0]);
			ccb->ccb_h.status |= CAM_SIM_QUEUED;

			TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
		}
	}
}

/**
 * @brief This callback method asks the user to provide the physical
 *        address for the supplied virtual address when building an
 *        io request object.
 *
 * @param[in] controller This parameter is the core controller object
 *            handle.
 * @param[in] io_request This parameter is the io request object handle
 *            for which the physical address is being requested.
 * @param[in] virtual_address This parameter is the virtual address which
 *            is to be returned as a physical address.
 * @param[out] physical_address The physical address for the supplied virtual
 *             address.
 *
 * @return None.
 */
void
scic_cb_io_request_get_physical_address(SCI_CONTROLLER_HANDLE_T	controller,
    SCI_IO_REQUEST_HANDLE_T io_request, void *virtual_address,
    SCI_PHYSICAL_ADDRESS *physical_address)
{
	SCI_IO_REQUEST_HANDLE_T scif_request =
	    sci_object_get_association(io_request);
	struct ISCI_REQUEST *isci_request =
	    sci_object_get_association(scif_request);

	if(isci_request != NULL) {
		/* isci_request is not NULL, meaning this is a request initiated
		 *  by CAM or the isci layer (e.g. a device reset for an I/O
		 *  timeout).  Therefore we can calculate the physical address
		 *  based on the address we stored in the struct ISCI_REQUEST
		 *  object.
		 */
		*physical_address = isci_request->physical_address +
		    (uintptr_t)virtual_address -
		    (uintptr_t)isci_request;
	} else {
		/* isci_request is NULL, meaning this is a request generated
		 *  internally by SCIL (e.g. for SMP requests or NCQ error
		 *  recovery).  Therefore we calculate the physical address
		 *  based on the controller's uncached controller memory buffer,
		 *  since we know that this is what SCIL uses for internal
		 *  framework requests.
		 */
		SCI_CONTROLLER_HANDLE_T scif_controller =
		    (SCI_CONTROLLER_HANDLE_T) sci_object_get_association(controller);
		struct ISCI_CONTROLLER *isci_controller =
		    (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
		U64 virt_addr_offset = (uintptr_t)virtual_address -
		    (U64)isci_controller->uncached_controller_memory.virtual_address;

		*physical_address =
		    isci_controller->uncached_controller_memory.physical_address
		    + virt_addr_offset;
	}
}

/**
 * @brief This callback method asks the user to provide the address for
 *        the command descriptor block (CDB) associated with this IO request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the virtual address of the CDB.
 */
void *
scif_cb_io_request_get_cdb_address(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (scsiio_cdb_ptr(&isci_request->ccb->csio));
}

/**
 * @brief This callback method asks the user to provide the length of
 *        the command descriptor block (CDB) associated with this IO request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the length of the CDB.
 */
uint32_t
scif_cb_io_request_get_cdb_length(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.cdb_len);
}

/**
 * @brief This callback method asks the user to provide the Logical Unit (LUN)
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the LUN associated with this request.
 */
uint32_t
scif_cb_io_request_get_lun(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->ccb_h.target_lun);
}

/**
 * @brief This callback method asks the user to provide the task attribute
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the task attribute associated with this
 *         IO request.
 */
uint32_t
scif_cb_io_request_get_task_attribute(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
	uint32_t task_attribute;

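	/* Map the CAM tag action, if one is supplied, onto the equivalent
	 *  SAS task attribute; untagged requests use a simple attribute. */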
	if((isci_request->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0)
		switch(isci_request->ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			task_attribute = SCI_SAS_HEAD_OF_QUEUE_ATTRIBUTE;
			break;

		case MSG_ORDERED_Q_TAG:
			task_attribute = SCI_SAS_ORDERED_ATTRIBUTE;
			break;

		case MSG_ACA_TASK:
			task_attribute = SCI_SAS_ACA_ATTRIBUTE;
			break;

		default:
			task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;
			break;
		}
	else
		task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;

	return (task_attribute);
}

/**
 * @brief This callback method asks the user to provide the command priority
 *        associated with this IO request.
 *
 * @note The contents of the value returned from this callback are defined
 *       by the protocol standard (e.g. T10 SAS specification).  Please
 *       refer to the transport command information unit description
 *       in the associated standard.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the command priority associated with this
 *         IO request.
 */
uint32_t
scif_cb_io_request_get_command_priority(void * scif_user_io_request)
{
	return (0);
}

/**
 * @brief This method simply returns the virtual address associated
 *        with the scsi_io and byte_offset supplied parameters.
 *
 * @note This callback is not utilized in the fast path.  The expectation
 *       is that this method is utilized for items such as SCSI to ATA
 *       translation for commands like INQUIRY, READ CAPACITY, etc.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] byte_offset This parameter specifies the offset into the data
 *            buffers pointed to by the SGL.  The byte offset starts at 0
 *            and continues until the last byte pointed to by the last SGL
 *            element.
 *
 * @return A virtual address pointer to the location specified by the
 *         parameters.
 */
uint8_t *
scif_cb_io_request_get_virtual_address_from_sgl(void * scif_user_io_request,
    uint32_t byte_offset)
{
	struct ISCI_IO_REQUEST	*isci_request;
	union ccb		*ccb;


	isci_request = scif_user_io_request;
	ccb = isci_request->ccb;

	/*
	 * This callback is only invoked for SCSI/ATA translation of
	 *  PIO commands such as INQUIRY and READ_CAPACITY, to allow
	 *  the driver to write the translated data directly into the
	 *  data buffer.  It is never invoked for READ/WRITE commands.
	 *  The driver currently assumes only READ/WRITE commands will
	 *  be unmapped.
	 *
	 * As a safeguard against future changes to unmapped commands,
	 *  add an explicit panic here should the DATA_MASK != VADDR.
	 *  Otherwise, we would return some garbage pointer back to the
	 *  caller which would result in a panic or more subtle data
	 *  corruption later on.
	 */
	if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
		panic("%s: requesting pointer into unmapped ccb", __func__);

	return (ccb->csio.data_ptr + byte_offset);
}

/**
 * @brief This callback method asks the user to provide the number of
 *        bytes to be transferred as part of this request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the number of payload data bytes to be
 *         transferred for this IO request.
 */
uint32_t
scif_cb_io_request_get_transfer_length(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	return (isci_request->ccb->csio.dxfer_len);

}

/**
 * @brief This callback method asks the user to provide the data direction
 *        for this request.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 *
 * @return This method returns the value of SCI_IO_REQUEST_DATA_OUT,
 *         SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA.
 */
SCI_IO_REQUEST_DATA_DIRECTION
scif_cb_io_request_get_data_direction(void * scif_user_io_request)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

	switch (isci_request->ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		return (SCI_IO_REQUEST_DATA_IN);
	case CAM_DIR_OUT:
		return (SCI_IO_REQUEST_DATA_OUT);
	default:
		return (SCI_IO_REQUEST_NO_DATA);
	}
}

/**
 * @brief This callback method asks the user to provide the address
 *        to where the next Scatter-Gather Element is located.
 *
 * Details regarding usage:
 *   - Regarding the first SGE: the user should initialize an index,
 *     or a pointer, prior to construction of the request that will
 *     reference the very first scatter-gather element.  This is
 *     important since this method is called for every scatter-gather
 *     element, including the first element.
 *   - Regarding the last SGE: the user should return NULL from this
 *     method when this method is called and the SGL has exhausted
 *     all elements.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] current_sge_address This parameter specifies the address for
 *            the current SGE (i.e. the one that has just been processed).
 * @param[out] next_sge An address specifying the location for the next scatter
 *             gather element to be processed.
 *
 * @return None.
 */
void
scif_cb_io_request_get_next_sge(void * scif_user_io_request,
    void * current_sge_address, void ** next_sge)
{
	struct ISCI_IO_REQUEST *isci_request =
	    (struct ISCI_IO_REQUEST *)scif_user_io_request;

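	/* The S/G list was filled in by busdma when the CCB's data buffer
	 *  was loaded; current_sge_index tracks how far SCIL has walked it.
	 *  Returning NULL tells SCIL that all elements have been consumed. */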
	if (isci_request->current_sge_index == isci_request->num_segments)
		*next_sge = NULL;
	else {
		bus_dma_segment_t *sge =
		    &isci_request->sge[isci_request->current_sge_index];

		isci_request->current_sge_index++;
		*next_sge = sge;
	}
}

/**
 * @brief This callback method asks the user to provide the contents of the
 *        "address" field in the Scatter-Gather Element.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] sge_address This parameter specifies the address for the
 *            SGE from which to retrieve the address field.
 *
 * @return A physical address specifying the contents of the SGE's address
 *         field.
 */
SCI_PHYSICAL_ADDRESS
scif_cb_sge_get_address_field(void *scif_user_io_request, void *sge_address)
{
	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;

	return ((SCI_PHYSICAL_ADDRESS)sge->ds_addr);
}

/**
 * @brief This callback method asks the user to provide the contents of the
 *        "length" field in the Scatter-Gather Element.
 *
 * @param[in] scif_user_io_request This parameter points to the user's
 *            IO request object.  It is a cookie that allows the user to
 *            provide the necessary information for this callback.
 * @param[in] sge_address This parameter specifies the address for the
 *            SGE from which to retrieve the length field.
 *
 * @return This method returns the length field specified inside the SGE
 *         referenced by the sge_address parameter.
 */
uint32_t
scif_cb_sge_get_length_field(void *scif_user_io_request, void *sge_address)
{
	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;

	return ((uint32_t)sge->ds_len);
}

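/*
 * Initialize the driver-common portion of a pre-allocated request: record
 *  the controller handle, DMA tag, and physical address for this slot,
 *  create its DMA map, and set up its timeout callout.
 */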
void
isci_request_construct(struct ISCI_REQUEST *request,
    SCI_CONTROLLER_HANDLE_T scif_controller_handle,
    bus_dma_tag_t io_buffer_dma_tag, bus_addr_t physical_address)
{

	request->controller_handle = scif_controller_handle;
	request->dma_tag = io_buffer_dma_tag;
	request->physical_address = physical_address;
	bus_dmamap_create(request->dma_tag, 0, &request->dma_map);
	callout_init(&request->timer, 1);
}

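/*
 * busdma callback invoked once the data buffer for a CCB has been mapped.
 *  It records the resulting S/G list, constructs the SCIL I/O request,
 *  starts it on the controller, and arms the timeout callout.  Failures
 *  are funneled through isci_io_request_complete() so the CCB is always
 *  finished through a single path.
 */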
static void
isci_io_request_construct(void *arg, bus_dma_segment_t *seg, int nseg,
    int error)
{
	union ccb *ccb;
	struct ISCI_IO_REQUEST *io_request = (struct ISCI_IO_REQUEST *)arg;
	SCI_REMOTE_DEVICE_HANDLE_T *device = io_request->parent.remote_device_handle;
	SCI_STATUS status;

	io_request->num_segments = nseg;
	io_request->sge = seg;
	ccb = io_request->ccb;

	if (error != 0) {
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	status = scif_io_request_construct(
	    io_request->parent.controller_handle,
	    io_request->parent.remote_device_handle,
	    SCI_CONTROLLER_INVALID_IO_TAG, (void *)io_request,
	    (void *)((char*)io_request + sizeof(struct ISCI_IO_REQUEST)),
	    &io_request->sci_object);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	bus_dmamap_sync(io_request->parent.dma_tag, io_request->parent.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	status = (SCI_STATUS)scif_controller_start_io(
	    io_request->parent.controller_handle, device,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset_sbt(&io_request->parent.timer,
		    SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout,
		    io_request, 0);
}

void
isci_io_request_execute_scsi_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	struct ISCI_REMOTE_DEVICE *device =
	    controller->remote_device[target_id];
	int error;

	if (device == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

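	/* Request objects come from a fixed-size pool set up at attach
	 *  time.  If the pool is empty, freeze the SIM queue and ask CAM
	 *  to requeue this CCB; the queue is released again when an
	 *  outstanding request completes and clears is_frozen. */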
	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->current_sge_index = 0;
	io_request->parent.remote_device_handle = device->sci_object;

	error = bus_dmamap_load_ccb(io_request->parent.dma_tag,
	    io_request->parent.dma_map, ccb,
	    isci_io_request_construct, io_request, 0x0);
	/* If BUSDMA reports a temporary resource shortage (EINPROGRESS),
	 *  the map load will be continued automatically at a later point,
	 *  pushing the CCB processing forward, which will in turn unfreeze
	 *  the simq.
	 */
	if (error == EINPROGRESS) {
		xpt_freeze_simq(controller->sim, 1);
		ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
	}
}

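/*
 * Callout handler for an I/O request that has exceeded its CAM timeout.
 *  Rather than aborting the individual request, the driver escalates to a
 *  reset of the remote device that owns it.
 */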
void
isci_io_request_timeout(void *arg)
{
	struct ISCI_IO_REQUEST *request = (struct ISCI_IO_REQUEST *)arg;
	struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *)
		sci_object_get_association(request->parent.remote_device_handle);
	struct ISCI_CONTROLLER *controller = remote_device->domain->controller;

	mtx_lock(&controller->lock);
	isci_remote_device_reset(remote_device, NULL);
	mtx_unlock(&controller->lock);
}

#if __FreeBSD_version >= 900026
/**
 * @brief This callback method gets the size of and pointer to the buffer
 *         (if any) containing the request buffer for an SMP request.
 *
 * @param[in]  core_request This parameter specifies the SCI core's request
 *             object associated with the SMP request.
 * @param[out] smp_request_buffer This parameter returns a pointer to the
 *             payload portion of the SMP request - i.e. everything after
 *             the SMP request header.
 *
 * @return Size of the request buffer in bytes.  This does *not* include
 *          the size of the SMP request header.
 */
static uint32_t
smp_io_request_cb_get_request_buffer(SCI_IO_REQUEST_HANDLE_T core_request,
    uint8_t ** smp_request_buffer)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));

	*smp_request_buffer = isci_request->ccb->smpio.smp_request +
	    sizeof(SMP_REQUEST_HEADER_T);

	return (isci_request->ccb->smpio.smp_request_len -
	    sizeof(SMP_REQUEST_HEADER_T));
}

/**
 * @brief This callback method gets the SMP function for an SMP request.
 *
 * @param[in]  core_request This parameter specifies the SCI core's request
 *             object associated with the SMP request.
 *
 * @return SMP function for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_function(SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->function);
}

/**
 * @brief This callback method gets the SMP frame type for an SMP request.
 *
 * @param[in]  core_request This parameter specifies the SCI core's request
 *             object associated with the SMP request.
 *
 * @return SMP frame type for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_frame_type(SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->smp_frame_type);
}

/**
 * @brief This callback method gets the allocated response length for an SMP request.
 *
 * @param[in]  core_request This parameter specifies the SCI core's request
 *             object associated with the SMP request.
 *
 * @return Allocated response length for the SMP request.
 */
static uint8_t
smp_io_request_cb_get_allocated_response_length(
    SCI_IO_REQUEST_HANDLE_T core_request)
{
	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
	    sci_object_get_association(sci_object_get_association(core_request));
	SMP_REQUEST_HEADER_T *header =
	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;

	return (header->allocated_response_length);
}

static SCI_STATUS
isci_smp_request_construct(struct ISCI_IO_REQUEST *request)
{
	SCI_STATUS status;
	SCIC_SMP_PASSTHRU_REQUEST_CALLBACKS_T callbacks;

	status = scif_request_construct(request->parent.controller_handle,
	    request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG,
	    (void *)request,
	    (void *)((char*)request + sizeof(struct ISCI_IO_REQUEST)),
	    &request->sci_object);

	if (status == SCI_SUCCESS) {
		callbacks.scic_cb_smp_passthru_get_request =
		    &smp_io_request_cb_get_request_buffer;
		callbacks.scic_cb_smp_passthru_get_function =
		    &smp_io_request_cb_get_function;
		callbacks.scic_cb_smp_passthru_get_frame_type =
		    &smp_io_request_cb_get_frame_type;
		callbacks.scic_cb_smp_passthru_get_allocated_response_length =
		    &smp_io_request_cb_get_allocated_response_length;

		/* create the smp passthrough part of the io request */
		status = scic_io_request_construct_smp_pass_through(
		    scif_io_request_get_scic_handle(request->sci_object),
		    &callbacks);
	}

	return (status);
}

void
isci_io_request_execute_smp_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	SCI_STATUS status;
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle;
	struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id];

	/* SMP commands are sent to an end device, because SMP devices are not
	 *  exposed to the kernel.  It is our responsibility to use this method
	 *  to get the SMP device that contains the specified end device.  If
	 *  the device is direct-attached, the handle will come back NULL, and
	 *  we'll just fail the SMP_IO with DEV_NOT_THERE.
	 */
	scif_remote_device_get_containing_device(end_device->sci_object,
	    &smp_device_handle);

	if (smp_device_handle == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(end_device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->parent.remote_device_handle = smp_device_handle;

	status = isci_smp_request_construct(io_request);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	status = (SCI_STATUS) scif_controller_start_io(
	    controller->scif_controller_handle, smp_device_handle,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset_sbt(&io_request->parent.timer,
		    SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout,
		    request, 0);
}
#endif