xref: /linux/drivers/scsi/isci/task.c (revision f1f52e75939b56c40b3d153ae99faf2720250242)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55 
56 #include <linux/completion.h>
57 #include <linux/irqflags.h>
58 #include "sas.h"
59 #include "remote_device.h"
60 #include "remote_node_context.h"
61 #include "isci.h"
62 #include "request.h"
63 #include "sata.h"
64 #include "task.h"
65 #include "timers.h"
66 
67 /**
68  * isci_task_refuse() - complete the request to the upper layer driver in
69  *     the case where an I/O needs to be completed back in the submit path.
70  * @ihost: host on which the request was queued
71  * @task: request to complete
72  * @response: response code for the completed task.
73  * @status: status code for the completed task.
74  *
75  */
76 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
77 			     enum service_response response,
78 			     enum exec_status status)
79 
80 {
81 	enum isci_completion_selection disposition;
82 
83 	disposition = isci_perform_normal_io_completion;
84 	disposition = isci_task_set_completion_status(task, response, status,
85 						      disposition);
86 
87 	/* Tasks aborted specifically by a call to the lldd_abort_task
88 	 * function should not be completed to the host in the regular path.
89 	 */
90 	switch (disposition) {
91 	case isci_perform_normal_io_completion:
92 		/* Normal notification (task_done) */
93 		dev_dbg(&ihost->pdev->dev,
94 			"%s: Normal - task = %p, response=%d, "
95 			"status=%d\n",
96 			__func__, task, response, status);
97 
98 		task->lldd_task = NULL;
99 
100 		isci_execpath_callback(ihost, task, task->task_done);
101 		break;
102 
103 	case isci_perform_aborted_io_completion:
104 		/* No notification because this request is already in the
105 		 * abort path.
106 		 */
107 		dev_warn(&ihost->pdev->dev,
108 			 "%s: Aborted - task = %p, response=%d, "
109 			 "status=%d\n",
110 			 __func__, task, response, status);
111 		break;
112 
113 	case isci_perform_error_io_completion:
114 		/* Use sas_task_abort */
115 		dev_warn(&ihost->pdev->dev,
116 			 "%s: Error - task = %p, response=%d, "
117 			 "status=%d\n",
118 			 __func__, task, response, status);
119 
120 		isci_execpath_callback(ihost, task, sas_task_abort);
121 		break;
122 
123 	default:
124 		dev_warn(&ihost->pdev->dev,
125 			 "%s: isci task notification default case!",
126 			 __func__);
127 		sas_task_abort(task);
128 		break;
129 	}
130 }
131 
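/* Walk "num" sas_tasks starting at "task".  When libsas hands down more than
 * one task at a time, the additional tasks are assumed to be linked through
 * task->list, so each iteration simply follows task->list.next.
 */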
132 #define for_each_sas_task(num, task) \
133 	for (; num > 0; num--,\
134 	     task = list_entry(task->list.next, struct sas_task, list))
135 
136 /**
137  * isci_task_execute_task() - This function is one of the SAS Domain Template
138  *    functions. This function is called by libsas to send a task down to
139  *    hardware.
140  * @task: This parameter specifies the SAS task to send.
141  * @num: This parameter specifies the number of tasks to queue.
142  * @gfp_flags: This parameter specifies the context of this call.
143  *
144  * status, zero indicates success.
145  */
146 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
147 {
148 	struct isci_host *ihost = dev_to_ihost(task->dev);
149 	struct isci_request *request = NULL;
150 	struct isci_remote_device *device;
151 	unsigned long flags;
152 	int ret;
153 	enum sci_status status;
154 	enum isci_status device_status;
155 
156 	dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
157 
158 	/* Check if we have room for more tasks */
159 	ret = isci_host_can_queue(ihost, num);
160 
161 	if (ret) {
162 		dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__);
163 		return ret;
164 	}
165 
166 	for_each_sas_task(num, task) {
167 		dev_dbg(&ihost->pdev->dev,
168 			"task = %p, num = %d; dev = %p; cmd = %p\n",
169 			    task, num, task->dev, task->uldd_task);
170 
171 		device = task->dev->lldd_dev;
172 
173 		if (device)
174 			device_status = device->status;
175 		else
176 			device_status = isci_freed;
177 
178 		/* From this point onward, any process that needs to guarantee
179 		 * that there is no kernel I/O being started will have to wait
180 		 * for the quiesce spinlock.
181 		 */
182 
183 		if (device_status != isci_ready_for_io) {
184 
185 			/* Forces a retry from scsi mid layer. */
186 			dev_dbg(&ihost->pdev->dev,
187 				"%s: task %p: isci_host->status = %d, "
188 				"device = %p; device_status = 0x%x\n\n",
189 				__func__,
190 				task,
191 				isci_host_get_state(ihost),
192 				device,
193 				device_status);
194 
195 			if (device_status == isci_ready) {
196 				/* Indicate QUEUE_FULL so that the scsi midlayer
197 				 * retries.
198 				 */
199 				isci_task_refuse(ihost, task,
200 						 SAS_TASK_COMPLETE,
201 						 SAS_QUEUE_FULL);
202 			} else {
203 				/* Else, the device is going down. */
204 				isci_task_refuse(ihost, task,
205 						 SAS_TASK_UNDELIVERED,
206 						 SAS_DEVICE_UNKNOWN);
207 			}
208 			isci_host_can_dequeue(ihost, 1);
209 		} else {
210 			/* There is a device and it's ready for I/O. */
211 			spin_lock_irqsave(&task->task_state_lock, flags);
212 
213 			if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
214 
215 				spin_unlock_irqrestore(&task->task_state_lock,
216 						       flags);
217 
218 				isci_task_refuse(ihost, task,
219 						 SAS_TASK_UNDELIVERED,
220 						 SAM_STAT_TASK_ABORTED);
221 
222 				/* The I/O was aborted. */
223 
224 			} else {
225 				task->task_state_flags |= SAS_TASK_AT_INITIATOR;
226 				spin_unlock_irqrestore(&task->task_state_lock, flags);
227 
228 				/* build and send the request. */
229 				status = isci_request_execute(ihost, task, &request,
230 							      gfp_flags);
231 
232 				if (status != SCI_SUCCESS) {
233 
234 					spin_lock_irqsave(&task->task_state_lock, flags);
235 					/* Did not really start this command. */
236 					task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
237 					spin_unlock_irqrestore(&task->task_state_lock, flags);
238 
239 					/* Indicate QUEUE_FULL so that the scsi
240 					 * midlayer retries. If the request
241 					 * failed for remote device reasons,
242 					 * it gets returned as
243 					 * SAS_TASK_UNDELIVERED next time
244 					 * through.
245 					 */
246 					isci_task_refuse(ihost, task,
247 							 SAS_TASK_COMPLETE,
248 							 SAS_QUEUE_FULL);
249 					isci_host_can_dequeue(ihost, 1);
250 				}
251 			}
252 		}
253 	}
254 	return 0;
255 }
256 
257 
258 
259 /**
260  * isci_task_request_build() - This function builds the task request object.
261  * @isci_host: This parameter specifies the ISCI host object
262  * @isci_request: This parameter points to the isci_request object allocated
263  *    in the request construct function.
264  * @isci_tmf: This parameter is the task management struct to be built
265  *
266  * SCI_SUCCESS on successful completion, or specific failure code.
267  */
268 static enum sci_status isci_task_request_build(
269 	struct isci_host *isci_host,
270 	struct isci_request **isci_request,
271 	struct isci_tmf *isci_tmf)
272 {
273 	struct scic_sds_remote_device *sci_device;
274 	enum sci_status status = SCI_FAILURE;
275 	struct isci_request *request = NULL;
276 	struct isci_remote_device *isci_device;
277 	struct domain_device *dev;
278 
279 	dev_dbg(&isci_host->pdev->dev,
280 		"%s: isci_tmf = %p\n", __func__, isci_tmf);
281 
282 	isci_device = isci_tmf->device;
283 	sci_device = &isci_device->sci;
284 	dev = isci_device->domain_dev;
285 
286 	/* do common allocation and init of request object. */
287 	status = isci_request_alloc_tmf(
288 		isci_host,
289 		isci_tmf,
290 		&request,
291 		isci_device,
292 		GFP_ATOMIC
293 		);
294 
295 	if (status != SCI_SUCCESS)
296 		goto out;
297 
298 	/* let the core do its construct. */
299 	status = scic_task_request_construct(&isci_host->sci, sci_device,
300 					     SCI_CONTROLLER_INVALID_IO_TAG,
301 					     &request->sci);
302 
303 	if (status != SCI_SUCCESS) {
304 		dev_warn(&isci_host->pdev->dev,
305 			 "%s: scic_task_request_construct failed - "
306 			 "status = 0x%x\n",
307 			 __func__,
308 			 status);
309 		goto errout;
310 	}
311 
312 	/* XXX convert to get this from task->tproto like other drivers */
313 	if (dev->dev_type == SAS_END_DEV) {
314 		isci_tmf->proto = SAS_PROTOCOL_SSP;
315 		status = scic_task_request_construct_ssp(&request->sci);
316 		if (status != SCI_SUCCESS)
317 			goto errout;
318 	}
319 
320 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
321 		isci_tmf->proto = SAS_PROTOCOL_SATA;
322 		status = isci_sata_management_task_request_build(request);
323 
324 		if (status != SCI_SUCCESS)
325 			goto errout;
326 	}
327 
328 	goto out;
329 
330  errout:
331 
332 	/* release the dma memory if we fail. */
333 	isci_request_free(isci_host, request);
334 	request = NULL;
335 
336  out:
337 	*isci_request = request;
338 	return status;
339 }
340 
341 /**
342  * isci_tmf_timeout_cb() - This function is called as a kernel callback when
343  *    the timeout period for the TMF has expired.
344  * @tmf_request_arg: opaque pointer to the isci_request that carries the TMF.
345  *
346  */
347 static void isci_tmf_timeout_cb(void *tmf_request_arg)
348 {
349 	struct isci_request *request = (struct isci_request *)tmf_request_arg;
350 	struct isci_tmf *tmf = isci_request_access_tmf(request);
351 	enum sci_status status;
352 
353 	/* This task management request has timed-out.  Terminate the request
354 	 * so that the request eventually completes to the requestor in the
355 	 * request completion callback path.
356 	 */
357 	/* Note - the timer callback function itself has provided spinlock
358 	 * exclusion from the start and completion paths.  No need to take
359 	 * the request->isci_host->scic_lock here.
360 	 */
361 
362 	if (tmf->timeout_timer != NULL) {
363 		/* Call the users callback, if any. */
364 		if (tmf->cb_state_func != NULL)
365 			tmf->cb_state_func(isci_tmf_timed_out, tmf,
366 					   tmf->cb_data);
367 
368 		/* Terminate the TMF transmit request. */
369 		status = scic_controller_terminate_request(
370 			&request->isci_host->sci,
371 			&request->isci_device->sci,
372 			&request->sci);
373 
374 		dev_dbg(&request->isci_host->pdev->dev,
375 			"%s: tmf_request = %p; tmf = %p; status = %d\n",
376 			__func__, request, tmf, status);
377 	} else
378 		dev_dbg(&request->isci_host->pdev->dev,
379 			"%s: timer already canceled! "
380 			"tmf_request = %p; tmf = %p\n",
381 			__func__, request, tmf);
382 
383 	/* No need to unlock since the caller to this callback is doing it for
384 	 * us.
385 	 * request->isci_host->scic_lock
386 	 */
387 }
388 
389 /**
390  * isci_task_execute_tmf() - This function builds and sends a task request,
391  *    then waits for the completion.
392  * @isci_host: This parameter specifies the ISCI host object
393  * @tmf: This parameter is the pointer to the task management structure for
394  *    this request.
395  * @timeout_ms: This parameter specifies the timeout period for the task
396  *    management request.
397  *
398  * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes
399  * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED.
400  */
401 int isci_task_execute_tmf(
402 	struct isci_host *isci_host,
403 	struct isci_tmf *tmf,
404 	unsigned long timeout_ms)
405 {
406 	DECLARE_COMPLETION_ONSTACK(completion);
407 	enum sci_task_status status = SCI_TASK_FAILURE;
408 	struct scic_sds_remote_device *sci_device;
409 	struct isci_remote_device *isci_device = tmf->device;
410 	struct isci_request *request;
411 	int ret = TMF_RESP_FUNC_FAILED;
412 	unsigned long flags;
413 
414 	/* sanity check, return TMF_RESP_FUNC_FAILED
415 	 * if the device is not there and ready.
416 	 */
417 	if (!isci_device || isci_device->status != isci_ready_for_io) {
418 		dev_dbg(&isci_host->pdev->dev,
419 			"%s: isci_device = %p not ready (%d)\n",
420 			__func__,
421 			isci_device, isci_device->status);
422 		return TMF_RESP_FUNC_FAILED;
423 	} else
424 		dev_dbg(&isci_host->pdev->dev,
425 			"%s: isci_device = %p\n",
426 			__func__, isci_device);
427 
428 	sci_device = &isci_device->sci;
429 
430 	/* Assign the pointer to the TMF's completion kernel wait structure. */
431 	tmf->complete = &completion;
432 
433 	isci_task_request_build(
434 		isci_host,
435 		&request,
436 		tmf
437 		);
438 
439 	if (!request) {
440 		dev_warn(&isci_host->pdev->dev,
441 			"%s: isci_task_request_build failed\n",
442 			__func__);
443 		return TMF_RESP_FUNC_FAILED;
444 	}
445 
446 	/* Allocate the TMF timeout timer. */
447 	spin_lock_irqsave(&isci_host->scic_lock, flags);
448 	tmf->timeout_timer = isci_timer_create(isci_host, request, isci_tmf_timeout_cb);
449 
450 	/* Start the timer. */
451 	if (tmf->timeout_timer)
452 		isci_timer_start(tmf->timeout_timer, timeout_ms);
453 	else
454 		dev_warn(&isci_host->pdev->dev,
455 			 "%s: isci_timer_create failed!!!!\n",
456 			 __func__);
457 
458 	/* start the TMF io. */
459 	status = scic_controller_start_task(
460 		&isci_host->sci,
461 		sci_device,
462 		&request->sci,
463 		SCI_CONTROLLER_INVALID_IO_TAG);
464 
465 	if (status != SCI_TASK_SUCCESS) {
466 		dev_warn(&isci_host->pdev->dev,
467 			 "%s: start_io failed - status = 0x%x, request = %p\n",
468 			 __func__,
469 			 status,
470 			 request);
471 		goto cleanup_request;
472 	}
473 
474 	/* Call the users callback, if any. */
475 	if (tmf->cb_state_func != NULL)
476 		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
477 
478 	/* Change the state of the TMF-bearing request to "started". */
479 	isci_request_change_state(request, started);
480 
481 	/* add the request to the remote device request list. */
482 	list_add(&request->dev_node, &isci_device->reqs_in_process);
483 
484 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
485 
486 	/* Wait for the TMF to complete, or a timeout. */
487 	wait_for_completion(&completion);
488 
489 	isci_print_tmf(tmf);
490 
491 	if (tmf->status == SCI_SUCCESS)
492 		ret = TMF_RESP_FUNC_COMPLETE;
493 	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
494 		dev_dbg(&isci_host->pdev->dev,
495 			"%s: tmf.status == "
496 			"SCI_FAILURE_IO_RESPONSE_VALID\n",
497 			__func__);
498 		ret = TMF_RESP_FUNC_COMPLETE;
499 	}
500 	/* Else - leave the default "failed" status alone. */
501 
502 	dev_dbg(&isci_host->pdev->dev,
503 		"%s: completed request = %p\n",
504 		__func__,
505 		request);
506 
507 	if (request->io_request_completion != NULL) {
508 
509 		/* The fact that this is non-NULL for a TMF request
510 		 * means there is a thread waiting for this TMF to
511 		 * finish.
512 		 */
513 		complete(request->io_request_completion);
514 	}
515 
516 	spin_lock_irqsave(&isci_host->scic_lock, flags);
517 
518  cleanup_request:
519 
520 	/* Clean up the timer if needed. */
521 	if (tmf->timeout_timer) {
522 		isci_del_timer(isci_host, tmf->timeout_timer);
523 		tmf->timeout_timer = NULL;
524 	}
525 
526 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
527 
528 	isci_request_free(isci_host, request);
529 
530 	return ret;
531 }
532 
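/**
 * isci_task_build_tmf() - This function initializes a task management
 *    structure for the given device and TMF function code.
 * @tmf: This parameter is the task management structure to be initialized.
 * @isci_device: This parameter specifies the remote device for the TMF.
 * @code: This parameter specifies the TMF function code to execute.
 * @tmf_sent_cb: This parameter is an optional callback invoked as the TMF
 *    changes state.
 * @cb_data: This parameter is opaque data passed back to @tmf_sent_cb.
 */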
533 void isci_task_build_tmf(
534 	struct isci_tmf *tmf,
535 	struct isci_remote_device *isci_device,
536 	enum isci_tmf_function_codes code,
537 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
538 			    struct isci_tmf *,
539 			    void *),
540 	void *cb_data)
541 {
542 	dev_dbg(&isci_device->isci_port->isci_host->pdev->dev,
543 		"%s: isci_device = %p\n", __func__, isci_device);
544 
545 	memset(tmf, 0, sizeof(*tmf));
546 
547 	tmf->device        = isci_device;
548 	tmf->tmf_code      = code;
549 	tmf->timeout_timer = NULL;
550 	tmf->cb_state_func = tmf_sent_cb;
551 	tmf->cb_data       = cb_data;
552 }
553 
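/* Build an abort-task TMF: the request being aborted is passed as the
 * callback data, and its io_tag is copied into the TMF.
 */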
554 static void isci_task_build_abort_task_tmf(
555 	struct isci_tmf *tmf,
556 	struct isci_remote_device *isci_device,
557 	enum isci_tmf_function_codes code,
558 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
559 			    struct isci_tmf *,
560 			    void *),
561 	struct isci_request *old_request)
562 {
563 	isci_task_build_tmf(tmf, isci_device, code, tmf_sent_cb,
564 			    (void *)old_request);
565 	tmf->io_tag = old_request->io_tag;
566 }
567 
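/* Return the isci_request backing a sas_task (task->lldd_task).  The
 * associated remote device is returned through @isci_device only while the
 * task is still at the initiator and has not yet been marked done.
 */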
568 static struct isci_request *isci_task_get_request_from_task(
569 	struct sas_task *task,
570 	struct isci_remote_device **isci_device)
571 {
572 
573 	struct isci_request *request = NULL;
574 	unsigned long flags;
575 
576 	spin_lock_irqsave(&task->task_state_lock, flags);
577 
578 	request = task->lldd_task;
579 
580 	/* If task is already done, the request isn't valid */
581 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
582 	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
583 	    (request != NULL)) {
584 
585 		if (isci_device != NULL)
586 			*isci_device = request->isci_device;
587 	}
588 
589 	spin_unlock_irqrestore(&task->task_state_lock, flags);
590 
591 	return request;
592 }
593 
594 /**
595  * isci_task_validate_request_to_abort() - This function checks the given I/O
596  *    against the "started" state.  If the request is still "started", its
597  *    state is changed to "aborted". NOTE: isci_host->scic_lock MUST BE HELD
598  *    BEFORE CALLING THIS FUNCTION.
599  * @isci_request: This parameter specifies the request object to control.
600  * @isci_host: This parameter specifies the ISCI host object
601  * @isci_device: This is the device to which the request is pending.
602  * @aborted_io_completion: This is a completion structure that will be added to
603  *    the request in case it is changed to aborting; this completion is
604  *    triggered when the request is fully completed.
605  *
606  * Either "started" on successful change of the task status to "aborted", or
607  * "unallocated" if the task cannot be controlled.
608  */
609 static enum isci_request_status isci_task_validate_request_to_abort(
610 	struct isci_request *isci_request,
611 	struct isci_host *isci_host,
612 	struct isci_remote_device *isci_device,
613 	struct completion *aborted_io_completion)
614 {
615 	enum isci_request_status old_state = unallocated;
616 
617 	/* Only abort the task if it's in the
618 	 * device's reqs_in_process list.
619 	 */
620 	if (isci_request && !list_empty(&isci_request->dev_node)) {
621 		old_state = isci_request_change_started_to_aborted(
622 			isci_request, aborted_io_completion);
623 
624 	}
625 
626 	return old_state;
627 }
628 
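/* A completed request left loitering on the device's reqs_in_process list
 * still needs to be detached and freed here: remove it from the list, break
 * the link to its sas_task, and call task_done() unless the task is already
 * in the abort path.
 */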
629 static void isci_request_cleanup_completed_loiterer(
630 	struct isci_host *isci_host,
631 	struct isci_remote_device *isci_device,
632 	struct isci_request *isci_request)
633 {
634 	struct sas_task     *task;
635 	unsigned long       flags;
636 
637 	task = (isci_request->ttype == io_task)
638 		? isci_request_access_task(isci_request)
639 		: NULL;
640 
641 	dev_dbg(&isci_host->pdev->dev,
642 		"%s: isci_device=%p, request=%p, task=%p\n",
643 		__func__, isci_device, isci_request, task);
644 
645 	spin_lock_irqsave(&isci_host->scic_lock, flags);
646 	list_del_init(&isci_request->dev_node);
647 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
648 
649 	if (task != NULL) {
650 
651 		spin_lock_irqsave(&task->task_state_lock, flags);
652 		task->lldd_task = NULL;
653 
654 		isci_set_task_doneflags(task);
655 
656 		/* If this task is not in the abort path, call task_done. */
657 		if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
658 
659 			spin_unlock_irqrestore(&task->task_state_lock, flags);
660 			task->task_done(task);
661 		} else
662 			spin_unlock_irqrestore(&task->task_state_lock, flags);
663 	}
664 	isci_request_free(isci_host, isci_request);
665 }
666 
667 /**
668  * isci_termination_timed_out() - this function will deal with a request for
669  *    which the wait for termination has timed out.
670  *
671  * @host: This SCU.
672  * @request: The I/O request being terminated.
673  */
674 static void
675 isci_termination_timed_out(
676 	struct isci_host *host,
677 	struct isci_request *request
678 	)
679 {
680 	unsigned long state_flags;
681 
682 	dev_warn(&host->pdev->dev,
683 		"%s: host = %p; request = %p\n",
684 		__func__, host, request);
685 
686 	/* At this point, the request to terminate
687 	 * has timed out. The best we can do is to
688 	 * have the request die a silent death
689 	 * if it ever completes.
690 	 */
691 	spin_lock_irqsave(&request->state_lock, state_flags);
692 
693 	if (request->status == started) {
694 
695 		/* Set the request state to "dead",
696 		 * and clear the task pointer so that an actual
697 		 * completion event callback doesn't do
698 		 * anything.
699 		 */
700 		request->status = dead;
701 
702 		/* Clear the timeout completion event pointer. */
703 		request->io_request_completion = NULL;
704 
705 		if (request->ttype == io_task) {
706 
707 			/* Break links with the sas_task. */
708 			if (request->ttype_ptr.io_task_ptr != NULL) {
709 
710 				request->ttype_ptr.io_task_ptr->lldd_task = NULL;
711 				request->ttype_ptr.io_task_ptr            = NULL;
712 			}
713 		}
714 	}
715 	spin_unlock_irqrestore(&request->state_lock, state_flags);
716 }
717 
718 
719 /**
720  * isci_terminate_request_core() - This function will terminate the given
721  *    request, and wait for it to complete.  This function must only be called
722  *    from a thread that can wait.  Note that the request is terminated and
723  *    completed (back to the host, if started there).
724  * @isci_host: This SCU.
725  * @isci_device: The target.
726  * @isci_request: The I/O request to be terminated.
727  *
728  *
729  */
730 static void isci_terminate_request_core(
731 	struct isci_host *isci_host,
732 	struct isci_remote_device *isci_device,
733 	struct isci_request *isci_request)
734 {
735 	enum sci_status status      = SCI_SUCCESS;
736 	bool was_terminated         = false;
737 	bool needs_cleanup_handling = false;
738 	enum isci_request_status request_status;
739 	unsigned long flags;
740 	unsigned long timeout_remaining;
741 
742 
743 	dev_dbg(&isci_host->pdev->dev,
744 		"%s: device = %p; request = %p\n",
745 		__func__, isci_device, isci_request);
746 
747 	spin_lock_irqsave(&isci_host->scic_lock, flags);
748 
749 	/* Note that we are not going to control
750 	 * the target to abort the request.
751 	 */
752 	isci_request->complete_in_target = true;
753 
754 	/* Make sure the request wasn't just sitting around signalling
755 	 * device condition (if the request handle is NULL, then the
756 	 * request completed but needed additional handling here).
757 	 */
758 	if (!isci_request->terminated) {
759 		was_terminated = true;
760 		needs_cleanup_handling = true;
761 		status = scic_controller_terminate_request(
762 			&isci_host->sci,
763 			&isci_device->sci,
764 			&isci_request->sci);
765 	}
766 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
767 
768 	/*
769 	 * The only time the request to terminate will
770 	 * fail is when the io request is completed and
771 	 * being aborted.
772 	 */
773 	if (status != SCI_SUCCESS) {
774 		dev_err(&isci_host->pdev->dev,
775 			"%s: scic_controller_terminate_request"
776 			" returned = 0x%x\n",
777 			__func__,
778 			status);
779 		/* Clear the completion pointer from the request. */
780 		isci_request->io_request_completion = NULL;
781 
782 	} else {
783 		if (was_terminated) {
784 			dev_dbg(&isci_host->pdev->dev,
785 				"%s: before completion wait (%p)\n",
786 				__func__,
787 				isci_request->io_request_completion);
788 
789 			/* Wait here for the request to complete. */
790 			#define TERMINATION_TIMEOUT_MSEC 50
791 			timeout_remaining
792 				= wait_for_completion_timeout(
793 				   isci_request->io_request_completion,
794 				   msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
795 
796 			if (!timeout_remaining) {
797 
798 				isci_termination_timed_out(isci_host,
799 							   isci_request);
800 
801 				dev_err(&isci_host->pdev->dev,
802 					"%s: *** Timeout waiting for "
803 					"termination(%p/%p)\n",
804 					__func__,
805 					isci_request->io_request_completion,
806 					isci_request);
807 
808 			} else
809 				dev_dbg(&isci_host->pdev->dev,
810 					"%s: after completion wait (%p)\n",
811 					__func__,
812 					isci_request->io_request_completion);
813 		}
814 		/* Clear the completion pointer from the request. */
815 		isci_request->io_request_completion = NULL;
816 
817 		/* Peek at the status of the request.  This will tell
818 		 * us if there was special handling on the request such that it
819 		 * needs to be detached and freed here.
820 		 */
821 		spin_lock_irqsave(&isci_request->state_lock, flags);
822 		request_status = isci_request_get_state(isci_request);
823 
824 		if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
825 		    && ((request_status == aborted)
826 			|| (request_status == aborting)
827 			|| (request_status == terminating)
828 			|| (request_status == completed)
829 			|| (request_status == dead)
830 			)
831 		    ) {
832 
833 			/* The completion routine won't free a request in
834 			 * the aborted/aborting/etc. states, so we do
835 			 * it here.
836 			 */
837 			needs_cleanup_handling = true;
838 		}
839 		spin_unlock_irqrestore(&isci_request->state_lock, flags);
840 
841 		if (needs_cleanup_handling)
842 			isci_request_cleanup_completed_loiterer(
843 				isci_host, isci_device, isci_request
844 				);
845 	}
846 }
847 
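/* Move the request from "started" to the requested new state and, if its old
 * state still calls for termination or cleanup, terminate it via
 * isci_terminate_request_core() and wait for it to finish.
 */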
848 static void isci_terminate_request(
849 	struct isci_host *isci_host,
850 	struct isci_remote_device *isci_device,
851 	struct isci_request *isci_request,
852 	enum isci_request_status new_request_state)
853 {
854 	enum isci_request_status old_state;
855 	DECLARE_COMPLETION_ONSTACK(request_completion);
856 
857 	/* Change state to "new_request_state" if it is currently "started" */
858 	old_state = isci_request_change_started_to_newstate(
859 		isci_request,
860 		&request_completion,
861 		new_request_state
862 		);
863 
864 	if ((old_state == started) ||
865 	    (old_state == completed) ||
866 	    (old_state == aborting)) {
867 
868 		/* If the old_state is started:
869 		 * This request was not already being aborted. If it had been,
870 		 * then the aborting I/O (ie. the TMF request) would not be in
871 		 * the aborting state, and thus would be terminated here.  Note
872 		 * that since the TMF completion's call to the kernel function
873 		 * "complete()" does not happen until the pending I/O request
874 		 * terminate fully completes, we do not have to implement a
875 		 * special wait here for already aborting requests - the
876 		 * termination of the TMF request will force the request
877 		 * to finish its already-started terminate.
878 		 *
879 		 * If old_state == completed:
880 		 * This request completed from the SCU hardware perspective
881 		 * and now just needs cleaning up in terms of freeing the
882 		 * request and potentially calling up to libsas.
883 		 *
884 		 * If old_state == aborting:
885 		 * This request has already gone through a TMF timeout, but may
886 		 * not have been terminated; needs cleaning up at least.
887 		 */
888 		isci_terminate_request_core(isci_host, isci_device,
889 					    isci_request);
890 	}
891 }
892 
893 /**
894  * isci_terminate_pending_requests() - This function will change all of the
895  *    requests on the given device to the given new state, terminate the
896  *    requests, and wait for them to complete.  This function must only be
897  *    called from a thread that can wait.  Note that the requests are all
898  *    terminated and completed (back to the host, if started there).
899  * @isci_host: This parameter specifies the SCU.
900  * @isci_device: This parameter specifies the target.
901  * @new_request_state: This parameter specifies the state to set on each request.
902  *
903  */
904 void isci_terminate_pending_requests(
905 	struct isci_host *isci_host,
906 	struct isci_remote_device *isci_device,
907 	enum isci_request_status new_request_state)
908 {
909 	struct isci_request *request;
910 	struct isci_request *next_request;
911 	unsigned long       flags;
912 	struct list_head    aborted_request_list;
913 
914 	INIT_LIST_HEAD(&aborted_request_list);
915 
916 	dev_dbg(&isci_host->pdev->dev,
917 		"%s: isci_device = %p (new request state = %d)\n",
918 		__func__, isci_device, new_request_state);
919 
920 	spin_lock_irqsave(&isci_host->scic_lock, flags);
921 
922 	/* Move all of the pending requests off of the device list. */
923 	list_splice_init(&isci_device->reqs_in_process,
924 			 &aborted_request_list);
925 
926 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
927 
928 	/* Iterate through the now-local list. */
929 	list_for_each_entry_safe(request, next_request,
930 				 &aborted_request_list, dev_node) {
931 
932 		dev_warn(&isci_host->pdev->dev,
933 			"%s: isci_device=%p request=%p; task=%p\n",
934 			__func__,
935 			isci_device, request,
936 			((request->ttype == io_task)
937 				? isci_request_access_task(request)
938 				: NULL));
939 
940 		/* Mark all still pending I/O with the selected next
941 		 * state, terminate and free it.
942 		 */
943 		isci_terminate_request(isci_host, isci_device,
944 				       request, new_request_state
945 				       );
946 	}
947 }
948 
949 /**
950  * isci_task_send_lu_reset_sas() - This function is called by one of the SAS
951  *    Domain Template functions to send a LUN reset to an SSP target.
952  * @isci_host: This parameter specifies the ISCI host object.
953  * @isci_device: This parameter specifies the target of the LUN reset.
954  * @lun: This parameter specifies the lun to be reset.
953  *
954  * status, zero indicates success.
955  */
956 static int isci_task_send_lu_reset_sas(
957 	struct isci_host *isci_host,
958 	struct isci_remote_device *isci_device,
959 	u8 *lun)
960 {
961 	struct isci_tmf tmf;
962 	int ret = TMF_RESP_FUNC_FAILED;
963 
964 	dev_dbg(&isci_host->pdev->dev,
965 		"%s: isci_host = %p, isci_device = %p\n",
966 		__func__, isci_host, isci_device);
967 	/* Send the LUN reset to the target.  By the time the call returns,
968 	 * the TMF has fully executed in the target (in which case the return
969 	 * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or
970 	 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
971 	 */
972 	isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_lun_reset, NULL,
973 			    NULL);
974 
975 	#define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
976 	ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
977 
978 	if (ret == TMF_RESP_FUNC_COMPLETE)
979 		dev_dbg(&isci_host->pdev->dev,
980 			"%s: %p: TMF_LU_RESET passed\n",
981 			__func__, isci_device);
982 	else
983 		dev_dbg(&isci_host->pdev->dev,
984 			"%s: %p: TMF_LU_RESET failed (%x)\n",
985 			__func__, isci_device, ret);
986 
987 	return ret;
988 }
989 
990 /**
991  * isci_task_lu_reset() - This function is one of the SAS Domain Template
992  *    functions. This is one of the Task Management functions called by libsas
993  *    to reset the given lun. Note the assumption that while this call is
994  *    executing, no I/O will be sent by the host to the device.
995  * @lun: This parameter specifies the lun to be reset.
996  *
997  * status, zero indicates success.
998  */
999 int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
1000 {
1001 	struct isci_host *isci_host = dev_to_ihost(domain_device);
1002 	struct isci_remote_device *isci_device = NULL;
1003 	int ret;
1004 	bool device_stopping = false;
1005 
1006 	isci_device = domain_device->lldd_dev;
1007 
1008 	dev_dbg(&isci_host->pdev->dev,
1009 		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
1010 		 __func__, domain_device, isci_host, isci_device);
1011 
1012 	if (isci_device != NULL) {
1013 		device_stopping = (isci_device->status == isci_stopping)
1014 				  || (isci_device->status == isci_stopped);
1015 		set_bit(IDEV_EH, &isci_device->flags);
1016 	}
1017 
1018 	/* If there is a device reset pending on any request in the
1019 	 * device's list, fail this LUN reset request in order to
1020 	 * escalate to the device reset.
1021 	 */
1022 	if (!isci_device || device_stopping ||
1023 	    isci_device_is_reset_pending(isci_host, isci_device)) {
1024 		dev_warn(&isci_host->pdev->dev,
1025 			 "%s: No dev (%p), or "
1026 			 "RESET PENDING: domain_device=%p\n",
1027 			 __func__, isci_device, domain_device);
1028 		return TMF_RESP_FUNC_FAILED;
1029 	}
1030 
1031 	/* Send the task management part of the reset. */
1032 	if (sas_protocol_ata(domain_device->tproto)) {
1033 		ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
1034 	} else
1035 		ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
1036 
1037 	/* If the LUN reset worked, all the I/O can now be terminated. */
1038 	if (ret == TMF_RESP_FUNC_COMPLETE)
1039 		/* Terminate all I/O now. */
1040 		isci_terminate_pending_requests(isci_host,
1041 						isci_device,
1042 						terminating);
1043 
1044 	return ret;
1045 }
1046 
1047 
1048 /*	 int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
1049 int isci_task_clear_nexus_port(struct asd_sas_port *port)
1050 {
1051 	return TMF_RESP_FUNC_FAILED;
1052 }
1053 
1054 
1055 
1056 int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
1057 {
1058 	return TMF_RESP_FUNC_FAILED;
1059 }
1060 
1061 /* Task Management Functions. Must be called from process context.	 */
1062 
1063 /**
1064  * isci_abort_task_process_cb() - This is a helper function for the abort task
1065  *    TMF command.  It manages the request state with respect to the successful
1066  *    transmission / completion of the abort task request.
1067  * @cb_state: This parameter specifies when this function was called - either
1068  *    after the TMF request has been started or after it has timed out.
1069  * @tmf: This parameter specifies the TMF in progress.
1070  * @cb_data: This parameter is the callback data; here, the request being aborted.
1071  *
1072  */
1073 static void isci_abort_task_process_cb(
1074 	enum isci_tmf_cb_state cb_state,
1075 	struct isci_tmf *tmf,
1076 	void *cb_data)
1077 {
1078 	struct isci_request *old_request;
1079 
1080 	old_request = (struct isci_request *)cb_data;
1081 
1082 	dev_dbg(&old_request->isci_host->pdev->dev,
1083 		"%s: tmf=%p, old_request=%p\n",
1084 		__func__, tmf, old_request);
1085 
1086 	switch (cb_state) {
1087 
1088 	case isci_tmf_started:
1089 		/* The TMF has been started.  Nothing to do here, since the
1090 		 * request state was already set to "aborted" by the abort
1091 		 * task function.
1092 		 */
1093 		if ((old_request->status != aborted)
1094 			&& (old_request->status != completed))
1095 			dev_err(&old_request->isci_host->pdev->dev,
1096 				"%s: Bad request status (%d): tmf=%p, old_request=%p\n",
1097 				__func__, old_request->status, tmf, old_request);
1098 		break;
1099 
1100 	case isci_tmf_timed_out:
1101 
1102 		/* Set the task's state to "aborting", since the abort task
1103 		 * function thread set it to "aborted" (above) in anticipation
1104 		 * of the task management request working correctly.  Since the
1105 		 * timeout has now fired, the TMF request failed.  We set the
1106 		 * state such that the request completion will indicate the
1107 		 * device is no longer present.
1108 		 */
1109 		isci_request_change_state(old_request, aborting);
1110 		break;
1111 
1112 	default:
1113 		dev_err(&old_request->isci_host->pdev->dev,
1114 			"%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
1115 			__func__, cb_state, tmf, old_request);
1116 		break;
1117 	}
1118 }
1119 
1120 /**
1121  * isci_task_abort_task() - This function is one of the SAS Domain Template
1122  *    functions. This function is called by libsas to abort a specified task.
1123  * @task: This parameter specifies the SAS task to abort.
1124  *
1125  * status, zero indicates success.
1126  */
1127 int isci_task_abort_task(struct sas_task *task)
1128 {
1129 	struct isci_host *isci_host = dev_to_ihost(task->dev);
1130 	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
1131 	struct isci_request       *old_request = NULL;
1132 	enum isci_request_status  old_state;
1133 	struct isci_remote_device *isci_device = NULL;
1134 	struct isci_tmf           tmf;
1135 	int                       ret = TMF_RESP_FUNC_FAILED;
1136 	unsigned long             flags;
1137 	bool                      any_dev_reset = false;
1138 	bool                      device_stopping;
1139 
1140 	/* Get the isci_request reference from the task.  Note that
1141 	 * this check does not depend on the pending request list
1142 	 * in the device, because tasks driving resets may land here
1143 	 * after completion in the core.
1144 	 */
1145 	old_request = isci_task_get_request_from_task(task, &isci_device);
1146 
1147 	dev_dbg(&isci_host->pdev->dev,
1148 		"%s: task = %p\n", __func__, task);
1149 
1150 	/* Check if the device has been / is currently being removed.
1151 	 * If so, no task management will be done, and the I/O will
1152 	 * be terminated.
1153 	 */
1154 	device_stopping = (isci_device->status == isci_stopping)
1155 			  || (isci_device->status == isci_stopped);
1156 
1157 	/* XXX need to fix device lookup lifetime (needs to be done
1158 	 * under scic_lock, among other things...), but for now assume
1159 	 * the device is available like the above code
1160 	 */
1161 	set_bit(IDEV_EH, &isci_device->flags);
1162 
1163 	/* This version of the driver will fail abort requests for
1164 	 * SATA/STP.  Failing the abort request this way will cause the
1165 	 * SCSI error handler thread to escalate to LUN reset
1166 	 */
1167 	if (sas_protocol_ata(task->task_proto) && !device_stopping) {
1168 		dev_warn(&isci_host->pdev->dev,
1169 			    " task %p is for a STP/SATA device;"
1170 			    " returning TMF_RESP_FUNC_FAILED\n"
1171 			    " to cause a LUN reset...\n", task);
1172 		return TMF_RESP_FUNC_FAILED;
1173 	}
1174 
1175 	dev_dbg(&isci_host->pdev->dev,
1176 		"%s: old_request == %p\n", __func__, old_request);
1177 
1178 	if (!device_stopping)
1179 		any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
1180 
1181 	spin_lock_irqsave(&task->task_state_lock, flags);
1182 
1183 	/* Don't do resets to stopping devices. */
1184 	if (device_stopping) {
1185 
1186 		task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
1187 		any_dev_reset = false;
1188 
1189 	} else	/* See if there is a pending device reset for this device. */
1190 		any_dev_reset = any_dev_reset
1191 			|| (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
1192 
1193 	/* If the extraction of the request reference from the task
1194 	 * failed, then the request has been completed (or if there is a
1195 	 * pending reset then this abort request function must be failed
1196 	 * in order to escalate to the target reset).
1197 	 */
1198 	if ((old_request == NULL) || any_dev_reset) {
1199 
1200 		/* If the device reset task flag is set, fail the task
1201 		 * management request.  Otherwise, the original request
1202 		 * has completed.
1203 		 */
1204 		if (any_dev_reset) {
1205 
1206 			/* Turn off the task's DONE to make sure this
1207 			 * task is escalated to a target reset.
1208 			 */
1209 			task->task_state_flags &= ~SAS_TASK_STATE_DONE;
1210 
1211 			/* Make the reset happen as soon as possible. */
1212 			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
1213 
1214 			spin_unlock_irqrestore(&task->task_state_lock, flags);
1215 
1216 			/* Fail the task management request in order to
1217 			 * escalate to the target reset.
1218 			 */
1219 			ret = TMF_RESP_FUNC_FAILED;
1220 
1221 			dev_dbg(&isci_host->pdev->dev,
1222 				"%s: Failing task abort in order to "
1223 				"escalate to target reset because\n"
1224 				"SAS_TASK_NEED_DEV_RESET is set for "
1225 				"task %p on dev %p\n",
1226 				__func__, task, isci_device);
1227 
1228 
1229 		} else {
1230 			/* The request has already completed and there
1231 			 * is nothing to do here other than to set the task
1232 			 * done bit, and indicate that the task abort function
1233 			 * was successful.
1234 			 */
1235 			isci_set_task_doneflags(task);
1236 
1237 			spin_unlock_irqrestore(&task->task_state_lock, flags);
1238 
1239 			ret = TMF_RESP_FUNC_COMPLETE;
1240 
1241 			dev_dbg(&isci_host->pdev->dev,
1242 				"%s: abort task not needed for %p\n",
1243 				__func__, task);
1244 		}
1245 
1246 		return ret;
1247 	} else
1249 		spin_unlock_irqrestore(&task->task_state_lock, flags);
1250 
1251 	spin_lock_irqsave(&isci_host->scic_lock, flags);
1252 
1253 	/* Check the request status and change to "aborted" if currently
1254 	 * "starting"; if true then set the I/O kernel completion
1255 	 * struct that will be triggered when the request completes.
1256 	 */
1257 	old_state = isci_task_validate_request_to_abort(
1258 				old_request, isci_host, isci_device,
1259 				&aborted_io_completion);
1260 	if ((old_state != started) &&
1261 	    (old_state != completed) &&
1262 	    (old_state != aborting)) {
1263 
1264 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1265 
1266 		/* The request was already being handled by someone else (because
1267 		 * they got to set the state away from started).
1268 		 */
1269 		dev_dbg(&isci_host->pdev->dev,
1270 			"%s:  device = %p; old_request %p already being aborted\n",
1271 			__func__,
1272 			isci_device, old_request);
1273 
1274 		return TMF_RESP_FUNC_COMPLETE;
1275 	}
1276 	if ((task->task_proto == SAS_PROTOCOL_SMP)
1277 	    || device_stopping
1278 	    || old_request->complete_in_target
1279 	    ) {
1280 
1281 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1282 
1283 		dev_dbg(&isci_host->pdev->dev,
1284 			"%s: SMP request (%d)"
1285 			" or device is stopping (%d)"
1286 			" or complete_in_target (%d), thus no TMF\n",
1287 			__func__, (task->task_proto == SAS_PROTOCOL_SMP),
1288 			device_stopping, old_request->complete_in_target);
1289 
1290 		/* Set the state on the task. */
1291 		isci_task_all_done(task);
1292 
1293 		ret = TMF_RESP_FUNC_COMPLETE;
1294 
1295 		/* Stopping and SMP devices are not sent a TMF, and are not
1296 		 * reset, but the outstanding I/O request is terminated below.
1297 		 */
1298 	} else {
1299 		/* Fill in the tmf structure */
1300 		isci_task_build_abort_task_tmf(&tmf, isci_device,
1301 					       isci_tmf_ssp_task_abort,
1302 					       isci_abort_task_process_cb,
1303 					       old_request);
1304 
1305 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1306 
1307 		#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
1308 		ret = isci_task_execute_tmf(isci_host, &tmf,
1309 					    ISCI_ABORT_TASK_TIMEOUT_MS);
1310 
1311 		if (ret != TMF_RESP_FUNC_COMPLETE)
1312 			dev_err(&isci_host->pdev->dev,
1313 				"%s: isci_task_execute_tmf failed\n",
1314 				__func__);
1315 	}
1316 	if (ret == TMF_RESP_FUNC_COMPLETE) {
1317 		old_request->complete_in_target = true;
1318 
1319 		/* Clean up the request on our side, and wait for the aborted I/O to
1320 		 * complete.
1321 		 */
1322 		isci_terminate_request_core(isci_host, isci_device, old_request);
1323 	}
1324 
1325 	/* Make sure we do not leave a reference to aborted_io_completion */
1326 	old_request->io_request_completion = NULL;
1327 	return ret;
1328 }
1329 
1330 /**
1331  * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1332  *    functions. This is one of the Task Management functions called by libsas
1333  *    to abort all tasks for the given lun.
1334  * @d_device: This parameter specifies the domain device associated with this
1335  *    request.
1336  * @lun: This parameter specifies the lun associated with this request.
1337  *
1338  * status, zero indicates success.
1339  */
1340 int isci_task_abort_task_set(
1341 	struct domain_device *d_device,
1342 	u8 *lun)
1343 {
1344 	return TMF_RESP_FUNC_FAILED;
1345 }
1346 
1347 
1348 /**
1349  * isci_task_clear_aca() - This function is one of the SAS Domain Template
1350  *    functions. This is one of the Task Management functions called by libsas.
1351  * @d_device: This parameter specifies the domain device associated with this
1352  *    request.
1353  * @lun: This parameter specifies the lun associated with this request.
1354  *
1355  * status, zero indicates success.
1356  */
1357 int isci_task_clear_aca(
1358 	struct domain_device *d_device,
1359 	u8 *lun)
1360 {
1361 	return TMF_RESP_FUNC_FAILED;
1362 }
1363 
1364 
1365 
1366 /**
1367  * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1368  *    functions. This is one of the Task Management functions called by libsas.
1369  * @d_device: This parameter specifies the domain device associated with this
1370  *    request.
1371  * @lun: This parameter specifies the lun associated with this request.
1372  *
1373  * status, zero indicates success.
1374  */
1375 int isci_task_clear_task_set(
1376 	struct domain_device *d_device,
1377 	u8 *lun)
1378 {
1379 	return TMF_RESP_FUNC_FAILED;
1380 }
1381 
1382 
1383 /**
1384  * isci_task_query_task() - This function is implemented to cause libsas to
1385  *    correctly escalate the failed abort to a LUN or target reset (this is
1386  *    because the sas_scsi_find_task() libsas function does not correctly interpret
1387  *    all return codes from the abort task call).  When TMF_RESP_FUNC_SUCC is
1388  *    returned, libsas turns this into a LUN reset; when FUNC_FAILED is
1389  *    returned, libsas will turn this into a target reset
1390  * @task: This parameter specifies the sas task being queried.
1391  * @lun: This parameter specifies the lun associated with this request.
1392  *
1393  * status, zero indicates success.
1394  */
1395 int isci_task_query_task(
1396 	struct sas_task *task)
1397 {
1398 	/* See if there is a pending device reset for this device. */
1399 	if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1400 		return TMF_RESP_FUNC_FAILED;
1401 	else
1402 		return TMF_RESP_FUNC_SUCC;
1403 }
1404 
1405 /*
1406  * isci_task_request_complete() - This function is called by the sci core when
1407  *    a task request completes.
1408  * @ihost: This parameter specifies the ISCI host object
1409  * @ireq: This parameter is the completed isci_request object.
1410  * @completion_status: This parameter specifies the completion status from the
1411  *    sci core.
1412  *
1413  * none.
1414  */
1415 void
1416 isci_task_request_complete(struct isci_host *ihost,
1417 			   struct isci_request *ireq,
1418 			   enum sci_task_status completion_status)
1419 {
1420 	struct isci_remote_device *idev = ireq->isci_device;
1421 	enum isci_request_status old_state;
1422 	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1423 	struct completion *tmf_complete;
1424 	struct scic_sds_request *sci_req = &ireq->sci;
1425 
1426 	dev_dbg(&ihost->pdev->dev,
1427 		"%s: request = %p, status=%d\n",
1428 		__func__, ireq, completion_status);
1429 
1430 	old_state = isci_request_change_state(ireq, completed);
1431 
1432 	tmf->status = completion_status;
1433 	ireq->complete_in_target = true;
1434 
1435 	if (tmf->proto == SAS_PROTOCOL_SSP) {
1436 		memcpy(&tmf->resp.resp_iu,
1437 		       &sci_req->ssp.rsp,
1438 		       SSP_RESP_IU_MAX_SIZE);
1439 	} else if (tmf->proto == SAS_PROTOCOL_SATA) {
1440 		memcpy(&tmf->resp.d2h_fis,
1441 		       &sci_req->stp.rsp,
1442 		       sizeof(struct dev_to_host_fis));
1443 	}
1444 
1445 	/* Manage the timer if it is still running. */
1446 	if (tmf->timeout_timer) {
1447 		isci_del_timer(ihost, tmf->timeout_timer);
1448 		tmf->timeout_timer = NULL;
1449 	}
1450 
1451 	/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1452 	tmf_complete = tmf->complete;
1453 
1454 	scic_controller_complete_io(&ihost->sci, &idev->sci, &ireq->sci);
1455 	/* Set the 'terminated' flag to make sure the request cannot be
1456 	 * terminated or completed again.
1457 	 */
1458 	ireq->terminated = true;
1459 
1460 	isci_request_change_state(ireq, unallocated);
1461 	list_del_init(&ireq->dev_node);
1462 
1463 	/* The task management part completes last. */
1464 	complete(tmf_complete);
1465 }
1466 
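/* Reset the remote device: start the SCU-side device reset, issue the libsas
 * phy reset (hard or link reset, as requested by the caller), then terminate
 * any remaining I/O and complete the reset handshake with the core.
 */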
1467 static int isci_reset_device(struct domain_device *dev, int hard_reset)
1468 {
1469 	struct isci_remote_device *idev = dev->lldd_dev;
1470 	struct sas_phy *phy = sas_find_local_phy(dev);
1471 	struct isci_host *ihost = dev_to_ihost(dev);
1472 	enum sci_status status;
1473 	unsigned long flags;
1474 	int rc;
1475 
1476 	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1477 
1478 	if (!idev) {
1479 		dev_warn(&ihost->pdev->dev,
1480 			 "%s: idev is GONE!\n",
1481 			 __func__);
1482 
1483 		return TMF_RESP_FUNC_COMPLETE; /* Nothing to reset. */
1484 	}
1485 
1486 	spin_lock_irqsave(&ihost->scic_lock, flags);
1487 	status = scic_remote_device_reset(&idev->sci);
1488 	if (status != SCI_SUCCESS) {
1489 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
1490 
1491 		dev_warn(&ihost->pdev->dev,
1492 			 "%s: scic_remote_device_reset(%p) returned %d!\n",
1493 			 __func__, idev, status);
1494 
1495 		return TMF_RESP_FUNC_FAILED;
1496 	}
1497 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1498 
1499 	/* Make sure all pending requests are able to be fully terminated. */
1500 	isci_device_clear_reset_pending(ihost, idev);
1501 
1502 	rc = sas_phy_reset(phy, hard_reset);
1503 	msleep(2000); /* just like mvsas */
1504 
1505 	/* Terminate in-progress I/O now. */
1506 	isci_remote_device_nuke_requests(ihost, idev);
1507 
1508 	spin_lock_irqsave(&ihost->scic_lock, flags);
1509 	status = scic_remote_device_reset_complete(&idev->sci);
1510 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1511 
1512 	if (status != SCI_SUCCESS) {
1513 		dev_warn(&ihost->pdev->dev,
1514 			 "%s: scic_remote_device_reset_complete(%p) "
1515 			 "returned %d!\n", __func__, idev, status);
1516 	}
1517 
1518 	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1519 
1520 	return rc;
1521 }
1522 
1523 int isci_task_I_T_nexus_reset(struct domain_device *dev)
1524 {
1525 	struct isci_host *ihost = dev_to_ihost(dev);
1526 	int ret = TMF_RESP_FUNC_FAILED, hard_reset = 1;
1527 	struct isci_remote_device *idev;
1528 	unsigned long flags;
1529 
1530 	/* XXX mvsas is not protecting against ->lldd_dev_gone(), are we
1531 	 * being too paranoid, or is mvsas busted?!
1532 	 */
1533 	spin_lock_irqsave(&ihost->scic_lock, flags);
1534 	idev = dev->lldd_dev;
1535 	if (!idev || !test_bit(IDEV_EH, &idev->flags))
1536 		ret = TMF_RESP_FUNC_COMPLETE;
1537 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1538 
1539 	if (ret == TMF_RESP_FUNC_COMPLETE)
1540 		return ret;
1541 
1542 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
1543 		hard_reset = 0;
1544 
1545 	return isci_reset_device(dev, hard_reset);
1546 }
1547 
1548 int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1549 {
1550 	struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1551 	int hard_reset = 1;
1552 
1553 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
1554 		hard_reset = 0;
1555 
1556 	return isci_reset_device(dev, hard_reset);
1557 }
1558