xref: /linux/drivers/scsi/isci/task.c (revision cc9203bf381a465cd115762b9cf7c9a313c874bc)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55 
56 #include <linux/completion.h>
57 #include <linux/irqflags.h>
58 #include "sas.h"
59 #include "scic_task_request.h"
60 #include "scic_io_request.h"
61 #include "remote_device.h"
62 #include "remote_node_context.h"
63 #include "isci.h"
64 #include "request.h"
65 #include "sata.h"
66 #include "task.h"
67 #include "scic_sds_request.h"
68 #include "timers.h"
69 
70 /**
71  * isci_task_refuse() - complete the request to the upper layer driver in
72  *     the case where an I/O needs to be completed back in the submit path.
73  * @ihost: host on which the request was queued
74  * @task: request to complete
75  * @response: response code for the completed task.
76  * @status: status code for the completed task.
77  *
78  */
79 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
80 			     enum service_response response,
81 			     enum exec_status status)
82 
83 {
84 	enum isci_completion_selection disposition;
85 
86 	disposition = isci_perform_normal_io_completion;
87 	disposition = isci_task_set_completion_status(task, response, status,
88 						      disposition);
89 
90 	/* Tasks aborted specifically by a call to the lldd_abort_task
91 	 * function should not be completed to the host in the regular path.
92 	 */
93 	switch (disposition) {
94 	case isci_perform_normal_io_completion:
95 		/* Normal notification (task_done) */
96 		dev_dbg(&ihost->pdev->dev,
97 			"%s: Normal - task = %p, response=%d, "
98 			"status=%d\n",
99 			__func__, task, response, status);
100 
101 		task->lldd_task = NULL;
102 
103 		isci_execpath_callback(ihost, task, task->task_done);
104 		break;
105 
106 	case isci_perform_aborted_io_completion:
107 		/* No notification because this request is already in the
108 		 * abort path.
109 		 */
110 		dev_warn(&ihost->pdev->dev,
111 			 "%s: Aborted - task = %p, response=%d, "
112 			 "status=%d\n",
113 			 __func__, task, response, status);
114 		break;
115 
116 	case isci_perform_error_io_completion:
117 		/* Use sas_task_abort */
118 		dev_warn(&ihost->pdev->dev,
119 			 "%s: Error - task = %p, response=%d, "
120 			 "status=%d\n",
121 			 __func__, task, response, status);
122 
123 		isci_execpath_callback(ihost, task, sas_task_abort);
124 		break;
125 
126 	default:
127 		dev_warn(&ihost->pdev->dev,
128 			 "%s: isci task notification default case!",
129 			 __func__);
130 		sas_task_abort(task);
131 		break;
132 	}
133 }
134 
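/*
 * Walk 'num' sas_tasks starting at 'task', following the list linkage
 * libsas uses when it passes the LLDD more than one task at a time.
 */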
135 #define for_each_sas_task(num, task) \
136 	for (; num > 0; num--,\
137 	     task = list_entry(task->list.next, struct sas_task, list))
138 
139 /**
140  * isci_task_execute_task() - This function is one of the SAS Domain Template
141  *    functions. This function is called by libsas to send a task down to
142  *    hardware.
143  * @task: This parameter specifies the SAS task to send.
144  * @num: This parameter specifies the number of tasks to queue.
145  * @gfp_flags: This parameter specifies the context of this call.
146  *
147  * status, zero indicates success.
148  */
149 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
150 {
151 	struct isci_host *ihost = dev_to_ihost(task->dev);
152 	struct isci_request *request = NULL;
153 	struct isci_remote_device *device;
154 	unsigned long flags;
155 	int ret;
156 	enum sci_status status;
157 	enum isci_status device_status;
158 
159 	dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
160 
161 	/* Check if we have room for more tasks */
162 	ret = isci_host_can_queue(ihost, num);
163 
164 	if (ret) {
165 		dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__);
166 		return ret;
167 	}
168 
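	/* Each task below is either handed to the hardware via
	 * isci_request_execute() or refused back to libsas with
	 * isci_task_refuse(); in both cases this function reports
	 * success, so the unconditional 'return 0' at the end is
	 * intentional.
	 */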
169 	for_each_sas_task(num, task) {
170 		dev_dbg(&ihost->pdev->dev,
171 			"task = %p, num = %d; dev = %p; cmd = %p\n",
172 			    task, num, task->dev, task->uldd_task);
173 
174 		device = task->dev->lldd_dev;
175 
176 		if (device)
177 			device_status = device->status;
178 		else
179 			device_status = isci_freed;
180 
181 		/* From this point onward, any process that needs to guarantee
182 		 * that there is no kernel I/O being started will have to wait
183 		 * for the quiesce spinlock.
184 		 */
185 
186 		if (device_status != isci_ready_for_io) {
187 
188 			/* Forces a retry from scsi mid layer. */
189 			dev_dbg(&ihost->pdev->dev,
190 				"%s: task %p: isci_host->status = %d, "
191 				"device = %p; device_status = 0x%x\n\n",
192 				__func__,
193 				task,
194 				isci_host_get_state(ihost),
195 				device,
196 				device_status);
197 
198 			if (device_status == isci_ready) {
199 				/* Indicate QUEUE_FULL so that the scsi midlayer
200 				* retries.
201 				*/
202 				isci_task_refuse(ihost, task,
203 						 SAS_TASK_COMPLETE,
204 						 SAS_QUEUE_FULL);
205 			} else {
206 				/* Else, the device is going down. */
207 				isci_task_refuse(ihost, task,
208 						 SAS_TASK_UNDELIVERED,
209 						 SAS_DEVICE_UNKNOWN);
210 			}
211 			isci_host_can_dequeue(ihost, 1);
212 		} else {
213 			/* There is a device and it's ready for I/O. */
214 			spin_lock_irqsave(&task->task_state_lock, flags);
215 
216 			if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
217 
218 				spin_unlock_irqrestore(&task->task_state_lock,
219 						       flags);
220 
221 				isci_task_refuse(ihost, task,
222 						 SAS_TASK_UNDELIVERED,
223 						 SAM_STAT_TASK_ABORTED);
224 
225 				/* The I/O was aborted. */
226 
227 			} else {
228 				task->task_state_flags |= SAS_TASK_AT_INITIATOR;
229 				spin_unlock_irqrestore(&task->task_state_lock, flags);
230 
231 				/* build and send the request. */
232 				status = isci_request_execute(ihost, task, &request,
233 							      gfp_flags);
234 
235 				if (status != SCI_SUCCESS) {
236 
237 					spin_lock_irqsave(&task->task_state_lock, flags);
238 					/* Did not really start this command. */
239 					task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
240 					spin_unlock_irqrestore(&task->task_state_lock, flags);
241 
242 					/* Indicate QUEUE_FULL so that the scsi
243 					* midlayer retries. if the request
244 					* failed for remote device reasons,
245 					* it gets returned as
246 					* SAS_TASK_UNDELIVERED next time
247 					* through.
248 					*/
249 					isci_task_refuse(ihost, task,
250 							 SAS_TASK_COMPLETE,
251 							 SAS_QUEUE_FULL);
252 					isci_host_can_dequeue(ihost, 1);
253 				}
254 			}
255 		}
256 	}
257 	return 0;
258 }
259 
260 
261 
262 /**
263  * isci_task_request_build() - This function builds the task request object.
264  * @isci_host: This parameter specifies the ISCI host object
265  * @isci_request: This parameter points to the isci_request object allocated
266  *    in the request construct function.
267  * @isci_tmf: This parameter is the task management struct to be built
268  *
269  * SCI_SUCCESS on successful completion, or a specific failure code.
270  */
271 static enum sci_status isci_task_request_build(
272 	struct isci_host *isci_host,
273 	struct isci_request **isci_request,
274 	struct isci_tmf *isci_tmf)
275 {
276 	struct scic_sds_remote_device *sci_device;
277 	enum sci_status status = SCI_FAILURE;
278 	struct isci_request *request = NULL;
279 	struct isci_remote_device *isci_device;
280 	struct domain_device *dev;
281 
282 	dev_dbg(&isci_host->pdev->dev,
283 		"%s: isci_tmf = %p\n", __func__, isci_tmf);
284 
285 	isci_device = isci_tmf->device;
286 	sci_device = &isci_device->sci;
287 	dev = isci_device->domain_dev;
288 
289 	/* do common allocation and init of request object. */
290 	status = isci_request_alloc_tmf(
291 		isci_host,
292 		isci_tmf,
293 		&request,
294 		isci_device,
295 		GFP_ATOMIC
296 		);
297 
298 	if (status != SCI_SUCCESS)
299 		goto out;
300 
301 	/* let the core do its construction. */
302 	status = scic_task_request_construct(&isci_host->sci, sci_device,
303 					     SCI_CONTROLLER_INVALID_IO_TAG,
304 					     &request->sci);
305 
306 	if (status != SCI_SUCCESS) {
307 		dev_warn(&isci_host->pdev->dev,
308 			 "%s: scic_task_request_construct failed - "
309 			 "status = 0x%x\n",
310 			 __func__,
311 			 status);
312 		goto errout;
313 	}
314 
315 	/* XXX convert to get this from task->tproto like other drivers */
316 	if (dev->dev_type == SAS_END_DEV) {
317 		isci_tmf->proto = SAS_PROTOCOL_SSP;
318 		status = scic_task_request_construct_ssp(&request->sci);
319 		if (status != SCI_SUCCESS)
320 			goto errout;
321 	}
322 
323 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
324 		isci_tmf->proto = SAS_PROTOCOL_SATA;
325 		status = isci_sata_management_task_request_build(request);
326 
327 		if (status != SCI_SUCCESS)
328 			goto errout;
329 	}
330 
331 	goto out;
332 
333  errout:
334 
335 	/* release the dma memory if we fail. */
336 	isci_request_free(isci_host, request);
337 	request = NULL;
338 
339  out:
340 	*isci_request = request;
341 	return status;
342 }
343 
344 /**
345  * isci_tmf_timeout_cb() - This function is called as a kernel callback when
346  *    the timeout period for the TMF has expired.
347  * @tmf_request_arg: This parameter is the (struct isci_request *) that
348  *    carries the timed-out TMF.
349  */
350 static void isci_tmf_timeout_cb(void *tmf_request_arg)
351 {
352 	struct isci_request *request = (struct isci_request *)tmf_request_arg;
353 	struct isci_tmf *tmf = isci_request_access_tmf(request);
354 	enum sci_status status;
355 
356 	/* This task management request has timed-out.  Terminate the request
357 	 * so that the request eventually completes to the requestor in the
358 	 * request completion callback path.
359 	 */
360 	/* Note - the timer callback function itself has provided spinlock
361 	 * exclusion from the start and completion paths.  No need to take
362 	 * the request->isci_host->scic_lock here.
363 	 */
364 
365 	if (tmf->timeout_timer != NULL) {
366 		/* Call the users callback, if any. */
367 		if (tmf->cb_state_func != NULL)
368 			tmf->cb_state_func(isci_tmf_timed_out, tmf,
369 					   tmf->cb_data);
370 
371 		/* Terminate the TMF transmit request. */
372 		status = scic_controller_terminate_request(
373 			&request->isci_host->sci,
374 			&request->isci_device->sci,
375 			&request->sci);
376 
377 		dev_dbg(&request->isci_host->pdev->dev,
378 			"%s: tmf_request = %p; tmf = %p; status = %d\n",
379 			__func__, request, tmf, status);
380 	} else
381 		dev_dbg(&request->isci_host->pdev->dev,
382 			"%s: timer already canceled! "
383 			"tmf_request = %p; tmf = %p\n",
384 			__func__, request, tmf);
385 
386 	/* No need to unlock since the caller to this callback is doing it for
387 	 * us.
388 	 * request->isci_host->scic_lock
389 	 */
390 }
391 
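/* Illustrative usage sketch (not a real caller): a TMF is normally built
 * with isci_task_build_tmf() and then handed to isci_task_execute_tmf().
 * 'ihost' and 'idev' here are placeholder variables:
 *
 *	struct isci_tmf tmf;
 *
 *	isci_task_build_tmf(&tmf, idev, isci_tmf_ssp_lun_reset, NULL, NULL);
 *	if (isci_task_execute_tmf(ihost, &tmf, 2000) != TMF_RESP_FUNC_COMPLETE)
 *		dev_warn(&ihost->pdev->dev, "LUN reset TMF failed\n");
 *
 * See isci_task_send_lu_reset_sas() below for an actual caller.
 */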
392 /**
393  * isci_task_execute_tmf() - This function builds and sends a task request,
394  *    then waits for the completion.
395  * @isci_host: This parameter specifies the ISCI host object
396  * @tmf: This parameter is the pointer to the task management structure for
397  *    this request.
398  * @timeout_ms: This parameter specifies the timeout period for the task
399  *    management request.
400  *
401  * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes
402  * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED.
403  */
404 int isci_task_execute_tmf(
405 	struct isci_host *isci_host,
406 	struct isci_tmf *tmf,
407 	unsigned long timeout_ms)
408 {
409 	DECLARE_COMPLETION_ONSTACK(completion);
410 	enum sci_task_status status = SCI_TASK_FAILURE;
411 	struct scic_sds_remote_device *sci_device;
412 	struct isci_remote_device *isci_device = tmf->device;
413 	struct isci_request *request;
414 	int ret = TMF_RESP_FUNC_FAILED;
415 	unsigned long flags;
416 
417 	/* sanity check, return TMF_RESP_FUNC_FAILED
418 	 * if the device is not there or not ready for I/O.
419 	 */
420 	if (!isci_device || isci_device->status != isci_ready_for_io) {
421 		dev_dbg(&isci_host->pdev->dev,
422 			"%s: isci_device = %p not ready (%d)\n",
423 			__func__, isci_device,
424 			isci_device ? isci_device->status : isci_freed);
425 		return TMF_RESP_FUNC_FAILED;
426 	} else
427 		dev_dbg(&isci_host->pdev->dev,
428 			"%s: isci_device = %p\n",
429 			__func__, isci_device);
430 
431 	sci_device = &isci_device->sci;
432 
433 	/* Assign the pointer to the TMF's completion kernel wait structure. */
434 	tmf->complete = &completion;
435 
436 	isci_task_request_build(
437 		isci_host,
438 		&request,
439 		tmf
440 		);
441 
442 	if (!request) {
443 		dev_warn(&isci_host->pdev->dev,
444 			"%s: isci_task_request_build failed\n",
445 			__func__);
446 		return TMF_RESP_FUNC_FAILED;
447 	}
448 
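	/* The timer setup, TMF start, state change and list_add below all
	 * happen under scic_lock; the timeout callback runs with the same
	 * exclusion (see the locking note in isci_tmf_timeout_cb), so the
	 * timeout cannot race with this start path.
	 */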
449 	/* Allocate the TMF timeout timer. */
450 	spin_lock_irqsave(&isci_host->scic_lock, flags);
451 	tmf->timeout_timer = isci_timer_create(isci_host, request, isci_tmf_timeout_cb);
452 
453 	/* Start the timer. */
454 	if (tmf->timeout_timer)
455 		isci_timer_start(tmf->timeout_timer, timeout_ms);
456 	else
457 		dev_warn(&isci_host->pdev->dev,
458 			 "%s: isci_timer_create failed!!!!\n",
459 			 __func__);
460 
461 	/* start the TMF io. */
462 	status = scic_controller_start_task(
463 		&isci_host->sci,
464 		sci_device,
465 		&request->sci,
466 		SCI_CONTROLLER_INVALID_IO_TAG);
467 
468 	if (status != SCI_TASK_SUCCESS) {
469 		dev_warn(&isci_host->pdev->dev,
470 			 "%s: start_io failed - status = 0x%x, request = %p\n",
471 			 __func__,
472 			 status,
473 			 request);
474 		goto cleanup_request;
475 	}
476 
477 	/* Call the users callback, if any. */
478 	if (tmf->cb_state_func != NULL)
479 		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
480 
481 	/* Change the state of the TMF-bearing request to "started". */
482 	isci_request_change_state(request, started);
483 
484 	/* add the request to the remote device request list. */
485 	list_add(&request->dev_node, &isci_device->reqs_in_process);
486 
487 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
488 
489 	/* Wait for the TMF to complete, or a timeout. */
490 	wait_for_completion(&completion);
491 
492 	isci_print_tmf(tmf);
493 
494 	if (tmf->status == SCI_SUCCESS)
495 		ret = TMF_RESP_FUNC_COMPLETE;
496 	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
497 		dev_dbg(&isci_host->pdev->dev,
498 			"%s: tmf.status == "
499 			"SCI_FAILURE_IO_RESPONSE_VALID\n",
500 			__func__);
501 		ret = TMF_RESP_FUNC_COMPLETE;
502 	}
503 	/* Else - leave the default "failed" status alone. */
504 
505 	dev_dbg(&isci_host->pdev->dev,
506 		"%s: completed request = %p\n",
507 		__func__,
508 		request);
509 
510 	if (request->io_request_completion != NULL) {
511 
512 		/* The fact that this is non-NULL for a TMF request
513 		 * means there is a thread waiting for this TMF to
514 		 * finish.
515 		 */
516 		complete(request->io_request_completion);
517 	}
518 
519 	spin_lock_irqsave(&isci_host->scic_lock, flags);
520 
521  cleanup_request:
522 
523 	/* Clean up the timer if needed. */
524 	if (tmf->timeout_timer) {
525 		isci_del_timer(isci_host, tmf->timeout_timer);
526 		tmf->timeout_timer = NULL;
527 	}
528 
529 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
530 
531 	isci_request_free(isci_host, request);
532 
533 	return ret;
534 }
535 
536 void isci_task_build_tmf(
537 	struct isci_tmf *tmf,
538 	struct isci_remote_device *isci_device,
539 	enum isci_tmf_function_codes code,
540 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
541 			    struct isci_tmf *,
542 			    void *),
543 	void *cb_data)
544 {
545 	dev_dbg(&isci_device->isci_port->isci_host->pdev->dev,
546 		"%s: isci_device = %p\n", __func__, isci_device);
547 
548 	memset(tmf, 0, sizeof(*tmf));
549 
550 	tmf->device        = isci_device;
551 	tmf->tmf_code      = code;
552 	tmf->timeout_timer = NULL;
553 	tmf->cb_state_func = tmf_sent_cb;
554 	tmf->cb_data       = cb_data;
555 }
556 
557 static void isci_task_build_abort_task_tmf(
558 	struct isci_tmf *tmf,
559 	struct isci_remote_device *isci_device,
560 	enum isci_tmf_function_codes code,
561 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
562 			    struct isci_tmf *,
563 			    void *),
564 	struct isci_request *old_request)
565 {
566 	isci_task_build_tmf(tmf, isci_device, code, tmf_sent_cb,
567 			    (void *)old_request);
568 	tmf->io_tag = old_request->io_tag;
569 }
570 
571 static struct isci_request *isci_task_get_request_from_task(
572 	struct sas_task *task,
573 	struct isci_remote_device **isci_device)
574 {
575 
576 	struct isci_request *request = NULL;
577 	unsigned long flags;
578 
579 	spin_lock_irqsave(&task->task_state_lock, flags);
580 
581 	request = task->lldd_task;
582 
583 	/* If task is already done, the request isn't valid */
584 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
585 	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
586 	    (request != NULL)) {
587 
588 		if (isci_device != NULL)
589 			*isci_device = request->isci_device;
590 	}
591 
592 	spin_unlock_irqrestore(&task->task_state_lock, flags);
593 
594 	return request;
595 }
596 
597 /**
598  * isci_task_validate_request_to_abort() - This function checks the given I/O
599  *    against the "started" state.  If the request is still "started", its
600  *    state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
601  *    BEFORE CALLING THIS FUNCTION.
602  * @isci_request: This parameter specifies the request object to control.
603  * @isci_host: This parameter specifies the ISCI host object
604  * @isci_device: This is the device to which the request is pending.
605  * @aborted_io_completion: This is a completion structure that will be added to
606  *    the request in case it is changed to aborting; this completion is
607  *    triggered when the request is fully completed.
608  *
609  * Either "started" on successful change of the task status to "aborted", or
610  * "unallocated" if the task cannot be controlled.
611  */
612 static enum isci_request_status isci_task_validate_request_to_abort(
613 	struct isci_request *isci_request,
614 	struct isci_host *isci_host,
615 	struct isci_remote_device *isci_device,
616 	struct completion *aborted_io_completion)
617 {
618 	enum isci_request_status old_state = unallocated;
619 
620 	/* Only abort the task if it's in the
621 	 *  device's request_in_process list
622 	 */
623 	if (isci_request && !list_empty(&isci_request->dev_node)) {
624 		old_state = isci_request_change_started_to_aborted(
625 			isci_request, aborted_io_completion);
626 
627 	}
628 
629 	return old_state;
630 }
631 
632 static void isci_request_cleanup_completed_loiterer(
633 	struct isci_host *isci_host,
634 	struct isci_remote_device *isci_device,
635 	struct isci_request *isci_request)
636 {
637 	struct sas_task     *task;
638 	unsigned long       flags;
639 
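	/* TMF-type requests have no associated sas_task, so 'task' can
	 * legitimately be NULL here; only I/O-type requests are completed
	 * back to libsas below.
	 */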
640 	task = (isci_request->ttype == io_task)
641 		? isci_request_access_task(isci_request)
642 		: NULL;
643 
644 	dev_dbg(&isci_host->pdev->dev,
645 		"%s: isci_device=%p, request=%p, task=%p\n",
646 		__func__, isci_device, isci_request, task);
647 
648 	spin_lock_irqsave(&isci_host->scic_lock, flags);
649 	list_del_init(&isci_request->dev_node);
650 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
651 
652 	if (task != NULL) {
653 
654 		spin_lock_irqsave(&task->task_state_lock, flags);
655 		task->lldd_task = NULL;
656 
657 		isci_set_task_doneflags(task);
658 
659 		/* If this task is not in the abort path, call task_done. */
660 		if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
661 
662 			spin_unlock_irqrestore(&task->task_state_lock, flags);
663 			task->task_done(task);
664 		} else
665 			spin_unlock_irqrestore(&task->task_state_lock, flags);
666 	}
667 	isci_request_free(isci_host, isci_request);
668 }
669 
670 /**
671  * isci_termination_timed_out() - This function will deal with a request for
672  *    which the wait for termination has timed-out.
673  * @host: This SCU.
674  * @request: The I/O request being terminated.
675  *
676  */
677 static void
678 isci_termination_timed_out(
679 	struct isci_host *host,
680 	struct isci_request *request
681 	)
682 {
683 	unsigned long state_flags;
684 
685 	dev_warn(&host->pdev->dev,
686 		"%s: host = %p; request = %p\n",
687 		__func__, host, request);
688 
689 	/* At this point, the request to terminate
690 	* has timed out. The best we can do is to
691 	* have the request die a silent death
692 	* if it ever completes.
693 	*/
694 	spin_lock_irqsave(&request->state_lock, state_flags);
695 
696 	if (request->status == started) {
697 
698 		/* Set the request state to "dead",
699 		* and clear the task pointer so that an actual
700 		* completion event callback doesn't do
701 		* anything.
702 		*/
703 		request->status = dead;
704 
705 		/* Clear the timeout completion event pointer.*/
706 		request->io_request_completion = NULL;
707 
708 		if (request->ttype == io_task) {
709 
710 			/* Break links with the sas_task. */
711 			if (request->ttype_ptr.io_task_ptr != NULL) {
712 
713 				request->ttype_ptr.io_task_ptr->lldd_task = NULL;
714 				request->ttype_ptr.io_task_ptr            = NULL;
715 			}
716 		}
717 	}
718 	spin_unlock_irqrestore(&request->state_lock, state_flags);
719 }
720 
721 
722 /**
723  * isci_terminate_request_core() - This function will terminate the given
724  *    request, and wait for it to complete.  This function must only be called
725  *    from a thread that can wait.  Note that the request is terminated and
726  *    completed (back to the host, if started there).
727  * @isci_host: This SCU.
728  * @isci_device: The target.
729  * @isci_request: The I/O request to be terminated.
730  *
731  *
732  */
733 static void isci_terminate_request_core(
734 	struct isci_host *isci_host,
735 	struct isci_remote_device *isci_device,
736 	struct isci_request *isci_request)
737 {
738 	enum sci_status status      = SCI_SUCCESS;
739 	bool was_terminated         = false;
740 	bool needs_cleanup_handling = false;
741 	enum isci_request_status request_status;
742 	unsigned long flags;
743 	unsigned long timeout_remaining;
744 
745 
746 	dev_dbg(&isci_host->pdev->dev,
747 		"%s: device = %p; request = %p\n",
748 		__func__, isci_device, isci_request);
749 
750 	spin_lock_irqsave(&isci_host->scic_lock, flags);
751 
752 	/* Note that we are not going to control
753 	* the target to abort the request.
754 	*/
755 	isci_request->complete_in_target = true;
756 
757 	/* Make sure the request wasn't just sitting around signalling
758 	 * device condition (if the request handle is NULL, then the
759 	 * request completed but needed additional handling here).
760 	 */
761 	if (!isci_request->terminated) {
762 		was_terminated = true;
763 		needs_cleanup_handling = true;
764 		status = scic_controller_terminate_request(
765 			&isci_host->sci,
766 			&isci_device->sci,
767 			&isci_request->sci);
768 	}
769 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
770 
771 	/*
772 	 * The only time the request to terminate will
773 	 * fail is when the io request is completed and
774 	 * being aborted.
775 	 */
776 	if (status != SCI_SUCCESS) {
777 		dev_err(&isci_host->pdev->dev,
778 			"%s: scic_controller_terminate_request"
779 			" returned = 0x%x\n",
780 			__func__,
781 			status);
782 		/* Clear the completion pointer from the request. */
783 		isci_request->io_request_completion = NULL;
784 
785 	} else {
786 		if (was_terminated) {
787 			dev_dbg(&isci_host->pdev->dev,
788 				"%s: before completion wait (%p)\n",
789 				__func__,
790 				isci_request->io_request_completion);
791 
792 			/* Wait here for the request to complete. */
793 			#define TERMINATION_TIMEOUT_MSEC 50
794 			timeout_remaining
795 				= wait_for_completion_timeout(
796 				   isci_request->io_request_completion,
797 				   msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
798 
799 			if (!timeout_remaining) {
800 
801 				isci_termination_timed_out(isci_host,
802 							   isci_request);
803 
804 				dev_err(&isci_host->pdev->dev,
805 					"%s: *** Timeout waiting for "
806 					"termination(%p/%p)\n",
807 					__func__,
808 					isci_request->io_request_completion,
809 					isci_request);
810 
811 			} else
812 				dev_dbg(&isci_host->pdev->dev,
813 					"%s: after completion wait (%p)\n",
814 					__func__,
815 					isci_request->io_request_completion);
816 		}
817 		/* Clear the completion pointer from the request. */
818 		isci_request->io_request_completion = NULL;
819 
820 		/* Peek at the status of the request.  This will tell
821 		* us if there was special handling on the request such that it
822 		* needs to be detached and freed here.
823 		*/
824 		spin_lock_irqsave(&isci_request->state_lock, flags);
825 		request_status = isci_request_get_state(isci_request);
826 
827 		if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
828 		    && ((request_status == aborted)
829 			|| (request_status == aborting)
830 			|| (request_status == terminating)
831 			|| (request_status == completed)
832 			|| (request_status == dead)
833 			)
834 		    ) {
835 
836 			/* The completion routine won't free a request in
837 			* the aborted/aborting/etc. states, so we do
838 			* it here.
839 			*/
840 			needs_cleanup_handling = true;
841 		}
842 		spin_unlock_irqrestore(&isci_request->state_lock, flags);
843 
844 		if (needs_cleanup_handling)
845 			isci_request_cleanup_completed_loiterer(
846 				isci_host, isci_device, isci_request
847 				);
848 	}
849 }
850 
851 static void isci_terminate_request(
852 	struct isci_host *isci_host,
853 	struct isci_remote_device *isci_device,
854 	struct isci_request *isci_request,
855 	enum isci_request_status new_request_state)
856 {
857 	enum isci_request_status old_state;
858 	DECLARE_COMPLETION_ONSTACK(request_completion);
859 
860 	/* Change state to "new_request_state" if it is currently "started" */
861 	old_state = isci_request_change_started_to_newstate(
862 		isci_request,
863 		&request_completion,
864 		new_request_state
865 		);
866 
867 	if ((old_state == started) ||
868 	    (old_state == completed) ||
869 	    (old_state == aborting)) {
870 
871 		/* If the old_state is started:
872 		 * This request was not already being aborted. If it had been,
873 		 * then the aborting I/O (ie. the TMF request) would not be in
874 		 * the aborting state, and thus would be terminated here.  Note
875 		 * that since the TMF completion's call to the kernel function
876 		 * "complete()" does not happen until the pending I/O request
877 		 * terminate fully completes, we do not have to implement a
878 		 * special wait here for already aborting requests - the
879 		 * termination of the TMF request will force the request
880 		 * to finish its already-started terminate.
881 		 *
882 		 * If old_state == completed:
883 		 * This request completed from the SCU hardware perspective
884 		 * and now just needs cleaning up in terms of freeing the
885 		 * request and potentially calling up to libsas.
886 		 *
887 		 * If old_state == aborting:
888 		 * This request has already gone through a TMF timeout, but may
889 		 * not have been terminated; needs cleaning up at least.
890 		 */
891 		isci_terminate_request_core(isci_host, isci_device,
892 					    isci_request);
893 	}
894 }
895 
896 /**
897  * isci_terminate_pending_requests() - This function will change the state of
898  *    all requests on the given device to "aborting", will terminate the
899  *    requests, and wait for them to complete.  This function must only be
900  *    called from a thread that can wait.  Note that the requests are all
901  *    terminated and completed (back to the host, if started there).
902  * @isci_host: This parameter specifies SCU.
903  * @isci_device: This parameter specifies the target.
904  * @new_request_state: This parameter specifies the new state for the requests.
905  *
906  */
907 void isci_terminate_pending_requests(
908 	struct isci_host *isci_host,
909 	struct isci_remote_device *isci_device,
910 	enum isci_request_status new_request_state)
911 {
912 	struct isci_request *request;
913 	struct isci_request *next_request;
914 	unsigned long       flags;
915 	struct list_head    aborted_request_list;
916 
917 	INIT_LIST_HEAD(&aborted_request_list);
918 
919 	dev_dbg(&isci_host->pdev->dev,
920 		"%s: isci_device = %p (new request state = %d)\n",
921 		__func__, isci_device, new_request_state);
922 
923 	spin_lock_irqsave(&isci_host->scic_lock, flags);
924 
925 	/* Move all of the pending requests off of the device list. */
926 	list_splice_init(&isci_device->reqs_in_process,
927 			 &aborted_request_list);
928 
929 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
930 
931 	/* Iterate through the now-local list. */
932 	list_for_each_entry_safe(request, next_request,
933 				 &aborted_request_list, dev_node) {
934 
935 		dev_warn(&isci_host->pdev->dev,
936 			"%s: isci_device=%p request=%p; task=%p\n",
937 			__func__,
938 			isci_device, request,
939 			((request->ttype == io_task)
940 				? isci_request_access_task(request)
941 				: NULL));
942 
943 		/* Mark all still pending I/O with the selected next
944 		* state, terminate and free it.
945 		*/
946 		isci_terminate_request(isci_host, isci_device,
947 				       request, new_request_state
948 				       );
949 	}
950 }
951 
952 /**
953  * isci_task_send_lu_reset_sas() - This function sends an SSP LUN reset TMF
954  *    to the given device on behalf of the SAS Domain Template functions.
955  * @lun: This parameter specifies the lun to be reset.
956  *
957  * status, zero indicates success.
958  */
959 static int isci_task_send_lu_reset_sas(
960 	struct isci_host *isci_host,
961 	struct isci_remote_device *isci_device,
962 	u8 *lun)
963 {
964 	struct isci_tmf tmf;
965 	int ret = TMF_RESP_FUNC_FAILED;
966 
967 	dev_dbg(&isci_host->pdev->dev,
968 		"%s: isci_host = %p, isci_device = %p\n",
969 		__func__, isci_host, isci_device);
970 	/* Send the LUN reset to the target.  By the time the call returns,
971 	 * the TMF has fully executed in the target (in which case the return
972 	 * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed-out or
973 	 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
974 	 */
975 	isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_lun_reset, NULL,
976 			    NULL);
977 
978 	#define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
979 	ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
980 
981 	if (ret == TMF_RESP_FUNC_COMPLETE)
982 		dev_dbg(&isci_host->pdev->dev,
983 			"%s: %p: TMF_LU_RESET passed\n",
984 			__func__, isci_device);
985 	else
986 		dev_dbg(&isci_host->pdev->dev,
987 			"%s: %p: TMF_LU_RESET failed (%x)\n",
988 			__func__, isci_device, ret);
989 
990 	return ret;
991 }
992 
993 /**
994  * isci_task_lu_reset() - This function is one of the SAS Domain Template
995  *    functions. This is one of the Task Management functions called by libsas,
996  *    to reset the given lun. Note the assumption that while this call is
997  *    executing, no I/O will be sent by the host to the device.
998  * @lun: This parameter specifies the lun to be reset.
999  *
1000  * status, zero indicates success.
1001  */
1002 int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
1003 {
1004 	struct isci_host *isci_host = dev_to_ihost(domain_device);
1005 	struct isci_remote_device *isci_device = NULL;
1006 	int ret;
1007 	bool device_stopping = false;
1008 
1009 	isci_device = domain_device->lldd_dev;
1010 
1011 	dev_dbg(&isci_host->pdev->dev,
1012 		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
1013 		 __func__, domain_device, isci_host, isci_device);
1014 
1015 	if (isci_device != NULL) {
1016 		device_stopping = (isci_device->status == isci_stopping)
1017 				  || (isci_device->status == isci_stopped);
1018 		set_bit(IDEV_EH, &isci_device->flags);
1019 	}
1020 
1021 	/* If there is a device reset pending on any request in the
1022 	 * device's list, fail this LUN reset request in order to
1023 	 * escalate to the device reset.
1024 	 */
1025 	if (!isci_device || device_stopping ||
1026 	    isci_device_is_reset_pending(isci_host, isci_device)) {
1027 		dev_warn(&isci_host->pdev->dev,
1028 			 "%s: No dev (%p), or "
1029 			 "RESET PENDING: domain_device=%p\n",
1030 			 __func__, isci_device, domain_device);
1031 		return TMF_RESP_FUNC_FAILED;
1032 	}
1033 
1034 	/* Send the task management part of the reset. */
1035 	if (sas_protocol_ata(domain_device->tproto))
1036 		ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
1037 	else
1038 		ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
1039 
1040 	/* If the LUN reset worked, all the I/O can now be terminated. */
1041 	if (ret == TMF_RESP_FUNC_COMPLETE)
1042 		/* Terminate all I/O now. */
1043 		isci_terminate_pending_requests(isci_host,
1044 						isci_device,
1045 						terminating);
1046 
1047 	return ret;
1048 }
1049 
1050 
1051 /*	 int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
1052 int isci_task_clear_nexus_port(struct asd_sas_port *port)
1053 {
1054 	return TMF_RESP_FUNC_FAILED;
1055 }
1056 
1057 
1058 
1059 int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
1060 {
1061 	return TMF_RESP_FUNC_FAILED;
1062 }
1063 
1064 /* Task Management Functions. Must be called from process context.	 */
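/* These entry points are plugged into libsas through the driver's
 * struct sas_domain_function_template (see init.c), for example
 * .lldd_abort_task and .lldd_lu_reset.
 */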
1065 
1066 /**
1067  * isci_abort_task_process_cb() - This is a helper function for the abort task
1068  *    TMF command.  It manages the request state with respect to the successful
1069  *    transmission / completion of the abort task request.
1070  * @cb_state: This parameter specifies when this function was called - after
1071  *    the TMF request has been started and after it has timed-out.
1072  * @tmf: This parameter specifies the TMF in progress.
1073  *
1074  *
1075  */
1076 static void isci_abort_task_process_cb(
1077 	enum isci_tmf_cb_state cb_state,
1078 	struct isci_tmf *tmf,
1079 	void *cb_data)
1080 {
1081 	struct isci_request *old_request;
1082 
1083 	old_request = (struct isci_request *)cb_data;
1084 
1085 	dev_dbg(&old_request->isci_host->pdev->dev,
1086 		"%s: tmf=%p, old_request=%p\n",
1087 		__func__, tmf, old_request);
1088 
1089 	switch (cb_state) {
1090 
1091 	case isci_tmf_started:
1092 		/* The TMF has been started.  Nothing to do here, since the
1093 		 * request state was already set to "aborted" by the abort
1094 		 * task function.
1095 		 */
1096 		if ((old_request->status != aborted)
1097 			&& (old_request->status != completed))
1098 			dev_err(&old_request->isci_host->pdev->dev,
1099 				"%s: Bad request status (%d): tmf=%p, old_request=%p\n",
1100 				__func__, old_request->status, tmf, old_request);
1101 		break;
1102 
1103 	case isci_tmf_timed_out:
1104 
1105 		/* Set the task's state to "aborting", since the abort task
1106 		 * function thread set it to "aborted" (above) in anticipation
1107 		 * of the task management request working correctly.  Since the
1108 		 * timeout has now fired, the TMF request failed.  We set the
1109 		 * state such that the request completion will indicate the
1110 		 * device is no longer present.
1111 		 */
1112 		isci_request_change_state(old_request, aborting);
1113 		break;
1114 
1115 	default:
1116 		dev_err(&old_request->isci_host->pdev->dev,
1117 			"%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
1118 			__func__, cb_state, tmf, old_request);
1119 		break;
1120 	}
1121 }
1122 
1123 /**
1124  * isci_task_abort_task() - This function is one of the SAS Domain Template
1125  *    functions. This function is called by libsas to abort a specified task.
1126  * @task: This parameter specifies the SAS task to abort.
1127  *
1128  * status, zero indicates success.
1129  */
1130 int isci_task_abort_task(struct sas_task *task)
1131 {
1132 	struct isci_host *isci_host = dev_to_ihost(task->dev);
1133 	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
1134 	struct isci_request       *old_request = NULL;
1135 	enum isci_request_status  old_state;
1136 	struct isci_remote_device *isci_device = NULL;
1137 	struct isci_tmf           tmf;
1138 	int                       ret = TMF_RESP_FUNC_FAILED;
1139 	unsigned long             flags;
1140 	bool                      any_dev_reset = false;
1141 	bool                      device_stopping;
1142 
1143 	/* Get the isci_request reference from the task.  Note that
1144 	 * this check does not depend on the pending request list
1145 	 * in the device, because tasks driving resets may land here
1146 	 * after completion in the core.
1147 	 */
1148 	old_request = isci_task_get_request_from_task(task, &isci_device);
1149 
1150 	dev_dbg(&isci_host->pdev->dev,
1151 		"%s: task = %p\n", __func__, task);
1152 
1153 	/* Check if the device has been / is currently being removed.
1154 	 * If so, no task management will be done, and the I/O will
1155 	 * be terminated.
1156 	 */
1157 	device_stopping = (isci_device->status == isci_stopping)
1158 			  || (isci_device->status == isci_stopped);
1159 
1160 	/* XXX need to fix device lookup lifetime (needs to be done
1161 	 * under scic_lock, among other things...), but for now assume
1162 	 * the device is available like the above code
1163 	 */
1164 	set_bit(IDEV_EH, &isci_device->flags);
1165 
1166 	/* This version of the driver will fail abort requests for
1167 	 * SATA/STP.  Failing the abort request this way will cause the
1168 	 * SCSI error handler thread to escalate to LUN reset
1169 	 */
1170 	if (sas_protocol_ata(task->task_proto) && !device_stopping) {
1171 		dev_warn(&isci_host->pdev->dev,
1172 			    " task %p is for a STP/SATA device;"
1173 			    " returning TMF_RESP_FUNC_FAILED\n"
1174 			    " to cause a LUN reset...\n", task);
1175 		return TMF_RESP_FUNC_FAILED;
1176 	}
1177 
1178 	dev_dbg(&isci_host->pdev->dev,
1179 		"%s: old_request == %p\n", __func__, old_request);
1180 
1181 	if (!device_stopping)
1182 		any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
1183 
1184 	spin_lock_irqsave(&task->task_state_lock, flags);
1185 
1186 	/* Don't do resets to stopping devices. */
1187 	if (device_stopping) {
1188 
1189 		task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
1190 		any_dev_reset = false;
1191 
1192 	} else	/* See if there is a pending device reset for this device. */
1193 		any_dev_reset = any_dev_reset
1194 			|| (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
1195 
1196 	/* If the extraction of the request reference from the task
1197 	 * failed, then the request has been completed (or if there is a
1198 	 * pending reset then this abort request function must be failed
1199 	 * in order to escalate to the target reset).
1200 	 */
1201 	if ((old_request == NULL) || any_dev_reset) {
1202 
1203 		/* If the device reset task flag is set, fail the task
1204 		 * management request.  Otherwise, the original request
1205 		 * has completed.
1206 		 */
1207 		if (any_dev_reset) {
1208 
1209 			/* Turn off the task's DONE to make sure this
1210 			 * task is escalated to a target reset.
1211 			 */
1212 			task->task_state_flags &= ~SAS_TASK_STATE_DONE;
1213 
1214 			/* Make the reset happen as soon as possible. */
1215 			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
1216 
1217 			spin_unlock_irqrestore(&task->task_state_lock, flags);
1218 
1219 			/* Fail the task management request in order to
1220 			 * escalate to the target reset.
1221 			 */
1222 			ret = TMF_RESP_FUNC_FAILED;
1223 
1224 			dev_dbg(&isci_host->pdev->dev,
1225 				"%s: Failing task abort in order to "
1226 				"escalate to target reset because\n"
1227 				"SAS_TASK_NEED_DEV_RESET is set for "
1228 				"task %p on dev %p\n",
1229 				__func__, task, isci_device);
1230 
1231 
1232 		} else {
1233 			/* The request has already completed and there
1234 			 * is nothing to do here other than to set the task
1235 			 * done bit, and indicate that the task abort function
1236 			 * was successful.
1237 			 */
1238 			isci_set_task_doneflags(task);
1239 
1240 			spin_unlock_irqrestore(&task->task_state_lock, flags);
1241 
1242 			ret = TMF_RESP_FUNC_COMPLETE;
1243 
1244 			dev_dbg(&isci_host->pdev->dev,
1245 				"%s: abort task not needed for %p\n",
1246 				__func__, task);
1247 		}
1248 
1249 		return ret;
1250 	} else
1251 		spin_unlock_irqrestore(&task->task_state_lock, flags);
1253 
1254 	spin_lock_irqsave(&isci_host->scic_lock, flags);
1255 
1256 	/* Check the request status and change to "aborted" if currently
1257 	 * "starting"; if true then set the I/O kernel completion
1258 	 * struct that will be triggered when the request completes.
1259 	 */
1260 	old_state = isci_task_validate_request_to_abort(
1261 				old_request, isci_host, isci_device,
1262 				&aborted_io_completion);
1263 	if ((old_state != started) &&
1264 	    (old_state != completed) &&
1265 	    (old_state != aborting)) {
1266 
1267 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1268 
1269 		/* The request was already being handled by someone else (because
1270 		* they got to set the state away from started).
1271 		*/
1272 		dev_dbg(&isci_host->pdev->dev,
1273 			"%s:  device = %p; old_request %p already being aborted\n",
1274 			__func__,
1275 			isci_device, old_request);
1276 
1277 		return TMF_RESP_FUNC_COMPLETE;
1278 	}
1279 	if ((task->task_proto == SAS_PROTOCOL_SMP)
1280 	    || device_stopping
1281 	    || old_request->complete_in_target
1282 	    ) {
1283 
1284 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1285 
1286 		dev_dbg(&isci_host->pdev->dev,
1287 			"%s: SMP request (%d)"
1288 			" or device is stopping (%d)"
1289 			" or complete_in_target (%d), thus no TMF\n",
1290 			__func__, (task->task_proto == SAS_PROTOCOL_SMP),
1291 			device_stopping, old_request->complete_in_target);
1292 
1293 		/* Set the state on the task. */
1294 		isci_task_all_done(task);
1295 
1296 		ret = TMF_RESP_FUNC_COMPLETE;
1297 
1298 		/* Stopping and SMP devices are not sent a TMF, and are not
1299 		 * reset, but the outstanding I/O request is terminated below.
1300 		 */
1301 	} else {
1302 		/* Fill in the tmf structure */
1303 		isci_task_build_abort_task_tmf(&tmf, isci_device,
1304 					       isci_tmf_ssp_task_abort,
1305 					       isci_abort_task_process_cb,
1306 					       old_request);
1307 
1308 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1309 
1310 		#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
1311 		ret = isci_task_execute_tmf(isci_host, &tmf,
1312 					    ISCI_ABORT_TASK_TIMEOUT_MS);
1313 
1314 		if (ret != TMF_RESP_FUNC_COMPLETE)
1315 			dev_err(&isci_host->pdev->dev,
1316 				"%s: isci_task_execute_tmf failed\n",
1317 				__func__);
1318 	}
1319 	if (ret == TMF_RESP_FUNC_COMPLETE) {
1320 		old_request->complete_in_target = true;
1321 
1322 		/* Clean up the request on our side, and wait for the aborted I/O to
1323 		* complete.
1324 		*/
1325 		isci_terminate_request_core(isci_host, isci_device, old_request);
1326 	}
1327 
1328 	/* Make sure we do not leave a reference to aborted_io_completion */
1329 	old_request->io_request_completion = NULL;
1330 	return ret;
1331 }
1332 
1333 /**
1334  * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1335  *    functions. This is one of the Task Management functions called by libsas,
1336  *    to abort all tasks for the given lun.
1337  * @d_device: This parameter specifies the domain device associated with this
1338  *    request.
1339  * @lun: This parameter specifies the lun associated with this request.
1340  *
1341  * status, zero indicates success.
1342  */
1343 int isci_task_abort_task_set(
1344 	struct domain_device *d_device,
1345 	u8 *lun)
1346 {
1347 	return TMF_RESP_FUNC_FAILED;
1348 }
1349 
1350 
1351 /**
1352  * isci_task_clear_aca() - This function is one of the SAS Domain Template
1353  *    functions. This is one of the Task Management functions called by libsas.
1354  * @d_device: This parameter specifies the domain device associated with this
1355  *    request.
1356  * @lun: This parameter specifies the lun associated with this request.
1357  *
1358  * status, zero indicates success.
1359  */
1360 int isci_task_clear_aca(
1361 	struct domain_device *d_device,
1362 	u8 *lun)
1363 {
1364 	return TMF_RESP_FUNC_FAILED;
1365 }
1366 
1367 
1368 
1369 /**
1370  * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1371  *    functions. This is one of the Task Management functions called by libsas.
1372  * @d_device: This parameter specifies the domain device associated with this
1373  *    request.
1374  * @lun: This parameter specifies the lun associated with this request.
1375  *
1376  * status, zero indicates success.
1377  */
1378 int isci_task_clear_task_set(
1379 	struct domain_device *d_device,
1380 	u8 *lun)
1381 {
1382 	return TMF_RESP_FUNC_FAILED;
1383 }
1384 
1385 
1386 /**
1387  * isci_task_query_task() - This function is implemented to cause libsas to
1388  *    correctly escalate the failed abort to a LUN or target reset (this is
1389  *    because sas_scsi_find_task libsas function does not correctly interpret
1390  *    all return codes from the abort task call).  When TMF_RESP_FUNC_SUCC is
1391  *    returned, libsas turns this into a LUN reset; when FUNC_FAILED is
1392  *    returned, libsas will turn this into a target reset
1393  * @task: This parameter specifies the sas task being queried.
1394  * @lun: This parameter specifies the lun associated with this request.
1395  *
1396  * status, zero indicates success.
1397  */
1398 int isci_task_query_task(
1399 	struct sas_task *task)
1400 {
1401 	/* See if there is a pending device reset for this device. */
1402 	if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1403 		return TMF_RESP_FUNC_FAILED;
1404 	else
1405 		return TMF_RESP_FUNC_SUCC;
1406 }
1407 
1408 /*
1409  * isci_task_request_complete() - This function is called by the sci core when
1410  *    a task request completes.
1411  * @ihost: This parameter specifies the ISCI host object
1412  * @ireq: This parameter is the completed isci_request object.
1413  * @completion_status: This parameter specifies the completion status from the
1414  *    sci core.
1415  *
1416  * none.
1417  */
1418 void
1419 isci_task_request_complete(struct isci_host *ihost,
1420 			   struct isci_request *ireq,
1421 			   enum sci_task_status completion_status)
1422 {
1423 	struct isci_remote_device *idev = ireq->isci_device;
1424 	enum isci_request_status old_state;
1425 	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1426 	struct completion *tmf_complete;
1427 	struct scic_sds_request *sci_req = &ireq->sci;
1428 
1429 	dev_dbg(&ihost->pdev->dev,
1430 		"%s: request = %p, status=%d\n",
1431 		__func__, ireq, completion_status);
1432 
1433 	old_state = isci_request_change_state(ireq, completed);
1434 
1435 	tmf->status = completion_status;
1436 	ireq->complete_in_target = true;
1437 
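	/* Preserve the protocol-specific response payload (SSP response IU
	 * or SATA D2H FIS) for the thread waiting on this TMF.
	 */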
1438 	if (tmf->proto == SAS_PROTOCOL_SSP) {
1439 		memcpy(&tmf->resp.resp_iu,
1440 		       &sci_req->ssp.rsp,
1441 		       SSP_RESP_IU_MAX_SIZE);
1442 	} else if (tmf->proto == SAS_PROTOCOL_SATA) {
1443 		memcpy(&tmf->resp.d2h_fis,
1444 		       &sci_req->stp.rsp,
1445 		       sizeof(struct dev_to_host_fis));
1446 	}
1447 
1448 	/* Manage the timer if it is still running. */
1449 	if (tmf->timeout_timer) {
1450 		isci_del_timer(ihost, tmf->timeout_timer);
1451 		tmf->timeout_timer = NULL;
1452 	}
1453 
1454 	/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1455 	tmf_complete = tmf->complete;
1456 
1457 	scic_controller_complete_io(&ihost->sci, &idev->sci, &ireq->sci);
1458 	/* Set the 'terminated' flag to make sure the request cannot be
1459 	 * terminated or completed again.
1460 	 */
1461 	ireq->terminated = true;
1462 
1463 	isci_request_change_state(ireq, unallocated);
1464 	list_del_init(&ireq->dev_node);
1465 
1466 	/* The task management part completes last. */
1467 	complete(tmf_complete);
1468 }
1469 
1470 static int isci_reset_device(struct domain_device *dev, int hard_reset)
1471 {
1472 	struct isci_remote_device *idev = dev->lldd_dev;
1473 	struct sas_phy *phy = sas_find_local_phy(dev);
1474 	struct isci_host *ihost = dev_to_ihost(dev);
1475 	enum sci_status status;
1476 	unsigned long flags;
1477 	int rc;
1478 
1479 	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1480 
1481 	if (!idev) {
1482 		dev_warn(&ihost->pdev->dev,
1483 			 "%s: idev is GONE!\n",
1484 			 __func__);
1485 
1486 		return TMF_RESP_FUNC_COMPLETE; /* Nothing to reset. */
1487 	}
1488 
1489 	spin_lock_irqsave(&ihost->scic_lock, flags);
1490 	status = scic_remote_device_reset(&idev->sci);
1491 	if (status != SCI_SUCCESS) {
1492 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
1493 
1494 		dev_warn(&ihost->pdev->dev,
1495 			 "%s: scic_remote_device_reset(%p) returned %d!\n",
1496 			 __func__, idev, status);
1497 
1498 		return TMF_RESP_FUNC_FAILED;
1499 	}
1500 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1501 
1502 	/* Make sure all pending requests are able to be fully terminated. */
1503 	isci_device_clear_reset_pending(ihost, idev);
1504 
1505 	rc = sas_phy_reset(phy, hard_reset);
1506 	msleep(2000); /* just like mvsas */
1507 
1508 	/* Terminate in-progress I/O now. */
1509 	isci_remote_device_nuke_requests(ihost, idev);
1510 
1511 	spin_lock_irqsave(&ihost->scic_lock, flags);
1512 	status = scic_remote_device_reset_complete(&idev->sci);
1513 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1514 
1515 	if (status != SCI_SUCCESS) {
1516 		dev_warn(&ihost->pdev->dev,
1517 			 "%s: scic_remote_device_reset_complete(%p) "
1518 			 "returned %d!\n", __func__, idev, status);
1519 	}
1520 
1521 	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1522 
1523 	return rc;
1524 }
1525 
1526 int isci_task_I_T_nexus_reset(struct domain_device *dev)
1527 {
1528 	struct isci_host *ihost = dev_to_ihost(dev);
1529 	int ret = TMF_RESP_FUNC_FAILED, hard_reset = 1;
1530 	struct isci_remote_device *idev;
1531 	unsigned long flags;
1532 
1533 	/* XXX mvsas is not protecting against ->lldd_dev_gone(), are we
1534 	 * being too paranoid, or is mvsas busted?!
1535 	 */
1536 	spin_lock_irqsave(&ihost->scic_lock, flags);
1537 	idev = dev->lldd_dev;
1538 	if (!idev || !test_bit(IDEV_EH, &idev->flags))
1539 		ret = TMF_RESP_FUNC_COMPLETE;
1540 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1541 
1542 	if (ret == TMF_RESP_FUNC_COMPLETE)
1543 		return ret;
1544 
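	/* SATA/STP targets get a link reset rather than a hard reset when
	 * sas_phy_reset() is eventually called from isci_reset_device().
	 */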
1545 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
1546 		hard_reset = 0;
1547 
1548 	return isci_reset_device(dev, hard_reset);
1549 }
1550 
1551 int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1552 {
1553 	struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1554 	int hard_reset = 1;
1555 
1556 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
1557 		hard_reset = 0;
1558 
1559 	return isci_reset_device(dev, hard_reset);
1560 }
1561