xref: /linux/drivers/scsi/isci/task.c (revision fd18388bc5820b3e7807302ac18e8e7de83c9f4c)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55 
56 #include <linux/completion.h>
57 #include <linux/irqflags.h>
58 #include "sas.h"
59 #include "remote_device.h"
60 #include "remote_node_context.h"
61 #include "isci.h"
62 #include "request.h"
63 #include "sata.h"
64 #include "task.h"
65 #include "timers.h"
66 
67 /**
68 * isci_task_refuse() - complete the request to the upper layer driver in
69 *     the case where an I/O needs to be completed back in the submit path.
70 * @ihost: host on which the the request was queued
71 * @task: request to complete
72 * @response: response code for the completed task.
73 * @status: status code for the completed task.
74 *
75 */
76 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
77 			     enum service_response response,
78 			     enum exec_status status)
79 
80 {
81 	enum isci_completion_selection disposition;
82 
83 	disposition = isci_perform_normal_io_completion;
84 	disposition = isci_task_set_completion_status(task, response, status,
85 						      disposition);
86 
87 	/* Tasks aborted specifically by a call to the lldd_abort_task
88 	 * function should not be completed to the host in the regular path.
89 	 */
90 	switch (disposition) {
91 		case isci_perform_normal_io_completion:
92 			/* Normal notification (task_done) */
93 			dev_dbg(&ihost->pdev->dev,
94 				"%s: Normal - task = %p, response=%d, "
95 				"status=%d\n",
96 				__func__, task, response, status);
97 
98 			task->lldd_task = NULL;
99 
100 			isci_execpath_callback(ihost, task, task->task_done);
101 			break;
102 
103 		case isci_perform_aborted_io_completion:
104 			/* No notification because this request is already in the
105 			* abort path.
106 			*/
107 			dev_warn(&ihost->pdev->dev,
108 				 "%s: Aborted - task = %p, response=%d, "
109 				"status=%d\n",
110 				 __func__, task, response, status);
111 			break;
112 
113 		case isci_perform_error_io_completion:
114 			/* Use sas_task_abort */
115 			dev_warn(&ihost->pdev->dev,
116 				 "%s: Error - task = %p, response=%d, "
117 				"status=%d\n",
118 				 __func__, task, response, status);
119 
120 			isci_execpath_callback(ihost, task, sas_task_abort);
121 			break;
122 
123 		default:
124 			dev_warn(&ihost->pdev->dev,
125 				 "%s: isci task notification default case!",
126 				 __func__);
127 			sas_task_abort(task);
128 			break;
129 	}
130 }
131 
132 #define for_each_sas_task(num, task) \
133 	for (; num > 0; num--,\
134 	     task = list_entry(task->list.next, struct sas_task, list))
135 
136 /**
137  * isci_task_execute_task() - This function is one of the SAS Domain Template
138  *    functions. This function is called by libsas to send a task down to
139  *    hardware.
140  * @task: This parameter specifies the SAS task to send.
141  * @num: This parameter specifies the number of tasks to queue.
142  * @gfp_flags: This parameter specifies the context of this call.
143  *
144  * status, zero indicates success.
145  */
146 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
147 {
148 	struct isci_host *ihost = dev_to_ihost(task->dev);
149 	struct isci_request *request = NULL;
150 	struct isci_remote_device *device;
151 	unsigned long flags;
152 	int ret;
153 	enum sci_status status;
154 	enum isci_status device_status;
155 
156 	dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
157 
158 	/* Check if we have room for more tasks */
159 	ret = isci_host_can_queue(ihost, num);
160 
161 	if (ret) {
162 		dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__);
163 		return ret;
164 	}
165 
166 	for_each_sas_task(num, task) {
167 		dev_dbg(&ihost->pdev->dev,
168 			"task = %p, num = %d; dev = %p; cmd = %p\n",
169 			    task, num, task->dev, task->uldd_task);
170 
171 		device = task->dev->lldd_dev;
172 
173 		if (device)
174 			device_status = device->status;
175 		else
176 			device_status = isci_freed;
177 
178 		/* From this point onward, any process that needs to guarantee
179 		 * that there is no kernel I/O being started will have to wait
180 		 * for the quiesce spinlock.
181 		 */
182 
183 		if (device_status != isci_ready_for_io) {
184 
185 			/* Forces a retry from scsi mid layer. */
186 			dev_dbg(&ihost->pdev->dev,
187 				"%s: task %p: isci_host->status = %d, "
188 				"device = %p; device_status = 0x%x\n\n",
189 				__func__,
190 				task,
191 				isci_host_get_state(ihost),
192 				device,
193 				device_status);
194 
195 			if (device_status == isci_ready) {
196 				/* Indicate QUEUE_FULL so that the scsi midlayer
197 				* retries.
198 				*/
199 				isci_task_refuse(ihost, task,
200 						 SAS_TASK_COMPLETE,
201 						 SAS_QUEUE_FULL);
202 			} else {
203 				/* Else, the device is going down. */
204 				isci_task_refuse(ihost, task,
205 						 SAS_TASK_UNDELIVERED,
206 						 SAS_DEVICE_UNKNOWN);
207 			}
208 			isci_host_can_dequeue(ihost, 1);
209 		} else {
210 			/* There is a device and it's ready for I/O. */
211 			spin_lock_irqsave(&task->task_state_lock, flags);
212 
213 			if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
214 
215 				spin_unlock_irqrestore(&task->task_state_lock,
216 						       flags);
217 
218 				isci_task_refuse(ihost, task,
219 						 SAS_TASK_UNDELIVERED,
220 						 SAM_STAT_TASK_ABORTED);
221 
222 				/* The I/O was aborted. */
223 
224 			} else {
225 				task->task_state_flags |= SAS_TASK_AT_INITIATOR;
226 				spin_unlock_irqrestore(&task->task_state_lock, flags);
227 
228 				/* build and send the request. */
229 				status = isci_request_execute(ihost, task, &request,
230 							      gfp_flags);
231 
232 				if (status != SCI_SUCCESS) {
233 
234 					spin_lock_irqsave(&task->task_state_lock, flags);
235 					/* Did not really start this command. */
236 					task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
237 					spin_unlock_irqrestore(&task->task_state_lock, flags);
238 
239 					/* Indicate QUEUE_FULL so that the scsi
240 					* midlayer retries. if the request
241 					* failed for remote device reasons,
242 					* it gets returned as
243 					* SAS_TASK_UNDELIVERED next time
244 					* through.
245 					*/
246 					isci_task_refuse(ihost, task,
247 							 SAS_TASK_COMPLETE,
248 							 SAS_QUEUE_FULL);
249 					isci_host_can_dequeue(ihost, 1);
250 				}
251 			}
252 		}
253 	}
254 	return 0;
255 }
256 
257 
258 
259 /**
260  * isci_task_request_build() - This function builds the task request object.
261  * @isci_host: This parameter specifies the ISCI host object
262  * @request: This parameter points to the isci_request object allocated in the
263  *    request construct function.
264  * @tmf: This parameter is the task management struct to be built
265  *
266  * SCI_SUCCESS on successfull completion, or specific failure code.
267  */
268 static enum sci_status isci_task_request_build(
269 	struct isci_host *isci_host,
270 	struct isci_request **isci_request,
271 	struct isci_tmf *isci_tmf)
272 {
273 	struct scic_sds_remote_device *sci_device;
274 	enum sci_status status = SCI_FAILURE;
275 	struct isci_request *request = NULL;
276 	struct isci_remote_device *isci_device;
277 	struct domain_device *dev;
278 
279 	dev_dbg(&isci_host->pdev->dev,
280 		"%s: isci_tmf = %p\n", __func__, isci_tmf);
281 
282 	isci_device = isci_tmf->device;
283 	sci_device = &isci_device->sci;
284 	dev = isci_device->domain_dev;
285 
286 	/* do common allocation and init of request object. */
287 	status = isci_request_alloc_tmf(
288 		isci_host,
289 		isci_tmf,
290 		&request,
291 		isci_device,
292 		GFP_ATOMIC
293 		);
294 
295 	if (status != SCI_SUCCESS)
296 		goto out;
297 
298 	/* let the core do it's construct. */
299 	status = scic_task_request_construct(&isci_host->sci, sci_device,
300 					     SCI_CONTROLLER_INVALID_IO_TAG,
301 					     &request->sci);
302 
303 	if (status != SCI_SUCCESS) {
304 		dev_warn(&isci_host->pdev->dev,
305 			 "%s: scic_task_request_construct failed - "
306 			 "status = 0x%x\n",
307 			 __func__,
308 			 status);
309 		goto errout;
310 	}
311 
312 	/* XXX convert to get this from task->tproto like other drivers */
313 	if (dev->dev_type == SAS_END_DEV) {
314 		isci_tmf->proto = SAS_PROTOCOL_SSP;
315 		status = scic_task_request_construct_ssp(&request->sci);
316 		if (status != SCI_SUCCESS)
317 			goto errout;
318 	}
319 
320 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
321 		isci_tmf->proto = SAS_PROTOCOL_SATA;
322 		status = isci_sata_management_task_request_build(request);
323 
324 		if (status != SCI_SUCCESS)
325 			goto errout;
326 	}
327 
328 	goto out;
329 
330  errout:
331 
332 	/* release the dma memory if we fail. */
333 	isci_request_free(isci_host, request);
334 	request = NULL;
335 
336  out:
337 	*isci_request = request;
338 	return status;
339 }
340 
341 /**
342  * isci_task_execute_tmf() - This function builds and sends a task request,
343  *    then waits for the completion.
344  * @isci_host: This parameter specifies the ISCI host object
345  * @tmf: This parameter is the pointer to the task management structure for
346  *    this request.
347  * @timeout_ms: This parameter specifies the timeout period for the task
348  *    management request.
349  *
350  * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes
351  * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED.
352  */
353 int isci_task_execute_tmf(
354 	struct isci_host *isci_host,
355 	struct isci_tmf *tmf,
356 	unsigned long timeout_ms)
357 {
358 	DECLARE_COMPLETION_ONSTACK(completion);
359 	enum sci_task_status status = SCI_TASK_FAILURE;
360 	struct scic_sds_remote_device *sci_device;
361 	struct isci_remote_device *isci_device = tmf->device;
362 	struct isci_request *request;
363 	int ret = TMF_RESP_FUNC_FAILED;
364 	unsigned long flags;
365 	unsigned long timeleft;
366 
367 	/* sanity check, return TMF_RESP_FUNC_FAILED
368 	 * if the device is not there and ready.
369 	 */
370 	if (!isci_device || isci_device->status != isci_ready_for_io) {
371 		dev_dbg(&isci_host->pdev->dev,
372 			"%s: isci_device = %p not ready (%d)\n",
373 			__func__,
374 			isci_device, isci_device->status);
375 		return TMF_RESP_FUNC_FAILED;
376 	} else
377 		dev_dbg(&isci_host->pdev->dev,
378 			"%s: isci_device = %p\n",
379 			__func__, isci_device);
380 
381 	sci_device = &isci_device->sci;
382 
383 	/* Assign the pointer to the TMF's completion kernel wait structure. */
384 	tmf->complete = &completion;
385 
386 	isci_task_request_build(
387 		isci_host,
388 		&request,
389 		tmf
390 		);
391 
392 	if (!request) {
393 		dev_warn(&isci_host->pdev->dev,
394 			"%s: isci_task_request_build failed\n",
395 			__func__);
396 		return TMF_RESP_FUNC_FAILED;
397 	}
398 
399 	spin_lock_irqsave(&isci_host->scic_lock, flags);
400 
401 	/* start the TMF io. */
402 	status = scic_controller_start_task(
403 		&isci_host->sci,
404 		sci_device,
405 		&request->sci,
406 		SCI_CONTROLLER_INVALID_IO_TAG);
407 
408 	if (status != SCI_TASK_SUCCESS) {
409 		dev_warn(&isci_host->pdev->dev,
410 			 "%s: start_io failed - status = 0x%x, request = %p\n",
411 			 __func__,
412 			 status,
413 			 request);
414 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
415 		goto cleanup_request;
416 	}
417 
418 	if (tmf->cb_state_func != NULL)
419 		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
420 
421 	isci_request_change_state(request, started);
422 
423 	/* add the request to the remote device request list. */
424 	list_add(&request->dev_node, &isci_device->reqs_in_process);
425 
426 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
427 
428 	/* Wait for the TMF to complete, or a timeout. */
429 	timeleft = wait_for_completion_timeout(&completion,
430 				       jiffies + msecs_to_jiffies(timeout_ms));
431 
432 	if (timeleft == 0) {
433 		spin_lock_irqsave(&isci_host->scic_lock, flags);
434 
435 		if (tmf->cb_state_func != NULL)
436 			tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
437 
438 		status = scic_controller_terminate_request(
439 			&request->isci_host->sci,
440 			&request->isci_device->sci,
441 			&request->sci);
442 
443 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
444 	}
445 
446 	isci_print_tmf(tmf);
447 
448 	if (tmf->status == SCI_SUCCESS)
449 		ret =  TMF_RESP_FUNC_COMPLETE;
450 	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
451 		dev_dbg(&isci_host->pdev->dev,
452 			"%s: tmf.status == "
453 			"SCI_FAILURE_IO_RESPONSE_VALID\n",
454 			__func__);
455 		ret =  TMF_RESP_FUNC_COMPLETE;
456 	}
457 	/* Else - leave the default "failed" status alone. */
458 
459 	dev_dbg(&isci_host->pdev->dev,
460 		"%s: completed request = %p\n",
461 		__func__,
462 		request);
463 
464 	if (request->io_request_completion != NULL) {
465 		/* A thread is waiting for this TMF to finish. */
466 		complete(request->io_request_completion);
467 	}
468 
469  cleanup_request:
470 	isci_request_free(isci_host, request);
471 	return ret;
472 }
473 
474 void isci_task_build_tmf(
475 	struct isci_tmf *tmf,
476 	struct isci_remote_device *isci_device,
477 	enum isci_tmf_function_codes code,
478 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
479 			    struct isci_tmf *,
480 			    void *),
481 	void *cb_data)
482 {
483 	dev_dbg(&isci_device->isci_port->isci_host->pdev->dev,
484 		"%s: isci_device = %p\n", __func__, isci_device);
485 
486 	memset(tmf, 0, sizeof(*tmf));
487 
488 	tmf->device        = isci_device;
489 	tmf->tmf_code      = code;
490 
491 	tmf->cb_state_func = tmf_sent_cb;
492 	tmf->cb_data       = cb_data;
493 }
494 
495 static void isci_task_build_abort_task_tmf(
496 	struct isci_tmf *tmf,
497 	struct isci_remote_device *isci_device,
498 	enum isci_tmf_function_codes code,
499 	void (*tmf_sent_cb)(enum isci_tmf_cb_state,
500 			    struct isci_tmf *,
501 			    void *),
502 	struct isci_request *old_request)
503 {
504 	isci_task_build_tmf(tmf, isci_device, code, tmf_sent_cb,
505 			    (void *)old_request);
506 	tmf->io_tag = old_request->io_tag;
507 }
508 
509 static struct isci_request *isci_task_get_request_from_task(
510 	struct sas_task *task,
511 	struct isci_remote_device **isci_device)
512 {
513 
514 	struct isci_request *request = NULL;
515 	unsigned long flags;
516 
517 	spin_lock_irqsave(&task->task_state_lock, flags);
518 
519 	request = task->lldd_task;
520 
521 	/* If task is already done, the request isn't valid */
522 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
523 	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
524 	    (request != NULL)) {
525 
526 		if (isci_device != NULL)
527 			*isci_device = request->isci_device;
528 	}
529 
530 	spin_unlock_irqrestore(&task->task_state_lock, flags);
531 
532 	return request;
533 }
534 
535 /**
536  * isci_task_validate_request_to_abort() - This function checks the given I/O
537  *    against the "started" state.  If the request is still "started", it's
538  *    state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
539  *    BEFORE CALLING THIS FUNCTION.
540  * @isci_request: This parameter specifies the request object to control.
541  * @isci_host: This parameter specifies the ISCI host object
542  * @isci_device: This is the device to which the request is pending.
543  * @aborted_io_completion: This is a completion structure that will be added to
544  *    the request in case it is changed to aborting; this completion is
545  *    triggered when the request is fully completed.
546  *
547  * Either "started" on successful change of the task status to "aborted", or
548  * "unallocated" if the task cannot be controlled.
549  */
550 static enum isci_request_status isci_task_validate_request_to_abort(
551 	struct isci_request *isci_request,
552 	struct isci_host *isci_host,
553 	struct isci_remote_device *isci_device,
554 	struct completion *aborted_io_completion)
555 {
556 	enum isci_request_status old_state = unallocated;
557 
558 	/* Only abort the task if it's in the
559 	 *  device's request_in_process list
560 	 */
561 	if (isci_request && !list_empty(&isci_request->dev_node)) {
562 		old_state = isci_request_change_started_to_aborted(
563 			isci_request, aborted_io_completion);
564 
565 	}
566 
567 	return old_state;
568 }
569 
570 static void isci_request_cleanup_completed_loiterer(
571 	struct isci_host *isci_host,
572 	struct isci_remote_device *isci_device,
573 	struct isci_request *isci_request)
574 {
575 	struct sas_task     *task;
576 	unsigned long       flags;
577 
578 	task = (isci_request->ttype == io_task)
579 		? isci_request_access_task(isci_request)
580 		: NULL;
581 
582 	dev_dbg(&isci_host->pdev->dev,
583 		"%s: isci_device=%p, request=%p, task=%p\n",
584 		__func__, isci_device, isci_request, task);
585 
586 	spin_lock_irqsave(&isci_host->scic_lock, flags);
587 	list_del_init(&isci_request->dev_node);
588 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
589 
590 	if (task != NULL) {
591 
592 		spin_lock_irqsave(&task->task_state_lock, flags);
593 		task->lldd_task = NULL;
594 
595 		isci_set_task_doneflags(task);
596 
597 		/* If this task is not in the abort path, call task_done. */
598 		if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
599 
600 			spin_unlock_irqrestore(&task->task_state_lock, flags);
601 			task->task_done(task);
602 		} else
603 			spin_unlock_irqrestore(&task->task_state_lock, flags);
604 	}
605 	isci_request_free(isci_host, isci_request);
606 }
607 
608 /**
609 * @isci_termination_timed_out(): this function will deal with a request for
610 * which the wait for termination has timed-out.
611 *
612 * @isci_host    This SCU.
613 * @isci_request The I/O request being terminated.
614 */
615 static void
616 isci_termination_timed_out(
617 	struct isci_host    * host,
618 	struct isci_request * request
619 	)
620 {
621 	unsigned long state_flags;
622 
623 	dev_warn(&host->pdev->dev,
624 		"%s: host = %p; request = %p\n",
625 		__func__, host, request);
626 
627 	/* At this point, the request to terminate
628 	* has timed out. The best we can do is to
629 	* have the request die a silent death
630 	* if it ever completes.
631 	*/
632 	spin_lock_irqsave(&request->state_lock, state_flags);
633 
634 	if (request->status == started) {
635 
636 		/* Set the request state to "dead",
637 		* and clear the task pointer so that an actual
638 		* completion event callback doesn't do
639 		* anything.
640 		*/
641 		request->status = dead;
642 
643 		/* Clear the timeout completion event pointer.*/
644 		request->io_request_completion = NULL;
645 
646 		if (request->ttype == io_task) {
647 
648 			/* Break links with the sas_task. */
649 			if (request->ttype_ptr.io_task_ptr != NULL) {
650 
651 				request->ttype_ptr.io_task_ptr->lldd_task = NULL;
652 				request->ttype_ptr.io_task_ptr            = NULL;
653 			}
654 		}
655 	}
656 	spin_unlock_irqrestore(&request->state_lock, state_flags);
657 }
658 
659 
660 /**
661  * isci_terminate_request_core() - This function will terminate the given
662  *    request, and wait for it to complete.  This function must only be called
663  *    from a thread that can wait.  Note that the request is terminated and
664  *    completed (back to the host, if started there).
665  * @isci_host: This SCU.
666  * @isci_device: The target.
667  * @isci_request: The I/O request to be terminated.
668  *
669  *
670  */
671 static void isci_terminate_request_core(
672 	struct isci_host *isci_host,
673 	struct isci_remote_device *isci_device,
674 	struct isci_request *isci_request)
675 {
676 	enum sci_status status      = SCI_SUCCESS;
677 	bool was_terminated         = false;
678 	bool needs_cleanup_handling = false;
679 	enum isci_request_status request_status;
680 	unsigned long flags;
681 	unsigned long timeout_remaining;
682 
683 
684 	dev_dbg(&isci_host->pdev->dev,
685 		"%s: device = %p; request = %p\n",
686 		__func__, isci_device, isci_request);
687 
688 	spin_lock_irqsave(&isci_host->scic_lock, flags);
689 
690 	/* Note that we are not going to control
691 	* the target to abort the request.
692 	*/
693 	isci_request->complete_in_target = true;
694 
695 	/* Make sure the request wasn't just sitting around signalling
696 	 * device condition (if the request handle is NULL, then the
697 	 * request completed but needed additional handling here).
698 	 */
699 	if (!isci_request->terminated) {
700 		was_terminated = true;
701 		needs_cleanup_handling = true;
702 		status = scic_controller_terminate_request(
703 			&isci_host->sci,
704 			&isci_device->sci,
705 			&isci_request->sci);
706 	}
707 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
708 
709 	/*
710 	 * The only time the request to terminate will
711 	 * fail is when the io request is completed and
712 	 * being aborted.
713 	 */
714 	if (status != SCI_SUCCESS) {
715 		dev_err(&isci_host->pdev->dev,
716 			"%s: scic_controller_terminate_request"
717 			" returned = 0x%x\n",
718 			__func__,
719 			status);
720 		/* Clear the completion pointer from the request. */
721 		isci_request->io_request_completion = NULL;
722 
723 	} else {
724 		if (was_terminated) {
725 			dev_dbg(&isci_host->pdev->dev,
726 				"%s: before completion wait (%p)\n",
727 				__func__,
728 				isci_request->io_request_completion);
729 
730 			/* Wait here for the request to complete. */
731 			#define TERMINATION_TIMEOUT_MSEC 50
732 			timeout_remaining
733 				= wait_for_completion_timeout(
734 				   isci_request->io_request_completion,
735 				   msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
736 
737 			if (!timeout_remaining) {
738 
739 				isci_termination_timed_out(isci_host,
740 							   isci_request);
741 
742 				dev_err(&isci_host->pdev->dev,
743 					"%s: *** Timeout waiting for "
744 					"termination(%p/%p)\n",
745 					__func__,
746 					isci_request->io_request_completion,
747 					isci_request);
748 
749 			} else
750 				dev_dbg(&isci_host->pdev->dev,
751 					"%s: after completion wait (%p)\n",
752 					__func__,
753 					isci_request->io_request_completion);
754 		}
755 		/* Clear the completion pointer from the request. */
756 		isci_request->io_request_completion = NULL;
757 
758 		/* Peek at the status of the request.  This will tell
759 		* us if there was special handling on the request such that it
760 		* needs to be detached and freed here.
761 		*/
762 		spin_lock_irqsave(&isci_request->state_lock, flags);
763 		request_status = isci_request_get_state(isci_request);
764 
765 		if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
766 		    && ((request_status == aborted)
767 			|| (request_status == aborting)
768 			|| (request_status == terminating)
769 			|| (request_status == completed)
770 			|| (request_status == dead)
771 			)
772 		    ) {
773 
774 			/* The completion routine won't free a request in
775 			* the aborted/aborting/etc. states, so we do
776 			* it here.
777 			*/
778 			needs_cleanup_handling = true;
779 		}
780 		spin_unlock_irqrestore(&isci_request->state_lock, flags);
781 
782 		if (needs_cleanup_handling)
783 			isci_request_cleanup_completed_loiterer(
784 				isci_host, isci_device, isci_request
785 				);
786 	}
787 }
788 
789 static void isci_terminate_request(
790 	struct isci_host *isci_host,
791 	struct isci_remote_device *isci_device,
792 	struct isci_request *isci_request,
793 	enum isci_request_status new_request_state)
794 {
795 	enum isci_request_status old_state;
796 	DECLARE_COMPLETION_ONSTACK(request_completion);
797 
798 	/* Change state to "new_request_state" if it is currently "started" */
799 	old_state = isci_request_change_started_to_newstate(
800 		isci_request,
801 		&request_completion,
802 		new_request_state
803 		);
804 
805 	if ((old_state == started) ||
806 	    (old_state == completed) ||
807 	    (old_state == aborting)) {
808 
809 		/* If the old_state is started:
810 		 * This request was not already being aborted. If it had been,
811 		 * then the aborting I/O (ie. the TMF request) would not be in
812 		 * the aborting state, and thus would be terminated here.  Note
813 		 * that since the TMF completion's call to the kernel function
814 		 * "complete()" does not happen until the pending I/O request
815 		 * terminate fully completes, we do not have to implement a
816 		 * special wait here for already aborting requests - the
817 		 * termination of the TMF request will force the request
818 		 * to finish it's already started terminate.
819 		 *
820 		 * If old_state == completed:
821 		 * This request completed from the SCU hardware perspective
822 		 * and now just needs cleaning up in terms of freeing the
823 		 * request and potentially calling up to libsas.
824 		 *
825 		 * If old_state == aborting:
826 		 * This request has already gone through a TMF timeout, but may
827 		 * not have been terminated; needs cleaning up at least.
828 		 */
829 		isci_terminate_request_core(isci_host, isci_device,
830 					    isci_request);
831 	}
832 }
833 
834 /**
835  * isci_terminate_pending_requests() - This function will change the all of the
836  *    requests on the given device's state to "aborting", will terminate the
837  *    requests, and wait for them to complete.  This function must only be
838  *    called from a thread that can wait.  Note that the requests are all
839  *    terminated and completed (back to the host, if started there).
840  * @isci_host: This parameter specifies SCU.
841  * @isci_device: This parameter specifies the target.
842  *
843  *
844  */
845 void isci_terminate_pending_requests(
846 	struct isci_host *isci_host,
847 	struct isci_remote_device *isci_device,
848 	enum isci_request_status new_request_state)
849 {
850 	struct isci_request *request;
851 	struct isci_request *next_request;
852 	unsigned long       flags;
853 	struct list_head    aborted_request_list;
854 
855 	INIT_LIST_HEAD(&aborted_request_list);
856 
857 	dev_dbg(&isci_host->pdev->dev,
858 		"%s: isci_device = %p (new request state = %d)\n",
859 		__func__, isci_device, new_request_state);
860 
861 	spin_lock_irqsave(&isci_host->scic_lock, flags);
862 
863 	/* Move all of the pending requests off of the device list. */
864 	list_splice_init(&isci_device->reqs_in_process,
865 			 &aborted_request_list);
866 
867 	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
868 
869 	/* Iterate through the now-local list. */
870 	list_for_each_entry_safe(request, next_request,
871 				 &aborted_request_list, dev_node) {
872 
873 		dev_warn(&isci_host->pdev->dev,
874 			"%s: isci_device=%p request=%p; task=%p\n",
875 			__func__,
876 			isci_device, request,
877 			((request->ttype == io_task)
878 				? isci_request_access_task(request)
879 				: NULL));
880 
881 		/* Mark all still pending I/O with the selected next
882 		* state, terminate and free it.
883 		*/
884 		isci_terminate_request(isci_host, isci_device,
885 				       request, new_request_state
886 				       );
887 	}
888 }
889 
890 /**
891  * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain
892  *    Template functions.
893  * @lun: This parameter specifies the lun to be reset.
894  *
895  * status, zero indicates success.
896  */
897 static int isci_task_send_lu_reset_sas(
898 	struct isci_host *isci_host,
899 	struct isci_remote_device *isci_device,
900 	u8 *lun)
901 {
902 	struct isci_tmf tmf;
903 	int ret = TMF_RESP_FUNC_FAILED;
904 
905 	dev_dbg(&isci_host->pdev->dev,
906 		"%s: isci_host = %p, isci_device = %p\n",
907 		__func__, isci_host, isci_device);
908 	/* Send the LUN reset to the target.  By the time the call returns,
909 	 * the TMF has fully exected in the target (in which case the return
910 	 * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
911 	 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
912 	 */
913 	isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_lun_reset, NULL,
914 			    NULL);
915 
916 	#define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
917 	ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
918 
919 	if (ret == TMF_RESP_FUNC_COMPLETE)
920 		dev_dbg(&isci_host->pdev->dev,
921 			"%s: %p: TMF_LU_RESET passed\n",
922 			__func__, isci_device);
923 	else
924 		dev_dbg(&isci_host->pdev->dev,
925 			"%s: %p: TMF_LU_RESET failed (%x)\n",
926 			__func__, isci_device, ret);
927 
928 	return ret;
929 }
930 
931 /**
932  * isci_task_lu_reset() - This function is one of the SAS Domain Template
933  *    functions. This is one of the Task Management functoins called by libsas,
934  *    to reset the given lun. Note the assumption that while this call is
935  *    executing, no I/O will be sent by the host to the device.
936  * @lun: This parameter specifies the lun to be reset.
937  *
938  * status, zero indicates success.
939  */
940 int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
941 {
942 	struct isci_host *isci_host = dev_to_ihost(domain_device);
943 	struct isci_remote_device *isci_device = NULL;
944 	int ret;
945 	bool device_stopping = false;
946 
947 	isci_device = domain_device->lldd_dev;
948 
949 	dev_dbg(&isci_host->pdev->dev,
950 		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
951 		 __func__, domain_device, isci_host, isci_device);
952 
953 	if (isci_device != NULL) {
954 		device_stopping = (isci_device->status == isci_stopping)
955 				  || (isci_device->status == isci_stopped);
956 		set_bit(IDEV_EH, &isci_device->flags);
957 	}
958 
959 	/* If there is a device reset pending on any request in the
960 	 * device's list, fail this LUN reset request in order to
961 	 * escalate to the device reset.
962 	 */
963 	if (!isci_device || device_stopping ||
964 	    isci_device_is_reset_pending(isci_host, isci_device)) {
965 		dev_warn(&isci_host->pdev->dev,
966 			 "%s: No dev (%p), or "
967 			 "RESET PENDING: domain_device=%p\n",
968 			 __func__, isci_device, domain_device);
969 		return TMF_RESP_FUNC_FAILED;
970 	}
971 
972 	/* Send the task management part of the reset. */
973 	if (sas_protocol_ata(domain_device->tproto)) {
974 		ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
975 	} else
976 		ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
977 
978 	/* If the LUN reset worked, all the I/O can now be terminated. */
979 	if (ret == TMF_RESP_FUNC_COMPLETE)
980 		/* Terminate all I/O now. */
981 		isci_terminate_pending_requests(isci_host,
982 						isci_device,
983 						terminating);
984 
985 	return ret;
986 }
987 
988 
989 /*	 int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
990 int isci_task_clear_nexus_port(struct asd_sas_port *port)
991 {
992 	return TMF_RESP_FUNC_FAILED;
993 }
994 
995 
996 
997 int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
998 {
999 	return TMF_RESP_FUNC_FAILED;
1000 }
1001 
1002 /* Task Management Functions. Must be called from process context.	 */
1003 
1004 /**
1005  * isci_abort_task_process_cb() - This is a helper function for the abort task
1006  *    TMF command.  It manages the request state with respect to the successful
1007  *    transmission / completion of the abort task request.
1008  * @cb_state: This parameter specifies when this function was called - after
1009  *    the TMF request has been started and after it has timed-out.
1010  * @tmf: This parameter specifies the TMF in progress.
1011  *
1012  *
1013  */
1014 static void isci_abort_task_process_cb(
1015 	enum isci_tmf_cb_state cb_state,
1016 	struct isci_tmf *tmf,
1017 	void *cb_data)
1018 {
1019 	struct isci_request *old_request;
1020 
1021 	old_request = (struct isci_request *)cb_data;
1022 
1023 	dev_dbg(&old_request->isci_host->pdev->dev,
1024 		"%s: tmf=%p, old_request=%p\n",
1025 		__func__, tmf, old_request);
1026 
1027 	switch (cb_state) {
1028 
1029 	case isci_tmf_started:
1030 		/* The TMF has been started.  Nothing to do here, since the
1031 		 * request state was already set to "aborted" by the abort
1032 		 * task function.
1033 		 */
1034 		if ((old_request->status != aborted)
1035 			&& (old_request->status != completed))
1036 			dev_err(&old_request->isci_host->pdev->dev,
1037 				"%s: Bad request status (%d): tmf=%p, old_request=%p\n",
1038 				__func__, old_request->status, tmf, old_request);
1039 		break;
1040 
1041 	case isci_tmf_timed_out:
1042 
1043 		/* Set the task's state to "aborting", since the abort task
1044 		 * function thread set it to "aborted" (above) in anticipation
1045 		 * of the task management request working correctly.  Since the
1046 		 * timeout has now fired, the TMF request failed.  We set the
1047 		 * state such that the request completion will indicate the
1048 		 * device is no longer present.
1049 		 */
1050 		isci_request_change_state(old_request, aborting);
1051 		break;
1052 
1053 	default:
1054 		dev_err(&old_request->isci_host->pdev->dev,
1055 			"%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
1056 			__func__, cb_state, tmf, old_request);
1057 		break;
1058 	}
1059 }
1060 
1061 /**
1062  * isci_task_abort_task() - This function is one of the SAS Domain Template
1063  *    functions. This function is called by libsas to abort a specified task.
1064  * @task: This parameter specifies the SAS task to abort.
1065  *
1066  * status, zero indicates success.
1067  */
1068 int isci_task_abort_task(struct sas_task *task)
1069 {
1070 	struct isci_host *isci_host = dev_to_ihost(task->dev);
1071 	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
1072 	struct isci_request       *old_request = NULL;
1073 	enum isci_request_status  old_state;
1074 	struct isci_remote_device *isci_device = NULL;
1075 	struct isci_tmf           tmf;
1076 	int                       ret = TMF_RESP_FUNC_FAILED;
1077 	unsigned long             flags;
1078 	bool                      any_dev_reset = false;
1079 	bool                      device_stopping;
1080 
1081 	/* Get the isci_request reference from the task.  Note that
1082 	 * this check does not depend on the pending request list
1083 	 * in the device, because tasks driving resets may land here
1084 	 * after completion in the core.
1085 	 */
1086 	old_request = isci_task_get_request_from_task(task, &isci_device);
1087 
1088 	dev_dbg(&isci_host->pdev->dev,
1089 		"%s: task = %p\n", __func__, task);
1090 
1091 	/* Check if the device has been / is currently being removed.
1092 	 * If so, no task management will be done, and the I/O will
1093 	 * be terminated.
1094 	 */
1095 	device_stopping = (isci_device->status == isci_stopping)
1096 			  || (isci_device->status == isci_stopped);
1097 
1098 	/* XXX need to fix device lookup lifetime (needs to be done
1099 	 * under scic_lock, among other things...), but for now assume
1100 	 * the device is available like the above code
1101 	 */
1102 	set_bit(IDEV_EH, &isci_device->flags);
1103 
1104 	/* This version of the driver will fail abort requests for
1105 	 * SATA/STP.  Failing the abort request this way will cause the
1106 	 * SCSI error handler thread to escalate to LUN reset
1107 	 */
1108 	if (sas_protocol_ata(task->task_proto) && !device_stopping) {
1109 		dev_warn(&isci_host->pdev->dev,
1110 			    " task %p is for a STP/SATA device;"
1111 			    " returning TMF_RESP_FUNC_FAILED\n"
1112 			    " to cause a LUN reset...\n", task);
1113 		return TMF_RESP_FUNC_FAILED;
1114 	}
1115 
1116 	dev_dbg(&isci_host->pdev->dev,
1117 		"%s: old_request == %p\n", __func__, old_request);
1118 
1119 	if (!device_stopping)
1120 		any_dev_reset = isci_device_is_reset_pending(isci_host,isci_device);
1121 
1122 	spin_lock_irqsave(&task->task_state_lock, flags);
1123 
1124 	/* Don't do resets to stopping devices. */
1125 	if (device_stopping) {
1126 
1127 		task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
1128 		any_dev_reset = false;
1129 
1130 	} else	/* See if there is a pending device reset for this device. */
1131 		any_dev_reset = any_dev_reset
1132 			|| (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
1133 
1134 	/* If the extraction of the request reference from the task
1135 	 * failed, then the request has been completed (or if there is a
1136 	 * pending reset then this abort request function must be failed
1137 	 * in order to escalate to the target reset).
1138 	 */
1139 	if ((old_request == NULL) || any_dev_reset) {
1140 
1141 		/* If the device reset task flag is set, fail the task
1142 		 * management request.  Otherwise, the original request
1143 		 * has completed.
1144 		 */
1145 		if (any_dev_reset) {
1146 
1147 			/* Turn off the task's DONE to make sure this
1148 			 * task is escalated to a target reset.
1149 			 */
1150 			task->task_state_flags &= ~SAS_TASK_STATE_DONE;
1151 
1152 			/* Make the reset happen as soon as possible. */
1153 			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
1154 
1155 			spin_unlock_irqrestore(&task->task_state_lock, flags);
1156 
1157 			/* Fail the task management request in order to
1158 			 * escalate to the target reset.
1159 			 */
1160 			ret = TMF_RESP_FUNC_FAILED;
1161 
1162 			dev_dbg(&isci_host->pdev->dev,
1163 				"%s: Failing task abort in order to "
1164 				"escalate to target reset because\n"
1165 				"SAS_TASK_NEED_DEV_RESET is set for "
1166 				"task %p on dev %p\n",
1167 				__func__, task, isci_device);
1168 
1169 
1170 		} else {
1171 			/* The request has already completed and there
1172 			 * is nothing to do here other than to set the task
1173 			 * done bit, and indicate that the task abort function
1174 			 * was sucessful.
1175 			 */
1176 			isci_set_task_doneflags(task);
1177 
1178 			spin_unlock_irqrestore(&task->task_state_lock, flags);
1179 
1180 			ret = TMF_RESP_FUNC_COMPLETE;
1181 
1182 			dev_dbg(&isci_host->pdev->dev,
1183 				"%s: abort task not needed for %p\n",
1184 				__func__, task);
1185 		}
1186 
1187 		return ret;
1188 	}
1189 	else
1190 		spin_unlock_irqrestore(&task->task_state_lock, flags);
1191 
1192 	spin_lock_irqsave(&isci_host->scic_lock, flags);
1193 
1194 	/* Check the request status and change to "aborted" if currently
1195 	 * "starting"; if true then set the I/O kernel completion
1196 	 * struct that will be triggered when the request completes.
1197 	 */
1198 	old_state = isci_task_validate_request_to_abort(
1199 				old_request, isci_host, isci_device,
1200 				&aborted_io_completion);
1201 	if ((old_state != started) &&
1202 	    (old_state != completed) &&
1203 	    (old_state != aborting)) {
1204 
1205 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1206 
1207 		/* The request was already being handled by someone else (because
1208 		* they got to set the state away from started).
1209 		*/
1210 		dev_dbg(&isci_host->pdev->dev,
1211 			"%s:  device = %p; old_request %p already being aborted\n",
1212 			__func__,
1213 			isci_device, old_request);
1214 
1215 		return TMF_RESP_FUNC_COMPLETE;
1216 	}
1217 	if ((task->task_proto == SAS_PROTOCOL_SMP)
1218 	    || device_stopping
1219 	    || old_request->complete_in_target
1220 	    ) {
1221 
1222 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1223 
1224 		dev_dbg(&isci_host->pdev->dev,
1225 			"%s: SMP request (%d)"
1226 			" or device is stopping (%d)"
1227 			" or complete_in_target (%d), thus no TMF\n",
1228 			__func__, (task->task_proto == SAS_PROTOCOL_SMP),
1229 			device_stopping, old_request->complete_in_target);
1230 
1231 		/* Set the state on the task. */
1232 		isci_task_all_done(task);
1233 
1234 		ret = TMF_RESP_FUNC_COMPLETE;
1235 
1236 		/* Stopping and SMP devices are not sent a TMF, and are not
1237 		 * reset, but the outstanding I/O request is terminated below.
1238 		 */
1239 	} else {
1240 		/* Fill in the tmf stucture */
1241 		isci_task_build_abort_task_tmf(&tmf, isci_device,
1242 					       isci_tmf_ssp_task_abort,
1243 					       isci_abort_task_process_cb,
1244 					       old_request);
1245 
1246 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1247 
1248 		#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
1249 		ret = isci_task_execute_tmf(isci_host, &tmf,
1250 					    ISCI_ABORT_TASK_TIMEOUT_MS);
1251 
1252 		if (ret != TMF_RESP_FUNC_COMPLETE)
1253 			dev_err(&isci_host->pdev->dev,
1254 				"%s: isci_task_send_tmf failed\n",
1255 				__func__);
1256 	}
1257 	if (ret == TMF_RESP_FUNC_COMPLETE) {
1258 		old_request->complete_in_target = true;
1259 
1260 		/* Clean up the request on our side, and wait for the aborted I/O to
1261 		* complete.
1262 		*/
1263 		isci_terminate_request_core(isci_host, isci_device, old_request);
1264 	}
1265 
1266 	/* Make sure we do not leave a reference to aborted_io_completion */
1267 	old_request->io_request_completion = NULL;
1268 	return ret;
1269 }
1270 
1271 /**
1272  * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1273  *    functions. This is one of the Task Management functoins called by libsas,
1274  *    to abort all task for the given lun.
1275  * @d_device: This parameter specifies the domain device associated with this
1276  *    request.
1277  * @lun: This parameter specifies the lun associated with this request.
1278  *
1279  * status, zero indicates success.
1280  */
1281 int isci_task_abort_task_set(
1282 	struct domain_device *d_device,
1283 	u8 *lun)
1284 {
1285 	return TMF_RESP_FUNC_FAILED;
1286 }
1287 
1288 
1289 /**
1290  * isci_task_clear_aca() - This function is one of the SAS Domain Template
1291  *    functions. This is one of the Task Management functoins called by libsas.
1292  * @d_device: This parameter specifies the domain device associated with this
1293  *    request.
1294  * @lun: This parameter specifies the lun	 associated with this request.
1295  *
1296  * status, zero indicates success.
1297  */
1298 int isci_task_clear_aca(
1299 	struct domain_device *d_device,
1300 	u8 *lun)
1301 {
1302 	return TMF_RESP_FUNC_FAILED;
1303 }
1304 
1305 
1306 
1307 /**
1308  * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1309  *    functions. This is one of the Task Management functoins called by libsas.
1310  * @d_device: This parameter specifies the domain device associated with this
1311  *    request.
1312  * @lun: This parameter specifies the lun	 associated with this request.
1313  *
1314  * status, zero indicates success.
1315  */
1316 int isci_task_clear_task_set(
1317 	struct domain_device *d_device,
1318 	u8 *lun)
1319 {
1320 	return TMF_RESP_FUNC_FAILED;
1321 }
1322 
1323 
1324 /**
1325  * isci_task_query_task() - This function is implemented to cause libsas to
1326  *    correctly escalate the failed abort to a LUN or target reset (this is
1327  *    because sas_scsi_find_task libsas function does not correctly interpret
1328  *    all return codes from the abort task call).  When TMF_RESP_FUNC_SUCC is
1329  *    returned, libsas turns this into a LUN reset; when FUNC_FAILED is
1330  *    returned, libsas will turn this into a target reset
1331  * @task: This parameter specifies the sas task being queried.
1332  * @lun: This parameter specifies the lun associated with this request.
1333  *
1334  * status, zero indicates success.
1335  */
1336 int isci_task_query_task(
1337 	struct sas_task *task)
1338 {
1339 	/* See if there is a pending device reset for this device. */
1340 	if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1341 		return TMF_RESP_FUNC_FAILED;
1342 	else
1343 		return TMF_RESP_FUNC_SUCC;
1344 }
1345 
1346 /*
1347  * isci_task_request_complete() - This function is called by the sci core when
1348  *    an task request completes.
1349  * @ihost: This parameter specifies the ISCI host object
1350  * @ireq: This parameter is the completed isci_request object.
1351  * @completion_status: This parameter specifies the completion status from the
1352  *    sci core.
1353  *
1354  * none.
1355  */
1356 void
1357 isci_task_request_complete(struct isci_host *ihost,
1358 			   struct isci_request *ireq,
1359 			   enum sci_task_status completion_status)
1360 {
1361 	struct isci_remote_device *idev = ireq->isci_device;
1362 	enum isci_request_status old_state;
1363 	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1364 	struct completion *tmf_complete;
1365 	struct scic_sds_request *sci_req = &ireq->sci;
1366 
1367 	dev_dbg(&ihost->pdev->dev,
1368 		"%s: request = %p, status=%d\n",
1369 		__func__, ireq, completion_status);
1370 
1371 	old_state = isci_request_change_state(ireq, completed);
1372 
1373 	tmf->status = completion_status;
1374 	ireq->complete_in_target = true;
1375 
1376 	if (tmf->proto == SAS_PROTOCOL_SSP) {
1377 		memcpy(&tmf->resp.resp_iu,
1378 		       &sci_req->ssp.rsp,
1379 		       SSP_RESP_IU_MAX_SIZE);
1380 	} else if (tmf->proto == SAS_PROTOCOL_SATA) {
1381 		memcpy(&tmf->resp.d2h_fis,
1382 		       &sci_req->stp.rsp,
1383 		       sizeof(struct dev_to_host_fis));
1384 	}
1385 
1386 	/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1387 	tmf_complete = tmf->complete;
1388 
1389 	scic_controller_complete_io(&ihost->sci, &idev->sci, &ireq->sci);
1390 	/* set the 'terminated' flag handle to make sure it cannot be terminated
1391 	 *  or completed again.
1392 	 */
1393 	ireq->terminated = true;;
1394 
1395 	isci_request_change_state(ireq, unallocated);
1396 	list_del_init(&ireq->dev_node);
1397 
1398 	/* The task management part completes last. */
1399 	complete(tmf_complete);
1400 }
1401 
1402 static int isci_reset_device(struct domain_device *dev, int hard_reset)
1403 {
1404 	struct isci_remote_device *idev = dev->lldd_dev;
1405 	struct sas_phy *phy = sas_find_local_phy(dev);
1406 	struct isci_host *ihost = dev_to_ihost(dev);
1407 	enum sci_status status;
1408 	unsigned long flags;
1409 	int rc;
1410 
1411 	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1412 
1413 	if (!idev) {
1414 		dev_warn(&ihost->pdev->dev,
1415 			 "%s: idev is GONE!\n",
1416 			 __func__);
1417 
1418 		return TMF_RESP_FUNC_COMPLETE; /* Nothing to reset. */
1419 	}
1420 
1421 	spin_lock_irqsave(&ihost->scic_lock, flags);
1422 	status = scic_remote_device_reset(&idev->sci);
1423 	if (status != SCI_SUCCESS) {
1424 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
1425 
1426 		dev_warn(&ihost->pdev->dev,
1427 			 "%s: scic_remote_device_reset(%p) returned %d!\n",
1428 			 __func__, idev, status);
1429 
1430 		return TMF_RESP_FUNC_FAILED;
1431 	}
1432 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1433 
1434 	/* Make sure all pending requests are able to be fully terminated. */
1435 	isci_device_clear_reset_pending(ihost, idev);
1436 
1437 	rc = sas_phy_reset(phy, hard_reset);
1438 	msleep(2000); /* just like mvsas */
1439 
1440 	/* Terminate in-progress I/O now. */
1441 	isci_remote_device_nuke_requests(ihost, idev);
1442 
1443 	spin_lock_irqsave(&ihost->scic_lock, flags);
1444 	status = scic_remote_device_reset_complete(&idev->sci);
1445 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1446 
1447 	if (status != SCI_SUCCESS) {
1448 		dev_warn(&ihost->pdev->dev,
1449 			 "%s: scic_remote_device_reset_complete(%p) "
1450 			 "returned %d!\n", __func__, idev, status);
1451 	}
1452 
1453 	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1454 
1455 	return rc;
1456 }
1457 
1458 int isci_task_I_T_nexus_reset(struct domain_device *dev)
1459 {
1460 	struct isci_host *ihost = dev_to_ihost(dev);
1461 	int ret = TMF_RESP_FUNC_FAILED, hard_reset = 1;
1462 	struct isci_remote_device *idev;
1463 	unsigned long flags;
1464 
1465 	/* XXX mvsas is not protecting against ->lldd_dev_gone(), are we
1466 	 * being too paranoid, or is mvsas busted?!
1467 	 */
1468 	spin_lock_irqsave(&ihost->scic_lock, flags);
1469 	idev = dev->lldd_dev;
1470 	if (!idev || !test_bit(IDEV_EH, &idev->flags))
1471 		ret = TMF_RESP_FUNC_COMPLETE;
1472 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1473 
1474 	if (ret == TMF_RESP_FUNC_COMPLETE)
1475 		return ret;
1476 
1477 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
1478 		hard_reset = 0;
1479 
1480 	return isci_reset_device(dev, hard_reset);
1481 }
1482 
1483 int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1484 {
1485 	struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1486 	int hard_reset = 1;
1487 
1488 	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
1489 		hard_reset = 0;
1490 
1491 	return isci_reset_device(dev, hard_reset);
1492 }
1493