xref: /linux/drivers/scsi/aic94xx/aic94xx_tmf.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
1 /*
2  * Aic94xx Task Management Functions
3  *
4  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
5  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6  *
7  * This file is licensed under GPLv2.
8  *
9  * This file is part of the aic94xx driver.
10  *
11  * The aic94xx driver is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation; version 2 of the
14  * License.
15  *
16  * The aic94xx driver is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with the aic94xx driver; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
24  *
25  */
26 
27 #include <linux/spinlock.h>
28 #include <linux/gfp.h>
29 #include "aic94xx.h"
30 #include "aic94xx_sas.h"
31 #include "aic94xx_hwi.h"
32 
33 /* ---------- Internal enqueue ---------- */
34 
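/*
 * Arm the ascb timer and post the ascb to the sequencer.  If posting fails
 * the timer is deleted again and the caller still owns (and must free) the
 * ascb; on success either the completion tasklet or the timeout handler
 * runs later and is responsible for completing the waiting thread.
 */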
35 static int asd_enqueue_internal(struct asd_ascb *ascb,
36 		void (*tasklet_complete)(struct asd_ascb *,
37 					 struct done_list_struct *),
38 				void (*timed_out)(struct timer_list *t))
39 {
40 	int res;
41 
42 	ascb->tasklet_complete = tasklet_complete;
43 	ascb->uldd_timer = 1;
44 
45 	ascb->timer.function = timed_out;
46 	ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
47 
48 	add_timer(&ascb->timer);
49 
50 	res = asd_post_ascb_list(ascb->ha, ascb, 1);
51 	if (unlikely(res))
52 		del_timer(&ascb->timer);
53 	return res;
54 }
55 
56 /* ---------- CLEAR NEXUS ---------- */
57 
58 struct tasklet_completion_status {
59 	int	dl_opcode;
60 	int	tmf_state;
61 	u8	tag_valid:1;
62 	__be16	tag;
63 };
64 
65 #define DECLARE_TCS(tcs) \
66 	struct tasklet_completion_status tcs = { \
67 		.dl_opcode = 0, \
68 		.tmf_state = 0, \
69 		.tag_valid = 0, \
70 		.tag = 0, \
71 	}
72 
73 
74 static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
75 					     struct done_list_struct *dl)
76 {
77 	struct tasklet_completion_status *tcs = ascb->uldd_task;
78 	ASD_DPRINTK("%s: here\n", __func__);
79 	if (!del_timer(&ascb->timer)) {
80 		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
81 		return;
82 	}
83 	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
84 	tcs->dl_opcode = dl->opcode;
85 	complete(ascb->completion);
86 	asd_ascb_free(ascb);
87 }
88 
89 static void asd_clear_nexus_timedout(struct timer_list *t)
90 {
91 	struct asd_ascb *ascb = from_timer(ascb, t, timer);
92 	struct tasklet_completion_status *tcs = ascb->uldd_task;
93 
94 	ASD_DPRINTK("%s: here\n", __func__);
95 	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
96 	complete(ascb->completion);
97 }
98 
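/*
 * CLEAR_NEXUS_PRE/POST factor out the boilerplate shared by the clear-nexus
 * variants below: PRE declares the locals, allocates a single ascb (it
 * expects an 'asd_ha' variable to already be in scope) and sets the CLEAR
 * NEXUS opcode; the caller then fills in the nexus-specific SCB fields;
 * POST enqueues the ascb, waits for completion and maps TC_NO_ERROR to
 * TMF_RESP_FUNC_COMPLETE.  Note that POST supplies the function's return
 * statements and the out_err label.  See the expansion sketch after
 * asd_clear_nexus_ha() below.
 */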
99 #define CLEAR_NEXUS_PRE         \
100 	struct asd_ascb *ascb; \
101 	struct scb *scb; \
102 	int res; \
103 	DECLARE_COMPLETION_ONSTACK(completion); \
104 	DECLARE_TCS(tcs); \
105 		\
106 	ASD_DPRINTK("%s: PRE\n", __func__); \
107 	res = 1;                \
108 	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
109 	if (!ascb)              \
110 		return -ENOMEM; \
111                                 \
112 	ascb->completion = &completion; \
113 	ascb->uldd_task = &tcs; \
114 	scb = ascb->scb;        \
115 	scb->header.opcode = CLEAR_NEXUS
116 
117 #define CLEAR_NEXUS_POST        \
118 	ASD_DPRINTK("%s: POST\n", __func__); \
119 	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
120 				   asd_clear_nexus_timedout);              \
121 	if (res)                \
122 		goto out_err;   \
123 	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
124 	wait_for_completion(&completion); \
125 	res = tcs.dl_opcode; \
126 	if (res == TC_NO_ERROR) \
127 		res = TMF_RESP_FUNC_COMPLETE;   \
128 	return res; \
129 out_err:                        \
130 	asd_ascb_free(ascb);    \
131 	return res
132 
133 int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
134 {
135 	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
136 
137 	CLEAR_NEXUS_PRE;
138 	scb->clear_nexus.nexus = NEXUS_ADAPTER;
139 	CLEAR_NEXUS_POST;
140 }
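/*
 * For reference, the two macros above expand asd_clear_nexus_ha() roughly
 * as follows (simplified sketch; debug printouts omitted):
 *
 *	struct asd_ascb *ascb;
 *	struct scb *scb;
 *	int res = 1;
 *	DECLARE_COMPLETION_ONSTACK(completion);
 *	DECLARE_TCS(tcs);
 *
 *	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
 *	if (!ascb)
 *		return -ENOMEM;
 *	ascb->completion = &completion;
 *	ascb->uldd_task = &tcs;
 *	scb = ascb->scb;
 *	scb->header.opcode = CLEAR_NEXUS;
 *	scb->clear_nexus.nexus = NEXUS_ADAPTER;
 *
 *	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete,
 *				   asd_clear_nexus_timedout);
 *	if (res) {
 *		asd_ascb_free(ascb);
 *		return res;
 *	}
 *	wait_for_completion(&completion);
 *	res = tcs.dl_opcode;
 *	if (res == TC_NO_ERROR)
 *		res = TMF_RESP_FUNC_COMPLETE;
 *	return res;
 */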
141 
142 int asd_clear_nexus_port(struct asd_sas_port *port)
143 {
144 	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
145 
146 	CLEAR_NEXUS_PRE;
147 	scb->clear_nexus.nexus = NEXUS_PORT;
148 	scb->clear_nexus.conn_mask = port->phy_mask;
149 	CLEAR_NEXUS_POST;
150 }
151 
152 enum clear_nexus_phase {
153 	NEXUS_PHASE_PRE,
154 	NEXUS_PHASE_POST,
155 	NEXUS_PHASE_RESUME,
156 };
157 
158 static int asd_clear_nexus_I_T(struct domain_device *dev,
159 			       enum clear_nexus_phase phase)
160 {
161 	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
162 
163 	CLEAR_NEXUS_PRE;
164 	scb->clear_nexus.nexus = NEXUS_I_T;
165 	switch (phase) {
166 	case NEXUS_PHASE_PRE:
167 		scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
168 		break;
169 	case NEXUS_PHASE_POST:
170 		scb->clear_nexus.flags = SEND_Q | NOTINQ;
171 		break;
172 	case NEXUS_PHASE_RESUME:
173 		scb->clear_nexus.flags = RESUME_TX;
174 	}
175 	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
176 						   dev->lldd_dev);
177 	CLEAR_NEXUS_POST;
178 }
179 
180 int asd_I_T_nexus_reset(struct domain_device *dev)
181 {
182 	int res, tmp_res, i;
183 	struct sas_phy *phy = sas_get_local_phy(dev);
184 	/* Standard mandates link reset for ATA (type 0) and
185 	 * hard reset for SSP (type 1) */
186 	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
187 			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
188 
189 	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
190 	/* send a hard reset */
191 	ASD_DPRINTK("sending %s reset to %s\n",
192 		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
193 	res = sas_phy_reset(phy, reset_type);
194 	if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
195 		/* wait for the maximum settle time */
196 		msleep(500);
197 		/* clear all outstanding commands (keep nexus suspended) */
198 		asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
199 	}
200 	for (i = 0 ; i < 3; i++) {
201 		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
202 		if (tmp_res == TC_RESUME)
203 			goto out;
204 		msleep(500);
205 	}
206 
207 	/* This is a bit of a problem: the sequencer is still suspended
208 	 * and is refusing to resume.  Hope it will resume after a bigger
209 	 * hammer, or else the disk is lost. */
210 	dev_printk(KERN_ERR, &phy->dev,
211 		   "Failed to resume nexus after reset 0x%x\n", tmp_res);
212 
213 	res = TMF_RESP_FUNC_FAILED;
214  out:
215 	sas_put_local_phy(phy);
216 	return res;
217 }
218 
219 static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
220 {
221 	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
222 
223 	CLEAR_NEXUS_PRE;
224 	scb->clear_nexus.nexus = NEXUS_I_T_L;
225 	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
226 	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
227 	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
228 						   dev->lldd_dev);
229 	CLEAR_NEXUS_POST;
230 }
231 
232 static int asd_clear_nexus_tag(struct sas_task *task)
233 {
234 	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
235 	struct asd_ascb *tascb = task->lldd_task;
236 
237 	CLEAR_NEXUS_PRE;
238 	scb->clear_nexus.nexus = NEXUS_TAG;
239 	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
240 	scb->clear_nexus.ssp_task.tag = tascb->tag;
241 	if (task->dev->tproto)
242 		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
243 							  task->dev->lldd_dev);
244 	CLEAR_NEXUS_POST;
245 }
246 
247 static int asd_clear_nexus_index(struct sas_task *task)
248 {
249 	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
250 	struct asd_ascb *tascb = task->lldd_task;
251 
252 	CLEAR_NEXUS_PRE;
253 	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
254 	if (task->dev->tproto)
255 		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
256 							  task->dev->lldd_dev);
257 	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
258 	CLEAR_NEXUS_POST;
259 }
260 
261 /* ---------- TMFs ---------- */
262 
263 static void asd_tmf_timedout(struct timer_list *t)
264 {
265 	struct asd_ascb *ascb = from_timer(ascb, t, timer);
266 	struct tasklet_completion_status *tcs = ascb->uldd_task;
267 
268 	ASD_DPRINTK("tmf timed out\n");
269 	tcs->tmf_state = TMF_RESP_FUNC_FAILED;
270 	complete(ascb->completion);
271 }
272 
273 static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
274 				    struct done_list_struct *dl)
275 {
276 	struct asd_ha_struct *asd_ha = ascb->ha;
277 	unsigned long flags;
278 	struct tc_resp_sb_struct {
279 		__le16 index_escb;
280 		u8     len_lsb;
281 		u8     flags;
282 	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
283 
284 	int edb_id = ((resp_sb->flags & 0x70) >> 4) - 1;
285 	struct asd_ascb *escb;
286 	struct asd_dma_tok *edb;
287 	struct ssp_frame_hdr *fh;
288 	struct ssp_response_iu   *ru;
289 	int res = TMF_RESP_FUNC_FAILED;
290 
291 	ASD_DPRINTK("tmf resp tasklet\n");
292 
293 	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
294 	escb = asd_tc_index_find(&asd_ha->seq,
295 				 (int)le16_to_cpu(resp_sb->index_escb));
296 	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
297 
298 	if (!escb) {
299 		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
300 		return res;
301 	}
302 
303 	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
304 	ascb->tag = *(__be16 *)(edb->vaddr+4);
305 	fh = edb->vaddr + 16;
306 	ru = edb->vaddr + 16 + sizeof(*fh);
307 	res = ru->status;
308 	if (ru->datapres == 1)	  /* Response data present */
309 		res = ru->resp_data[3];
310 #if 0
311 	ascb->tag = fh->tag;
312 #endif
313 	ascb->tag_valid = 1;
314 
315 	asd_invalidate_edb(escb, edb_id);
316 	return res;
317 }
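/*
 * Worked example of the status-block decoding above, with illustrative
 * values: if resp_sb->flags were 0x25, then (0x25 & 0x70) >> 4 == 2 and
 * edb_id == 1, so the empty data buffer holding the RESPONSE frame is
 * asd_ha->seq.edb_arr[1 + escb->edb_index].  Within that buffer the frame
 * tag sits at offset 4, the SSP frame header at offset 16, and the SSP
 * RESPONSE IU immediately after the header; res is taken from the IU's
 * status byte, or from resp_data[3] when ru->datapres == 1 (response data
 * present).
 */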
318 
319 static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
320 				     struct done_list_struct *dl)
321 {
322 	struct tasklet_completion_status *tcs;
323 
324 	if (!del_timer(&ascb->timer))
325 		return;
326 
327 	tcs = ascb->uldd_task;
328 	ASD_DPRINTK("tmf tasklet complete\n");
329 
330 	tcs->dl_opcode = dl->opcode;
331 
332 	if (dl->opcode == TC_SSP_RESP) {
333 		tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
334 		tcs->tag_valid = ascb->tag_valid;
335 		tcs->tag = ascb->tag;
336 	}
337 
338 	complete(ascb->completion);
339 	asd_ascb_free(ascb);
340 }
341 
342 static int asd_clear_nexus(struct sas_task *task)
343 {
344 	int res = TMF_RESP_FUNC_FAILED;
345 	int leftover;
346 	struct asd_ascb *tascb = task->lldd_task;
347 	DECLARE_COMPLETION_ONSTACK(completion);
348 	unsigned long flags;
349 
350 	tascb->completion = &completion;
351 
352 	ASD_DPRINTK("task not done, clearing nexus\n");
353 	if (tascb->tag_valid)
354 		res = asd_clear_nexus_tag(task);
355 	else
356 		res = asd_clear_nexus_index(task);
357 	leftover = wait_for_completion_timeout(&completion,
358 					       AIC94XX_SCB_TIMEOUT);
359 	tascb->completion = NULL;
360 	ASD_DPRINTK("came back from clear nexus\n");
361 	spin_lock_irqsave(&task->task_state_lock, flags);
362 	if (leftover < 1)
363 		res = TMF_RESP_FUNC_FAILED;
364 	if (task->task_state_flags & SAS_TASK_STATE_DONE)
365 		res = TMF_RESP_FUNC_COMPLETE;
366 	spin_unlock_irqrestore(&task->task_state_lock, flags);
367 
368 	return res;
369 }
370 
371 /**
372  * asd_abort_task -- ABORT TASK TMF
373  * @task: the task to be aborted
374  *
375  * Before calling ABORT TASK, the caller should OR SAS_TASK_STATE_ABORTED
376  * into task->task_state_flags (unless SAS_TASK_STATE_DONE is already set)
377  * under the task_state_lock IRQ spinlock; ABORT TASK *must* then be called.
378  *
379  * Implements the ABORT TASK TMF, I_T_L_Q nexus.
380  * Returns: SAS TMF responses (see sas_task.h),
381  *          -ENOMEM,
382  *          -SAS_QUEUE_FULL.
383  *
384  * When ABORT TASK returns, the caller of ABORT TASK checks first the
385  * task->task_state_flags, and then the return value of ABORT TASK.
386  *
387  * If the task has task state bit SAS_TASK_STATE_DONE set, then the
388  * task was completed successfully prior to it being aborted.  The
389  * caller of ABORT TASK has responsibility to call task->task_done()
390  * xor free the task, depending on their framework.  The return code
391  * is TMF_RESP_FUNC_COMPLETE in this case.
392  *
393  * Else the SAS_TASK_STATE_DONE bit is not set,
394  * 	If the return code is TMF_RESP_FUNC_COMPLETE, then
395  * 		the task was aborted successfully.  The caller of
396  * 		ABORT TASK has responsibility to call task->task_done()
397  *              to finish the task, xor free the task depending on their
398  *		framework.
399  *	else
400  * 		the ABORT TASK returned some kind of error. The task
401  *              was _not_ cancelled.  Nothing can be assumed.
402  *		The caller of ABORT TASK may wish to retry.
403  */
404 int asd_abort_task(struct sas_task *task)
405 {
406 	struct asd_ascb *tascb = task->lldd_task;
407 	struct asd_ha_struct *asd_ha = tascb->ha;
408 	int res = 1;
409 	unsigned long flags;
410 	struct asd_ascb *ascb = NULL;
411 	struct scb *scb;
412 	int leftover;
413 	DECLARE_TCS(tcs);
414 	DECLARE_COMPLETION_ONSTACK(completion);
415 	DECLARE_COMPLETION_ONSTACK(tascb_completion);
416 
417 	tascb->completion = &tascb_completion;
418 
419 	spin_lock_irqsave(&task->task_state_lock, flags);
420 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
421 		spin_unlock_irqrestore(&task->task_state_lock, flags);
422 		res = TMF_RESP_FUNC_COMPLETE;
423 		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
424 		goto out_done;
425 	}
426 	spin_unlock_irqrestore(&task->task_state_lock, flags);
427 
428 	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
429 	if (!ascb)
430 		return -ENOMEM;
431 
432 	ascb->uldd_task = &tcs;
433 	ascb->completion = &completion;
434 	scb = ascb->scb;
435 	scb->header.opcode = SCB_ABORT_TASK;
436 
437 	switch (task->task_proto) {
438 	case SAS_PROTOCOL_SATA:
439 	case SAS_PROTOCOL_STP:
440 		scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
441 		break;
442 	case SAS_PROTOCOL_SSP:
443 		scb->abort_task.proto_conn_rate  = (1 << 4); /* SSP */
444 		scb->abort_task.proto_conn_rate |= task->dev->linkrate;
445 		break;
446 	case SAS_PROTOCOL_SMP:
447 		break;
448 	default:
449 		break;
450 	}
451 
452 	if (task->task_proto == SAS_PROTOCOL_SSP) {
453 		scb->abort_task.ssp_frame.frame_type = SSP_TASK;
454 		memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
455 		       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
456 		memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
457 		       task->dev->port->ha->hashed_sas_addr,
458 		       HASHED_SAS_ADDR_SIZE);
459 		scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
460 
461 		memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
462 		scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
463 		scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
464 	}
465 
466 	scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
467 	scb->abort_task.conn_handle = cpu_to_le16(
468 		(u16)(unsigned long)task->dev->lldd_dev);
469 	scb->abort_task.retry_count = 1;
470 	scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
471 	scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
472 
473 	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
474 				   asd_tmf_timedout);
475 	if (res)
476 		goto out_free;
477 	wait_for_completion(&completion);
478 	ASD_DPRINTK("tmf came back\n");
479 
480 	tascb->tag = tcs.tag;
481 	tascb->tag_valid = tcs.tag_valid;
482 
483 	spin_lock_irqsave(&task->task_state_lock, flags);
484 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
485 		spin_unlock_irqrestore(&task->task_state_lock, flags);
486 		res = TMF_RESP_FUNC_COMPLETE;
487 		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
488 		goto out_done;
489 	}
490 	spin_unlock_irqrestore(&task->task_state_lock, flags);
491 
492 	if (tcs.dl_opcode == TC_SSP_RESP) {
493 		/* The task to be aborted has been sent to the device.
494 		 * We got a Response IU for the ABORT TASK TMF. */
495 		if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
496 			res = asd_clear_nexus(task);
497 		else
498 			res = tcs.tmf_state;
499 	} else if (tcs.dl_opcode == TC_NO_ERROR &&
500 		   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
501 		/* timeout */
502 		res = TMF_RESP_FUNC_FAILED;
503 	} else {
504 		/* In the following we assume that the managing layer
505 		 * will _never_ make a mistake when issuing ABORT
506 		 * TASK.
507 		 */
508 		switch (tcs.dl_opcode) {
509 		default:
510 			res = asd_clear_nexus(task);
511 			/* fallthrough */
512 		case TC_NO_ERROR:
513 			break;
514 			/* The task hasn't been sent to the device xor
515 			 * we never got a (sane) Response IU for the
516 			 * ABORT TASK TMF.
517 			 */
518 		case TF_NAK_RECV:
519 			res = TMF_RESP_INVALID_FRAME;
520 			break;
521 		case TF_TMF_TASK_DONE:	/* done but not reported yet */
522 			res = TMF_RESP_FUNC_FAILED;
523 			leftover =
524 				wait_for_completion_timeout(&tascb_completion,
525 							  AIC94XX_SCB_TIMEOUT);
526 			spin_lock_irqsave(&task->task_state_lock, flags);
527 			if (leftover < 1)
528 				res = TMF_RESP_FUNC_FAILED;
529 			if (task->task_state_flags & SAS_TASK_STATE_DONE)
530 				res = TMF_RESP_FUNC_COMPLETE;
531 			spin_unlock_irqrestore(&task->task_state_lock, flags);
532 			break;
533 		case TF_TMF_NO_TAG:
534 		case TF_TMF_TAG_FREE: /* the tag is in the free list */
535 		case TF_TMF_NO_CONN_HANDLE: /* no such device */
536 			res = TMF_RESP_FUNC_COMPLETE;
537 			break;
538 		case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
539 			res = TMF_RESP_FUNC_ESUPP;
540 			break;
541 		}
542 	}
543  out_done:
544 	tascb->completion = NULL;
545 	if (res == TMF_RESP_FUNC_COMPLETE) {
546 		task->lldd_task = NULL;
547 		mb();
548 		asd_ascb_free(tascb);
549 	}
550 	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
551 	return res;
552 
553  out_free:
554 	asd_ascb_free(ascb);
555 	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
556 	return res;
557 }
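/*
 * Illustrative caller sketch (not part of the driver; the function name is
 * hypothetical), following the protocol documented above asd_abort_task():
 * mark the task aborted under task_state_lock, issue the TMF, then check
 * the DONE flag before interpreting the return code.
 *
 *	static int example_abort(struct sas_task *task)
 *	{
 *		unsigned long flags;
 *		int res, done;
 *
 *		spin_lock_irqsave(&task->task_state_lock, flags);
 *		if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
 *			task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 *		spin_unlock_irqrestore(&task->task_state_lock, flags);
 *
 *		res = asd_abort_task(task);
 *
 *		spin_lock_irqsave(&task->task_state_lock, flags);
 *		done = !!(task->task_state_flags & SAS_TASK_STATE_DONE);
 *		spin_unlock_irqrestore(&task->task_state_lock, flags);
 *
 *		if (done || res == TMF_RESP_FUNC_COMPLETE)
 *			return 0;	// task finished or was aborted; the
 *					// caller now completes or frees it
 *		return res;		// not cancelled, caller may retry
 *	}
 */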
558 
559 /**
560  * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
561  * @dev: pointer to struct domain_device of interest
562  * @lun: pointer to u8[8] which is the LUN
563  * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
564  * @index: the transaction context of the task to be queried (QUERY TASK TMF only)
565  *
566  * This function is used to send ABORT TASK SET, CLEAR ACA,
567  * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
568  *
569  * No SCBs should be queued to the I_T_L nexus when this SCB is
570  * pending.
571  *
572  * Returns: TMF response code (see sas_task.h or the SAS spec)
573  */
574 static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
575 				int tmf, int index)
576 {
577 	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
578 	struct asd_ascb *ascb;
579 	int res = 1;
580 	struct scb *scb;
581 	DECLARE_COMPLETION_ONSTACK(completion);
582 	DECLARE_TCS(tcs);
583 
584 	if (!(dev->tproto & SAS_PROTOCOL_SSP))
585 		return TMF_RESP_FUNC_ESUPP;
586 
587 	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
588 	if (!ascb)
589 		return -ENOMEM;
590 
591 	ascb->completion = &completion;
592 	ascb->uldd_task = &tcs;
593 	scb = ascb->scb;
594 
595 	if (tmf == TMF_QUERY_TASK)
596 		scb->header.opcode = QUERY_SSP_TASK;
597 	else
598 		scb->header.opcode = INITIATE_SSP_TMF;
599 
600 	scb->ssp_tmf.proto_conn_rate  = (1 << 4); /* SSP */
601 	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
602 	/* SSP frame header */
603 	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
604 	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
605 	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
606 	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
607 	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
608 	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
609 	/* SSP Task IU */
610 	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
611 	scb->ssp_tmf.ssp_task.tmf = tmf;
612 
613 	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
614 	scb->ssp_tmf.conn_handle = cpu_to_le16((u16)(unsigned long)
615 					      dev->lldd_dev);
616 	scb->ssp_tmf.retry_count = 1;
617 	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
618 	if (tmf == TMF_QUERY_TASK)
619 		scb->ssp_tmf.index = cpu_to_le16(index);
620 
621 	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
622 				   asd_tmf_timedout);
623 	if (res)
624 		goto out_err;
625 	wait_for_completion(&completion);
626 
627 	switch (tcs.dl_opcode) {
628 	case TC_NO_ERROR:
629 		res = TMF_RESP_FUNC_COMPLETE;
630 		break;
631 	case TF_NAK_RECV:
632 		res = TMF_RESP_INVALID_FRAME;
633 		break;
634 	case TF_TMF_TASK_DONE:
635 		res = TMF_RESP_FUNC_FAILED;
636 		break;
637 	case TF_TMF_NO_TAG:
638 	case TF_TMF_TAG_FREE: /* the tag is in the free list */
639 	case TF_TMF_NO_CONN_HANDLE: /* no such device */
640 		res = TMF_RESP_FUNC_COMPLETE;
641 		break;
642 	case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
643 		res = TMF_RESP_FUNC_ESUPP;
644 		break;
645 	default:
646 		/* Allow TMF response codes to propagate upwards */
647 		res = tcs.dl_opcode;
648 		break;
649 	}
650 	return res;
651 out_err:
652 	asd_ascb_free(ascb);
653 	return res;
654 }
655 
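/*
 * The four wrappers below issue their TMF through asd_initiate_ssp_tmf()
 * and, on TMF_RESP_FUNC_COMPLETE, follow up with a CLEAR NEXUS (I_T_L) so
 * that any SCBs the sequencer still holds for that nexus are reclaimed.
 */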
656 int asd_abort_task_set(struct domain_device *dev, u8 *lun)
657 {
658 	int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);
659 
660 	if (res == TMF_RESP_FUNC_COMPLETE)
661 		asd_clear_nexus_I_T_L(dev, lun);
662 	return res;
663 }
664 
665 int asd_clear_aca(struct domain_device *dev, u8 *lun)
666 {
667 	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
668 
669 	if (res == TMF_RESP_FUNC_COMPLETE)
670 		asd_clear_nexus_I_T_L(dev, lun);
671 	return res;
672 }
673 
674 int asd_clear_task_set(struct domain_device *dev, u8 *lun)
675 {
676 	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
677 
678 	if (res == TMF_RESP_FUNC_COMPLETE)
679 		asd_clear_nexus_I_T_L(dev, lun);
680 	return res;
681 }
682 
683 int asd_lu_reset(struct domain_device *dev, u8 *lun)
684 {
685 	int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);
686 
687 	if (res == TMF_RESP_FUNC_COMPLETE)
688 		asd_clear_nexus_I_T_L(dev, lun);
689 	return res;
690 }
691 
692 /**
693  * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
694  * @task: pointer to sas_task struct of interest
695  *
696  * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
697  * or TMF_RESP_FUNC_SUCC if the task is in the task set.
698  *
699  * Normally the management layer sets the task to aborted state,
700  * and then calls query task and then abort task.
701  */
702 int asd_query_task(struct sas_task *task)
703 {
704 	struct asd_ascb *ascb = task->lldd_task;
705 	int index;
706 
707 	if (ascb) {
708 		index = ascb->tc_index;
709 		return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
710 					    TMF_QUERY_TASK, index);
711 	}
712 	return TMF_RESP_FUNC_COMPLETE;
713 }
714
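/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical) of the sequence described above: query first, and only
 * abort when the device still holds the task in its task set.
 *
 *	static int example_query_then_abort(struct sas_task *task)
 *	{
 *		int res = asd_query_task(task);
 *
 *		if (res == TMF_RESP_FUNC_SUCC)	// still in the task set
 *			return asd_abort_task(task);
 *		// TMF_RESP_FUNC_COMPLETE: not in the task set, nothing to do
 *		return res;
 *	}
 */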