xref: /linux/drivers/scsi/libsas/sas_scsi_host.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Serial Attached SCSI (SAS) class SCSI Host glue.
4  *
5  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
6  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7  */
8 
9 #include <linux/kthread.h>
10 #include <linux/firmware.h>
11 #include <linux/export.h>
12 #include <linux/ctype.h>
13 #include <linux/hex.h>
14 #include <linux/kernel.h>
15 
16 #include "sas_internal.h"
17 
18 #include <scsi/scsi_host.h>
19 #include <scsi/scsi_device.h>
20 #include <scsi/scsi_tcq.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_eh.h>
23 #include <scsi/scsi_transport.h>
24 #include <scsi/scsi_transport_sas.h>
25 #include <scsi/sas_ata.h>
26 #include "scsi_sas_internal.h"
27 #include "scsi_transport_api.h"
28 #include "scsi_priv.h"
29 
30 #include <linux/err.h>
31 #include <linux/blkdev.h>
32 #include <linux/freezer.h>
33 #include <linux/gfp.h>
34 #include <linux/scatterlist.h>
35 #include <linux/libata.h>
36 
/* record final status and free the task */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
	struct task_status_struct *ts = &task->task_status;
	enum scsi_host_status hs = DID_OK;
	enum exec_status stat = SAS_SAM_STAT_GOOD;

	if (ts->resp == SAS_TASK_UNDELIVERED) {
		/* transport error */
		hs = DID_NO_CONNECT;
	} else { /* ts->resp == SAS_TASK_COMPLETE */
		/* task delivered, what happened afterwards? */
		switch (ts->stat) {
		case SAS_DEV_NO_RESPONSE:
		case SAS_INTERRUPTED:
		case SAS_PHY_DOWN:
		case SAS_NAK_R_ERR:
		case SAS_OPEN_TO:
			/* connection-level failures map to no-connect */
			hs = DID_NO_CONNECT;
			break;
		case SAS_DATA_UNDERRUN:
			/* short transfer: only an error if fewer bytes than
			 * the command's stated underflow limit arrived
			 */
			scsi_set_resid(sc, ts->residual);
			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
				hs = DID_ERROR;
			break;
		case SAS_DATA_OVERRUN:
			hs = DID_ERROR;
			break;
		case SAS_QUEUE_FULL:
			hs = DID_SOFT_ERROR; /* retry */
			break;
		case SAS_DEVICE_UNKNOWN:
			hs = DID_BAD_TARGET;
			break;
		case SAS_OPEN_REJECT:
			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
				hs = DID_SOFT_ERROR; /* retry */
			else
				hs = DID_ERROR;
			break;
		case SAS_PROTO_RESPONSE:
			/* not a valid SSP completion status; leave DID_OK and
			 * flag the LLDD bug
			 */
			pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
				  task->dev->port->ha->sas_ha_name);
			break;
		case SAS_ABORTED_TASK:
			hs = DID_ABORT;
			break;
		case SAS_SAM_STAT_CHECK_CONDITION:
			/* copy autosense data, capped at the midlayer buffer */
			memcpy(sc->sense_buffer, ts->buf,
			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
			stat = SAS_SAM_STAT_CHECK_CONDITION;
			break;
		default:
			/* pass any other SAM status straight through */
			stat = ts->stat;
			break;
		}
	}

	/* host byte in bits 16+, SAM status in the low byte */
	sc->result = (hs << 16) | stat;
	ASSIGN_SAS_TASK(sc, NULL);
	sas_free_task(task);
}
99 
/*
 * LLDD completion callback for SSP commands: detach the sas_task from its
 * scsi_cmnd and complete the command, unless error handling has frozen the
 * HA and thereby claimed ownership of the task.
 */
static void sas_scsi_task_done(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	/* done_lock arbitrates the race between normal completion and eh:
	 * once SAS_HA_FROZEN is observed here, eh owns this task
	 */
	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &ha->state))
		task = NULL;
	else
		ASSIGN_SAS_TASK(sc, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	if (unlikely(!task)) {
		/* task will be completed by the error handler */
		pr_debug("task done but aborted\n");
		return;
	}

	if (unlikely(!sc)) {
		pr_debug("task_done called with non existing SCSI cmnd!\n");
		sas_free_task(task);
		return;
	}

	sas_end_task(sc, task);
	scsi_done(sc);
}
129 
130 static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
131 					       struct domain_device *dev,
132 					       gfp_t gfp_flags)
133 {
134 	struct sas_task *task = sas_alloc_task(gfp_flags);
135 	struct scsi_lun lun;
136 
137 	if (!task)
138 		return NULL;
139 
140 	task->uldd_task = cmd;
141 	ASSIGN_SAS_TASK(cmd, task);
142 
143 	task->dev = dev;
144 	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
145 
146 	int_to_scsilun(cmd->device->lun, &lun);
147 	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
148 	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
149 	task->ssp_task.cmd = cmd;
150 
151 	task->scatter = scsi_sglist(cmd);
152 	task->num_scatter = scsi_sg_count(cmd);
153 	task->total_xfer_len = scsi_bufflen(cmd);
154 	task->data_dir = cmd->sc_data_direction;
155 
156 	task->task_done = sas_scsi_task_done;
157 
158 	return task;
159 }
160 
/*
 * sas_queuecommand - SCSI midlayer queuecommand entry for libsas hosts
 * @host: host the command was queued on
 * @cmd:  command to execute
 *
 * SATA commands are handed to libata under the ata port lock; SSP commands
 * are wrapped in a sas_task and passed to the LLDD.  Commands that cannot
 * be delivered are completed right here with an appropriate host byte.
 */
enum scsi_qc_status sas_queuecommand(struct Scsi_Host *host,
				     struct scsi_cmnd *cmd)
{
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task;
	int res = 0;

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state)) {
		cmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	if (dev_is_sata(dev)) {
		/* libata owns SATA command submission */
		spin_lock_irq(dev->sata_dev.ap->lock);
		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
		spin_unlock_irq(dev->sata_dev.ap->lock);
		return res;
	}

	task = sas_create_task(cmd, dev, GFP_ATOMIC);
	if (!task)
		return SCSI_MLQUEUE_HOST_BUSY;

	res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	if (res)
		goto out_free_task;
	return 0;

out_free_task:
	pr_debug("lldd_execute_task returned: %d\n", res);
	ASSIGN_SAS_TASK(cmd, NULL);
	sas_free_task(task);
	if (res == -SAS_QUEUE_FULL)
		cmd->result = DID_SOFT_ERROR << 16; /* retry */
	else
		cmd->result = DID_ERROR << 16;
out_done:
	scsi_done(cmd);
	return 0;
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
204 
/*
 * Terminate a command from the eh lists: record its final status and free
 * its task, then either defer the command to libata (SATA devices) or move
 * it onto the eh done queue for the midlayer to flush.
 */
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* At this point, we only get called following an actual abort
	 * of the task, so we should be guaranteed not to be racing with
	 * any completions from the LLD.  Task is freed after this.
	 */
	sas_end_task(cmd, task);

	if (dev_is_sata(dev)) {
		/* defer commands to libata so that libata EH can
		 * handle ata qcs correctly
		 */
		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
		return;
	}

	/* now finish the command and move it on to the error
	 * handler done list, this also takes it off the
	 * error handler pending list.
	 */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
231 
232 static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
233 {
234 	struct scsi_cmnd *cmd, *n;
235 
236 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
237 		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
238 		    cmd->device->lun == my_cmd->device->lun)
239 			sas_eh_finish_cmd(cmd);
240 	}
241 }
242 
243 static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
244 				     struct domain_device *dev)
245 {
246 	struct scsi_cmnd *cmd, *n;
247 
248 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
249 		struct domain_device *x = cmd_to_domain_dev(cmd);
250 
251 		if (x == dev)
252 			sas_eh_finish_cmd(cmd);
253 	}
254 }
255 
256 static void sas_scsi_clear_queue_port(struct list_head *error_q,
257 				      struct asd_sas_port *port)
258 {
259 	struct scsi_cmnd *cmd, *n;
260 
261 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
262 		struct domain_device *dev = cmd_to_domain_dev(cmd);
263 		struct asd_sas_port *x = dev->port;
264 
265 		if (x == port)
266 			sas_eh_finish_cmd(cmd);
267 	}
268 }
269 
/* result of sas_scsi_find_task(): what became of a timed-out task */
enum task_disposition {
	TASK_IS_DONE,		/* completed while we were trying to abort it */
	TASK_IS_ABORTED,	/* abort TMF reported complete */
	TASK_IS_AT_LU,		/* query says it is still queued at the LU */
	TASK_IS_NOT_AT_LU,	/* query says the LU no longer has it */
	TASK_ABORT_FAILED,	/* abort/query failed after all retries */
};
277 
/*
 * Try to abort @task, retrying up to five times, and classify the outcome.
 * Between attempts the task may complete on its own, or the LLDD's
 * query_task hook may report where the task currently is.
 */
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
	unsigned long flags;
	int i, res;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->shost->transportt);

	for (i = 0; i < 5; i++) {
		pr_notice("%s: aborting task 0x%p\n", __func__, task);
		res = si->dft->lldd_abort_task(task);

		/* the task may have completed while the abort was in flight */
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			pr_debug("%s: task 0x%p is done\n", __func__, task);
			return TASK_IS_DONE;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			pr_notice("%s: querying task 0x%p\n", __func__, task);
			res = si->dft->lldd_query_task(task);
			switch (res) {
			case TMF_RESP_FUNC_SUCC:
				pr_notice("%s: task 0x%p at LU\n", __func__,
					  task);
				return TASK_IS_AT_LU;
			case TMF_RESP_FUNC_COMPLETE:
				pr_notice("%s: task 0x%p not at LU\n",
					  __func__, task);
				return TASK_IS_NOT_AT_LU;
			case TMF_RESP_FUNC_FAILED:
				pr_notice("%s: task 0x%p failed to abort\n",
					  __func__, task);
				return TASK_ABORT_FAILED;
			default:
				/* unrecognized query result: retry the abort */
				pr_notice("%s: task 0x%p result code %d not handled\n",
					  __func__, task, res);
			}
		}
	}
	return TASK_ABORT_FAILED;
}
325 
326 static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
327 {
328 	int res = TMF_RESP_FUNC_FAILED;
329 	struct scsi_lun lun;
330 	struct sas_internal *i =
331 		to_sas_internal(dev->port->ha->shost->transportt);
332 
333 	int_to_scsilun(cmd->device->lun, &lun);
334 
335 	pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
336 		  SAS_ADDR(dev->sas_addr),
337 		  cmd->device->lun);
338 
339 	if (i->dft->lldd_abort_task_set)
340 		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
341 
342 	if (res == TMF_RESP_FUNC_FAILED) {
343 		if (i->dft->lldd_clear_task_set)
344 			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
345 	}
346 
347 	if (res == TMF_RESP_FUNC_FAILED) {
348 		if (i->dft->lldd_lu_reset)
349 			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
350 	}
351 
352 	return res;
353 }
354 
355 static int sas_recover_I_T(struct domain_device *dev)
356 {
357 	int res = TMF_RESP_FUNC_FAILED;
358 	struct sas_internal *i =
359 		to_sas_internal(dev->port->ha->shost->transportt);
360 
361 	pr_notice("I_T nexus reset for dev %016llx\n",
362 		  SAS_ADDR(dev->sas_addr));
363 
364 	if (i->dft->lldd_I_T_nexus_reset)
365 		res = i->dft->lldd_I_T_nexus_reset(dev);
366 
367 	return res;
368 }
369 
370 /* take a reference on the last known good phy for this device */
371 struct sas_phy *sas_get_local_phy(struct domain_device *dev)
372 {
373 	struct sas_ha_struct *ha = dev->port->ha;
374 	struct sas_phy *phy;
375 	unsigned long flags;
376 
377 	/* a published domain device always has a valid phy, it may be
378 	 * stale, but it is never NULL
379 	 */
380 	BUG_ON(!dev->phy);
381 
382 	spin_lock_irqsave(&ha->phy_port_lock, flags);
383 	phy = dev->phy;
384 	get_device(&phy->dev);
385 	spin_unlock_irqrestore(&ha->phy_port_lock, flags);
386 
387 	return phy;
388 }
389 EXPORT_SYMBOL_GPL(sas_get_local_phy);
390 
/*
 * Queue a directed reset of @dev (@reset_type is SAS_DEV_LU_RESET or
 * SAS_DEV_RESET; @lun only matters for LU resets) to be executed later by
 * the eh thread via sas_eh_handle_resets().  SATA devices are handed to
 * libata instead.  Returns SUCCESS once scheduled, FAILED if the device
 * kept eh pending through all retries.
 */
static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun)
{
	struct sas_ha_struct *ha = dev->port->ha;
	int scheduled = 0, tries = 100;

	/* ata: promote lun reset to bus reset */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		return SUCCESS;
	}

	while (!scheduled && tries--) {
		spin_lock_irq(&ha->lock);
		/* only queue if neither eh nor this reset type is already
		 * pending for the device
		 */
		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
		    !test_bit(reset_type, &dev->state)) {
			scheduled = 1;
			ha->eh_active++;
			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
			set_bit(SAS_DEV_EH_PENDING, &dev->state);
			set_bit(reset_type, &dev->state);
			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
			scsi_schedule_eh(ha->shost);
		}
		spin_unlock_irq(&ha->lock);

		if (scheduled)
			return SUCCESS;
	}

	pr_warn("%s reset of %s failed\n",
		reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
		dev_name(&dev->rphy->dev));

	return FAILED;
}
426 
/*
 * sas_eh_abort_handler - scsi eh abort callback for libsas hosts
 * @cmd: command whose sas_task should be aborted
 *
 * Asks the LLDD to abort the command's task.  SATA commands are refused
 * unless host eh is already scheduled (libata owns their recovery).
 * Returns SUCCESS or FAILED.
 */
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_task *task = TO_SAS_TASK(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);
	unsigned long flags;

	if (!i->dft->lldd_abort_task)
		return FAILED;

	spin_lock_irqsave(host->host_lock, flags);
	/* We cannot do async aborts for SATA devices */
	if (dev_is_sata(dev) && !host->host_eh_scheduled) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return FAILED;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	/* the task may already have completed and been detached */
	if (task)
		res = i->dft->lldd_abort_task(task);
	else
		pr_notice("no task to abort\n");
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
457 
458 /* Attempt to send a LUN reset message to a device */
459 int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
460 {
461 	int res;
462 	struct scsi_lun lun;
463 	struct Scsi_Host *host = cmd->device->host;
464 	struct domain_device *dev = cmd_to_domain_dev(cmd);
465 	struct sas_internal *i = to_sas_internal(host->transportt);
466 
467 	if (current != host->ehandler)
468 		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun);
469 
470 	int_to_scsilun(cmd->device->lun, &lun);
471 
472 	if (!i->dft->lldd_lu_reset)
473 		return FAILED;
474 
475 	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
476 	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
477 		return SUCCESS;
478 
479 	return FAILED;
480 }
481 EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
482 
483 int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
484 {
485 	int res;
486 	struct Scsi_Host *host = cmd->device->host;
487 	struct domain_device *dev = cmd_to_domain_dev(cmd);
488 	struct sas_internal *i = to_sas_internal(host->transportt);
489 
490 	if (current != host->ehandler)
491 		return sas_queue_reset(dev, SAS_DEV_RESET, 0);
492 
493 	if (!i->dft->lldd_I_T_nexus_reset)
494 		return FAILED;
495 
496 	res = i->dft->lldd_I_T_nexus_reset(dev);
497 	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
498 	    res == -ENODEV)
499 		return SUCCESS;
500 
501 	return FAILED;
502 }
503 EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
504 
505 /* Try to reset a device */
506 static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
507 {
508 	int res;
509 	struct Scsi_Host *shost = cmd->device->host;
510 
511 	if (!shost->hostt->eh_device_reset_handler)
512 		goto try_target_reset;
513 
514 	res = shost->hostt->eh_device_reset_handler(cmd);
515 	if (res == SUCCESS)
516 		return res;
517 
518 try_target_reset:
519 	if (shost->hostt->eh_target_reset_handler)
520 		return shost->hostt->eh_target_reset_handler(cmd);
521 
522 	return FAILED;
523 }
524 
/*
 * Core recovery for commands that still own a sas_task: try to abort each
 * task, escalating through LU reset, I_T nexus reset, then port-wide and
 * HA-wide nexus clears, until every command on @work_q is accounted for.
 * Recovered commands go through sas_eh_finish_cmd(); commands whose tasks
 * completed before eh could claim them (and any SATA commands deferred to
 * libata) are put back on @work_q before returning.
 */
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
	struct scsi_cmnd *cmd, *n;
	enum task_disposition res = TASK_IS_DONE;
	int tmf_resp, need_reset;
	struct sas_internal *i = to_sas_internal(shost->transportt);
	unsigned long flags;
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(done);

	/* clean out any commands that won the completion vs eh race */
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct sas_task *task;

		spin_lock_irqsave(&dev->done_lock, flags);
		/* by this point the lldd has either observed
		 * SAS_HA_FROZEN and is leaving the task alone, or has
		 * won the race with eh and decided to complete it
		 */
		task = TO_SAS_TASK(cmd);
		spin_unlock_irqrestore(&dev->done_lock, flags);

		if (!task)
			list_move_tail(&cmd->eh_entry, &done);
	}

 Again:
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);

		list_del_init(&cmd->eh_entry);

		spin_lock_irqsave(&task->task_state_lock, flags);
		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (need_reset) {
			/* the LLDD flagged this task for a device reset:
			 * skip the abort attempt, go straight to LU recovery
			 */
			pr_notice("%s: task 0x%p requests reset\n",
				  __func__, task);
			goto reset;
		}

		pr_debug("trying to find task 0x%p\n", task);
		res = sas_scsi_find_task(task);

		switch (res) {
		case TASK_IS_DONE:
			pr_notice("%s: task 0x%p is done\n", __func__,
				    task);
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_ABORTED:
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_AT_LU:
			pr_info("task 0x%p is at LU: lu recover\n", task);
 reset:
			tmf_resp = sas_recover_lu(task->dev, cmd);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				pr_notice("dev %016llx LU 0x%llx is recovered\n",
					  SAS_ADDR(task->dev),
					  cmd->device->lun);
				sas_eh_finish_cmd(cmd);
				/* the LU recovery disposed of every other
				 * pending command on the same LU, so rescan
				 * the list from the top
				 */
				sas_scsi_clear_queue_lu(work_q, cmd);
				goto Again;
			}
			fallthrough;
		case TASK_IS_NOT_AT_LU:
		case TASK_ABORT_FAILED:
			pr_notice("task 0x%p is not at LU: I_T recover\n",
				  task);
			tmf_resp = sas_recover_I_T(task->dev);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
			    tmf_resp == -ENODEV) {
				struct domain_device *dev = task->dev;
				pr_notice("I_T %016llx recovered\n",
					  SAS_ADDR(task->dev->sas_addr));
				sas_eh_finish_cmd(cmd);
				sas_scsi_clear_queue_I_T(work_q, dev);
				goto Again;
			}
			/* Hammer time :-) */
			try_to_reset_cmd_device(cmd);
			if (i->dft->lldd_clear_nexus_port) {
				struct asd_sas_port *port = task->dev->port;
				pr_debug("clearing nexus for port:%d\n",
					  port->id);
				res = i->dft->lldd_clear_nexus_port(port);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					pr_notice("clear nexus port:%d succeeded\n",
						  port->id);
					sas_eh_finish_cmd(cmd);
					sas_scsi_clear_queue_port(work_q,
								  port);
					goto Again;
				}
			}
			if (i->dft->lldd_clear_nexus_ha) {
				pr_debug("clear nexus ha\n");
				res = i->dft->lldd_clear_nexus_ha(ha);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					pr_notice("clear nexus ha succeeded\n");
					sas_eh_finish_cmd(cmd);
					goto clear_q;
				}
			}
			/* If we are here -- this means that no amount
			 * of effort could recover from errors.  Quite
			 * possibly the HA just disappeared.
			 */
			pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
			       SAS_ADDR(task->dev->sas_addr),
			       cmd->device->lun);

			sas_eh_finish_cmd(cmd);
			goto clear_q;
		}
	}
 out:
	/* return the race winners and any libata-deferred commands */
	list_splice_tail(&done, work_q);
	list_splice_tail_init(&ha->eh_ata_q, work_q);
	return;

 clear_q:
	pr_debug("--- Exit %s -- clear_q\n", __func__);
	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
		sas_eh_finish_cmd(cmd);
	goto out;
}
657 
/*
 * Execute the directed resets queued by sas_queue_reset(): pop each device
 * off ha->eh_dev_q and issue the LU and/or I_T nexus reset it asked for.
 * ha->lock is dropped around the LLDD calls; a kref pins the device while
 * the lock is not held.
 */
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* handle directed resets to sas devices */
	spin_lock_irq(&ha->lock);
	while (!list_empty(&ha->eh_dev_q)) {
		struct domain_device *dev;
		struct ssp_device *ssp;

		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
		list_del_init(&ssp->eh_list_node);
		dev = container_of(ssp, typeof(*dev), ssp_dev);
		kref_get(&dev->kref);
		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

		spin_unlock_irq(&ha->lock);

		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
			i->dft->lldd_I_T_nexus_reset(dev);

		sas_put_device(dev);
		spin_lock_irq(&ha->lock);
		/* allow the device to be queued for eh again */
		clear_bit(SAS_DEV_EH_PENDING, &dev->state);
		ha->eh_active--;
	}
	spin_unlock_irq(&ha->lock);
}
690 
691 
/*
 * sas_scsi_recover_host - libsas error-recovery strategy handler
 * @shost: host with failed/timed-out commands to recover
 *
 * Runs on the scsi_eh thread: freezes sas_task completion, disposes of
 * commands that still own tasks, lets the midlayer and libata handle the
 * rest, services queued directed resets, and repeats while new eh work was
 * scheduled during the pass.
 */
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(eh_work_q);
	int tries = 0;
	bool retry;

retry:
	tries++;
	retry = true;
	/* take over the midlayer's queue of failed commands */
	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	pr_notice("Enter %s busy: %d failed: %d\n",
		  __func__, scsi_host_busy(shost), shost->host_failed);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism),
	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
	 */
	set_bit(SAS_HA_FROZEN, &ha->state);
	sas_eh_handle_sas_errors(shost, &eh_work_q);
	clear_bit(SAS_HA_FROZEN, &ha->state);
	if (list_empty(&eh_work_q))
		goto out;

	/*
	 * Now deal with SCSI commands that completed ok but have an error
	 * code (and hopefully sense data) attached.  This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	sas_ata_eh(shost, &eh_work_q);
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
	sas_eh_handle_resets(shost);

	/* now link into libata eh --- if we have any ata devices */
	sas_ata_strategy_handler(shost);

	scsi_eh_flush_done_q(&ha->eh_done_q);

	/* check if any new eh work was scheduled during the last run */
	spin_lock_irq(&ha->lock);
	if (ha->eh_active == 0) {
		shost->host_eh_scheduled = 0;
		retry = false;
	}
	spin_unlock_irq(&ha->lock);

	if (retry)
		goto retry;

	pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
		  __func__, scsi_host_busy(shost),
		  shost->host_failed, tries);
}
752 
753 int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
754 {
755 	struct domain_device *dev = sdev_to_domain_dev(sdev);
756 
757 	if (dev_is_sata(dev))
758 		return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
759 
760 	return -EINVAL;
761 }
762 EXPORT_SYMBOL_GPL(sas_ioctl);
763 
764 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
765 {
766 	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
767 	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
768 	struct domain_device *found_dev = NULL;
769 	int i;
770 	unsigned long flags;
771 
772 	spin_lock_irqsave(&ha->phy_port_lock, flags);
773 	for (i = 0; i < ha->num_phys; i++) {
774 		struct asd_sas_port *port = ha->sas_port[i];
775 		struct domain_device *dev;
776 
777 		spin_lock(&port->dev_list_lock);
778 		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
779 			if (rphy == dev->rphy) {
780 				found_dev = dev;
781 				spin_unlock(&port->dev_list_lock);
782 				goto found;
783 			}
784 		}
785 		spin_unlock(&port->dev_list_lock);
786 	}
787  found:
788 	spin_unlock_irqrestore(&ha->phy_port_lock, flags);
789 
790 	return found_dev;
791 }
792 
793 int sas_target_alloc(struct scsi_target *starget)
794 {
795 	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
796 	struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
797 
798 	if (!found_dev)
799 		return -ENODEV;
800 
801 	kref_get(&found_dev->kref);
802 	starget->hostdata = found_dev;
803 	return 0;
804 }
805 EXPORT_SYMBOL_GPL(sas_target_alloc);
806 
807 #define SAS_DEF_QD 256
808 
809 int sas_sdev_configure(struct scsi_device *scsi_dev, struct queue_limits *lim)
810 {
811 	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
812 
813 	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
814 
815 	if (dev_is_sata(dev)) {
816 		ata_sas_sdev_configure(scsi_dev, lim, dev->sata_dev.ap);
817 		return 0;
818 	}
819 
820 	sas_read_port_mode_page(scsi_dev);
821 
822 	if (scsi_dev->tagged_supported) {
823 		scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
824 	} else {
825 		pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
826 			  SAS_ADDR(dev->sas_addr), scsi_dev->lun);
827 		scsi_change_queue_depth(scsi_dev, 1);
828 	}
829 
830 	scsi_dev->allow_restart = 1;
831 
832 	return 0;
833 }
834 EXPORT_SYMBOL_GPL(sas_sdev_configure);
835 
836 int sas_change_queue_depth(struct scsi_device *sdev, int depth)
837 {
838 	struct domain_device *dev = sdev_to_domain_dev(sdev);
839 
840 	if (dev_is_sata(dev))
841 		return ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
842 
843 	if (!sdev->tagged_supported)
844 		depth = 1;
845 	return scsi_change_queue_depth(sdev, depth);
846 }
847 EXPORT_SYMBOL_GPL(sas_change_queue_depth);
848 
/*
 * sas_bios_param - report a fictitious BIOS disk geometry
 * @scsi_dev: unused
 * @unused:   unused
 * @capacity: device capacity in sectors
 * @hsc:      out: heads, sectors-per-track, cylinders
 *
 * Uses the conventional fake 255-head/63-sector geometry and derives the
 * cylinder count from the capacity.
 */
int sas_bios_param(struct scsi_device *scsi_dev,
			  struct gendisk *unused,
			  sector_t capacity, int *hsc)
{
	hsc[0] = 255;
	hsc[1] = 63;
	sector_div(capacity, 255*63);	/* capacity becomes the cylinder count */
	hsc[2] = capacity;

	return 0;
}
EXPORT_SYMBOL_GPL(sas_bios_param);
861 
/* Completion callback for internal slow tasks: cancel the watchdog timer
 * and wake the waiter blocked on slow_task->completion.
 */
void sas_task_internal_done(struct sas_task *task)
{
	timer_delete(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}
867 
/* Watchdog for internal slow tasks: if the task has not completed by the
 * time the timer fires, mark it aborted and wake the waiter ourselves.
 */
void sas_task_internal_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = timer_container_of(slow, t, timer);
	struct sas_task *task = slow->task;
	bool is_completed = true;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* if the task raced to completion, task_done already signalled */
	if (!is_completed)
		complete(&task->slow_task->completion);
}
885 
#define TASK_TIMEOUT			(20 * HZ)	/* watchdog per attempt */
#define TASK_RETRY			3		/* attempts before giving up */

/*
 * Build and synchronously execute an internal abort task of the given
 * @type (single tag or whole device) on hardware queue @qid, retrying up
 * to TASK_RETRY times.  @data is passed through to the LLDD's
 * abort-timeout hook.  Returns a TMF_RESP_* code, -ENOMEM, -EIO on an
 * unhandled timeout, or the LLDD's submission error.
 */
static int sas_execute_internal_abort(struct domain_device *device,
				      enum sas_internal_abort type, u16 tag,
				      unsigned int qid, void *data)
{
	struct sas_ha_struct *ha = device->port->ha;
	struct sas_internal *i = to_sas_internal(ha->shost->transportt);
	struct sas_task *task = NULL;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
		task->task_done = sas_task_internal_done;
		/* arm the watchdog before handing the task to the LLDD */
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		task->abort_task.tag = tag;
		task->abort_task.type = type;
		task->abort_task.qid = qid;

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			timer_delete_sync(&task->slow_task->timer);
			pr_err("Executing internal abort failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;

		/* Even if the internal abort timed out, return direct. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			bool quit = true;

			if (i->dft->lldd_abort_timeout)
				quit = i->dft->lldd_abort_timeout(task, data);
			else
				pr_err("Internal abort: timeout %016llx\n",
				       SAS_ADDR(device->sas_addr));
			res = -EIO;
			if (quit)
				break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		/* unexpected status: free this attempt's task and retry */
		pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
		       SAS_ADDR(device->sas_addr), task->task_status.resp,
		       task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
	BUG_ON(retry == TASK_RETRY && task != NULL);
	sas_free_task(task);
	return res;
}
961 
962 int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
963 				      unsigned int qid, void *data)
964 {
965 	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
966 					  tag, qid, data);
967 }
968 EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
969 
970 int sas_execute_internal_abort_dev(struct domain_device *device,
971 				   unsigned int qid, void *data)
972 {
973 	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
974 					  SCSI_NO_TAG, qid, data);
975 }
976 EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
977 
/*
 * sas_execute_tmf - synchronously execute a task management function
 * @device:       target device
 * @parameter:    protocol frame: SSP task IU or ATA FIS, per device type
 * @para_len:     length of @parameter in bytes
 * @force_phy_id: if >= 0, force the (SATA) task out of this phy
 * @tmf:          TMF descriptor attached to the task
 *
 * Builds a slow task guarded by a TASK_TIMEOUT watchdog, submits it via
 * the LLDD and waits, retrying up to TASK_RETRY times.  Returns a
 * TMF_RESP_* code, the residual byte count on data underrun, or a
 * negative errno.
 */
int sas_execute_tmf(struct domain_device *device, void *parameter,
		    int para_len, int force_phy_id,
		    struct sas_tmf_task *tmf)
{
	struct sas_task *task;
	struct sas_internal *i =
		to_sas_internal(device->port->ha->shost->transportt);
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			if (force_phy_id >= 0) {
				task->ata_task.force_phy = true;
				task->ata_task.force_phy_id = force_phy_id;
			}
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}

		task->task_done = sas_task_internal_done;
		task->tmf = tmf;

		/* arm the watchdog before handing the task to the LLDD */
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			timer_delete_sync(&task->slow_task->timer);
			pr_err("executing TMF task failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);

		if (i->dft->lldd_tmf_exec_complete)
			i->dft->lldd_tmf_exec_complete(device);

		res = TMF_RESP_FUNC_FAILED;

		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				/* timed out without ever completing: give up */
				pr_err("TMF task timeout for %016llx and not done\n",
				       SAS_ADDR(device->sas_addr));
				if (i->dft->lldd_tmf_aborted)
					i->dft->lldd_tmf_aborted(task);
				break;
			}
			pr_warn("TMF task timeout for %016llx and done\n",
				SAS_ADDR(device->sas_addr));
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			pr_warn("TMF task blocked task error %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			pr_warn("TMF task open reject failed  %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EIO;
		} else {
			pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
		}
		/* unexpected status: free this attempt's task and retry */
		sas_free_task(task);
		task = NULL;
	}

	if (retry == TASK_RETRY)
		pr_warn("executing TMF for %016llx failed after %d attempts!\n",
			SAS_ADDR(device->sas_addr), TASK_RETRY);
	sas_free_task(task);

	return res;
}
1095 
1096 static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
1097 			       struct sas_tmf_task *tmf)
1098 {
1099 	struct sas_ssp_task ssp_task;
1100 
1101 	if (!(device->tproto & SAS_PROTOCOL_SSP))
1102 		return TMF_RESP_FUNC_ESUPP;
1103 
1104 	memcpy(ssp_task.LUN, lun, 8);
1105 
1106 	return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
1107 }
1108 
1109 int sas_abort_task_set(struct domain_device *dev, u8 *lun)
1110 {
1111 	struct sas_tmf_task tmf_task = {
1112 		.tmf = TMF_ABORT_TASK_SET,
1113 	};
1114 
1115 	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1116 }
1117 EXPORT_SYMBOL_GPL(sas_abort_task_set);
1118 
1119 int sas_clear_task_set(struct domain_device *dev, u8 *lun)
1120 {
1121 	struct sas_tmf_task tmf_task = {
1122 		.tmf = TMF_CLEAR_TASK_SET,
1123 	};
1124 
1125 	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1126 }
1127 EXPORT_SYMBOL_GPL(sas_clear_task_set);
1128 
1129 int sas_lu_reset(struct domain_device *dev, u8 *lun)
1130 {
1131 	struct sas_tmf_task tmf_task = {
1132 		.tmf = TMF_LU_RESET,
1133 	};
1134 
1135 	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1136 }
1137 EXPORT_SYMBOL_GPL(sas_lu_reset);
1138 
1139 int sas_query_task(struct sas_task *task, u16 tag)
1140 {
1141 	struct sas_tmf_task tmf_task = {
1142 		.tmf = TMF_QUERY_TASK,
1143 		.tag_of_task_to_be_managed = tag,
1144 	};
1145 	struct scsi_cmnd *cmnd = task->uldd_task;
1146 	struct domain_device *dev = task->dev;
1147 	struct scsi_lun lun;
1148 
1149 	int_to_scsilun(cmnd->device->lun, &lun);
1150 
1151 	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1152 }
1153 EXPORT_SYMBOL_GPL(sas_query_task);
1154 
1155 int sas_abort_task(struct sas_task *task, u16 tag)
1156 {
1157 	struct sas_tmf_task tmf_task = {
1158 		.tmf = TMF_ABORT_TASK,
1159 		.tag_of_task_to_be_managed = tag,
1160 	};
1161 	struct scsi_cmnd *cmnd = task->uldd_task;
1162 	struct domain_device *dev = task->dev;
1163 	struct scsi_lun lun;
1164 
1165 	int_to_scsilun(cmnd->device->lun, &lun);
1166 
1167 	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1168 }
1169 EXPORT_SYMBOL_GPL(sas_abort_task);
1170 
1171 /*
1172  * Tell an upper layer that it needs to initiate an abort for a given task.
1173  * This should only ever be called by an LLDD.
1174  */
1175 void sas_task_abort(struct sas_task *task)
1176 {
1177 	struct scsi_cmnd *sc = task->uldd_task;
1178 
1179 	/* Escape for libsas internal commands */
1180 	if (!sc) {
1181 		struct sas_task_slow *slow = task->slow_task;
1182 
1183 		if (!slow)
1184 			return;
1185 		if (!timer_delete(&slow->timer))
1186 			return;
1187 		slow->timer.function(&slow->timer);
1188 		return;
1189 	}
1190 
1191 	if (dev_is_sata(task->dev))
1192 		sas_ata_task_abort(task);
1193 	else
1194 		blk_abort_request(scsi_cmd_to_rq(sc));
1195 }
1196 EXPORT_SYMBOL_GPL(sas_task_abort);
1197 
1198 int sas_sdev_init(struct scsi_device *sdev)
1199 {
1200 	if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
1201 		return -ENXIO;
1202 
1203 	return 0;
1204 }
1205 EXPORT_SYMBOL_GPL(sas_sdev_init);
1206 
1207 void sas_target_destroy(struct scsi_target *starget)
1208 {
1209 	struct domain_device *found_dev = starget->hostdata;
1210 
1211 	if (!found_dev)
1212 		return;
1213 
1214 	starget->hostdata = NULL;
1215 	sas_put_device(found_dev);
1216 }
1217 EXPORT_SYMBOL_GPL(sas_target_destroy);
1218 
1219 #define SAS_STRING_ADDR_SIZE	16
1220 
1221 int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
1222 {
1223 	int res;
1224 	const struct firmware *fw;
1225 
1226 	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
1227 	if (res)
1228 		return res;
1229 
1230 	if (fw->size < SAS_STRING_ADDR_SIZE) {
1231 		res = -ENODEV;
1232 		goto out;
1233 	}
1234 
1235 	res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
1236 	if (res)
1237 		goto out;
1238 
1239 out:
1240 	release_firmware(fw);
1241 	return res;
1242 }
1243 EXPORT_SYMBOL_GPL(sas_request_addr);
1244 
1245