1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55 #include <scsi/sas.h>
56 #include <linux/bitops.h>
57 #include "isci.h"
58 #include "port.h"
59 #include "remote_device.h"
60 #include "request.h"
61 #include "remote_node_context.h"
62 #include "scu_event_codes.h"
63 #include "task.h"
64
65 #undef C
66 #define C(a) (#a)
67 const char *dev_state_name(enum sci_remote_device_states state)
68 {
69 static const char * const strings[] = REMOTE_DEV_STATES;
70
71 return strings[state];
72 }
73 #undef C
74
75 enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
76 enum sci_remote_node_suspension_reasons reason)
77 {
78 return sci_remote_node_context_suspend(&idev->rnc, reason,
79 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
80 }
81
82 /**
83 * isci_remote_device_ready() - This function is called by the ihost when the
84 * remote device is ready. We mark the isci device as ready and signal the
85 * waiting process.
86 * @ihost: our valid isci_host
87 * @idev: remote device
88 *
89 */
90 static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
91 {
92 dev_dbg(&ihost->pdev->dev,
93 "%s: idev = %p\n", __func__, idev);
94
95 clear_bit(IDEV_IO_NCQERROR, &idev->flags);
96 set_bit(IDEV_IO_READY, &idev->flags);
97 if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
98 wake_up(&ihost->eventq);
99 }
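/* Illustrative sketch, not part of the driver: the wake_up(&ihost->eventq)
 * above pairs with waiters that poll the device flags; wait_for_device_start()
 * (declared in a driver header) is assumed to be roughly:
 *
 *	wait_event(ihost->eventq,
 *		   !test_bit(IDEV_START_PENDING, &idev->flags));
 */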
100
101 static enum sci_status sci_remote_device_terminate_req(
102 struct isci_host *ihost,
103 struct isci_remote_device *idev,
104 int check_abort,
105 struct isci_request *ireq)
106 {
107 if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
108 (ireq->target_device != idev) ||
109 (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
110 return SCI_SUCCESS;
111
112 dev_dbg(&ihost->pdev->dev,
113 "%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
114 __func__, idev, idev->flags, ireq, ireq->target_device);
115
116 set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
117
118 return sci_controller_terminate_request(ihost, idev, ireq);
119 }
120
121 static enum sci_status sci_remote_device_terminate_reqs_checkabort(
122 struct isci_remote_device *idev,
123 int chk)
124 {
125 struct isci_host *ihost = idev->owning_port->owning_controller;
126 enum sci_status status = SCI_SUCCESS;
127 u32 i;
128
129 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
130 struct isci_request *ireq = ihost->reqs[i];
131 enum sci_status s;
132
133 s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
134 if (s != SCI_SUCCESS)
135 status = s;
136 }
137 return status;
138 }
139
140 static bool isci_compare_suspendcount(
141 struct isci_remote_device *idev,
142 u32 localcount)
143 {
144 smp_rmb();
145
146 /* Check for a change in the suspend count, or the RNC
147 * being destroyed.
148 */
149 return (localcount != idev->rnc.suspend_count)
150 || sci_remote_node_context_is_being_destroyed(&idev->rnc);
151 }
152
153 static bool isci_check_reqterm(
154 struct isci_host *ihost,
155 struct isci_remote_device *idev,
156 struct isci_request *ireq,
157 u32 localcount)
158 {
159 unsigned long flags;
160 bool res;
161
162 spin_lock_irqsave(&ihost->scic_lock, flags);
163 res = isci_compare_suspendcount(idev, localcount)
164 && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
165 spin_unlock_irqrestore(&ihost->scic_lock, flags);
166
167 return res;
168 }
169
170 static bool isci_check_devempty(
171 struct isci_host *ihost,
172 struct isci_remote_device *idev,
173 u32 localcount)
174 {
175 unsigned long flags;
176 bool res;
177
178 spin_lock_irqsave(&ihost->scic_lock, flags);
179 res = isci_compare_suspendcount(idev, localcount)
180 && idev->started_request_count == 0;
181 spin_unlock_irqrestore(&ihost->scic_lock, flags);
182
183 return res;
184 }
185
186 enum sci_status isci_remote_device_terminate_requests(
187 struct isci_host *ihost,
188 struct isci_remote_device *idev,
189 struct isci_request *ireq)
190 {
191 enum sci_status status = SCI_SUCCESS;
192 unsigned long flags;
193 u32 rnc_suspend_count;
194
195 spin_lock_irqsave(&ihost->scic_lock, flags);
196
197 if (isci_get_device(idev) == NULL) {
198 dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
199 __func__, idev);
200 spin_unlock_irqrestore(&ihost->scic_lock, flags);
201 status = SCI_FAILURE;
202 } else {
203 /* If already suspended, don't wait for another suspension. */
204 smp_rmb();
205 rnc_suspend_count
206 = sci_remote_node_context_is_suspended(&idev->rnc)
207 ? 0 : idev->rnc.suspend_count;
208
209 dev_dbg(&ihost->pdev->dev,
210 "%s: idev=%p, ireq=%p; started_request_count=%d, "
211 "rnc_suspend_count=%d, rnc.suspend_count=%d"
212 "about to wait\n",
213 __func__, idev, ireq, idev->started_request_count,
214 rnc_suspend_count, idev->rnc.suspend_count);
215
216 #define MAX_SUSPEND_MSECS 10000
217 if (ireq) {
218 /* Terminate a specific TC. */
219 set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
220 sci_remote_device_terminate_req(ihost, idev, 0, ireq);
221 spin_unlock_irqrestore(&ihost->scic_lock, flags);
222 if (!wait_event_timeout(ihost->eventq,
223 isci_check_reqterm(ihost, idev, ireq,
224 rnc_suspend_count),
225 msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
226
227 dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
228 __func__, ihost->id);
229 dev_dbg(&ihost->pdev->dev,
230 "%s: ******* Timeout waiting for "
231 "suspend; idev=%p, current state %s; "
232 "started_request_count=%d, flags=%lx\n\t"
233 "rnc_suspend_count=%d, rnc.suspend_count=%d "
234 "RNC: current state %s, current "
235 "suspend_type %x dest state %d;\n"
236 "ireq=%p, ireq->flags = %lx\n",
237 __func__, idev,
238 dev_state_name(idev->sm.current_state_id),
239 idev->started_request_count, idev->flags,
240 rnc_suspend_count, idev->rnc.suspend_count,
241 rnc_state_name(idev->rnc.sm.current_state_id),
242 idev->rnc.suspend_type,
243 idev->rnc.destination_state,
244 ireq, ireq->flags);
245 }
246 spin_lock_irqsave(&ihost->scic_lock, flags);
247 clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
248 if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
249 isci_free_tag(ihost, ireq->io_tag);
250 spin_unlock_irqrestore(&ihost->scic_lock, flags);
251 } else {
252 /* Terminate all TCs. */
253 sci_remote_device_terminate_requests(idev);
254 spin_unlock_irqrestore(&ihost->scic_lock, flags);
255 if (!wait_event_timeout(ihost->eventq,
256 isci_check_devempty(ihost, idev,
257 rnc_suspend_count),
258 msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
259
260 dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
261 __func__, ihost->id);
262 dev_dbg(&ihost->pdev->dev,
263 "%s: ******* Timeout waiting for "
264 "suspend; idev=%p, current state %s; "
265 "started_request_count=%d, flags=%lx\n\t"
266 "rnc_suspend_count=%d, "
267 "RNC: current state %s, "
268 "rnc.suspend_count=%d, current "
269 "suspend_type %x dest state %d\n",
270 __func__, idev,
271 dev_state_name(idev->sm.current_state_id),
272 idev->started_request_count, idev->flags,
273 rnc_suspend_count,
274 rnc_state_name(idev->rnc.sm.current_state_id),
275 idev->rnc.suspend_count,
276 idev->rnc.suspend_type,
277 idev->rnc.destination_state);
278 }
279 }
280 dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
281 __func__, idev);
282 isci_put_device(idev);
283 }
284 return status;
285 }
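/* Usage sketch, illustrative only: callers pass a specific ireq to flush a
 * single task context, or ireq == NULL to terminate every active request on
 * the device, e.g. as isci_remote_device_suspend_terminate() does later in
 * this file:
 *
 *	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
 *	status = isci_remote_device_terminate_requests(ihost, idev, NULL);
 */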
286
287 /**
288 * isci_remote_device_not_ready() - This function is called by the ihost when
289 * the remote device is not ready. We mark the isci device as not ready for
290 * I/O (clear "ready_for_io") and signal the waiting process.
291 * @ihost: This parameter specifies the isci host object.
292 * @idev: This parameter specifies the remote device
293 * @reason: Reason to switch on
294 *
295 * sci_lock is held on entrance to this function.
296 */
297 static void isci_remote_device_not_ready(struct isci_host *ihost,
298 struct isci_remote_device *idev,
299 u32 reason)
300 {
301 dev_dbg(&ihost->pdev->dev,
302 "%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
303
304 switch (reason) {
305 case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
306 set_bit(IDEV_IO_NCQERROR, &idev->flags);
307
308 /* Suspend the remote device so the I/O can be terminated. */
309 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
310
311 /* Kill all outstanding requests for the device. */
312 sci_remote_device_terminate_requests(idev);
313
314 fallthrough; /* into the default case */
315 default:
316 clear_bit(IDEV_IO_READY, &idev->flags);
317 break;
318 }
319 }
320
321 /* called once the remote node context is ready to be freed.
322 * The remote device can now report that its stop operation is complete.
323 */
324 static void rnc_destruct_done(void *_dev)
325 {
326 struct isci_remote_device *idev = _dev;
327
328 BUG_ON(idev->started_request_count != 0);
329 sci_change_state(&idev->sm, SCI_DEV_STOPPED);
330 }
331
332 enum sci_status sci_remote_device_terminate_requests(
333 struct isci_remote_device *idev)
334 {
335 return sci_remote_device_terminate_reqs_checkabort(idev, 0);
336 }
337
338 enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
339 u32 timeout)
340 {
341 struct sci_base_state_machine *sm = &idev->sm;
342 enum sci_remote_device_states state = sm->current_state_id;
343
344 switch (state) {
345 case SCI_DEV_INITIAL:
346 case SCI_DEV_FAILED:
347 case SCI_DEV_FINAL:
348 default:
349 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
350 __func__, dev_state_name(state));
351 return SCI_FAILURE_INVALID_STATE;
352 case SCI_DEV_STOPPED:
353 return SCI_SUCCESS;
354 case SCI_DEV_STARTING:
355 /* device not started so there had better be no requests */
356 BUG_ON(idev->started_request_count != 0);
357 sci_remote_node_context_destruct(&idev->rnc,
358 rnc_destruct_done, idev);
359 /* Transition to the stopping state and wait for the
360 * remote node to complete being posted and invalidated.
361 */
362 sci_change_state(sm, SCI_DEV_STOPPING);
363 return SCI_SUCCESS;
364 case SCI_DEV_READY:
365 case SCI_STP_DEV_IDLE:
366 case SCI_STP_DEV_CMD:
367 case SCI_STP_DEV_NCQ:
368 case SCI_STP_DEV_NCQ_ERROR:
369 case SCI_STP_DEV_AWAIT_RESET:
370 case SCI_SMP_DEV_IDLE:
371 case SCI_SMP_DEV_CMD:
372 sci_change_state(sm, SCI_DEV_STOPPING);
373 if (idev->started_request_count == 0)
374 sci_remote_node_context_destruct(&idev->rnc,
375 rnc_destruct_done,
376 idev);
377 else {
378 sci_remote_device_suspend(
379 idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
380 sci_remote_device_terminate_requests(idev);
381 }
382 return SCI_SUCCESS;
383 case SCI_DEV_STOPPING:
384 /* All requests should have been terminated, but if there is an
385 * attempt to stop a device already in the stopping state, then
386 * try again to terminate.
387 */
388 return sci_remote_device_terminate_requests(idev);
389 case SCI_DEV_RESETTING:
390 sci_change_state(sm, SCI_DEV_STOPPING);
391 return SCI_SUCCESS;
392 }
393 }
394
395 enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
396 u32 frame_index)
397 {
398 struct sci_base_state_machine *sm = &idev->sm;
399 enum sci_remote_device_states state = sm->current_state_id;
400 struct isci_host *ihost = idev->owning_port->owning_controller;
401 enum sci_status status;
402
403 switch (state) {
404 case SCI_DEV_INITIAL:
405 case SCI_DEV_STOPPED:
406 case SCI_DEV_STARTING:
407 case SCI_STP_DEV_IDLE:
408 case SCI_SMP_DEV_IDLE:
409 case SCI_DEV_FINAL:
410 default:
411 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
412 __func__, dev_state_name(state));
413 /* Return the frame back to the controller */
414 sci_controller_release_frame(ihost, frame_index);
415 return SCI_FAILURE_INVALID_STATE;
416 case SCI_DEV_READY:
417 case SCI_STP_DEV_NCQ_ERROR:
418 case SCI_STP_DEV_AWAIT_RESET:
419 case SCI_DEV_STOPPING:
420 case SCI_DEV_FAILED:
421 case SCI_DEV_RESETTING: {
422 struct isci_request *ireq;
423 struct ssp_frame_hdr hdr;
424 void *frame_header;
425 ssize_t word_cnt;
426
427 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
428 frame_index,
429 &frame_header);
430 if (status != SCI_SUCCESS)
431 return status;
432
433 word_cnt = sizeof(hdr) / sizeof(u32);
434 sci_swab32_cpy(&hdr, frame_header, word_cnt);
435
436 ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
437 if (ireq && ireq->target_device == idev) {
438 /* The IO request is now in charge of releasing the frame */
439 status = sci_io_request_frame_handler(ireq, frame_index);
440 } else {
441 /* We could not map this tag to a valid IO
442 * request. Just toss the frame and continue.
443 */
444 sci_controller_release_frame(ihost, frame_index);
445 }
446 break;
447 }
448 case SCI_STP_DEV_NCQ: {
449 struct dev_to_host_fis *hdr;
450
451 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
452 frame_index,
453 (void **)&hdr);
454 if (status != SCI_SUCCESS)
455 return status;
456
457 if (hdr->fis_type == FIS_SETDEVBITS &&
458 (hdr->status & ATA_ERR)) {
459 idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
460
461 /* TODO Check sactive and complete associated IO if any. */
462 sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
463 } else if (hdr->fis_type == FIS_REGD2H &&
464 (hdr->status & ATA_ERR)) {
465 /*
466 * Some devices return D2H FIS when an NCQ error is detected.
467 * Treat this the same as an SDB error FIS (same not-ready reason).
468 */
469 idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
470 sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
471 } else
472 status = SCI_FAILURE;
473
474 sci_controller_release_frame(ihost, frame_index);
475 break;
476 }
477 case SCI_STP_DEV_CMD:
478 case SCI_SMP_DEV_CMD:
479 /* The device does not process any UF received from the hardware while
480 * in this state. All unsolicited frames are forwarded to the io request
481 * object.
482 */
483 status = sci_io_request_frame_handler(idev->working_request, frame_index);
484 break;
485 }
486
487 return status;
488 }
489
490 static bool is_remote_device_ready(struct isci_remote_device *idev)
491 {
492
493 struct sci_base_state_machine *sm = &idev->sm;
494 enum sci_remote_device_states state = sm->current_state_id;
495
496 switch (state) {
497 case SCI_DEV_READY:
498 case SCI_STP_DEV_IDLE:
499 case SCI_STP_DEV_CMD:
500 case SCI_STP_DEV_NCQ:
501 case SCI_STP_DEV_NCQ_ERROR:
502 case SCI_STP_DEV_AWAIT_RESET:
503 case SCI_SMP_DEV_IDLE:
504 case SCI_SMP_DEV_CMD:
505 return true;
506 default:
507 return false;
508 }
509 }
510
511 /*
512 * called once the remote node context has transitioned to a ready
513 * state (after suspending RX and/or TX due to early D2H fis)
514 */
515 static void atapi_remote_device_resume_done(void *_dev)
516 {
517 struct isci_remote_device *idev = _dev;
518 struct isci_request *ireq = idev->working_request;
519
520 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
521 }
522
523 enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
524 u32 event_code)
525 {
526 enum sci_status status;
527 struct sci_base_state_machine *sm = &idev->sm;
528 enum sci_remote_device_states state = sm->current_state_id;
529
530 switch (scu_get_event_type(event_code)) {
531 case SCU_EVENT_TYPE_RNC_OPS_MISC:
532 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
533 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
534 status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
535 break;
536 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
537 if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
538 status = SCI_SUCCESS;
539
540 /* Suspend the associated RNC */
541 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
542
543 dev_dbg(scirdev_to_dev(idev),
544 "%s: device: %p event code: %x: %s\n",
545 __func__, idev, event_code,
546 is_remote_device_ready(idev)
547 ? "I_T_Nexus_Timeout event"
548 : "I_T_Nexus_Timeout event in wrong state");
549
550 break;
551 }
552 fallthrough; /* and treat as unhandled */
553 default:
554 dev_dbg(scirdev_to_dev(idev),
555 "%s: device: %p event code: %x: %s\n",
556 __func__, idev, event_code,
557 is_remote_device_ready(idev)
558 ? "unexpected event"
559 : "unexpected event in wrong state");
560 status = SCI_FAILURE_INVALID_STATE;
561 break;
562 }
563
564 if (status != SCI_SUCCESS)
565 return status;
566
567 /* Decode device-specific states that may require an RNC resume during
568 * normal operation. When the abort path is active, these resumes are
569 * managed when the abort path exits.
570 */
571 if (state == SCI_STP_DEV_ATAPI_ERROR) {
572 /* For ATAPI error state resume the RNC right away. */
573 if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
574 scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
575 return sci_remote_node_context_resume(&idev->rnc,
576 atapi_remote_device_resume_done,
577 idev);
578 }
579 }
580
581 if (state == SCI_STP_DEV_IDLE) {
582
583 /* We pick up suspension events to handle specifically in this
584 * state. We resume the RNC right away.
585 */
586 if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
587 scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
588 status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
589 }
590
591 return status;
592 }
593
594 static void sci_remote_device_start_request(struct isci_remote_device *idev,
595 struct isci_request *ireq,
596 enum sci_status status)
597 {
598 struct isci_port *iport = idev->owning_port;
599
600 /* cleanup requests that failed after starting on the port */
601 if (status != SCI_SUCCESS)
602 sci_port_complete_io(iport, idev, ireq);
603 else {
604 kref_get(&idev->kref);
605 idev->started_request_count++;
606 }
607 }
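/* Accounting sketch, illustrative only: the kref_get()/started_request_count
 * increment above is balanced on the completion path, e.g. in
 * sci_remote_device_complete_io() below:
 *
 *	common_complete_io()  ->  sci_remote_device_decrement_request_count(idev);
 *	...                       isci_put_device(idev);
 */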
608
609 enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
610 struct isci_remote_device *idev,
611 struct isci_request *ireq)
612 {
613 struct sci_base_state_machine *sm = &idev->sm;
614 enum sci_remote_device_states state = sm->current_state_id;
615 struct isci_port *iport = idev->owning_port;
616 enum sci_status status;
617
618 switch (state) {
619 case SCI_DEV_INITIAL:
620 case SCI_DEV_STOPPED:
621 case SCI_DEV_STARTING:
622 case SCI_STP_DEV_NCQ_ERROR:
623 case SCI_DEV_STOPPING:
624 case SCI_DEV_FAILED:
625 case SCI_DEV_RESETTING:
626 case SCI_DEV_FINAL:
627 default:
628 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
629 __func__, dev_state_name(state));
630 return SCI_FAILURE_INVALID_STATE;
631 case SCI_DEV_READY:
632 /* attempt to start an io request for this device object. The remote
633 * device object will issue the start request for the io and, if
634 * successful, start the request for the port object and then
635 * increment its own request count.
636 */
637 status = sci_port_start_io(iport, idev, ireq);
638 if (status != SCI_SUCCESS)
639 return status;
640
641 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
642 if (status != SCI_SUCCESS)
643 break;
644
645 status = sci_request_start(ireq);
646 break;
647 case SCI_STP_DEV_IDLE: {
648 /* handle the start io operation for a sata device that is in
649 * the command idle state. Evaluate the type of IO request to be
650 * started: if it is an NCQ request, change to the NCQ substate;
651 * for any other command, change to the CMD substate.
652 *
653 * If this is a softreset we may want to have a different
654 * substate.
655 */
656 enum sci_remote_device_states new_state;
657 struct sas_task *task = isci_request_access_task(ireq);
658
659 status = sci_port_start_io(iport, idev, ireq);
660 if (status != SCI_SUCCESS)
661 return status;
662
663 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
664 if (status != SCI_SUCCESS)
665 break;
666
667 status = sci_request_start(ireq);
668 if (status != SCI_SUCCESS)
669 break;
670
671 if (task->ata_task.use_ncq)
672 new_state = SCI_STP_DEV_NCQ;
673 else {
674 idev->working_request = ireq;
675 new_state = SCI_STP_DEV_CMD;
676 }
677 sci_change_state(sm, new_state);
678 break;
679 }
680 case SCI_STP_DEV_NCQ: {
681 struct sas_task *task = isci_request_access_task(ireq);
682
683 if (task->ata_task.use_ncq) {
684 status = sci_port_start_io(iport, idev, ireq);
685 if (status != SCI_SUCCESS)
686 return status;
687
688 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
689 if (status != SCI_SUCCESS)
690 break;
691
692 status = sci_request_start(ireq);
693 } else
694 return SCI_FAILURE_INVALID_STATE;
695 break;
696 }
697 case SCI_STP_DEV_AWAIT_RESET:
698 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
699 case SCI_SMP_DEV_IDLE:
700 status = sci_port_start_io(iport, idev, ireq);
701 if (status != SCI_SUCCESS)
702 return status;
703
704 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
705 if (status != SCI_SUCCESS)
706 break;
707
708 status = sci_request_start(ireq);
709 if (status != SCI_SUCCESS)
710 break;
711
712 idev->working_request = ireq;
713 sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
714 break;
715 case SCI_STP_DEV_CMD:
716 case SCI_SMP_DEV_CMD:
717 /* device is already handling a command; it cannot accept new commands
718 * until this one is complete.
719 */
720 return SCI_FAILURE_INVALID_STATE;
721 }
722
723 sci_remote_device_start_request(idev, ireq, status);
724 return status;
725 }
726
727 static enum sci_status common_complete_io(struct isci_port *iport,
728 struct isci_remote_device *idev,
729 struct isci_request *ireq)
730 {
731 enum sci_status status;
732
733 status = sci_request_complete(ireq);
734 if (status != SCI_SUCCESS)
735 return status;
736
737 status = sci_port_complete_io(iport, idev, ireq);
738 if (status != SCI_SUCCESS)
739 return status;
740
741 sci_remote_device_decrement_request_count(idev);
742 return status;
743 }
744
745 enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
746 struct isci_remote_device *idev,
747 struct isci_request *ireq)
748 {
749 struct sci_base_state_machine *sm = &idev->sm;
750 enum sci_remote_device_states state = sm->current_state_id;
751 struct isci_port *iport = idev->owning_port;
752 enum sci_status status;
753
754 switch (state) {
755 case SCI_DEV_INITIAL:
756 case SCI_DEV_STOPPED:
757 case SCI_DEV_STARTING:
758 case SCI_STP_DEV_IDLE:
759 case SCI_SMP_DEV_IDLE:
760 case SCI_DEV_FAILED:
761 case SCI_DEV_FINAL:
762 default:
763 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
764 __func__, dev_state_name(state));
765 return SCI_FAILURE_INVALID_STATE;
766 case SCI_DEV_READY:
767 case SCI_STP_DEV_AWAIT_RESET:
768 case SCI_DEV_RESETTING:
769 status = common_complete_io(iport, idev, ireq);
770 break;
771 case SCI_STP_DEV_CMD:
772 case SCI_STP_DEV_NCQ:
773 case SCI_STP_DEV_NCQ_ERROR:
774 case SCI_STP_DEV_ATAPI_ERROR:
775 status = common_complete_io(iport, idev, ireq);
776 if (status != SCI_SUCCESS)
777 break;
778
779 if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
780 /* This request caused a hardware error; the device needs a LUN reset.
781 * Force the state machine into the AWAIT_RESET state so the remaining
782 * I/Os reach the RNC state handler and are completed by the RNC with a
783 * status of "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
784 */
785 sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
786 } else if (idev->started_request_count == 0)
787 sci_change_state(sm, SCI_STP_DEV_IDLE);
788 break;
789 case SCI_SMP_DEV_CMD:
790 status = common_complete_io(iport, idev, ireq);
791 if (status != SCI_SUCCESS)
792 break;
793 sci_change_state(sm, SCI_SMP_DEV_IDLE);
794 break;
795 case SCI_DEV_STOPPING:
796 status = common_complete_io(iport, idev, ireq);
797 if (status != SCI_SUCCESS)
798 break;
799
800 if (idev->started_request_count == 0)
801 sci_remote_node_context_destruct(&idev->rnc,
802 rnc_destruct_done,
803 idev);
804 break;
805 }
806
807 if (status != SCI_SUCCESS)
808 dev_err(scirdev_to_dev(idev),
809 "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
810 "could not complete\n", __func__, iport,
811 idev, ireq, status);
812 else
813 isci_put_device(idev);
814
815 return status;
816 }
817
818 static void sci_remote_device_continue_request(void *dev)
819 {
820 struct isci_remote_device *idev = dev;
821
822 /* we need to check if this request is still valid to continue. */
823 if (idev->working_request)
824 sci_controller_continue_io(idev->working_request);
825 }
826
827 enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
828 struct isci_remote_device *idev,
829 struct isci_request *ireq)
830 {
831 struct sci_base_state_machine *sm = &idev->sm;
832 enum sci_remote_device_states state = sm->current_state_id;
833 struct isci_port *iport = idev->owning_port;
834 enum sci_status status;
835
836 switch (state) {
837 case SCI_DEV_INITIAL:
838 case SCI_DEV_STOPPED:
839 case SCI_DEV_STARTING:
840 case SCI_SMP_DEV_IDLE:
841 case SCI_SMP_DEV_CMD:
842 case SCI_DEV_STOPPING:
843 case SCI_DEV_FAILED:
844 case SCI_DEV_RESETTING:
845 case SCI_DEV_FINAL:
846 default:
847 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
848 __func__, dev_state_name(state));
849 return SCI_FAILURE_INVALID_STATE;
850 case SCI_STP_DEV_IDLE:
851 case SCI_STP_DEV_CMD:
852 case SCI_STP_DEV_NCQ:
853 case SCI_STP_DEV_NCQ_ERROR:
854 case SCI_STP_DEV_AWAIT_RESET:
855 status = sci_port_start_io(iport, idev, ireq);
856 if (status != SCI_SUCCESS)
857 return status;
858
859 status = sci_request_start(ireq);
860 if (status != SCI_SUCCESS)
861 goto out;
862
863 /* Note: If the remote device state is not IDLE this will
864 * replace the request that probably resulted in the task
865 * management request.
866 */
867 idev->working_request = ireq;
868 sci_change_state(sm, SCI_STP_DEV_CMD);
869
870 /* The remote node context must cleanup the TCi to NCQ mapping
871 * table. The only way to do this correctly is to either write
872 * to the TLCR register or to invalidate and repost the RNC. In
873 * either case the remote node context state machine will take
874 * the correct action when the remote node context is suspended
875 * and later resumed.
876 */
877 sci_remote_device_suspend(idev,
878 SCI_SW_SUSPEND_LINKHANG_DETECT);
879
880 status = sci_remote_node_context_start_task(&idev->rnc, ireq,
881 sci_remote_device_continue_request, idev);
882
883 out:
884 sci_remote_device_start_request(idev, ireq, status);
885 /* We need to let the controller start request handler know that
886 * it can't post TC yet. We will provide a callback function to
887 * post TC when RNC gets resumed.
888 */
889 return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
890 case SCI_DEV_READY:
891 status = sci_port_start_io(iport, idev, ireq);
892 if (status != SCI_SUCCESS)
893 return status;
894
895 /* Resume the RNC as needed: */
896 status = sci_remote_node_context_start_task(&idev->rnc, ireq,
897 NULL, NULL);
898 if (status != SCI_SUCCESS)
899 break;
900
901 status = sci_request_start(ireq);
902 break;
903 }
904 sci_remote_device_start_request(idev, ireq, status);
905
906 return status;
907 }
908
909 void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
910 {
911 struct isci_port *iport = idev->owning_port;
912 u32 context;
913
914 context = request |
915 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
916 (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
917 idev->rnc.remote_node_index;
918
919 sci_controller_post_request(iport->owning_controller, context);
920 }
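/* Layout note, illustrative only: the posted context word packs the routing
 * information the SCU needs to deliver the request:
 *
 *	context = request type
 *		| protocol engine group  (ISCI_PEG)
 *		| logical port index     (iport->physical_port_index)
 *		| remote node index      (idev->rnc.remote_node_index)
 */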
921
922 /* called once the remote node context has transitioned to a
923 * ready state. This is the indication that the remote device object can also
924 * transition to ready.
925 */
926 static void remote_device_resume_done(void *_dev)
927 {
928 struct isci_remote_device *idev = _dev;
929
930 if (is_remote_device_ready(idev))
931 return;
932
933 /* go 'ready' if we are not already in a ready state */
934 sci_change_state(&idev->sm, SCI_DEV_READY);
935 }
936
937 static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
938 {
939 struct isci_remote_device *idev = _dev;
940 struct isci_host *ihost = idev->owning_port->owning_controller;
941
942 /* For NCQ operation we do not issue an isci_remote_device_not_ready().
943 * As a result, avoid sending the ready notification.
944 */
945 if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
946 isci_remote_device_ready(ihost, idev);
947 }
948
949 static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
950 {
951 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
952
953 /* Initial state is a transitional state to the stopped state */
954 sci_change_state(&idev->sm, SCI_DEV_STOPPED);
955 }
956
957 /**
958 * sci_remote_device_destruct() - free remote node context and destruct
959 * @idev: This parameter specifies the remote device to be destructed.
960 *
961 * Remote device objects are a limited resource. As such, they must be
962 * protected. Thus calls to construct and destruct are mutually exclusive and
963 * non-reentrant. The return value indicates whether the device was
964 * successfully destructed or if some failure occurred. SCI_SUCCESS is
965 * returned if the device is successfully destructed.
966 * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied
967 * device isn't valid (e.g. it's already been destroyed, the handle isn't
968 * valid, etc.).
969 */
970 static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
971 {
972 struct sci_base_state_machine *sm = &idev->sm;
973 enum sci_remote_device_states state = sm->current_state_id;
974 struct isci_host *ihost;
975
976 if (state != SCI_DEV_STOPPED) {
977 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
978 __func__, dev_state_name(state));
979 return SCI_FAILURE_INVALID_STATE;
980 }
981
982 ihost = idev->owning_port->owning_controller;
983 sci_controller_free_remote_node_context(ihost, idev,
984 idev->rnc.remote_node_index);
985 idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
986 sci_change_state(sm, SCI_DEV_FINAL);
987
988 return SCI_SUCCESS;
989 }
990
991 /**
992 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
993 * @ihost: This parameter specifies the isci host object.
994 * @idev: This parameter specifies the remote device to be freed.
995 *
996 */
997 static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
998 {
999 dev_dbg(&ihost->pdev->dev,
1000 "%s: isci_device = %p\n", __func__, idev);
1001
1002 /* There should not be any outstanding I/Os. All paths to
1003 * here should go through isci_remote_device_nuke_requests.
1004 * If we hit this condition, we will need a way to complete
1005 * I/O requests that are still in progress. */
1006 BUG_ON(idev->started_request_count > 0);
1007
1008 sci_remote_device_destruct(idev);
1009 list_del_init(&idev->node);
1010 isci_put_device(idev);
1011 }
1012
1013 static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
1014 {
1015 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1016 struct isci_host *ihost = idev->owning_port->owning_controller;
1017 u32 prev_state;
1018
1019 /* If we are entering from the stopping state let the SCI User know that
1020 * the stop operation has completed.
1021 */
1022 prev_state = idev->sm.previous_state_id;
1023 if (prev_state == SCI_DEV_STOPPING)
1024 isci_remote_device_deconstruct(ihost, idev);
1025
1026 sci_controller_remote_device_stopped(ihost, idev);
1027 }
1028
1029 static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
1030 {
1031 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1032 struct isci_host *ihost = idev->owning_port->owning_controller;
1033
1034 isci_remote_device_not_ready(ihost, idev,
1035 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
1036 }
1037
1038 static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
1039 {
1040 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1041 struct isci_host *ihost = idev->owning_port->owning_controller;
1042 struct domain_device *dev = idev->domain_dev;
1043
1044 if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
1045 sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
1046 } else if (dev_is_expander(dev->dev_type)) {
1047 sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
1048 } else
1049 isci_remote_device_ready(ihost, idev);
1050 }
1051
1052 static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
1053 {
1054 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1055 struct domain_device *dev = idev->domain_dev;
1056
1057 if (dev->dev_type == SAS_END_DEVICE) {
1058 struct isci_host *ihost = idev->owning_port->owning_controller;
1059
1060 isci_remote_device_not_ready(ihost, idev,
1061 SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
1062 }
1063 }
1064
1065 static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
1066 {
1067 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1068 struct isci_host *ihost = idev->owning_port->owning_controller;
1069
1070 dev_dbg(&ihost->pdev->dev,
1071 "%s: isci_device = %p\n", __func__, idev);
1072
1073 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
1074 }
1075
1076 static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
1077 {
1078 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1079 struct isci_host *ihost = idev->owning_port->owning_controller;
1080
1081 dev_dbg(&ihost->pdev->dev,
1082 "%s: isci_device = %p\n", __func__, idev);
1083
1084 sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
1085 }
1086
1087 static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1088 {
1089 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1090
1091 idev->working_request = NULL;
1092 if (sci_remote_node_context_is_ready(&idev->rnc)) {
1093 /*
1094 * Since the RNC is ready, it's alright to finish completion
1095 * processing (e.g. signal the remote device is ready). */
1096 sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
1097 } else {
1098 sci_remote_node_context_resume(&idev->rnc,
1099 sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
1100 idev);
1101 }
1102 }
1103
1104 static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1105 {
1106 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1107 struct isci_host *ihost = idev->owning_port->owning_controller;
1108
1109 BUG_ON(idev->working_request == NULL);
1110
1111 isci_remote_device_not_ready(ihost, idev,
1112 SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
1113 }
1114
1115 static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
1116 {
1117 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1118 struct isci_host *ihost = idev->owning_port->owning_controller;
1119
1120 if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
1121 isci_remote_device_not_ready(ihost, idev,
1122 idev->not_ready_reason);
1123 }
1124
1125 static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1126 {
1127 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1128 struct isci_host *ihost = idev->owning_port->owning_controller;
1129
1130 isci_remote_device_ready(ihost, idev);
1131 }
1132
1133 static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1134 {
1135 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1136 struct isci_host *ihost = idev->owning_port->owning_controller;
1137
1138 BUG_ON(idev->working_request == NULL);
1139
1140 isci_remote_device_not_ready(ihost, idev,
1141 SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
1142 }
1143
1144 static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
1145 {
1146 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1147
1148 idev->working_request = NULL;
1149 }
1150
1151 static const struct sci_base_state sci_remote_device_state_table[] = {
1152 [SCI_DEV_INITIAL] = {
1153 .enter_state = sci_remote_device_initial_state_enter,
1154 },
1155 [SCI_DEV_STOPPED] = {
1156 .enter_state = sci_remote_device_stopped_state_enter,
1157 },
1158 [SCI_DEV_STARTING] = {
1159 .enter_state = sci_remote_device_starting_state_enter,
1160 },
1161 [SCI_DEV_READY] = {
1162 .enter_state = sci_remote_device_ready_state_enter,
1163 .exit_state = sci_remote_device_ready_state_exit
1164 },
1165 [SCI_STP_DEV_IDLE] = {
1166 .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
1167 },
1168 [SCI_STP_DEV_CMD] = {
1169 .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
1170 },
1171 [SCI_STP_DEV_NCQ] = { },
1172 [SCI_STP_DEV_NCQ_ERROR] = {
1173 .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
1174 },
1175 [SCI_STP_DEV_ATAPI_ERROR] = { },
1176 [SCI_STP_DEV_AWAIT_RESET] = { },
1177 [SCI_SMP_DEV_IDLE] = {
1178 .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
1179 },
1180 [SCI_SMP_DEV_CMD] = {
1181 .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
1182 .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
1183 },
1184 [SCI_DEV_STOPPING] = { },
1185 [SCI_DEV_FAILED] = { },
1186 [SCI_DEV_RESETTING] = {
1187 .enter_state = sci_remote_device_resetting_state_enter,
1188 .exit_state = sci_remote_device_resetting_state_exit
1189 },
1190 [SCI_DEV_FINAL] = { },
1191 };
1192
1193 /**
1194 * sci_remote_device_construct() - common construction
1195 * @iport: SAS/SATA port through which this device is accessed.
1196 * @idev: remote device to construct
1197 *
1198 * This routine just performs benign initialization and does not
1199 * allocate the remote_node_context which is left to
1200 * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
1201 * frees the remote_node_context(s) for the device.
1202 */
1203 static void sci_remote_device_construct(struct isci_port *iport,
1204 struct isci_remote_device *idev)
1205 {
1206 idev->owning_port = iport;
1207 idev->started_request_count = 0;
1208
1209 sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
1210
1211 sci_remote_node_context_construct(&idev->rnc,
1212 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
1213 }
1214
1215 /*
1216 * sci_remote_device_da_construct() - construct direct attached device.
1217 *
1218 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
1219 * the device is known to the SCI Core since it is contained in the
1220 * sci_phy object. Remote node context(s) is/are a global resource
1221 * allocated by this routine, freed by sci_remote_device_destruct().
1222 *
1223 * Returns:
1224 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1225 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1226 * sata-only controller instance.
1227 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1228 */
1229 static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1230 struct isci_remote_device *idev)
1231 {
1232 enum sci_status status;
1233 struct sci_port_properties properties;
1234
1235 sci_remote_device_construct(iport, idev);
1236
1237 sci_port_get_properties(iport, &properties);
1238 /* Get accurate port width from port's phy mask for a DA device. */
1239 idev->device_port_width = hweight32(properties.phy_mask);
1240
1241 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1242 idev,
1243 &idev->rnc.remote_node_index);
1244
1245 if (status != SCI_SUCCESS)
1246 return status;
1247
1248 idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1249
1250 return SCI_SUCCESS;
1251 }
1252
1253 /*
1254 * sci_remote_device_ea_construct() - construct expander attached device
1255 *
1256 * Remote node context(s) is/are a global resource allocated by this
1257 * routine, freed by sci_remote_device_destruct().
1258 *
1259 * Returns:
1260 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1261 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1262 * sata-only controller instance.
1263 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1264 */
1265 static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1266 struct isci_remote_device *idev)
1267 {
1268 struct domain_device *dev = idev->domain_dev;
1269 enum sci_status status;
1270
1271 sci_remote_device_construct(iport, idev);
1272
1273 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1274 idev,
1275 &idev->rnc.remote_node_index);
1276 if (status != SCI_SUCCESS)
1277 return status;
1278
1279 /* For SAS-2 the physical link rate is actually a logical link
1280 * rate that incorporates multiplexing. The SCU doesn't
1281 * incorporate multiplexing and for the purposes of the
1282 * connection the logical link rate is the same as the
1283 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
1284 * one another, so this code works for both situations.
1285 */
1286 idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
1287 dev->linkrate);
1288
1289 /* @todo Should I assign the port width by reading all of the phys on the port? */
1290 idev->device_port_width = 1;
1291
1292 return SCI_SUCCESS;
1293 }
1294
1295 enum sci_status sci_remote_device_resume(
1296 struct isci_remote_device *idev,
1297 scics_sds_remote_node_context_callback cb_fn,
1298 void *cb_p)
1299 {
1300 enum sci_status status;
1301
1302 status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
1303 if (status != SCI_SUCCESS)
1304 dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
1305 __func__, status);
1306 return status;
1307 }
1308
1309 static void isci_remote_device_resume_from_abort_complete(void *cbparam)
1310 {
1311 struct isci_remote_device *idev = cbparam;
1312 struct isci_host *ihost = idev->owning_port->owning_controller;
1313 scics_sds_remote_node_context_callback abort_resume_cb =
1314 idev->abort_resume_cb;
1315
1316 dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
1317 __func__, abort_resume_cb);
1318
1319 if (abort_resume_cb != NULL) {
1320 idev->abort_resume_cb = NULL;
1321 abort_resume_cb(idev->abort_resume_cbparam);
1322 }
1323 clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1324 wake_up(&ihost->eventq);
1325 }
1326
1327 static bool isci_remote_device_test_resume_done(
1328 struct isci_host *ihost,
1329 struct isci_remote_device *idev)
1330 {
1331 unsigned long flags;
1332 bool done;
1333
1334 spin_lock_irqsave(&ihost->scic_lock, flags);
1335 done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
1336 || test_bit(IDEV_STOP_PENDING, &idev->flags)
1337 || sci_remote_node_context_is_being_destroyed(&idev->rnc);
1338 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1339
1340 return done;
1341 }
1342
1343 static void isci_remote_device_wait_for_resume_from_abort(
1344 struct isci_host *ihost,
1345 struct isci_remote_device *idev)
1346 {
1347 dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
1348 __func__, idev);
1349
1350 #define MAX_RESUME_MSECS 10000
1351 if (!wait_event_timeout(ihost->eventq,
1352 isci_remote_device_test_resume_done(ihost, idev),
1353 msecs_to_jiffies(MAX_RESUME_MSECS))) {
1354
1355 dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
1356 "resume: %p\n", __func__, idev);
1357 }
1358 clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1359
1360 dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
1361 __func__, idev);
1362 }
1363
1364 enum sci_status isci_remote_device_resume_from_abort(
1365 struct isci_host *ihost,
1366 struct isci_remote_device *idev)
1367 {
1368 unsigned long flags;
1369 enum sci_status status = SCI_SUCCESS;
1370 int destroyed;
1371
1372 spin_lock_irqsave(&ihost->scic_lock, flags);
1373 /* Preserve any current resume callbacks, for instance from other
1374 * resumptions.
1375 */
1376 idev->abort_resume_cb = idev->rnc.user_callback;
1377 idev->abort_resume_cbparam = idev->rnc.user_cookie;
1378 set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1379 clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1380 destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
1381 if (!destroyed)
1382 status = sci_remote_device_resume(
1383 idev, isci_remote_device_resume_from_abort_complete,
1384 idev);
1385 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1386 if (!destroyed && (status == SCI_SUCCESS))
1387 isci_remote_device_wait_for_resume_from_abort(ihost, idev);
1388 else
1389 clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1390
1391 return status;
1392 }
1393
1394 /**
1395 * sci_remote_device_start() - This method will start the supplied remote
1396 * device. This method enables normal IO requests to flow through to the
1397 * remote device.
1398 * @idev: This parameter specifies the device to be started.
1399 * @timeout: This parameter specifies the number of milliseconds in which the
1400 * start operation should complete.
1401 *
1402 * An indication of whether the device was successfully started. SCI_SUCCESS
1403 * This value is returned if the device was successfully started.
1404 * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
1405 * the device when there have been no phys added to it.
1406 */
1407 static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1408 u32 timeout)
1409 {
1410 struct sci_base_state_machine *sm = &idev->sm;
1411 enum sci_remote_device_states state = sm->current_state_id;
1412 enum sci_status status;
1413
1414 if (state != SCI_DEV_STOPPED) {
1415 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1416 __func__, dev_state_name(state));
1417 return SCI_FAILURE_INVALID_STATE;
1418 }
1419
1420 status = sci_remote_device_resume(idev, remote_device_resume_done,
1421 idev);
1422 if (status != SCI_SUCCESS)
1423 return status;
1424
1425 sci_change_state(sm, SCI_DEV_STARTING);
1426
1427 return SCI_SUCCESS;
1428 }
1429
1430 static enum sci_status isci_remote_device_construct(struct isci_port *iport,
1431 struct isci_remote_device *idev)
1432 {
1433 struct isci_host *ihost = iport->isci_host;
1434 struct domain_device *dev = idev->domain_dev;
1435 enum sci_status status;
1436
1437 if (dev->parent && dev_is_expander(dev->parent->dev_type))
1438 status = sci_remote_device_ea_construct(iport, idev);
1439 else
1440 status = sci_remote_device_da_construct(iport, idev);
1441
1442 if (status != SCI_SUCCESS) {
1443 dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
1444 __func__, status);
1445
1446 return status;
1447 }
1448
1449 /* start the device. */
1450 status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
1451
1452 if (status != SCI_SUCCESS)
1453 dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
1454 status);
1455
1456 return status;
1457 }
1458
1459 /**
1460 * isci_remote_device_alloc() - This function builds the isci_remote_device
1461 * when a libsas dev_found message is received.
1462 *
1463 * @ihost: This parameter specifies the isci host object.
1464 * @iport: This parameter specifies the isci_port connected to this device.
1465 *
1466 * pointer to new isci_remote_device.
1467 */
1468 static struct isci_remote_device *
1469 isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
1470 {
1471 struct isci_remote_device *idev;
1472 int i;
1473
1474 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
1475 idev = &ihost->devices[i];
1476 if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
1477 break;
1478 }
1479
1480 if (i >= SCI_MAX_REMOTE_DEVICES) {
1481 dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
1482 return NULL;
1483 }
1484 if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
1485 return NULL;
1486
1487 return idev;
1488 }
1489
1490 void isci_remote_device_release(struct kref *kref)
1491 {
1492 struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
1493 struct isci_host *ihost = idev->isci_port->isci_host;
1494
1495 idev->domain_dev = NULL;
1496 idev->isci_port = NULL;
1497 clear_bit(IDEV_START_PENDING, &idev->flags);
1498 clear_bit(IDEV_STOP_PENDING, &idev->flags);
1499 clear_bit(IDEV_IO_READY, &idev->flags);
1500 clear_bit(IDEV_GONE, &idev->flags);
1501 smp_mb__before_atomic();
1502 clear_bit(IDEV_ALLOCATED, &idev->flags);
1503 wake_up(&ihost->eventq);
1504 }
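/* Lifecycle sketch, illustrative only: this is the kref release callback;
 * the isci_get_device()/isci_put_device() helpers used throughout this file
 * are assumed to be thin kref wrappers along these lines (the real helpers
 * live in remote_device.h):
 *
 *	static inline void isci_put_device(struct isci_remote_device *idev)
 *	{
 *		kref_put(&idev->kref, isci_remote_device_release);
 *	}
 */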
1505
1506 /**
1507 * isci_remote_device_stop() - This function is called internally to stop the
1508 * remote device.
1509 * @ihost: This parameter specifies the isci host object.
1510 * @idev: This parameter specifies the remote device.
1511 *
1512 * The status of the ihost request to stop.
1513 */
1514 enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
1515 {
1516 enum sci_status status;
1517 unsigned long flags;
1518
1519 dev_dbg(&ihost->pdev->dev,
1520 "%s: isci_device = %p\n", __func__, idev);
1521
1522 spin_lock_irqsave(&ihost->scic_lock, flags);
1523 idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
1524 set_bit(IDEV_GONE, &idev->flags);
1525
1526 set_bit(IDEV_STOP_PENDING, &idev->flags);
1527 status = sci_remote_device_stop(idev, 50);
1528 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1529
1530 /* Wait for the stop complete callback. */
1531 if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
1532 /* nothing to wait for */;
1533 else
1534 wait_for_device_stop(ihost, idev);
1535
1536 dev_dbg(&ihost->pdev->dev,
1537 "%s: isci_device = %p, waiting done.\n", __func__, idev);
1538
1539 return status;
1540 }
1541
1542 /**
1543 * isci_remote_device_gone() - This function is called by libsas when a domain
1544 * device is removed.
1545 * @dev: This parameter specifies the libsas domain device.
1546 */
1547 void isci_remote_device_gone(struct domain_device *dev)
1548 {
1549 struct isci_host *ihost = dev_to_ihost(dev);
1550 struct isci_remote_device *idev = dev->lldd_dev;
1551
1552 dev_dbg(&ihost->pdev->dev,
1553 "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
1554 __func__, dev, idev, idev->isci_port);
1555
1556 isci_remote_device_stop(ihost, idev);
1557 }
1558
1559
1560 /**
1561 * isci_remote_device_found() - This function is called by libsas when a remote
1562 * device is discovered. A remote device object is created and started. The
1563 * function then sleeps until the sci core device started message is
1564 * received.
1565 * @dev: This parameter specifies the libsas domain device.
1566 *
1567 * status, zero indicates success.
1568 */
1569 int isci_remote_device_found(struct domain_device *dev)
1570 {
1571 struct isci_host *isci_host = dev_to_ihost(dev);
1572 struct isci_port *isci_port = dev->port->lldd_port;
1573 struct isci_remote_device *isci_device;
1574 enum sci_status status;
1575
1576 dev_dbg(&isci_host->pdev->dev,
1577 "%s: domain_device = %p\n", __func__, dev);
1578
1579 if (!isci_port)
1580 return -ENODEV;
1581
1582 isci_device = isci_remote_device_alloc(isci_host, isci_port);
1583 if (!isci_device)
1584 return -ENODEV;
1585
1586 kref_init(&isci_device->kref);
1587 INIT_LIST_HEAD(&isci_device->node);
1588
1589 spin_lock_irq(&isci_host->scic_lock);
1590 isci_device->domain_dev = dev;
1591 isci_device->isci_port = isci_port;
1592 list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
1593
1594 set_bit(IDEV_START_PENDING, &isci_device->flags);
1595 status = isci_remote_device_construct(isci_port, isci_device);
1596
1597 dev_dbg(&isci_host->pdev->dev,
1598 "%s: isci_device = %p\n",
1599 __func__, isci_device);
1600
1601 if (status == SCI_SUCCESS) {
1602 /* device came up, advertise it to the world */
1603 dev->lldd_dev = isci_device;
1604 } else
1605 isci_put_device(isci_device);
1606 spin_unlock_irq(&isci_host->scic_lock);
1607
1608 /* wait for the device ready callback. */
1609 wait_for_device_start(isci_host, isci_device);
1610
1611 return status == SCI_SUCCESS ? 0 : -ENODEV;
1612 }
1613
1614 enum sci_status isci_remote_device_suspend_terminate(
1615 struct isci_host *ihost,
1616 struct isci_remote_device *idev,
1617 struct isci_request *ireq)
1618 {
1619 unsigned long flags;
1620 enum sci_status status;
1621
1622 /* Put the device into suspension. */
1623 spin_lock_irqsave(&ihost->scic_lock, flags);
1624 set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1625 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
1626 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1627
1628 /* Terminate and wait for the completions. */
1629 status = isci_remote_device_terminate_requests(ihost, idev, ireq);
1630 if (status != SCI_SUCCESS)
1631 dev_dbg(&ihost->pdev->dev,
1632 "%s: isci_remote_device_terminate_requests(%p) "
1633 "returned %d!\n",
1634 __func__, idev, status);
1635
1636 /* NOTE: RNC resumption is left to the caller! */
1637 return status;
1638 }
1639
1640 int isci_remote_device_is_safe_to_abort(
1641 struct isci_remote_device *idev)
1642 {
1643 return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
1644 }
1645
1646 enum sci_status sci_remote_device_abort_requests_pending_abort(
1647 struct isci_remote_device *idev)
1648 {
1649 return sci_remote_device_terminate_reqs_checkabort(idev, 1);
1650 }
1651
1652 void isci_dev_set_hang_detection_timeout(
1653 struct isci_remote_device *idev,
1654 u32 timeout)
1655 {
1656 if (dev_is_sata(idev->domain_dev)) {
1657 if (timeout) {
1658 if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
1659 &idev->flags))
1660 return; /* Already enabled. */
1661 } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
1662 &idev->flags))
1663 return; /* Not enabled. */
1664
1665 sci_port_set_hang_detection_timeout(idev->owning_port,
1666 timeout);
1667 }
1668 }
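/* Usage sketch, illustrative only: callers enable SATA link-layer hang
 * detection by passing a non-zero timeout and disable it again with 0; the
 * IDEV_RNC_LLHANG_ENABLED bit avoids redundant writes of the port setting:
 *
 *	isci_dev_set_hang_detection_timeout(idev, timeout);	enable
 *	isci_dev_set_hang_detection_timeout(idev, 0);		disable
 */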
1669