/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas.h>
#include <linux/bitops.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
#include "request.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "task.h"

#undef C
#define C(a) (#a)
const char *dev_state_name(enum sci_remote_device_states state)
{
	static const char * const strings[] = REMOTE_DEV_STATES;

	return strings[state];
}
#undef C

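/* Request a software suspension of the device's remote node context,
 * expecting the corresponding suspension event.
 */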
enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
					  enum sci_remote_node_suspension_reasons reason)
{
	return sci_remote_node_context_suspend(&idev->rnc, reason,
					       SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
}

/**
 * isci_remote_device_ready() - This function is called by the ihost when the
 *    remote device is ready.  We mark the isci device as ready and signal the
 *    waiting process.
 * @ihost: our valid isci_host
 * @idev: remote device
 *
 */
static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
	set_bit(IDEV_IO_READY, &idev->flags);
	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
		wake_up(&ihost->eventq);
}

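/* Terminate a single request on the device, unless it is inactive, belongs
 * to a different device, or (when check_abort is set) is not already marked
 * for abort.
 */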
static enum sci_status sci_remote_device_terminate_req(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	int check_abort,
	struct isci_request *ireq)
{
	if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
	    (ireq->target_device != idev) ||
	    (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
		return SCI_SUCCESS;

	dev_dbg(&ihost->pdev->dev,
		"%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
		__func__, idev, idev->flags, ireq, ireq->target_device);

	set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);

	return sci_controller_terminate_request(ihost, idev, ireq);
}

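/* Walk every I/O request slot in the controller and terminate those that
 * target this device; any failure status encountered is returned.
 */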
static enum sci_status sci_remote_device_terminate_reqs_checkabort(
	struct isci_remote_device *idev,
	int chk)
{
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	u32 i;

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq = ihost->reqs[i];
		enum sci_status s;

		s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
		if (s != SCI_SUCCESS)
			status = s;
	}
	return status;
}

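/* True if the RNC has been suspended again since "localcount" was sampled,
 * or if the RNC is being destroyed.
 */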
static bool isci_compare_suspendcount(
	struct isci_remote_device *idev,
	u32 localcount)
{
	smp_rmb();

	/* Check for a change in the suspend count, or the RNC
	 * being destroyed.
	 */
	return (localcount != idev->rnc.suspend_count)
		|| sci_remote_node_context_is_being_destroyed(&idev->rnc);
}

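/* Wait predicate: the suspend count has moved on (or the RNC is going away)
 * and the request is no longer on the abort path.
 */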
static bool isci_check_reqterm(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
		&& !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}

static bool isci_check_devempty(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
		&& idev->started_request_count == 0;
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}

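/**
 * isci_remote_device_terminate_requests() - terminate outstanding requests
 * @ihost: the controller that owns the device
 * @idev: the remote device whose request(s) are terminated
 * @ireq: a specific request to terminate, or NULL to terminate all of the
 *    device's started requests
 *
 * Suspends the device, terminates the request(s), and waits (bounded by
 * MAX_SUSPEND_MSECS) for the termination(s) to complete.
 */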
enum sci_status isci_remote_device_terminate_requests(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq)
{
	enum sci_status status = SCI_SUCCESS;
	unsigned long flags;
	u32 rnc_suspend_count;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (isci_get_device(idev) == NULL) {
		dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
			__func__, idev);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		status = SCI_FAILURE;
	} else {
		/* If already suspended, don't wait for another suspension. */
		smp_rmb();
		rnc_suspend_count
			= sci_remote_node_context_is_suspended(&idev->rnc)
				? 0 : idev->rnc.suspend_count;

		dev_dbg(&ihost->pdev->dev,
			"%s: idev=%p, ireq=%p; started_request_count=%d, "
			"rnc_suspend_count=%d, rnc.suspend_count=%d; "
			"about to wait\n",
			__func__, idev, ireq, idev->started_request_count,
			rnc_suspend_count, idev->rnc.suspend_count);

		#define MAX_SUSPEND_MSECS 10000
		if (ireq) {
			/* Terminate a specific TC. */
			set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
			sci_remote_device_terminate_req(ihost, idev, 0, ireq);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
			if (!wait_event_timeout(ihost->eventq,
						isci_check_reqterm(ihost, idev, ireq,
								   rnc_suspend_count),
						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {

				dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
					 __func__, ihost->id);
				dev_dbg(&ihost->pdev->dev,
					"%s: ******* Timeout waiting for "
					"suspend; idev=%p, current state %s; "
					"started_request_count=%d, flags=%lx\n\t"
					"rnc_suspend_count=%d, rnc.suspend_count=%d "
					"RNC: current state %s, current "
					"suspend_type %x dest state %d;\n"
					"ireq=%p, ireq->flags = %lx\n",
					__func__, idev,
					dev_state_name(idev->sm.current_state_id),
					idev->started_request_count, idev->flags,
					rnc_suspend_count, idev->rnc.suspend_count,
					rnc_state_name(idev->rnc.sm.current_state_id),
					idev->rnc.suspend_type,
					idev->rnc.destination_state,
					ireq, ireq->flags);
			}
			spin_lock_irqsave(&ihost->scic_lock, flags);
			clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
			if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
				isci_free_tag(ihost, ireq->io_tag);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		} else {
			/* Terminate all TCs. */
			sci_remote_device_terminate_requests(idev);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
			if (!wait_event_timeout(ihost->eventq,
						isci_check_devempty(ihost, idev,
								    rnc_suspend_count),
						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {

				dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
					 __func__, ihost->id);
				dev_dbg(&ihost->pdev->dev,
					"%s: ******* Timeout waiting for "
					"suspend; idev=%p, current state %s; "
					"started_request_count=%d, flags=%lx\n\t"
					"rnc_suspend_count=%d, "
					"RNC: current state %s, "
					"rnc.suspend_count=%d, current "
					"suspend_type %x dest state %d\n",
					__func__, idev,
					dev_state_name(idev->sm.current_state_id),
					idev->started_request_count, idev->flags,
					rnc_suspend_count,
					rnc_state_name(idev->rnc.sm.current_state_id),
					idev->rnc.suspend_count,
					idev->rnc.suspend_type,
					idev->rnc.destination_state);
			}
		}
		dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
			__func__, idev);
		isci_put_device(idev);
	}
	return status;
}

/**
 * isci_remote_device_not_ready() - This function is called by the ihost when
 *    the remote device is not ready.  We mark the isci device as not ready for
 *    I/O and, for a SATA SDB error FIS, suspend the device and terminate its
 *    outstanding requests.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device
 * @reason: Reason to switch on
 *
 * sci_lock is held on entrance to this function.
 */
static void isci_remote_device_not_ready(struct isci_host *ihost,
					 struct isci_remote_device *idev,
					 u32 reason)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p; reason = %d\n", __func__, idev, reason);

	switch (reason) {
	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
		set_bit(IDEV_IO_NCQERROR, &idev->flags);

		/* Suspend the remote device so the I/O can be terminated. */
		sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);

		/* Kill all outstanding requests for the device. */
		sci_remote_device_terminate_requests(idev);

		fallthrough;	/* into the default case */
	default:
		clear_bit(IDEV_IO_READY, &idev->flags);
		break;
	}
}

/* called once the remote node context is ready to be freed.
 * The remote device can now report that its stop operation is complete.
 */
static void rnc_destruct_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	BUG_ON(idev->started_request_count != 0);
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

enum sci_status sci_remote_device_terminate_requests(
	struct isci_remote_device *idev)
{
	return sci_remote_device_terminate_reqs_checkabort(idev, 0);
}

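/* Begin stopping the device: depending on the current state this either
 * destructs the remote node context immediately, or suspends the device and
 * terminates any started requests so the RNC teardown can complete later.
 */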
enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
				       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_STOPPED:
		return SCI_SUCCESS;
	case SCI_DEV_STARTING:
		/* device not started so there had better be no requests */
		BUG_ON(idev->started_request_count != 0);
		sci_remote_node_context_destruct(&idev->rnc,
						 rnc_destruct_done, idev);
		/* Transition to the stopping state and wait for the
		 * remote node to complete being posted and invalidated.
		 */
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		sci_change_state(sm, SCI_DEV_STOPPING);
		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		else {
			sci_remote_device_suspend(
				idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
			sci_remote_device_terminate_requests(idev);
		}
		return SCI_SUCCESS;
	case SCI_DEV_STOPPING:
		/* All requests should have been terminated, but if there is an
		 * attempt to stop a device already in the stopping state, then
		 * try again to terminate.
		 */
		return sci_remote_device_terminate_requests(idev);
	case SCI_DEV_RESETTING:
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		sci_change_state(sm, SCI_DEV_RESETTING);
		return SCI_SUCCESS;
	}
}

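/* Dispatch an unsolicited frame received for this device: route it to the
 * owning I/O request when one can be found, handle SATA NCQ error FISes, or
 * release the frame back to the controller.
 */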
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
						u32 frame_index)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		/* Return the frame back to the controller */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING: {
		struct isci_request *ireq;
		struct ssp_frame_hdr hdr;
		void *frame_header;
		ssize_t word_cnt;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  &frame_header);
		if (status != SCI_SUCCESS)
			return status;

		word_cnt = sizeof(hdr) / sizeof(u32);
		sci_swab32_cpy(&hdr, frame_header, word_cnt);

		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
		if (ireq && ireq->target_device == idev) {
			/* The IO request is now in charge of releasing the frame */
			status = sci_io_request_frame_handler(ireq, frame_index);
		} else {
			/* We could not map this tag to a valid IO request.
			 * Just toss the frame and continue.
			 */
			sci_controller_release_frame(ihost, frame_index);
		}
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct dev_to_host_fis *hdr;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&hdr);
		if (status != SCI_SUCCESS)
			return status;

		if (hdr->fis_type == FIS_SETDEVBITS &&
		    (hdr->status & ATA_ERR)) {
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;

			/* TODO Check sactive and complete associated IO if any. */
			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
		} else if (hdr->fis_type == FIS_REGD2H &&
			   (hdr->status & ATA_ERR)) {
			/*
			 * Some devices return a D2H FIS when an NCQ error is detected.
			 * Treat this like the SDB error FIS not-ready reason.
			 */
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
		} else
			status = SCI_FAILURE;

		sci_controller_release_frame(ihost, frame_index);
		break;
	}
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device does not process any UF received from the hardware while
		 * in this state.  All unsolicited frames are forwarded to the io request
		 * object.
		 */
		status = sci_io_request_frame_handler(idev->working_request, frame_index);
		break;
	}

	return status;
}

static bool is_remote_device_ready(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		return true;
	default:
		return false;
	}
}

/*
 * called once the remote node context has transitioned to a ready
 * state (after suspending RX and/or TX due to an early D2H FIS)
 */
static void atapi_remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_request *ireq = idev->working_request;

	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}

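/* Handle an SCU event for this device: RNC operation/suspension events are
 * forwarded to the remote node context, I_T nexus timeouts suspend the RNC,
 * and certain suspensions trigger an immediate RNC resume depending on the
 * device state.
 */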
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
						u32 event_code)
{
	enum sci_status status;
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
		break;
	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
			status = SCI_SUCCESS;

			/* Suspend the associated RNC */
			sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);

			dev_dbg(scirdev_to_dev(idev),
				"%s: device: %p event code: %x: %s\n",
				__func__, idev, event_code,
				is_remote_device_ready(idev)
				? "I_T_Nexus_Timeout event"
				: "I_T_Nexus_Timeout event in wrong state");

			break;
		}
		fallthrough;	/* and treat as unhandled */
	default:
		dev_dbg(scirdev_to_dev(idev),
			"%s: device: %p event code: %x: %s\n",
			__func__, idev, event_code,
			is_remote_device_ready(idev)
			? "unexpected event"
			: "unexpected event in wrong state");
		status = SCI_FAILURE_INVALID_STATE;
		break;
	}

	if (status != SCI_SUCCESS)
		return status;

	/* Decode device-specific states that may require an RNC resume during
	 * normal operation.  When the abort path is active, these resumes are
	 * managed when the abort path exits.
	 */
	if (state == SCI_STP_DEV_ATAPI_ERROR) {
		/* For ATAPI error state resume the RNC right away. */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
			return sci_remote_node_context_resume(&idev->rnc,
							      atapi_remote_device_resume_done,
							      idev);
		}
	}

	if (state == SCI_STP_DEV_IDLE) {

		/* We pick up suspension events to handle specifically to this
		 * state.  We resume the RNC right away.
		 */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
	}

	return status;
}

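/* Account for a request that was started on this device: take a device
 * reference and bump the started count on success, or undo the port-level
 * start on failure.
 */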
static void sci_remote_device_start_request(struct isci_remote_device *idev,
					    struct isci_request *ireq,
					    enum sci_status status)
{
	struct isci_port *iport = idev->owning_port;

	/* cleanup requests that failed after starting on the port */
	if (status != SCI_SUCCESS)
		sci_port_complete_io(iport, idev, ireq);
	else {
		kref_get(&idev->kref);
		idev->started_request_count++;
	}
}

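/* Start an I/O request on this device.  The request is started on the port,
 * then on the remote node context, and finally on the request itself; the
 * device substate is updated for STP/SMP protocol-specific handling.
 */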
enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
		/* attempt to start an io request for this device object.  The remote
		 * device object will issue the start request for the io and if
		 * successful it will start the request for the port object and then
		 * increment its own request count.
		 */
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	case SCI_STP_DEV_IDLE: {
		/* handle the start io operation for a sata device that is in
		 * the command idle state:
		 * - Evaluate the type of IO request to be started
		 * - If it is an NCQ request change to the NCQ substate
		 * - If it is any other command change to the CMD substate
		 *
		 * If this is a softreset we may want to have a different
		 * substate.
		 */
		enum sci_remote_device_states new_state;
		struct sas_task *task = isci_request_access_task(ireq);

		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		if (task->ata_task.use_ncq)
			new_state = SCI_STP_DEV_NCQ;
		else {
			idev->working_request = ireq;
			new_state = SCI_STP_DEV_CMD;
		}
		sci_change_state(sm, new_state);
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct sas_task *task = isci_request_access_task(ireq);

		if (task->ata_task.use_ncq) {
			status = sci_port_start_io(iport, idev, ireq);
			if (status != SCI_SUCCESS)
				return status;

			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
			if (status != SCI_SUCCESS)
				break;

			status = sci_request_start(ireq);
		} else
			return SCI_FAILURE_INVALID_STATE;
		break;
	}
	case SCI_STP_DEV_AWAIT_RESET:
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	case SCI_SMP_DEV_IDLE:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		idev->working_request = ireq;
		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* device is already handling a command; it cannot accept new
		 * commands until this one is complete.
		 */
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_remote_device_start_request(idev, ireq, status);
	return status;
}

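/* Common completion path: complete the request, complete it on the port, and
 * drop the device's started-request count.
 */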
static enum sci_status common_complete_io(struct isci_port *iport,
					  struct isci_remote_device *idev,
					  struct isci_request *ireq)
{
	enum sci_status status;

	status = sci_request_complete(ireq);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_port_complete_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	sci_remote_device_decrement_request_count(idev);
	return status;
}

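/* Complete an I/O request on this device and drive the substate transitions
 * that depend on the completion (await-reset, back to idle, or RNC
 * destruction while stopping).
 */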
enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
					      struct isci_remote_device *idev,
					      struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_RESETTING:
		status = common_complete_io(iport, idev, ireq);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_ATAPI_ERROR:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
			/* This request caused a hardware error; the device needs a LUN
			 * reset.  Force the state machine into the AWAIT_RESET state so
			 * the remaining I/Os reach the RNC state handler and are
			 * completed with "DEVICE_RESET_REQUIRED" instead of
			 * "INVALID STATE".
			 */
			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
		} else if (idev->started_request_count == 0)
			sci_change_state(sm, SCI_STP_DEV_IDLE);
		break;
	case SCI_SMP_DEV_CMD:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;
		sci_change_state(sm, SCI_SMP_DEV_IDLE);
		break;
	case SCI_DEV_STOPPING:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		break;
	}

	if (status != SCI_SUCCESS)
		dev_err(scirdev_to_dev(idev),
			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
			"could not complete\n", __func__, iport,
			idev, ireq, status);
	else
		isci_put_device(idev);

	return status;
}

static void sci_remote_device_continue_request(void *dev)
{
	struct isci_remote_device *idev = dev;

	/* we need to check if this request is still valid to continue. */
	if (idev->working_request)
		sci_controller_continue_io(idev->working_request);
}

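/* Start a task management request on this device.  For STP devices the RNC
 * is suspended and the task is posted from the RNC resume callback, so
 * SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS tells the caller not to post the
 * task context yet.
 */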
enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			goto out;

		/* Note: If the remote device state is not IDLE this will
		 * replace the request that probably resulted in the task
		 * management request.
		 */
		idev->working_request = ireq;
		sci_change_state(sm, SCI_STP_DEV_CMD);

		/* The remote node context must cleanup the TCi to NCQ mapping
		 * table.  The only way to do this correctly is to either write
		 * to the TLCR register or to invalidate and repost the RNC. In
		 * either case the remote node context state machine will take
		 * the correct action when the remote node context is suspended
		 * and later resumed.
		 */
		sci_remote_device_suspend(idev,
					  SCI_SW_SUSPEND_LINKHANG_DETECT);

		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
				sci_remote_device_continue_request, idev);

	out:
		sci_remote_device_start_request(idev, ireq, status);
		/* We need to let the controller start request handler know that
		 * it can't post TC yet.  We will provide a callback function to
		 * post TC when RNC gets resumed.
		 */
		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
	case SCI_DEV_READY:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		/* Resume the RNC as needed: */
		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
							    NULL, NULL);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	}
	sci_remote_device_start_request(idev, ireq, status);

	return status;
}

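/* Post a request to the SCU for this device, encoding the protocol engine
 * group, logical port, and remote node index into the request context.
 */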
void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
{
	struct isci_port *iport = idev->owning_port;
	u32 context;

	context = request |
		  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
		  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
		  idev->rnc.remote_node_index;

	sci_controller_post_request(iport->owning_controller, context);
}

/* called once the remote node context has transitioned to a
 * ready state.  This is the indication that the remote device object can also
 * transition to ready.
 */
static void remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	if (is_remote_device_ready(idev))
		return;

	/* go 'ready' if we are not already in a ready state */
	sci_change_state(&idev->sm, SCI_DEV_READY);
}

static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* For NCQ operation we do not issue an isci_remote_device_not_ready().
	 * As a result, avoid sending the ready notification.
	 */
	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	/* Initial state is a transitional state to the stopped state */
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

/**
 * sci_remote_device_destruct() - free remote node context and destruct
 * @idev: This parameter specifies the remote device to be destructed.
 *
 * Remote device objects are a limited resource.  As such, they must be
 * protected.  Thus calls to construct and destruct are mutually exclusive and
 * non-reentrant.  The return value indicates whether the device was
 * successfully destructed or whether some failure occurred.  SCI_SUCCESS is
 * returned if the device is successfully destructed.
 * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied device isn't
 * valid (e.g. it's already been destroyed, the handle isn't valid, etc.).
 */
static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	ihost = idev->owning_port->owning_controller;
	sci_controller_free_remote_node_context(ihost, idev,
						idev->rnc.remote_node_index);
	idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
	sci_change_state(sm, SCI_DEV_FINAL);

	return SCI_SUCCESS;
}

/**
 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device to be freed.
 *
 */
static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	/* There should not be any outstanding io's.  All paths to
	 * here should go through isci_remote_device_nuke_requests.
	 * If we hit this condition, we will need a way to complete
	 * io requests in process */
	BUG_ON(idev->started_request_count > 0);

	sci_remote_device_destruct(idev);
	list_del_init(&idev->node);
	isci_put_device(idev);
}

static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 prev_state;

	/* If we are entering from the stopping state let the SCI User know that
	 * the stop operation has completed.
	 */
	prev_state = idev->sm.previous_state_id;
	if (prev_state == SCI_DEV_STOPPING)
		isci_remote_device_deconstruct(ihost, idev);

	sci_controller_remote_device_stopped(ihost, idev);
}

static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
}

static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
	} else if (dev_is_expander(dev->dev_type)) {
		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
	} else
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SAS_END_DEVICE) {
		struct isci_host *ihost = idev->owning_port->owning_controller;

		isci_remote_device_not_ready(ihost, idev,
					     SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
	}
}

static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
}

static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}

static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
	if (sci_remote_node_context_is_ready(&idev->rnc)) {
		/*
		 * Since the RNC is ready, it's alright to finish completion
		 * processing (e.g. signal the remote device is ready). */
		sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
	} else {
		sci_remote_node_context_resume(&idev->rnc,
			sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
			idev);
	}
}

static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
}

static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
		isci_remote_device_not_ready(ihost, idev,
					     idev->not_ready_reason);
}

static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_ready(ihost, idev);
}

static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
}

static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
}

static const struct sci_base_state sci_remote_device_state_table[] = {
	[SCI_DEV_INITIAL] = {
		.enter_state = sci_remote_device_initial_state_enter,
	},
	[SCI_DEV_STOPPED] = {
		.enter_state = sci_remote_device_stopped_state_enter,
	},
	[SCI_DEV_STARTING] = {
		.enter_state = sci_remote_device_starting_state_enter,
	},
	[SCI_DEV_READY] = {
		.enter_state = sci_remote_device_ready_state_enter,
		.exit_state = sci_remote_device_ready_state_exit
	},
	[SCI_STP_DEV_IDLE] = {
		.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
	},
	[SCI_STP_DEV_CMD] = {
		.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
	},
	[SCI_STP_DEV_NCQ] = { },
	[SCI_STP_DEV_NCQ_ERROR] = {
		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
	},
	[SCI_STP_DEV_ATAPI_ERROR] = { },
	[SCI_STP_DEV_AWAIT_RESET] = { },
	[SCI_SMP_DEV_IDLE] = {
		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
	},
	[SCI_SMP_DEV_CMD] = {
		.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
		.exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
	},
	[SCI_DEV_STOPPING] = { },
	[SCI_DEV_FAILED] = { },
	[SCI_DEV_RESETTING] = {
		.enter_state = sci_remote_device_resetting_state_enter,
		.exit_state = sci_remote_device_resetting_state_exit
	},
	[SCI_DEV_FINAL] = { },
};

/**
 * sci_remote_device_construct() - common construction
 * @iport: SAS/SATA port through which this device is accessed.
 * @idev: remote device to construct
 *
 * This routine just performs benign initialization and does not
 * allocate the remote_node_context which is left to
 * sci_remote_device_[de]a_construct().  sci_remote_device_destruct()
 * frees the remote_node_context(s) for the device.
 */
static void sci_remote_device_construct(struct isci_port *iport,
					struct isci_remote_device *idev)
{
	idev->owning_port = iport;
	idev->started_request_count = 0;

	sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);

	sci_remote_node_context_construct(&idev->rnc,
					  SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}

/*
 * sci_remote_device_da_construct() - construct direct attached device.
 *
 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
 * the device is known to the SCI Core since it is contained in the
 * sci_phy object.  Remote node context(s) is/are a global resource
 * allocated by this routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
						      struct isci_remote_device *idev)
{
	enum sci_status status;
	struct sci_port_properties properties;

	sci_remote_device_construct(iport, idev);

	sci_port_get_properties(iport, &properties);
	/* Get accurate port width from port's phy mask for a DA device. */
	idev->device_port_width = hweight32(properties.phy_mask);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);

	if (status != SCI_SUCCESS)
		return status;

	idev->connection_rate = sci_port_get_max_allowed_speed(iport);

	return SCI_SUCCESS;
}

/*
 * sci_remote_device_ea_construct() - construct expander attached device
 *
 * Remote node context(s) is/are a global resource allocated by this
 * routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
						      struct isci_remote_device *idev)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	sci_remote_device_construct(iport, idev);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);
	if (status != SCI_SUCCESS)
		return status;

	/* For SAS-2 the physical link rate is actually a logical link
	 * rate that incorporates multiplexing.  The SCU doesn't
	 * incorporate multiplexing and for the purposes of the
	 * connection the logical link rate is the same as the
	 * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
	 * one another, so this code works for both situations.
	 */
	idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
				      dev->linkrate);

	/* @todo Should I assign the port width by reading all of the phys on the port? */
	idev->device_port_width = 1;

	return SCI_SUCCESS;
}

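/* Resume the device's remote node context, logging any failure to do so. */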
enum sci_status sci_remote_device_resume(
	struct isci_remote_device *idev,
	scics_sds_remote_node_context_callback cb_fn,
	void *cb_p)
{
	enum sci_status status;

	status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
	if (status != SCI_SUCCESS)
		dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
			__func__, status);
	return status;
}

static void isci_remote_device_resume_from_abort_complete(void *cbparam)
{
	struct isci_remote_device *idev = cbparam;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	scics_sds_remote_node_context_callback abort_resume_cb =
		idev->abort_resume_cb;

	dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
		__func__, abort_resume_cb);

	if (abort_resume_cb != NULL) {
		idev->abort_resume_cb = NULL;
		abort_resume_cb(idev->abort_resume_cbparam);
	}
	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
	wake_up(&ihost->eventq);
}

static bool isci_remote_device_test_resume_done(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
		|| test_bit(IDEV_STOP_PENDING, &idev->flags)
		|| sci_remote_node_context_is_being_destroyed(&idev->rnc);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return done;
}

static void isci_remote_device_wait_for_resume_from_abort(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
		__func__, idev);

	#define MAX_RESUME_MSECS 10000
	if (!wait_event_timeout(ihost->eventq,
				isci_remote_device_test_resume_done(ihost, idev),
				msecs_to_jiffies(MAX_RESUME_MSECS))) {

		dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
			 "resume: %p\n", __func__, idev);
	}
	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);

	dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
		__func__, idev);
}

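/* Resume the remote node context at the end of the abort path, preserving
 * any resume callback that was already pending, and wait (bounded by
 * MAX_RESUME_MSECS) for the resume to complete unless the RNC is being
 * destroyed.
 */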
enum sci_status isci_remote_device_resume_from_abort(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	unsigned long flags;
	enum sci_status status = SCI_SUCCESS;
	int destroyed;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	/* Preserve any current resume callbacks, for instance from other
	 * resumptions.
	 */
	idev->abort_resume_cb = idev->rnc.user_callback;
	idev->abort_resume_cbparam = idev->rnc.user_cookie;
	set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
	clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
	destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
	if (!destroyed)
		status = sci_remote_device_resume(
			idev, isci_remote_device_resume_from_abort_complete,
			idev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
	if (!destroyed && (status == SCI_SUCCESS))
		isci_remote_device_wait_for_resume_from_abort(ihost, idev);
	else
		clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);

	return status;
}

/**
 * sci_remote_device_start() - This method will start the supplied remote
 *    device.  This method enables normal IO requests to flow through to the
 *    remote device.
 * @idev: This parameter specifies the device to be started.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    start operation should complete.
 *
 * An indication of whether the device was successfully started.  SCI_SUCCESS
 * This value is returned if the device was successfully started.
 * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
 * the device when there have been no phys added to it.
 */
static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
					       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_resume(idev, remote_device_resume_done,
					  idev);
	if (status != SCI_SUCCESS)
		return status;

	sci_change_state(sm, SCI_DEV_STARTING);

	return SCI_SUCCESS;
}

static enum sci_status isci_remote_device_construct(struct isci_port *iport,
						    struct isci_remote_device *idev)
{
	struct isci_host *ihost = iport->isci_host;
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	if (dev->parent && dev_is_expander(dev->parent->dev_type))
		status = sci_remote_device_ea_construct(iport, idev);
	else
		status = sci_remote_device_da_construct(iport, idev);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
			__func__, status);

		return status;
	}

	/* start the device. */
	status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);

	if (status != SCI_SUCCESS)
		dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
			 status);

	return status;
}

/**
 * isci_remote_device_alloc() - This function builds the isci_remote_device
 *    when a libsas dev_found message is received.
 * @ihost: This parameter specifies the isci host object.
 * @iport: This parameter specifies the isci_port connected to this device.
 *
 * Return: pointer to a new isci_remote_device, or NULL if none is available.
 */
static struct isci_remote_device *
isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
{
	struct isci_remote_device *idev;
	int i;

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		idev = &ihost->devices[i];
		if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
			break;
	}

	if (i >= SCI_MAX_REMOTE_DEVICES) {
		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
		return NULL;
	}
	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
		return NULL;

	return idev;
}

void isci_remote_device_release(struct kref *kref)
{
	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
	struct isci_host *ihost = idev->isci_port->isci_host;

	idev->domain_dev = NULL;
	idev->isci_port = NULL;
	clear_bit(IDEV_START_PENDING, &idev->flags);
	clear_bit(IDEV_STOP_PENDING, &idev->flags);
	clear_bit(IDEV_IO_READY, &idev->flags);
	clear_bit(IDEV_GONE, &idev->flags);
	smp_mb__before_atomic();
	clear_bit(IDEV_ALLOCATED, &idev->flags);
	wake_up(&ihost->eventq);
}

/**
 * isci_remote_device_stop() - This function is called internally to stop the
 *    remote device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 *
 * The status of the ihost request to stop.
 */
enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	enum sci_status status;
	unsigned long flags;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
	set_bit(IDEV_GONE, &idev->flags);

	set_bit(IDEV_STOP_PENDING, &idev->flags);
	status = sci_remote_device_stop(idev, 50);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the stop complete callback. */
	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
		/* nothing to wait for */;
	else
		wait_for_device_stop(ihost, idev);

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p, waiting done.\n", __func__, idev);

	return status;
}

/**
 * isci_remote_device_gone() - This function is called by libsas when a domain
 *    device is removed.
 * @dev: This parameter specifies the libsas domain device.
 */
void isci_remote_device_gone(struct domain_device *dev)
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev = dev->lldd_dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
		__func__, dev, idev, idev->isci_port);

	isci_remote_device_stop(ihost, idev);
}

/**
 * isci_remote_device_found() - This function is called by libsas when a remote
 *    device is discovered.  A remote device object is created and started.
 *    The function then sleeps until the sci core device started message is
 *    received.
 * @dev: This parameter specifies the libsas domain device.
 *
 * status, zero indicates success.
 */
int isci_remote_device_found(struct domain_device *dev)
{
	struct isci_host *isci_host = dev_to_ihost(dev);
	struct isci_port *isci_port = dev->port->lldd_port;
	struct isci_remote_device *isci_device;
	enum sci_status status;

	dev_dbg(&isci_host->pdev->dev,
		"%s: domain_device = %p\n", __func__, dev);

	if (!isci_port)
		return -ENODEV;

	isci_device = isci_remote_device_alloc(isci_host, isci_port);
	if (!isci_device)
		return -ENODEV;

	kref_init(&isci_device->kref);
	INIT_LIST_HEAD(&isci_device->node);

	spin_lock_irq(&isci_host->scic_lock);
	isci_device->domain_dev = dev;
	isci_device->isci_port = isci_port;
	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);

	set_bit(IDEV_START_PENDING, &isci_device->flags);
	status = isci_remote_device_construct(isci_port, isci_device);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p\n",
		__func__, isci_device);

	if (status == SCI_SUCCESS) {
		/* device came up, advertise it to the world */
		dev->lldd_dev = isci_device;
	} else
		isci_put_device(isci_device);
	spin_unlock_irq(&isci_host->scic_lock);

	/* wait for the device ready callback. */
	wait_for_device_start(isci_host, isci_device);

	return status == SCI_SUCCESS ? 0 : -ENODEV;
}

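/* Suspend the device (marking the abort path active) and terminate either a
 * single request or all of its outstanding requests.  Resuming the RNC is
 * left to the caller.
 */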
enum sci_status isci_remote_device_suspend_terminate(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq)
{
	unsigned long flags;
	enum sci_status status;

	/* Put the device into suspension. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Terminate and wait for the completions. */
	status = isci_remote_device_terminate_requests(ihost, idev, ireq);
	if (status != SCI_SUCCESS)
		dev_dbg(&ihost->pdev->dev,
			"%s: isci_remote_device_terminate_requests(%p) "
			"returned %d!\n",
			__func__, idev, status);

	/* NOTE: RNC resumption is left to the caller! */
	return status;
}

int isci_remote_device_is_safe_to_abort(
	struct isci_remote_device *idev)
{
	return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
}

enum sci_status sci_remote_device_abort_requests_pending_abort(
	struct isci_remote_device *idev)
{
	return sci_remote_device_terminate_reqs_checkabort(idev, 1);
}

void isci_dev_set_hang_detection_timeout(
	struct isci_remote_device *idev,
	u32 timeout)
{
	if (dev_is_sata(idev->domain_dev)) {
		if (timeout) {
			if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
					     &idev->flags))
				return;	/* Already enabled. */
		} else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
					       &idev->flags))
			return;	/* Not enabled. */

		sci_port_set_hang_detection_timeout(idev->owning_port,
						    timeout);
	}
}