/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas.h>
#include <linux/bitops.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
#include "request.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "task.h"

/**
 * isci_remote_device_not_ready() - This function is called by the ihost when
 *    the remote device is not ready.  We clear the device's "ready for I/O"
 *    state and signal the waiting process.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 * @reason: This parameter specifies why the device is not ready.
 *
 * sci_lock is held on entrance to this function.
 */
static void isci_remote_device_not_ready(struct isci_host *ihost,
					 struct isci_remote_device *idev,
					 u32 reason)
{
	struct isci_request *ireq;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	switch (reason) {
	case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
		set_bit(IDEV_GONE, &idev->flags);
		break;
	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
		set_bit(IDEV_IO_NCQERROR, &idev->flags);

		/* Kill all outstanding requests for the device. */
		list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {

			dev_dbg(&ihost->pdev->dev,
				"%s: isci_device = %p request = %p\n",
				__func__, idev, ireq);

			sci_controller_terminate_request(ihost,
							 idev,
							 ireq);
		}
		/* Fall through into the default case... */
	default:
		clear_bit(IDEV_IO_READY, &idev->flags);
		break;
	}
}

/**
 * isci_remote_device_ready() - This function is called by the ihost when the
 *    remote device is ready.  We mark the isci device as ready and signal the
 *    waiting process.
 * @ihost: our valid isci_host
 * @idev: remote device
 *
 */
static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
	set_bit(IDEV_IO_READY, &idev->flags);
	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
		wake_up(&ihost->eventq);
}

/* called once the remote node context is ready to be freed.
 * The remote device can now report that its stop operation is complete.
 */
static void rnc_destruct_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	BUG_ON(idev->started_request_count != 0);
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

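/* Terminate every request that is still active for this device.  The
 * controller-wide request pool (ihost->reqs) is scanned rather than the
 * device's own list; any request that is both IREQ_ACTIVE and targeted at
 * this device is terminated, and the last failing status (if any) is
 * returned to the caller.
 */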
static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
{
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	u32 i;

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq = ihost->reqs[i];
		enum sci_status s;

		if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
		    ireq->target_device != idev)
			continue;

		s = sci_controller_terminate_request(ihost, idev, ireq);
		if (s != SCI_SUCCESS)
			status = s;
	}

	return status;
}

enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
				       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_STOPPED:
		return SCI_SUCCESS;
	case SCI_DEV_STARTING:
		/* device not started so there had better be no requests */
		BUG_ON(idev->started_request_count != 0);
		sci_remote_node_context_destruct(&idev->rnc,
						 rnc_destruct_done, idev);
		/* Transition to the stopping state and wait for the
		 * remote node to complete being posted and invalidated.
		 */
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		sci_change_state(sm, SCI_DEV_STOPPING);
		if (idev->started_request_count == 0) {
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done, idev);
			return SCI_SUCCESS;
		} else
			return sci_remote_device_terminate_requests(idev);
		break;
	case SCI_DEV_STOPPING:
		/* All requests should have been terminated, but if there is an
		 * attempt to stop a device already in the stopping state, then
		 * try again to terminate.
		 */
		return sci_remote_device_terminate_requests(idev);
	case SCI_DEV_RESETTING:
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		sci_change_state(sm, SCI_DEV_RESETTING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	if (state != SCI_DEV_RESETTING) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCI_DEV_READY);
	return SCI_SUCCESS;
}

enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
					  u32 suspend_type)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	if (state != SCI_STP_DEV_CMD) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	return sci_remote_node_context_suspend(&idev->rnc,
					       suspend_type, NULL, NULL);
}

enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
						u32 frame_index)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		/* Return the frame back to the controller */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING: {
		struct isci_request *ireq;
		struct ssp_frame_hdr hdr;
		void *frame_header;
		ssize_t word_cnt;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  &frame_header);
		if (status != SCI_SUCCESS)
			return status;

		word_cnt = sizeof(hdr) / sizeof(u32);
		sci_swab32_cpy(&hdr, frame_header, word_cnt);

		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
		if (ireq && ireq->target_device == idev) {
			/* The IO request is now in charge of releasing the frame */
			status = sci_io_request_frame_handler(ireq, frame_index);
		} else {
			/* We could not map this tag to a valid IO
			 * request.  Just toss the frame and continue.
			 */
			sci_controller_release_frame(ihost, frame_index);
		}
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct dev_to_host_fis *hdr;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&hdr);
		if (status != SCI_SUCCESS)
			return status;

		if (hdr->fis_type == FIS_SETDEVBITS &&
		    (hdr->status & ATA_ERR)) {
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;

			/* TODO Check sactive and complete associated IO if any. */
			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
		} else if (hdr->fis_type == FIS_REGD2H &&
			   (hdr->status & ATA_ERR)) {
			/*
			 * Some devices return a D2H FIS when an NCQ error is
			 * detected.  Treat it like the SDB error FIS not-ready
			 * reason.
			 */
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
		} else
			status = SCI_FAILURE;

		sci_controller_release_frame(ihost, frame_index);
		break;
	}
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device does not process any UF received from the hardware while
		 * in this state.  All unsolicited frames are forwarded to the io request
		 * object.
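		 * The working_request was recorded when the command was started
		 * in sci_remote_device_start_io()/sci_remote_device_start_task(),
		 * so the frame is handed to that request for processing.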
		 */
		status = sci_io_request_frame_handler(idev->working_request, frame_index);
		break;
	}

	return status;
}

static bool is_remote_device_ready(struct isci_remote_device *idev)
{

	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		return true;
	default:
		return false;
	}
}

/*
 * called once the remote node context has transitioned to a ready
 * state (after suspending RX and/or TX due to early D2H fis)
 */
static void atapi_remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_request *ireq = idev->working_request;

	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}

enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
						u32 event_code)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
		break;
	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
			status = SCI_SUCCESS;

			/* Suspend the associated RNC */
			sci_remote_node_context_suspend(&idev->rnc,
							SCI_SOFTWARE_SUSPENSION,
							NULL, NULL);

			dev_dbg(scirdev_to_dev(idev),
				"%s: device: %p event code: %x: %s\n",
				__func__, idev, event_code,
				is_remote_device_ready(idev)
				? "I_T_Nexus_Timeout event"
				: "I_T_Nexus_Timeout event in wrong state");

			break;
		}
		/* Else, fall through and treat as unhandled... */
	default:
		dev_dbg(scirdev_to_dev(idev),
			"%s: device: %p event code: %x: %s\n",
			__func__, idev, event_code,
			is_remote_device_ready(idev)
			? "unexpected event"
			: "unexpected event in wrong state");
		status = SCI_FAILURE_INVALID_STATE;
		break;
	}

	if (status != SCI_SUCCESS)
		return status;

	if (state == SCI_STP_DEV_ATAPI_ERROR) {
		/* For ATAPI error state resume the RNC right away. */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
			return sci_remote_node_context_resume(&idev->rnc,
							      atapi_remote_device_resume_done,
							      idev);
		}
	}

	if (state == SCI_STP_DEV_IDLE) {

		/* We pick up suspension events to handle specifically in this
		 * state.  We resume the RNC right away.
		 */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
	}

	return status;
}

static void sci_remote_device_start_request(struct isci_remote_device *idev,
					    struct isci_request *ireq,
					    enum sci_status status)
{
	struct isci_port *iport = idev->owning_port;

	/* cleanup requests that failed after starting on the port */
	if (status != SCI_SUCCESS)
		sci_port_complete_io(iport, idev, ireq);
	else {
		kref_get(&idev->kref);
		idev->started_request_count++;
	}
}

enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
		/* attempt to start an io request for this device object.  The remote
		 * device object will issue the start request for the io and if
		 * successful it will start the request for the port object then
		 * increment its own request count.
		 */
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	case SCI_STP_DEV_IDLE: {
		/* handle the start io operation for a sata device that is in
		 * the command idle state.
		 *  - Evaluate the type of IO request to be started.
		 *  - If it is an NCQ request, change to the NCQ substate.
		 *  - If it is any other command, change to the CMD substate.
		 *
		 * If this is a softreset we may want to have a different
		 * substate.
		 */
		enum sci_remote_device_states new_state;
		struct sas_task *task = isci_request_access_task(ireq);

		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		if (task->ata_task.use_ncq)
			new_state = SCI_STP_DEV_NCQ;
		else {
			idev->working_request = ireq;
			new_state = SCI_STP_DEV_CMD;
		}
		sci_change_state(sm, new_state);
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct sas_task *task = isci_request_access_task(ireq);

		if (task->ata_task.use_ncq) {
			status = sci_port_start_io(iport, idev, ireq);
			if (status != SCI_SUCCESS)
				return status;

			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
			if (status != SCI_SUCCESS)
				break;

			status = sci_request_start(ireq);
		} else
			return SCI_FAILURE_INVALID_STATE;
		break;
	}
	case SCI_STP_DEV_AWAIT_RESET:
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	case SCI_SMP_DEV_IDLE:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		idev->working_request = ireq;
		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* device is already handling a command; it cannot accept new
		 * commands until this one is complete.
		 */
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_remote_device_start_request(idev, ireq, status);
	return status;
}

static enum sci_status common_complete_io(struct isci_port *iport,
					  struct isci_remote_device *idev,
					  struct isci_request *ireq)
{
	enum sci_status status;

	status = sci_request_complete(ireq);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_port_complete_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	sci_remote_device_decrement_request_count(idev);
	return status;
}

enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
					      struct isci_remote_device *idev,
					      struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_RESETTING:
		status = common_complete_io(iport, idev, ireq);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_ATAPI_ERROR:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
			/* This request caused a hardware error and the device
			 * needs a LUN reset.
			 * Force the state machine into the AWAIT_RESET state
			 * so the remaining IOs reach the RNC state handler and
			 * are completed there with a status of
			 * "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
			 */
			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
		} else if (idev->started_request_count == 0)
			sci_change_state(sm, SCI_STP_DEV_IDLE);
		break;
	case SCI_SMP_DEV_CMD:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;
		sci_change_state(sm, SCI_SMP_DEV_IDLE);
		break;
	case SCI_DEV_STOPPING:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		break;
	}

	if (status != SCI_SUCCESS)
		dev_err(scirdev_to_dev(idev),
			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
			"could not complete\n", __func__, iport,
			idev, ireq, status);
	else
		isci_put_device(idev);

	return status;
}

static void sci_remote_device_continue_request(void *dev)
{
	struct isci_remote_device *idev = dev;

	/* we need to check if this request is still valid to continue. */
	if (idev->working_request)
		sci_controller_continue_io(idev->working_request);
}

enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_task(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			goto out;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			goto out;

		/* Note: If the remote device state is not IDLE this will
		 * replace the request that probably resulted in the task
		 * management request.
		 */
		idev->working_request = ireq;
		sci_change_state(sm, SCI_STP_DEV_CMD);

		/* The remote node context must cleanup the TCi to NCQ mapping
		 * table.  The only way to do this correctly is to either write
		 * to the TLCR register or to invalidate and repost the RNC.  In
		 * either case the remote node context state machine will take
		 * the correct action when the remote node context is suspended
		 * and later resumed.
		 */
		sci_remote_node_context_suspend(&idev->rnc,
						SCI_SOFTWARE_SUSPENSION, NULL, NULL);
		sci_remote_node_context_resume(&idev->rnc,
					       sci_remote_device_continue_request,
					       idev);

	out:
		sci_remote_device_start_request(idev, ireq, status);
		/* We need to let the controller start request handler know that
		 * it can't post the TC yet.
		 * We will provide a callback function to post the TC when the
		 * RNC gets resumed.
		 */
		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
	case SCI_DEV_READY:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_task(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	}
	sci_remote_device_start_request(idev, ireq, status);

	return status;
}

void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
{
	struct isci_port *iport = idev->owning_port;
	u32 context;

	context = request |
		  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
		  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
		  idev->rnc.remote_node_index;

	sci_controller_post_request(iport->owning_controller, context);
}

/* called once the remote node context has transitioned to a
 * ready state.  This is the indication that the remote device object can also
 * transition to ready.
 */
static void remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	if (is_remote_device_ready(idev))
		return;

	/* go 'ready' if we are not already in a ready state */
	sci_change_state(&idev->sm, SCI_DEV_READY);
}

static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* For NCQ operation we do not issue a isci_remote_device_not_ready().
	 * As a result, avoid sending the ready notification.
	 */
	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	/* Initial state is a transitional state to the stopped state */
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

/**
 * sci_remote_device_destruct() - free remote node context and destruct
 * @idev: This parameter specifies the remote device to be destructed.
 *
 * Remote device objects are a limited resource.  As such, they must be
 * protected.  Thus calls to construct and destruct are mutually exclusive and
 * non-reentrant.  The return value indicates whether the device was
 * successfully destructed: SCI_SUCCESS is returned if the device is
 * successfully destructed; SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if
 * the supplied device isn't valid (e.g. it's already been destroyed, the
 * handle isn't valid, etc.).
 */
static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	ihost = idev->owning_port->owning_controller;
	sci_controller_free_remote_node_context(ihost, idev,
						idev->rnc.remote_node_index);
	idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
	sci_change_state(sm, SCI_DEV_FINAL);

	return SCI_SUCCESS;
}

/**
 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device to be freed.
 *
 */
static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	/* There should not be any outstanding I/Os.  All paths to
	 * here should go through isci_remote_device_nuke_requests.
	 * If we hit this condition, we will need a way to complete
	 * io requests in process */
	BUG_ON(!list_empty(&idev->reqs_in_process));

	sci_remote_device_destruct(idev);
	list_del_init(&idev->node);
	isci_put_device(idev);
}

static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 prev_state;

	/* If we are entering from the stopping state let the SCI User know that
	 * the stop operation has completed.
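	 * isci_remote_device_deconstruct() returns the remote node context to
	 * the controller and drops a device reference; once the last reference
	 * is gone isci_remote_device_release() makes the slot available again.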
	 */
	prev_state = idev->sm.previous_state_id;
	if (prev_state == SCI_DEV_STOPPING)
		isci_remote_device_deconstruct(ihost, idev);

	sci_controller_remote_device_stopped(ihost, idev);
}

static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
}

static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
	} else if (dev_is_expander(dev)) {
		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
	} else
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SAS_END_DEV) {
		struct isci_host *ihost = idev->owning_port->owning_controller;

		isci_remote_device_not_ready(ihost, idev,
					     SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
	}
}

static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	sci_remote_node_context_suspend(
		&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
}

static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}

static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
	if (sci_remote_node_context_is_ready(&idev->rnc)) {
		/*
		 * Since the RNC is ready, it's alright to finish completion
		 * processing (e.g. signal the remote device is ready).
		 */
		sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
	} else {
		sci_remote_node_context_resume(&idev->rnc,
			sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
			idev);
	}
}

static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
}

static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
		isci_remote_device_not_ready(ihost, idev,
					     idev->not_ready_reason);
}

static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_ready(ihost, idev);
}

static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
}

static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
}

static const struct sci_base_state sci_remote_device_state_table[] = {
	[SCI_DEV_INITIAL] = {
		.enter_state = sci_remote_device_initial_state_enter,
	},
	[SCI_DEV_STOPPED] = {
		.enter_state = sci_remote_device_stopped_state_enter,
	},
	[SCI_DEV_STARTING] = {
		.enter_state = sci_remote_device_starting_state_enter,
	},
	[SCI_DEV_READY] = {
		.enter_state = sci_remote_device_ready_state_enter,
		.exit_state  = sci_remote_device_ready_state_exit
	},
	[SCI_STP_DEV_IDLE] = {
		.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
	},
	[SCI_STP_DEV_CMD] = {
		.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
	},
	[SCI_STP_DEV_NCQ] = { },
	[SCI_STP_DEV_NCQ_ERROR] = {
		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
	},
	[SCI_STP_DEV_ATAPI_ERROR] = { },
	[SCI_STP_DEV_AWAIT_RESET] = { },
	[SCI_SMP_DEV_IDLE] = {
		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
	},
	[SCI_SMP_DEV_CMD] = {
		.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
		.exit_state  = sci_smp_remote_device_ready_cmd_substate_exit,
	},
	[SCI_DEV_STOPPING] = { },
	[SCI_DEV_FAILED] = { },
	[SCI_DEV_RESETTING] = {
		.enter_state = sci_remote_device_resetting_state_enter,
		.exit_state  = sci_remote_device_resetting_state_exit
	},
	[SCI_DEV_FINAL] = { },
};

/**
 * sci_remote_device_construct() - common construction
 * @iport: SAS/SATA port through which this device is accessed.
 * @idev: remote device to construct
 *
 * This routine just performs benign initialization and does not
 * allocate the remote_node_context which is left to
 * sci_remote_device_[de]a_construct().  sci_remote_device_destruct()
 * frees the remote_node_context(s) for the device.
 */
static void sci_remote_device_construct(struct isci_port *iport,
					struct isci_remote_device *idev)
{
	idev->owning_port = iport;
	idev->started_request_count = 0;

	sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);

	sci_remote_node_context_construct(&idev->rnc,
					  SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}

/**
 * sci_remote_device_da_construct() - construct direct attached device.
 *
 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
 * the device is known to the SCI Core since it is contained in the
 * sci_phy object.  Remote node context(s) is/are a global resource
 * allocated by this routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
						      struct isci_remote_device *idev)
{
	enum sci_status status;
	struct sci_port_properties properties;
	struct domain_device *dev = idev->domain_dev;

	sci_remote_device_construct(iport, idev);

	/*
	 * This information is required to determine how many remote node
	 * context entries will be needed to store the remote node.
	 */
	idev->is_direct_attached = true;

	sci_port_get_properties(iport, &properties);
	/* Get accurate port width from port's phy mask for a DA device. */
	idev->device_port_width = hweight32(properties.phy_mask);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);

	if (status != SCI_SUCCESS)
		return status;

	if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
	    (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	idev->connection_rate = sci_port_get_max_allowed_speed(iport);

	return SCI_SUCCESS;
}

/**
 * sci_remote_device_ea_construct() - construct expander attached device
 *
 * Remote node context(s) is/are a global resource allocated by this
 * routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
						      struct isci_remote_device *idev)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	sci_remote_device_construct(iport, idev);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);
	if (status != SCI_SUCCESS)
		return status;

	if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
	    (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	/*
	 * For SAS-2 the physical link rate is actually a logical link
	 * rate that incorporates multiplexing.  The SCU doesn't
	 * incorporate multiplexing and for the purposes of the
	 * connection the logical link rate is the same as the
	 * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
	 * one another, so this code works for both situations. */
	idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
				      dev->linkrate);

	/* @todo Should we assign the port width by reading all of the phys on the port? */
	idev->device_port_width = 1;

	return SCI_SUCCESS;
}

/**
 * sci_remote_device_start() - This method will start the supplied remote
 *    device.  This method enables normal IO requests to flow through to the
 *    remote device.
 * @idev: This parameter specifies the device to be started.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    start operation should complete.
 *
 * An indication of whether the device was successfully started.  SCI_SUCCESS
 * is returned if the device was successfully started.  SCI_FAILURE_INVALID_PHY
 * is returned if the user attempts to start the device when there have been
 * no phys added to it.
 */
static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
					       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_node_context_resume(&idev->rnc,
						remote_device_resume_done,
						idev);
	if (status != SCI_SUCCESS)
		return status;

	sci_change_state(sm, SCI_DEV_STARTING);

	return SCI_SUCCESS;
}

static enum sci_status isci_remote_device_construct(struct isci_port *iport,
						    struct isci_remote_device *idev)
{
	struct isci_host *ihost = iport->isci_host;
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	if (dev->parent && dev_is_expander(dev->parent))
		status = sci_remote_device_ea_construct(iport, idev);
	else
		status = sci_remote_device_da_construct(iport, idev);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
			__func__, status);

		return status;
	}

	/* start the device. */
	status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);

	if (status != SCI_SUCCESS)
		dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
			 status);

	return status;
}

void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
{
	DECLARE_COMPLETION_ONSTACK(aborted_task_completion);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	/* Cleanup all requests pending for this device. */
	isci_terminate_pending_requests(ihost, idev);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p, done\n", __func__, idev);
}

/**
 * isci_remote_device_alloc() - This function builds the isci_remote_device
 *    when a libsas dev_found message is received.
 * @ihost: This parameter specifies the isci host object.
 * @iport: This parameter specifies the isci_port connected to this device.
 *
 * Returns a pointer to the new isci_remote_device, or NULL on failure.
 */
static struct isci_remote_device *
isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
{
	struct isci_remote_device *idev;
	int i;

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		idev = &ihost->devices[i];
		if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
			break;
	}

	if (i >= SCI_MAX_REMOTE_DEVICES) {
		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
		return NULL;
	}

	if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
		return NULL;

	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
		return NULL;

	return idev;
}

void isci_remote_device_release(struct kref *kref)
{
	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
	struct isci_host *ihost = idev->isci_port->isci_host;

	idev->domain_dev = NULL;
	idev->isci_port = NULL;
	clear_bit(IDEV_START_PENDING, &idev->flags);
	clear_bit(IDEV_STOP_PENDING, &idev->flags);
	clear_bit(IDEV_IO_READY, &idev->flags);
	clear_bit(IDEV_GONE, &idev->flags);
	clear_bit(IDEV_EH, &idev->flags);
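	/* Ensure the flag clears and pointer resets above are visible before
	 * IDEV_ALLOCATED is cleared; clearing IDEV_ALLOCATED is what makes this
	 * slot available to isci_remote_device_alloc() again.
	 */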
	smp_mb__before_clear_bit();
	clear_bit(IDEV_ALLOCATED, &idev->flags);
	wake_up(&ihost->eventq);
}

/**
 * isci_remote_device_stop() - This function is called internally to stop the
 *    remote device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 *
 * The status of the ihost request to stop.
 */
enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	enum sci_status status;
	unsigned long flags;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
	set_bit(IDEV_GONE, &idev->flags);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Kill all outstanding requests. */
	isci_remote_device_nuke_requests(ihost, idev);

	set_bit(IDEV_STOP_PENDING, &idev->flags);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_remote_device_stop(idev, 50);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the stop complete callback. */
	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
		/* nothing to wait for */;
	else
		wait_for_device_stop(ihost, idev);

	return status;
}

/**
 * isci_remote_device_gone() - This function is called by libsas when a domain
 *    device is removed.
 * @dev: This parameter specifies the libsas domain device.
 *
 */
void isci_remote_device_gone(struct domain_device *dev)
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev = dev->lldd_dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
		__func__, dev, idev, idev->isci_port);

	isci_remote_device_stop(ihost, idev);
}


/**
 * isci_remote_device_found() - This function is called by libsas when a remote
 *    device is discovered.  A remote device object is created and started.
 *    The function then sleeps until the sci core device started message is
 *    received.
 * @domain_dev: This parameter specifies the libsas domain device.
 *
 * status, zero indicates success.
 */
int isci_remote_device_found(struct domain_device *domain_dev)
{
	struct isci_host *isci_host = dev_to_ihost(domain_dev);
	struct isci_port *isci_port;
	struct isci_phy *isci_phy;
	struct asd_sas_port *sas_port;
	struct asd_sas_phy *sas_phy;
	struct isci_remote_device *isci_device;
	enum sci_status status;

	dev_dbg(&isci_host->pdev->dev,
		"%s: domain_device = %p\n", __func__, domain_dev);

	wait_for_start(isci_host);

	sas_port = domain_dev->port;
	sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
				   port_phy_el);
	isci_phy = to_iphy(sas_phy);
	isci_port = isci_phy->isci_port;

	/* we are being called for a device on this port,
	 * so it has to come up eventually
	 */
	wait_for_completion(&isci_port->start_complete);

	if ((isci_stopping == isci_port_get_state(isci_port)) ||
	    (isci_stopped == isci_port_get_state(isci_port)))
		return -ENODEV;

	isci_device = isci_remote_device_alloc(isci_host, isci_port);
	if (!isci_device)
		return -ENODEV;

	kref_init(&isci_device->kref);
	INIT_LIST_HEAD(&isci_device->node);

	spin_lock_irq(&isci_host->scic_lock);
	isci_device->domain_dev = domain_dev;
	isci_device->isci_port = isci_port;
	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);

	set_bit(IDEV_START_PENDING, &isci_device->flags);
	status = isci_remote_device_construct(isci_port, isci_device);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p\n",
		__func__, isci_device);

	if (status == SCI_SUCCESS) {
		/* device came up, advertise it to the world */
		domain_dev->lldd_dev = isci_device;
	} else
		isci_put_device(isci_device);
	spin_unlock_irq(&isci_host->scic_lock);

	/* wait for the device ready callback. */
	wait_for_device_start(isci_host, isci_device);

	return status == SCI_SUCCESS ? 0 : -ENODEV;
}