/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas_ata.h>
#include "host.h"
#include "isci.h"
#include "remote_device.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "scu_task_context.h"

#undef C
#define C(a) (#a)
const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
	static const char * const strings[] = RNC_STATES;

	return strings[state];
}
#undef C

/**
 * sci_remote_node_context_is_ready() - check whether the RNC is ready
 * @sci_rnc: The remote node context object to check.
 *
 * Return: true if the remote node context is in the ready state, false
 * otherwise.
 */
bool sci_remote_node_context_is_ready(
	struct sci_remote_node_context *sci_rnc)
{
	u32 current_state = sci_rnc->sm.current_state_id;

	if (current_state == SCI_RNC_READY)
		return true;

	return false;
}

bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
{
	u32 current_state = sci_rnc->sm.current_state_id;

	if (current_state == SCI_RNC_TX_RX_SUSPENDED)
		return true;
	return false;
}

static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
	if (id < ihost->remote_node_entries &&
	    ihost->device_table[id])
		return &ihost->remote_node_context_table[id];

	return NULL;
}

static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	int rni = sci_rnc->remote_node_index;
	union scu_remote_node_context *rnc;
	struct isci_host *ihost;
	__le64 sas_addr;

	ihost = idev->owning_port->owning_controller;
	rnc = sci_rnc_by_id(ihost, rni);

	memset(rnc, 0, sizeof(union scu_remote_node_context)
		* sci_remote_device_node_count(idev));

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = idev->device_port_width;
	rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;

	/* sas address is __be64, context ram format is __le64 */
	sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
	rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
	rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);

	rnc->ssp.nexus_loss_timer_enable = true;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = false;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;

	rnc->ssp.arbitration_wait_time = 0;

	if (dev_is_sata(dev)) {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.stp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.stp_inactivity_timeout;
	} else {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.ssp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.ssp_inactivity_timeout;
	}

	rnc->ssp.initial_arbitration_wait_time = 0;

	/* Open Address Frame Parameters */
	rnc->ssp.oaf_connection_rate = idev->connection_rate;
	rnc->ssp.oaf_features = 0;
	rnc->ssp.oaf_source_zone_group = 0;
	rnc->ssp.oaf_more_compatibility_features = 0;
}
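
/*
 * Note on the buffer construction above (informal): the memset clears
 * sci_remote_device_node_count(idev) consecutive entries starting at
 * remote_node_index, not just one.  A remote device can own more than one
 * remote node context entry (typically SATA/STP devices, as the multiplier
 * suggests), so every entry the device owns is wiped before the SSP-format
 * fields are filled in.
 */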

/**
 * sci_remote_node_context_setup_to_resume() - record the resume destination
 * @sci_rnc: The remote node context to set up.
 * @callback: The user callback to invoke once the transition completes.
 * @callback_parameter: Cookie passed back to @callback.
 * @dest_param: The destination state to transition toward.
 *
 * Set up the remote node context object so that it will transition to its
 * ready state.  If the remote node context is already set up to transition
 * to its final (destroyed) state, this function does nothing.
 */
static void sci_remote_node_context_setup_to_resume(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter,
	enum sci_remote_node_context_destination_state dest_param)
{
	if (sci_rnc->destination_state != RNC_DEST_FINAL) {
		sci_rnc->destination_state = dest_param;
		if (callback != NULL) {
			sci_rnc->user_callback = callback;
			sci_rnc->user_cookie = callback_parameter;
		}
	}
}

static void sci_remote_node_context_setup_to_destroy(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter)
{
	struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));

	sci_rnc->destination_state = RNC_DEST_FINAL;
	sci_rnc->user_callback = callback;
	sci_rnc->user_cookie = callback_parameter;

	wake_up(&ihost->eventq);
}

/**
 * sci_remote_node_context_notify_user() - invoke and clear the user callback
 * @rnc: The remote node context whose pending callback should fire.
 *
 * Call the user callback function, if one was registered, and then reset
 * the callback so it only fires once.
 */
static void sci_remote_node_context_notify_user(
	struct sci_remote_node_context *rnc)
{
	if (rnc->user_callback != NULL) {
		(*rnc->user_callback)(rnc->user_cookie);

		rnc->user_callback = NULL;
		rnc->user_cookie = NULL;
	}
}

static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
	switch (rnc->destination_state) {
	case RNC_DEST_READY:
	case RNC_DEST_SUSPENDED_RESUME:
		rnc->destination_state = RNC_DEST_READY;
		/* Fall through... */
	case RNC_DEST_FINAL:
		sci_remote_node_context_resume(rnc, rnc->user_callback,
					       rnc->user_cookie);
		break;
	default:
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
		break;
	}
}
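
/*
 * Informal sketch of how the helpers above cooperate: the setup_to_resume()
 * and setup_to_destroy() routines only record the desired end state
 * (RNC_DEST_READY, RNC_DEST_SUSPENDED_RESUME or RNC_DEST_FINAL) plus the
 * user callback; they do not change the state machine themselves.  When the
 * context later lands in one of the suspended states,
 * sci_remote_node_context_continue_state_transitions() reads that recorded
 * destination and, for the three destinations above, re-issues
 * sci_remote_node_context_resume() so the context keeps walking toward the
 * requested end state without further action from the caller.
 */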

static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = true;

	if (dev_is_sata(dev) && dev->parent) {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
	} else {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);

		if (!dev->parent)
			sci_port_setup_transports(idev->owning_port,
						  sci_rnc->remote_node_index);
	}
}

static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = false;

	sci_remote_device_post_request(rnc_to_dev(sci_rnc),
				       SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
}

static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* Check to see if we have gotten back to the initial state because
	 * someone requested to destroy the remote node context object.
	 */
	if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
		sci_remote_node_context_notify_user(rnc);

		smp_wmb();
		wake_up(&ihost->eventq);
	}
}

static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);

	sci_remote_node_context_validate_context_buffer(sci_rnc);
}

static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	/* Terminate all outstanding requests. */
	sci_remote_device_terminate_requests(rnc_to_dev(rnc));
	sci_remote_node_context_invalidate_context_buffer(rnc);
}

static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev;
	struct domain_device *dev;

	idev = rnc_to_dev(rnc);
	dev = idev->domain_dev;

	/*
	 * For direct attached SATA devices we need to clear the TLCR
	 * NCQ to TCi tag mapping on the phy and in cases where we
	 * resume because of a target reset we also need to update
	 * the STPTLDARNI register with the RNi of the device
	 */
	if (dev_is_sata(dev) && !dev->parent)
		sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);

	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}

static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	enum sci_remote_node_context_destination_state dest_select;
	int tell_user = 1;

	dest_select = rnc->destination_state;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	if ((dest_select == RNC_DEST_SUSPENDED) ||
	    (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
		sci_remote_node_context_suspend(
			rnc, rnc->suspend_reason,
			SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);

		if (dest_select == RNC_DEST_SUSPENDED_RESUME)
			tell_user = 0;  /* Wait until ready again. */
	}
	if (tell_user)
		sci_remote_node_context_notify_user(rnc);
}
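
/*
 * Note on the READY entry handler above: a suspend that was requested while
 * the context was still posting, invalidating or resuming is recorded as a
 * destination state and serviced here, as soon as the hardware reports the
 * context ready.  For RNC_DEST_SUSPENDED_RESUME the user callback is held
 * back (tell_user = 0) because the caller asked for a suspend followed by a
 * resume, so notification is deferred until READY is reached again after
 * that later resume.
 */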

static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 new_count = rnc->suspend_count + 1;

	if (new_count == 0)
		rnc->suspend_count = 1;
	else
		rnc->suspend_count = new_count;
	smp_wmb();

	/* Terminate outstanding requests pending abort. */
	sci_remote_device_abort_requests_pending_abort(idev);

	wake_up(&ihost->eventq);
	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_await_suspend_state_exit(
	struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc
		= container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);

	if (dev_is_sata(idev->domain_dev))
		isci_dev_set_hang_detection_timeout(idev, 0);
}

static const struct sci_base_state sci_remote_node_context_state_table[] = {
	[SCI_RNC_INITIAL] = {
		.enter_state = sci_remote_node_context_initial_state_enter,
	},
	[SCI_RNC_POSTING] = {
		.enter_state = sci_remote_node_context_posting_state_enter,
	},
	[SCI_RNC_INVALIDATING] = {
		.enter_state = sci_remote_node_context_invalidating_state_enter,
	},
	[SCI_RNC_RESUMING] = {
		.enter_state = sci_remote_node_context_resuming_state_enter,
	},
	[SCI_RNC_READY] = {
		.enter_state = sci_remote_node_context_ready_state_enter,
	},
	[SCI_RNC_TX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_suspended_state_enter,
	},
	[SCI_RNC_TX_RX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
	},
	[SCI_RNC_AWAIT_SUSPENSION] = {
		.exit_state = sci_remote_node_context_await_suspend_state_exit,
	},
};
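
/*
 * Rough lifecycle implemented by the state table above and the event
 * handler below (a simplified sketch; abort and error paths can divert it):
 *
 *   INITIAL  --resume-->  POSTING  --POST_RNC_COMPLETE-->  READY
 *   READY    --suspend event-->  TX_SUSPENDED / TX_RX_SUSPENDED
 *   READY    --software suspend request-->  AWAIT_SUSPENSION
 *            --matching suspend event-->  TX_SUSPENDED / TX_RX_SUSPENDED
 *   *_SUSPENDED  --resume-->  RESUMING  --POST_RCN_RELEASE-->  READY
 *   active states  --destruct-->  INVALIDATING  --INVALIDATE_COMPLETE-->
 *            INITIAL (destination RNC_DEST_FINAL) or POSTING (re-post)
 */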

void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
				       u16 remote_node_index)
{
	memset(rnc, 0, sizeof(struct sci_remote_node_context));

	rnc->remote_node_index = remote_node_index;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}

enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
						      u32 event_code)
{
	enum scis_sds_remote_node_context_states state;
	u32 next_state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_POSTING:
		switch (scu_get_event_code(event_code)) {
		case SCU_EVENT_POST_RNC_COMPLETE:
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_INVALIDATING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
			if (sci_rnc->destination_state == RNC_DEST_FINAL)
				next_state = SCI_RNC_INITIAL;
			else
				next_state = SCI_RNC_POSTING;
			sci_change_state(&sci_rnc->sm, next_state);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
				 * the device since it's being invalidated anyway */
				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					 "%s: SCIC Remote Node Context 0x%p was "
					 "suspended by hardware while being "
					 "invalidated.\n", __func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_RESUMING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
				 * the device since it's being resumed anyway */
				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					 "%s: SCIC Remote Node Context 0x%p was "
					 "suspended by hardware while being resumed.\n",
					 __func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_READY:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			next_state = SCI_RNC_TX_SUSPENDED;
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			next_state = SCI_RNC_TX_RX_SUSPENDED;
			break;
		default:
			goto out;
		}
		if (sci_rnc->suspend_type == scu_get_event_type(event_code))
			sci_change_state(&sci_rnc->sm, next_state);
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	return SCI_SUCCESS;

out:
	dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
		 "%s: code: %#x state: %s\n", __func__, event_code,
		 rnc_state_name(state));
	return SCI_FAILURE;
}

enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
						 scics_sds_remote_node_context_callback cb_fn,
						 void *cb_p)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
		return SCI_SUCCESS;
	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_INITIAL:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		/* We have decided that a destruct request on the remote node
		 * context cannot fail, since the context is either already in
		 * the initial (destroyed) state or can be destroyed.
		 */
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_remote_node_context_suspend(
			struct sci_remote_node_context *sci_rnc,
			enum sci_remote_node_suspension_reasons suspend_reason,
			u32 suspend_type)
{
	enum scis_sds_remote_node_context_states state
		= sci_rnc->sm.current_state_id;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	enum sci_status status = SCI_FAILURE_INVALID_STATE;
	enum sci_remote_node_context_destination_state dest_param =
		RNC_DEST_UNSPECIFIED;

	dev_dbg(scirdev_to_dev(idev),
		"%s: current state %s, current suspend_type %x dest state %d,"
			" arg suspend_reason %d, arg suspend_type %x",
		__func__, rnc_state_name(state), sci_rnc->suspend_type,
		sci_rnc->destination_state, suspend_reason,
		suspend_type);

	/* Disable automatic state continuations if explicitly suspending. */
	if ((suspend_reason == SCI_HW_SUSPEND) ||
	    (sci_rnc->destination_state == RNC_DEST_FINAL))
		dest_param = sci_rnc->destination_state;

	switch (state) {
	case SCI_RNC_READY:
		break;
	case SCI_RNC_INVALIDATING:
		if (sci_rnc->destination_state == RNC_DEST_FINAL) {
			dev_warn(scirdev_to_dev(idev),
				 "%s: already destroying %p\n",
				 __func__, sci_rnc);
			return SCI_FAILURE_INVALID_STATE;
		}
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_RESUMING:
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_POSTING:
		/* Set the destination state to AWAIT - this signals the
		 * entry into the SCI_RNC_READY state that a suspension
		 * needs to be done immediately.
		 */
		if (sci_rnc->destination_state != RNC_DEST_FINAL)
			sci_rnc->destination_state = RNC_DEST_SUSPENDED;
		sci_rnc->suspend_type = suspend_type;
		sci_rnc->suspend_reason = suspend_reason;
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_TX_RX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
		    || (suspend_type == sci_rnc->suspend_type))
			return SCI_SUCCESS;
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	sci_rnc->destination_state = dest_param;
	sci_rnc->suspend_type = suspend_type;
	sci_rnc->suspend_reason = suspend_reason;

	if (status == SCI_SUCCESS) { /* Already in the destination state? */
		struct isci_host *ihost = idev->owning_port->owning_controller;

		wake_up_all(&ihost->eventq); /* Let observers look. */
		return SCI_SUCCESS;
	}
	if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
	    (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {

		if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
			isci_dev_set_hang_detection_timeout(idev, 0x00000001);

		sci_remote_device_post_request(
			idev, SCI_SOFTWARE_SUSPEND_CMD);
	}
	if (state != SCI_RNC_AWAIT_SUSPENSION)
		sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);

	return SCI_SUCCESS;
}
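
/*
 * Informal summary of the suspend paths above: a software-initiated suspend
 * (SCI_SW_SUSPEND_NORMAL or SCI_SW_SUSPEND_LINKHANG_DETECT) posts
 * SCI_SOFTWARE_SUSPEND_CMD to the hardware, with the link-hang variant also
 * arming the hang-detection timeout, and then the context waits in
 * SCI_RNC_AWAIT_SUSPENSION until the event handler sees the matching
 * SUSPEND_TX(_RX) event.  A hardware-reported suspend (SCI_HW_SUSPEND)
 * posts no command; only the suspend type and reason are recorded before
 * the same wait for the suspension event.
 */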

enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
					       scics_sds_remote_node_context_callback cb_fn,
					       void *cb_p)
{
	enum scis_sds_remote_node_context_states state;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);

	state = sci_rnc->sm.current_state_id;
	dev_dbg(scirdev_to_dev(idev),
		"%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
			"dev resume path %s\n",
		__func__, rnc_state_name(state), cb_fn, cb_p,
		sci_rnc->destination_state,
		test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
			? "<abort active>" : "<normal>");

	switch (state) {
	case SCI_RNC_INITIAL:
		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
			return SCI_FAILURE_INVALID_STATE;

		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
							RNC_DEST_READY);
		if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
			sci_remote_node_context_construct_buffer(sci_rnc);
			sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
		}
		return SCI_SUCCESS;

	case SCI_RNC_POSTING:
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_RESUMING:
		/* We are still waiting to post when a resume was
		 * requested.
		 */
		switch (sci_rnc->destination_state) {
		case RNC_DEST_SUSPENDED:
		case RNC_DEST_SUSPENDED_RESUME:
			/* Previously waiting to suspend after posting.
			 * Now continue onto resumption.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_SUSPENDED_RESUME);
			break;
		default:
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_READY);
			break;
		}
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
		{
			struct domain_device *dev = idev->domain_dev;

			/* If this is an expander attached SATA device we must
			 * invalidate and repost the RNC since this is the only
			 * way to clear the TCi to NCQ tag mapping table for
			 * the RNi.  All other device types we can just resume.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p, RNC_DEST_READY);

			if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
				if ((dev_is_sata(dev) && dev->parent) ||
				    (sci_rnc->destination_state == RNC_DEST_FINAL))
					sci_change_state(&sci_rnc->sm,
							 SCI_RNC_INVALIDATING);
				else
					sci_change_state(&sci_rnc->sm,
							 SCI_RNC_RESUMING);
			}
		}
		return SCI_SUCCESS;

	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_resume(
			sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
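
/*
 * Hypothetical caller-side sketch (the real callers live in the remote
 * device code): the owner of the RNC passes a completion routine that
 * should run once the context is usable again, e.g.
 *
 *	sci_remote_node_context_resume(&idev->rnc, resume_done_fn, idev);
 *
 * where resume_done_fn is a scics_sds_remote_node_context_callback.  The
 * callback is invoked from sci_remote_node_context_notify_user() once the
 * READY state (or, for a destruct, the INITIAL state) is entered.
 */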

enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
						 struct isci_request *ireq)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;

	switch (state) {
	case SCI_RNC_READY:
		return SCI_SUCCESS;
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	default:
		dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			"%s: invalid state %s\n", __func__,
			rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_remote_node_context_start_task(
	struct sci_remote_node_context *sci_rnc,
	struct isci_request *ireq,
	scics_sds_remote_node_context_callback cb_fn,
	void *cb_p)
{
	enum sci_status status = sci_remote_node_context_resume(sci_rnc,
								 cb_fn, cb_p);
	if (status != SCI_SUCCESS)
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: resume failed: %d\n", __func__, status);
	return status;
}

int sci_remote_node_context_is_safe_to_abort(
	struct sci_remote_node_context *sci_rnc)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_TX_RX_SUSPENDED:
		return 1;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
	case SCI_RNC_INITIAL:
		return 0;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %d\n", __func__, state);
		return 0;
	}
}
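
/*
 * Usage note (illustrative): sci_remote_node_context_start_io() only admits
 * new I/O while the context is READY; a suspended context returns
 * SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED so the caller can escalate to a
 * device reset.  sci_remote_node_context_start_task(), by contrast, resumes
 * the context first, which allows task management requests to proceed even
 * when the node was suspended.
 */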