1 /* 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 * The full GNU General Public License is included in this distribution 22 * in the file called LICENSE.GPL. 23 * 24 * BSD LICENSE 25 * 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 * All rights reserved. 28 * 29 * Redistribution and use in source and binary forms, with or without 30 * modification, are permitted provided that the following conditions 31 * are met: 32 * 33 * * Redistributions of source code must retain the above copyright 34 * notice, this list of conditions and the following disclaimer. 35 * * Redistributions in binary form must reproduce the above copyright 36 * notice, this list of conditions and the following disclaimer in 37 * the documentation and/or other materials provided with the 38 * distribution. 39 * * Neither the name of Intel Corporation nor the names of its 40 * contributors may be used to endorse or promote products derived 41 * from this software without specific prior written permission. 42 * 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
54 */ 55 #include <linux/circ_buf.h> 56 #include <linux/device.h> 57 #include <scsi/sas.h> 58 #include "host.h" 59 #include "isci.h" 60 #include "port.h" 61 #include "host.h" 62 #include "probe_roms.h" 63 #include "remote_device.h" 64 #include "request.h" 65 #include "scu_completion_codes.h" 66 #include "scu_event_codes.h" 67 #include "registers.h" 68 #include "scu_remote_node_context.h" 69 #include "scu_task_context.h" 70 71 #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200 72 73 #define smu_max_ports(dcc_value) \ 74 (\ 75 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \ 76 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \ 77 ) 78 79 #define smu_max_task_contexts(dcc_value) \ 80 (\ 81 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \ 82 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \ 83 ) 84 85 #define smu_max_rncs(dcc_value) \ 86 (\ 87 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \ 88 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \ 89 ) 90 91 #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100 92 93 /** 94 * 95 * 96 * The number of milliseconds to wait while a given phy is consuming power 97 * before allowing another set of phys to consume power. Ultimately, this will 98 * be specified by OEM parameter. 99 */ 100 #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500 101 102 /** 103 * NORMALIZE_PUT_POINTER() - 104 * 105 * This macro will normalize the completion queue put pointer so its value can 106 * be used as an array inde 107 */ 108 #define NORMALIZE_PUT_POINTER(x) \ 109 ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK) 110 111 112 /** 113 * NORMALIZE_EVENT_POINTER() - 114 * 115 * This macro will normalize the completion queue event entry so its value can 116 * be used as an index. 117 */ 118 #define NORMALIZE_EVENT_POINTER(x) \ 119 (\ 120 ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \ 121 >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \ 122 ) 123 124 /** 125 * NORMALIZE_GET_POINTER() - 126 * 127 * This macro will normalize the completion queue get pointer so its value can 128 * be used as an index into an array 129 */ 130 #define NORMALIZE_GET_POINTER(x) \ 131 ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK) 132 133 /** 134 * NORMALIZE_GET_POINTER_CYCLE_BIT() - 135 * 136 * This macro will normalize the completion queue cycle pointer so it matches 137 * the completion queue cycle bit 138 */ 139 #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \ 140 ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT)) 141 142 /** 143 * COMPLETION_QUEUE_CYCLE_BIT() - 144 * 145 * This macro will return the cycle bit of the completion queue entry 146 */ 147 #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000) 148 149 /* Init the state machine and call the state entry function (if any) */ 150 void sci_init_sm(struct sci_base_state_machine *sm, 151 const struct sci_base_state *state_table, u32 initial_state) 152 { 153 sci_state_transition_t handler; 154 155 sm->initial_state_id = initial_state; 156 sm->previous_state_id = initial_state; 157 sm->current_state_id = initial_state; 158 sm->state_table = state_table; 159 160 handler = sm->state_table[initial_state].enter_state; 161 if (handler) 162 handler(sm); 163 } 164 165 /* Call the state exit fn, update the current state, call the state entry fn */ 166 void sci_change_state(struct sci_base_state_machine *sm, u32 next_state) 167 { 168 sci_state_transition_t handler; 169 170 handler = sm->state_table[sm->current_state_id].exit_state; 171 if (handler) 172 handler(sm); 173 174 sm->previous_state_id = 
sm->current_state_id; 175 sm->current_state_id = next_state; 176 177 handler = sm->state_table[sm->current_state_id].enter_state; 178 if (handler) 179 handler(sm); 180 } 181 182 static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost) 183 { 184 u32 get_value = ihost->completion_queue_get; 185 u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; 186 187 if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) == 188 COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])) 189 return true; 190 191 return false; 192 } 193 194 static bool sci_controller_isr(struct isci_host *ihost) 195 { 196 if (sci_controller_completion_queue_has_entries(ihost)) { 197 return true; 198 } else { 199 /* 200 * we have a spurious interrupt it could be that we have already 201 * emptied the completion queue from a previous interrupt */ 202 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); 203 204 /* 205 * There is a race in the hardware that could cause us not to be notified 206 * of an interrupt completion if we do not take this step. We will mask 207 * then unmask the interrupts so if there is another interrupt pending 208 * the clearing of the interrupt source we get the next interrupt message. */ 209 writel(0xFF000000, &ihost->smu_registers->interrupt_mask); 210 writel(0, &ihost->smu_registers->interrupt_mask); 211 } 212 213 return false; 214 } 215 216 irqreturn_t isci_msix_isr(int vec, void *data) 217 { 218 struct isci_host *ihost = data; 219 220 if (sci_controller_isr(ihost)) 221 tasklet_schedule(&ihost->completion_tasklet); 222 223 return IRQ_HANDLED; 224 } 225 226 static bool sci_controller_error_isr(struct isci_host *ihost) 227 { 228 u32 interrupt_status; 229 230 interrupt_status = 231 readl(&ihost->smu_registers->interrupt_status); 232 interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND); 233 234 if (interrupt_status != 0) { 235 /* 236 * There is an error interrupt pending so let it through and handle 237 * in the callback */ 238 return true; 239 } 240 241 /* 242 * There is a race in the hardware that could cause us not to be notified 243 * of an interrupt completion if we do not take this step. We will mask 244 * then unmask the error interrupts so if there was another interrupt 245 * pending we will be notified. 246 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? 
*/ 247 writel(0xff, &ihost->smu_registers->interrupt_mask); 248 writel(0, &ihost->smu_registers->interrupt_mask); 249 250 return false; 251 } 252 253 static void sci_controller_task_completion(struct isci_host *ihost, u32 ent) 254 { 255 u32 index = SCU_GET_COMPLETION_INDEX(ent); 256 struct isci_request *ireq = ihost->reqs[index]; 257 258 /* Make sure that we really want to process this IO request */ 259 if (test_bit(IREQ_ACTIVE, &ireq->flags) && 260 ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && 261 ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) 262 /* Yep this is a valid io request pass it along to the 263 * io request handler 264 */ 265 sci_io_request_tc_completion(ireq, ent); 266 } 267 268 static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent) 269 { 270 u32 index; 271 struct isci_request *ireq; 272 struct isci_remote_device *idev; 273 274 index = SCU_GET_COMPLETION_INDEX(ent); 275 276 switch (scu_get_command_request_type(ent)) { 277 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: 278 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: 279 ireq = ihost->reqs[index]; 280 dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", 281 __func__, ent, ireq); 282 /* @todo For a post TC operation we need to fail the IO 283 * request 284 */ 285 break; 286 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: 287 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: 288 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: 289 idev = ihost->device_table[index]; 290 dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", 291 __func__, ent, idev); 292 /* @todo For a port RNC operation we need to fail the 293 * device 294 */ 295 break; 296 default: 297 dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", 298 __func__, ent); 299 break; 300 } 301 } 302 303 static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent) 304 { 305 u32 index; 306 u32 frame_index; 307 308 struct scu_unsolicited_frame_header *frame_header; 309 struct isci_phy *iphy; 310 struct isci_remote_device *idev; 311 312 enum sci_status result = SCI_FAILURE; 313 314 frame_index = SCU_GET_FRAME_INDEX(ent); 315 316 frame_header = ihost->uf_control.buffers.array[frame_index].header; 317 ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; 318 319 if (SCU_GET_FRAME_ERROR(ent)) { 320 /* 321 * / @todo If the IAF frame or SIGNATURE FIS frame has an error will 322 * / this cause a problem? We expect the phy initialization will 323 * / fail if there is an error in the frame. */ 324 sci_controller_release_frame(ihost, frame_index); 325 return; 326 } 327 328 if (frame_header->is_address_frame) { 329 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); 330 iphy = &ihost->phys[index]; 331 result = sci_phy_frame_handler(iphy, frame_index); 332 } else { 333 334 index = SCU_GET_COMPLETION_INDEX(ent); 335 336 if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { 337 /* 338 * This is a signature fis or a frame from a direct attached SATA 339 * device that has not yet been created. In either case forwared 340 * the frame to the PE and let it take care of the frame data. 
*/ 341 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); 342 iphy = &ihost->phys[index]; 343 result = sci_phy_frame_handler(iphy, frame_index); 344 } else { 345 if (index < ihost->remote_node_entries) 346 idev = ihost->device_table[index]; 347 else 348 idev = NULL; 349 350 if (idev != NULL) 351 result = sci_remote_device_frame_handler(idev, frame_index); 352 else 353 sci_controller_release_frame(ihost, frame_index); 354 } 355 } 356 357 if (result != SCI_SUCCESS) { 358 /* 359 * / @todo Is there any reason to report some additional error message 360 * / when we get this failure notifiction? */ 361 } 362 } 363 364 static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) 365 { 366 struct isci_remote_device *idev; 367 struct isci_request *ireq; 368 struct isci_phy *iphy; 369 u32 index; 370 371 index = SCU_GET_COMPLETION_INDEX(ent); 372 373 switch (scu_get_event_type(ent)) { 374 case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: 375 /* / @todo The driver did something wrong and we need to fix the condtion. */ 376 dev_err(&ihost->pdev->dev, 377 "%s: SCIC Controller 0x%p received SMU command error " 378 "0x%x\n", 379 __func__, 380 ihost, 381 ent); 382 break; 383 384 case SCU_EVENT_TYPE_SMU_PCQ_ERROR: 385 case SCU_EVENT_TYPE_SMU_ERROR: 386 case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR: 387 /* 388 * / @todo This is a hardware failure and its likely that we want to 389 * / reset the controller. */ 390 dev_err(&ihost->pdev->dev, 391 "%s: SCIC Controller 0x%p received fatal controller " 392 "event 0x%x\n", 393 __func__, 394 ihost, 395 ent); 396 break; 397 398 case SCU_EVENT_TYPE_TRANSPORT_ERROR: 399 ireq = ihost->reqs[index]; 400 sci_io_request_event_handler(ireq, ent); 401 break; 402 403 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: 404 switch (scu_get_event_specifier(ent)) { 405 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: 406 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: 407 ireq = ihost->reqs[index]; 408 if (ireq != NULL) 409 sci_io_request_event_handler(ireq, ent); 410 else 411 dev_warn(&ihost->pdev->dev, 412 "%s: SCIC Controller 0x%p received " 413 "event 0x%x for io request object " 414 "that doesnt exist.\n", 415 __func__, 416 ihost, 417 ent); 418 419 break; 420 421 case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: 422 idev = ihost->device_table[index]; 423 if (idev != NULL) 424 sci_remote_device_event_handler(idev, ent); 425 else 426 dev_warn(&ihost->pdev->dev, 427 "%s: SCIC Controller 0x%p received " 428 "event 0x%x for remote device object " 429 "that doesnt exist.\n", 430 __func__, 431 ihost, 432 ent); 433 434 break; 435 } 436 break; 437 438 case SCU_EVENT_TYPE_BROADCAST_CHANGE: 439 /* 440 * direct the broadcast change event to the phy first and then let 441 * the phy redirect the broadcast change to the port object */ 442 case SCU_EVENT_TYPE_ERR_CNT_EVENT: 443 /* 444 * direct error counter event to the phy object since that is where 445 * we get the event notification. This is a type 4 event. 
*/ 446 case SCU_EVENT_TYPE_OSSP_EVENT: 447 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); 448 iphy = &ihost->phys[index]; 449 sci_phy_event_handler(iphy, ent); 450 break; 451 452 case SCU_EVENT_TYPE_RNC_SUSPEND_TX: 453 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: 454 case SCU_EVENT_TYPE_RNC_OPS_MISC: 455 if (index < ihost->remote_node_entries) { 456 idev = ihost->device_table[index]; 457 458 if (idev != NULL) 459 sci_remote_device_event_handler(idev, ent); 460 } else 461 dev_err(&ihost->pdev->dev, 462 "%s: SCIC Controller 0x%p received event 0x%x " 463 "for remote device object 0x%0x that doesnt " 464 "exist.\n", 465 __func__, 466 ihost, 467 ent, 468 index); 469 470 break; 471 472 default: 473 dev_warn(&ihost->pdev->dev, 474 "%s: SCIC Controller received unknown event code %x\n", 475 __func__, 476 ent); 477 break; 478 } 479 } 480 481 static void sci_controller_process_completions(struct isci_host *ihost) 482 { 483 u32 completion_count = 0; 484 u32 ent; 485 u32 get_index; 486 u32 get_cycle; 487 u32 event_get; 488 u32 event_cycle; 489 490 dev_dbg(&ihost->pdev->dev, 491 "%s: completion queue begining get:0x%08x\n", 492 __func__, 493 ihost->completion_queue_get); 494 495 /* Get the component parts of the completion queue */ 496 get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get); 497 get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get; 498 499 event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get); 500 event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get; 501 502 while ( 503 NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) 504 == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]) 505 ) { 506 completion_count++; 507 508 ent = ihost->completion_queue[get_index]; 509 510 /* increment the get pointer and check for rollover to toggle the cycle bit */ 511 get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << 512 (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT); 513 get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1); 514 515 dev_dbg(&ihost->pdev->dev, 516 "%s: completion queue entry:0x%08x\n", 517 __func__, 518 ent); 519 520 switch (SCU_GET_COMPLETION_TYPE(ent)) { 521 case SCU_COMPLETION_TYPE_TASK: 522 sci_controller_task_completion(ihost, ent); 523 break; 524 525 case SCU_COMPLETION_TYPE_SDMA: 526 sci_controller_sdma_completion(ihost, ent); 527 break; 528 529 case SCU_COMPLETION_TYPE_UFI: 530 sci_controller_unsolicited_frame(ihost, ent); 531 break; 532 533 case SCU_COMPLETION_TYPE_EVENT: 534 case SCU_COMPLETION_TYPE_NOTIFY: { 535 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << 536 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); 537 event_get = (event_get+1) & (SCU_MAX_EVENTS-1); 538 539 sci_controller_event_completion(ihost, ent); 540 break; 541 } 542 default: 543 dev_warn(&ihost->pdev->dev, 544 "%s: SCIC Controller received unknown " 545 "completion type %x\n", 546 __func__, 547 ent); 548 break; 549 } 550 } 551 552 /* Update the get register if we completed one or more entries */ 553 if (completion_count > 0) { 554 ihost->completion_queue_get = 555 SMU_CQGR_GEN_BIT(ENABLE) | 556 SMU_CQGR_GEN_BIT(EVENT_ENABLE) | 557 event_cycle | 558 SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) | 559 get_cycle | 560 SMU_CQGR_GEN_VAL(POINTER, get_index); 561 562 writel(ihost->completion_queue_get, 563 &ihost->smu_registers->completion_queue_get); 564 565 } 566 567 dev_dbg(&ihost->pdev->dev, 568 "%s: completion queue ending get:0x%08x\n", 569 __func__, 570 ihost->completion_queue_get); 571 572 } 
573 574 static void sci_controller_error_handler(struct isci_host *ihost) 575 { 576 u32 interrupt_status; 577 578 interrupt_status = 579 readl(&ihost->smu_registers->interrupt_status); 580 581 if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && 582 sci_controller_completion_queue_has_entries(ihost)) { 583 584 sci_controller_process_completions(ihost); 585 writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); 586 } else { 587 dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, 588 interrupt_status); 589 590 sci_change_state(&ihost->sm, SCIC_FAILED); 591 592 return; 593 } 594 595 /* If we dont process any completions I am not sure that we want to do this. 596 * We are in the middle of a hardware fault and should probably be reset. 597 */ 598 writel(0, &ihost->smu_registers->interrupt_mask); 599 } 600 601 irqreturn_t isci_intx_isr(int vec, void *data) 602 { 603 irqreturn_t ret = IRQ_NONE; 604 struct isci_host *ihost = data; 605 606 if (sci_controller_isr(ihost)) { 607 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); 608 tasklet_schedule(&ihost->completion_tasklet); 609 ret = IRQ_HANDLED; 610 } else if (sci_controller_error_isr(ihost)) { 611 spin_lock(&ihost->scic_lock); 612 sci_controller_error_handler(ihost); 613 spin_unlock(&ihost->scic_lock); 614 ret = IRQ_HANDLED; 615 } 616 617 return ret; 618 } 619 620 irqreturn_t isci_error_isr(int vec, void *data) 621 { 622 struct isci_host *ihost = data; 623 624 if (sci_controller_error_isr(ihost)) 625 sci_controller_error_handler(ihost); 626 627 return IRQ_HANDLED; 628 } 629 630 /** 631 * isci_host_start_complete() - This function is called by the core library, 632 * through the ISCI Module, to indicate controller start status. 633 * @isci_host: This parameter specifies the ISCI host object 634 * @completion_status: This parameter specifies the completion status from the 635 * core library. 636 * 637 */ 638 static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status) 639 { 640 if (completion_status != SCI_SUCCESS) 641 dev_info(&ihost->pdev->dev, 642 "controller start timed out, continuing...\n"); 643 isci_host_change_state(ihost, isci_ready); 644 clear_bit(IHOST_START_PENDING, &ihost->flags); 645 wake_up(&ihost->eventq); 646 } 647 648 int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) 649 { 650 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; 651 652 if (test_bit(IHOST_START_PENDING, &ihost->flags)) 653 return 0; 654 655 /* todo: use sas_flush_discovery once it is upstream */ 656 scsi_flush_work(shost); 657 658 scsi_flush_work(shost); 659 660 dev_dbg(&ihost->pdev->dev, 661 "%s: ihost->status = %d, time = %ld\n", 662 __func__, isci_host_get_state(ihost), time); 663 664 return 1; 665 666 } 667 668 /** 669 * sci_controller_get_suggested_start_timeout() - This method returns the 670 * suggested sci_controller_start() timeout amount. The user is free to 671 * use any timeout value, but this method provides the suggested minimum 672 * start timeout value. The returned value is based upon empirical 673 * information determined as a result of interoperability testing. 674 * @controller: the handle to the controller object for which to return the 675 * suggested start timeout. 676 * 677 * This method returns the number of milliseconds for the suggested start 678 * operation timeout. 679 */ 680 static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost) 681 { 682 /* Validate the user supplied parameters. 
*/ 683 if (!ihost) 684 return 0; 685 686 /* 687 * The suggested minimum timeout value for a controller start operation: 688 * 689 * Signature FIS Timeout 690 * + Phy Start Timeout 691 * + Number of Phy Spin Up Intervals 692 * --------------------------------- 693 * Number of milliseconds for the controller start operation. 694 * 695 * NOTE: The number of phy spin up intervals will be equivalent 696 * to the number of phys divided by the number phys allowed 697 * per interval - 1 (once OEM parameters are supported). 698 * Currently we assume only 1 phy per interval. */ 699 700 return SCIC_SDS_SIGNATURE_FIS_TIMEOUT 701 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 702 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); 703 } 704 705 static void sci_controller_enable_interrupts(struct isci_host *ihost) 706 { 707 BUG_ON(ihost->smu_registers == NULL); 708 writel(0, &ihost->smu_registers->interrupt_mask); 709 } 710 711 void sci_controller_disable_interrupts(struct isci_host *ihost) 712 { 713 BUG_ON(ihost->smu_registers == NULL); 714 writel(0xffffffff, &ihost->smu_registers->interrupt_mask); 715 } 716 717 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) 718 { 719 u32 port_task_scheduler_value; 720 721 port_task_scheduler_value = 722 readl(&ihost->scu_registers->peg0.ptsg.control); 723 port_task_scheduler_value |= 724 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | 725 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE)); 726 writel(port_task_scheduler_value, 727 &ihost->scu_registers->peg0.ptsg.control); 728 } 729 730 static void sci_controller_assign_task_entries(struct isci_host *ihost) 731 { 732 u32 task_assignment; 733 734 /* 735 * Assign all the TCs to function 0 736 * TODO: Do we actually need to read this register to write it back? 737 */ 738 739 task_assignment = 740 readl(&ihost->smu_registers->task_context_assignment[0]); 741 742 task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) | 743 (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) | 744 (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE)); 745 746 writel(task_assignment, 747 &ihost->smu_registers->task_context_assignment[0]); 748 749 } 750 751 static void sci_controller_initialize_completion_queue(struct isci_host *ihost) 752 { 753 u32 index; 754 u32 completion_queue_control_value; 755 u32 completion_queue_get_value; 756 u32 completion_queue_put_value; 757 758 ihost->completion_queue_get = 0; 759 760 completion_queue_control_value = 761 (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) | 762 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1)); 763 764 writel(completion_queue_control_value, 765 &ihost->smu_registers->completion_queue_control); 766 767 768 /* Set the completion queue get pointer and enable the queue */ 769 completion_queue_get_value = ( 770 (SMU_CQGR_GEN_VAL(POINTER, 0)) 771 | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0)) 772 | (SMU_CQGR_GEN_BIT(ENABLE)) 773 | (SMU_CQGR_GEN_BIT(EVENT_ENABLE)) 774 ); 775 776 writel(completion_queue_get_value, 777 &ihost->smu_registers->completion_queue_get); 778 779 /* Set the completion queue put pointer */ 780 completion_queue_put_value = ( 781 (SMU_CQPR_GEN_VAL(POINTER, 0)) 782 | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0)) 783 ); 784 785 writel(completion_queue_put_value, 786 &ihost->smu_registers->completion_queue_put); 787 788 /* Initialize the cycle bit of the completion queue entries */ 789 for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) { 790 /* 791 * If get.cycle_bit != completion_queue.cycle_bit 792 * its not a valid completion queue entry 793 * so at system start 
all entries are invalid */ 794 ihost->completion_queue[index] = 0x80000000; 795 } 796 } 797 798 static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) 799 { 800 u32 frame_queue_control_value; 801 u32 frame_queue_get_value; 802 u32 frame_queue_put_value; 803 804 /* Write the queue size */ 805 frame_queue_control_value = 806 SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES); 807 808 writel(frame_queue_control_value, 809 &ihost->scu_registers->sdma.unsolicited_frame_queue_control); 810 811 /* Setup the get pointer for the unsolicited frame queue */ 812 frame_queue_get_value = ( 813 SCU_UFQGP_GEN_VAL(POINTER, 0) 814 | SCU_UFQGP_GEN_BIT(ENABLE_BIT) 815 ); 816 817 writel(frame_queue_get_value, 818 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); 819 /* Setup the put pointer for the unsolicited frame queue */ 820 frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0); 821 writel(frame_queue_put_value, 822 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); 823 } 824 825 static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) 826 { 827 if (ihost->sm.current_state_id == SCIC_STARTING) { 828 /* 829 * We move into the ready state, because some of the phys/ports 830 * may be up and operational. 831 */ 832 sci_change_state(&ihost->sm, SCIC_READY); 833 834 isci_host_start_complete(ihost, status); 835 } 836 } 837 838 static bool is_phy_starting(struct isci_phy *iphy) 839 { 840 enum sci_phy_states state; 841 842 state = iphy->sm.current_state_id; 843 switch (state) { 844 case SCI_PHY_STARTING: 845 case SCI_PHY_SUB_INITIAL: 846 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: 847 case SCI_PHY_SUB_AWAIT_IAF_UF: 848 case SCI_PHY_SUB_AWAIT_SAS_POWER: 849 case SCI_PHY_SUB_AWAIT_SATA_POWER: 850 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: 851 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: 852 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: 853 case SCI_PHY_SUB_FINAL: 854 return true; 855 default: 856 return false; 857 } 858 } 859 860 /** 861 * sci_controller_start_next_phy - start phy 862 * @scic: controller 863 * 864 * If all the phys have been started, then attempt to transition the 865 * controller to the READY state and inform the user 866 * (sci_cb_controller_start_complete()). 867 */ 868 static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) 869 { 870 struct sci_oem_params *oem = &ihost->oem_parameters; 871 struct isci_phy *iphy; 872 enum sci_status status; 873 874 status = SCI_SUCCESS; 875 876 if (ihost->phy_startup_timer_pending) 877 return status; 878 879 if (ihost->next_phy_to_start >= SCI_MAX_PHYS) { 880 bool is_controller_start_complete = true; 881 u32 state; 882 u8 index; 883 884 for (index = 0; index < SCI_MAX_PHYS; index++) { 885 iphy = &ihost->phys[index]; 886 state = iphy->sm.current_state_id; 887 888 if (!phy_get_non_dummy_port(iphy)) 889 continue; 890 891 /* The controller start operation is complete iff: 892 * - all links have been given an opportunity to start 893 * - have no indication of a connected device 894 * - have an indication of a connected device and it has 895 * finished the link training process. 896 */ 897 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || 898 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || 899 (iphy->is_in_link_training == true && is_phy_starting(iphy))) { 900 is_controller_start_complete = false; 901 break; 902 } 903 } 904 905 /* 906 * The controller has successfully finished the start process. 
907 * Inform the SCI Core user and transition to the READY state. */ 908 if (is_controller_start_complete == true) { 909 sci_controller_transition_to_ready(ihost, SCI_SUCCESS); 910 sci_del_timer(&ihost->phy_timer); 911 ihost->phy_startup_timer_pending = false; 912 } 913 } else { 914 iphy = &ihost->phys[ihost->next_phy_to_start]; 915 916 if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { 917 if (phy_get_non_dummy_port(iphy) == NULL) { 918 ihost->next_phy_to_start++; 919 920 /* Caution recursion ahead be forwarned 921 * 922 * The PHY was never added to a PORT in MPC mode 923 * so start the next phy in sequence This phy 924 * will never go link up and will not draw power 925 * the OEM parameters either configured the phy 926 * incorrectly for the PORT or it was never 927 * assigned to a PORT 928 */ 929 return sci_controller_start_next_phy(ihost); 930 } 931 } 932 933 status = sci_phy_start(iphy); 934 935 if (status == SCI_SUCCESS) { 936 sci_mod_timer(&ihost->phy_timer, 937 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT); 938 ihost->phy_startup_timer_pending = true; 939 } else { 940 dev_warn(&ihost->pdev->dev, 941 "%s: Controller stop operation failed " 942 "to stop phy %d because of status " 943 "%d.\n", 944 __func__, 945 ihost->phys[ihost->next_phy_to_start].phy_index, 946 status); 947 } 948 949 ihost->next_phy_to_start++; 950 } 951 952 return status; 953 } 954 955 static void phy_startup_timeout(unsigned long data) 956 { 957 struct sci_timer *tmr = (struct sci_timer *)data; 958 struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer); 959 unsigned long flags; 960 enum sci_status status; 961 962 spin_lock_irqsave(&ihost->scic_lock, flags); 963 964 if (tmr->cancel) 965 goto done; 966 967 ihost->phy_startup_timer_pending = false; 968 969 do { 970 status = sci_controller_start_next_phy(ihost); 971 } while (status != SCI_SUCCESS); 972 973 done: 974 spin_unlock_irqrestore(&ihost->scic_lock, flags); 975 } 976 977 static u16 isci_tci_active(struct isci_host *ihost) 978 { 979 return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); 980 } 981 982 static enum sci_status sci_controller_start(struct isci_host *ihost, 983 u32 timeout) 984 { 985 enum sci_status result; 986 u16 index; 987 988 if (ihost->sm.current_state_id != SCIC_INITIALIZED) { 989 dev_warn(&ihost->pdev->dev, 990 "SCIC Controller start operation requested in " 991 "invalid state\n"); 992 return SCI_FAILURE_INVALID_STATE; 993 } 994 995 /* Build the TCi free pool */ 996 BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8); 997 ihost->tci_head = 0; 998 ihost->tci_tail = 0; 999 for (index = 0; index < ihost->task_context_entries; index++) 1000 isci_tci_free(ihost, index); 1001 1002 /* Build the RNi free pool */ 1003 sci_remote_node_table_initialize(&ihost->available_remote_nodes, 1004 ihost->remote_node_entries); 1005 1006 /* 1007 * Before anything else lets make sure we will not be 1008 * interrupted by the hardware. 
1009 */ 1010 sci_controller_disable_interrupts(ihost); 1011 1012 /* Enable the port task scheduler */ 1013 sci_controller_enable_port_task_scheduler(ihost); 1014 1015 /* Assign all the task entries to ihost physical function */ 1016 sci_controller_assign_task_entries(ihost); 1017 1018 /* Now initialize the completion queue */ 1019 sci_controller_initialize_completion_queue(ihost); 1020 1021 /* Initialize the unsolicited frame queue for use */ 1022 sci_controller_initialize_unsolicited_frame_queue(ihost); 1023 1024 /* Start all of the ports on this controller */ 1025 for (index = 0; index < ihost->logical_port_entries; index++) { 1026 struct isci_port *iport = &ihost->ports[index]; 1027 1028 result = sci_port_start(iport); 1029 if (result) 1030 return result; 1031 } 1032 1033 sci_controller_start_next_phy(ihost); 1034 1035 sci_mod_timer(&ihost->timer, timeout); 1036 1037 sci_change_state(&ihost->sm, SCIC_STARTING); 1038 1039 return SCI_SUCCESS; 1040 } 1041 1042 void isci_host_scan_start(struct Scsi_Host *shost) 1043 { 1044 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; 1045 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost); 1046 1047 set_bit(IHOST_START_PENDING, &ihost->flags); 1048 1049 spin_lock_irq(&ihost->scic_lock); 1050 sci_controller_start(ihost, tmo); 1051 sci_controller_enable_interrupts(ihost); 1052 spin_unlock_irq(&ihost->scic_lock); 1053 } 1054 1055 static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) 1056 { 1057 isci_host_change_state(ihost, isci_stopped); 1058 sci_controller_disable_interrupts(ihost); 1059 clear_bit(IHOST_STOP_PENDING, &ihost->flags); 1060 wake_up(&ihost->eventq); 1061 } 1062 1063 static void sci_controller_completion_handler(struct isci_host *ihost) 1064 { 1065 /* Empty out the completion queue */ 1066 if (sci_controller_completion_queue_has_entries(ihost)) 1067 sci_controller_process_completions(ihost); 1068 1069 /* Clear the interrupt and enable all interrupts again */ 1070 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); 1071 /* Could we write the value of SMU_ISR_COMPLETION? */ 1072 writel(0xFF000000, &ihost->smu_registers->interrupt_mask); 1073 writel(0, &ihost->smu_registers->interrupt_mask); 1074 } 1075 1076 /** 1077 * isci_host_completion_routine() - This function is the delayed service 1078 * routine that calls the sci core library's completion handler. It's 1079 * scheduled as a tasklet from the interrupt service routine when interrupts 1080 * in use, or set as the timeout function in polled mode. 1081 * @data: This parameter specifies the ISCI host object 1082 * 1083 */ 1084 static void isci_host_completion_routine(unsigned long data) 1085 { 1086 struct isci_host *ihost = (struct isci_host *)data; 1087 struct list_head completed_request_list; 1088 struct list_head errored_request_list; 1089 struct list_head *current_position; 1090 struct list_head *next_position; 1091 struct isci_request *request; 1092 struct isci_request *next_request; 1093 struct sas_task *task; 1094 1095 INIT_LIST_HEAD(&completed_request_list); 1096 INIT_LIST_HEAD(&errored_request_list); 1097 1098 spin_lock_irq(&ihost->scic_lock); 1099 1100 sci_controller_completion_handler(ihost); 1101 1102 /* Take the lists of completed I/Os from the host. */ 1103 1104 list_splice_init(&ihost->requests_to_complete, 1105 &completed_request_list); 1106 1107 /* Take the list of errored I/Os from the host. 
*/ 1108 list_splice_init(&ihost->requests_to_errorback, 1109 &errored_request_list); 1110 1111 spin_unlock_irq(&ihost->scic_lock); 1112 1113 /* Process any completions in the lists. */ 1114 list_for_each_safe(current_position, next_position, 1115 &completed_request_list) { 1116 1117 request = list_entry(current_position, struct isci_request, 1118 completed_node); 1119 task = isci_request_access_task(request); 1120 1121 /* Normal notification (task_done) */ 1122 dev_dbg(&ihost->pdev->dev, 1123 "%s: Normal - request/task = %p/%p\n", 1124 __func__, 1125 request, 1126 task); 1127 1128 /* Return the task to libsas */ 1129 if (task != NULL) { 1130 1131 task->lldd_task = NULL; 1132 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1133 1134 /* If the task is already in the abort path, 1135 * the task_done callback cannot be called. 1136 */ 1137 task->task_done(task); 1138 } 1139 } 1140 1141 spin_lock_irq(&ihost->scic_lock); 1142 isci_free_tag(ihost, request->io_tag); 1143 spin_unlock_irq(&ihost->scic_lock); 1144 } 1145 list_for_each_entry_safe(request, next_request, &errored_request_list, 1146 completed_node) { 1147 1148 task = isci_request_access_task(request); 1149 1150 /* Use sas_task_abort */ 1151 dev_warn(&ihost->pdev->dev, 1152 "%s: Error - request/task = %p/%p\n", 1153 __func__, 1154 request, 1155 task); 1156 1157 if (task != NULL) { 1158 1159 /* Put the task into the abort path if it's not there 1160 * already. 1161 */ 1162 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) 1163 sas_task_abort(task); 1164 1165 } else { 1166 /* This is a case where the request has completed with a 1167 * status such that it needed further target servicing, 1168 * but the sas_task reference has already been removed 1169 * from the request. Since it was errored, it was not 1170 * being aborted, so there is nothing to do except free 1171 * it. 1172 */ 1173 1174 spin_lock_irq(&ihost->scic_lock); 1175 /* Remove the request from the remote device's list 1176 * of pending requests. 1177 */ 1178 list_del_init(&request->dev_node); 1179 isci_free_tag(ihost, request->io_tag); 1180 spin_unlock_irq(&ihost->scic_lock); 1181 } 1182 } 1183 1184 } 1185 1186 /** 1187 * sci_controller_stop() - This method will stop an individual controller 1188 * object.This method will invoke the associated user callback upon 1189 * completion. The completion callback is called when the following 1190 * conditions are met: -# the method return status is SCI_SUCCESS. -# the 1191 * controller has been quiesced. This method will ensure that all IO 1192 * requests are quiesced, phys are stopped, and all additional operation by 1193 * the hardware is halted. 1194 * @controller: the handle to the controller object to stop. 1195 * @timeout: This parameter specifies the number of milliseconds in which the 1196 * stop operation should complete. 1197 * 1198 * The controller must be in the STARTED or STOPPED state. Indicate if the 1199 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the 1200 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the 1201 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the 1202 * controller is not either in the STARTED or STOPPED states. 
1203 */ 1204 static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) 1205 { 1206 if (ihost->sm.current_state_id != SCIC_READY) { 1207 dev_warn(&ihost->pdev->dev, 1208 "SCIC Controller stop operation requested in " 1209 "invalid state\n"); 1210 return SCI_FAILURE_INVALID_STATE; 1211 } 1212 1213 sci_mod_timer(&ihost->timer, timeout); 1214 sci_change_state(&ihost->sm, SCIC_STOPPING); 1215 return SCI_SUCCESS; 1216 } 1217 1218 /** 1219 * sci_controller_reset() - This method will reset the supplied core 1220 * controller regardless of the state of said controller. This operation is 1221 * considered destructive. In other words, all current operations are wiped 1222 * out. No IO completions for outstanding devices occur. Outstanding IO 1223 * requests are not aborted or completed at the actual remote device. 1224 * @controller: the handle to the controller object to reset. 1225 * 1226 * Indicate if the controller reset method succeeded or failed in some way. 1227 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if 1228 * the controller reset operation is unable to complete. 1229 */ 1230 static enum sci_status sci_controller_reset(struct isci_host *ihost) 1231 { 1232 switch (ihost->sm.current_state_id) { 1233 case SCIC_RESET: 1234 case SCIC_READY: 1235 case SCIC_STOPPED: 1236 case SCIC_FAILED: 1237 /* 1238 * The reset operation is not a graceful cleanup, just 1239 * perform the state transition. 1240 */ 1241 sci_change_state(&ihost->sm, SCIC_RESETTING); 1242 return SCI_SUCCESS; 1243 default: 1244 dev_warn(&ihost->pdev->dev, 1245 "SCIC Controller reset operation requested in " 1246 "invalid state\n"); 1247 return SCI_FAILURE_INVALID_STATE; 1248 } 1249 } 1250 1251 void isci_host_deinit(struct isci_host *ihost) 1252 { 1253 int i; 1254 1255 isci_host_change_state(ihost, isci_stopping); 1256 for (i = 0; i < SCI_MAX_PORTS; i++) { 1257 struct isci_port *iport = &ihost->ports[i]; 1258 struct isci_remote_device *idev, *d; 1259 1260 list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) { 1261 if (test_bit(IDEV_ALLOCATED, &idev->flags)) 1262 isci_remote_device_stop(ihost, idev); 1263 } 1264 } 1265 1266 set_bit(IHOST_STOP_PENDING, &ihost->flags); 1267 1268 spin_lock_irq(&ihost->scic_lock); 1269 sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); 1270 spin_unlock_irq(&ihost->scic_lock); 1271 1272 wait_for_stop(ihost); 1273 sci_controller_reset(ihost); 1274 1275 /* Cancel any/all outstanding port timers */ 1276 for (i = 0; i < ihost->logical_port_entries; i++) { 1277 struct isci_port *iport = &ihost->ports[i]; 1278 del_timer_sync(&iport->timer.timer); 1279 } 1280 1281 /* Cancel any/all outstanding phy timers */ 1282 for (i = 0; i < SCI_MAX_PHYS; i++) { 1283 struct isci_phy *iphy = &ihost->phys[i]; 1284 del_timer_sync(&iphy->sata_timer.timer); 1285 } 1286 1287 del_timer_sync(&ihost->port_agent.timer.timer); 1288 1289 del_timer_sync(&ihost->power_control.timer.timer); 1290 1291 del_timer_sync(&ihost->timer.timer); 1292 1293 del_timer_sync(&ihost->phy_timer.timer); 1294 } 1295 1296 static void __iomem *scu_base(struct isci_host *isci_host) 1297 { 1298 struct pci_dev *pdev = isci_host->pdev; 1299 int id = isci_host->id; 1300 1301 return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id; 1302 } 1303 1304 static void __iomem *smu_base(struct isci_host *isci_host) 1305 { 1306 struct pci_dev *pdev = isci_host->pdev; 1307 int id = isci_host->id; 1308 1309 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; 1310 } 1311 
1312 static void isci_user_parameters_get(struct sci_user_parameters *u) 1313 { 1314 int i; 1315 1316 for (i = 0; i < SCI_MAX_PHYS; i++) { 1317 struct sci_phy_user_params *u_phy = &u->phys[i]; 1318 1319 u_phy->max_speed_generation = phy_gen; 1320 1321 /* we are not exporting these for now */ 1322 u_phy->align_insertion_frequency = 0x7f; 1323 u_phy->in_connection_align_insertion_frequency = 0xff; 1324 u_phy->notify_enable_spin_up_insertion_frequency = 0x33; 1325 } 1326 1327 u->stp_inactivity_timeout = stp_inactive_to; 1328 u->ssp_inactivity_timeout = ssp_inactive_to; 1329 u->stp_max_occupancy_timeout = stp_max_occ_to; 1330 u->ssp_max_occupancy_timeout = ssp_max_occ_to; 1331 u->no_outbound_task_timeout = no_outbound_task_to; 1332 u->max_number_concurrent_device_spin_up = max_concurr_spinup; 1333 } 1334 1335 static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) 1336 { 1337 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1338 1339 sci_change_state(&ihost->sm, SCIC_RESET); 1340 } 1341 1342 static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm) 1343 { 1344 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1345 1346 sci_del_timer(&ihost->timer); 1347 } 1348 1349 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853 1350 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280 1351 #define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000 1352 #define INTERRUPT_COALESCE_NUMBER_MAX 256 1353 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7 1354 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 1355 1356 /** 1357 * sci_controller_set_interrupt_coalescence() - This method allows the user to 1358 * configure the interrupt coalescence. 1359 * @controller: This parameter represents the handle to the controller object 1360 * for which its interrupt coalesce register is overridden. 1361 * @coalesce_number: Used to control the number of entries in the Completion 1362 * Queue before an interrupt is generated. If the number of entries exceed 1363 * this number, an interrupt will be generated. The valid range of the input 1364 * is [0, 256]. A setting of 0 results in coalescing being disabled. 1365 * @coalesce_timeout: Timeout value in microseconds. The valid range of the 1366 * input is [0, 2700000] . A setting of 0 is allowed and results in no 1367 * interrupt coalescing timeout. 1368 * 1369 * Indicate if the user successfully set the interrupt coalesce parameters. 1370 * SCI_SUCCESS The user successfully updated the interrutp coalescence. 1371 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. 1372 */ 1373 static enum sci_status 1374 sci_controller_set_interrupt_coalescence(struct isci_host *ihost, 1375 u32 coalesce_number, 1376 u32 coalesce_timeout) 1377 { 1378 u8 timeout_encode = 0; 1379 u32 min = 0; 1380 u32 max = 0; 1381 1382 /* Check if the input parameters fall in the range. 
*/ 1383 if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX) 1384 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 1385 1386 /* 1387 * Defined encoding for interrupt coalescing timeout: 1388 * Value Min Max Units 1389 * ----- --- --- ----- 1390 * 0 - - Disabled 1391 * 1 13.3 20.0 ns 1392 * 2 26.7 40.0 1393 * 3 53.3 80.0 1394 * 4 106.7 160.0 1395 * 5 213.3 320.0 1396 * 6 426.7 640.0 1397 * 7 853.3 1280.0 1398 * 8 1.7 2.6 us 1399 * 9 3.4 5.1 1400 * 10 6.8 10.2 1401 * 11 13.7 20.5 1402 * 12 27.3 41.0 1403 * 13 54.6 81.9 1404 * 14 109.2 163.8 1405 * 15 218.5 327.7 1406 * 16 436.9 655.4 1407 * 17 873.8 1310.7 1408 * 18 1.7 2.6 ms 1409 * 19 3.5 5.2 1410 * 20 7.0 10.5 1411 * 21 14.0 21.0 1412 * 22 28.0 41.9 1413 * 23 55.9 83.9 1414 * 24 111.8 167.8 1415 * 25 223.7 335.5 1416 * 26 447.4 671.1 1417 * 27 894.8 1342.2 1418 * 28 1.8 2.7 s 1419 * Others Undefined */ 1420 1421 /* 1422 * Use the table above to decide the encode of interrupt coalescing timeout 1423 * value for register writing. */ 1424 if (coalesce_timeout == 0) 1425 timeout_encode = 0; 1426 else{ 1427 /* make the timeout value in unit of (10 ns). */ 1428 coalesce_timeout = coalesce_timeout * 100; 1429 min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10; 1430 max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10; 1431 1432 /* get the encode of timeout for register writing. */ 1433 for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN; 1434 timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX; 1435 timeout_encode++) { 1436 if (min <= coalesce_timeout && max > coalesce_timeout) 1437 break; 1438 else if (coalesce_timeout >= max && coalesce_timeout < min * 2 1439 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) { 1440 if ((coalesce_timeout - max) < (2 * min - coalesce_timeout)) 1441 break; 1442 else{ 1443 timeout_encode++; 1444 break; 1445 } 1446 } else { 1447 max = max * 2; 1448 min = min * 2; 1449 } 1450 } 1451 1452 if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1) 1453 /* the value is out of range. */ 1454 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 1455 } 1456 1457 writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) | 1458 SMU_ICC_GEN_VAL(TIMER, timeout_encode), 1459 &ihost->smu_registers->interrupt_coalesce_control); 1460 1461 1462 ihost->interrupt_coalesce_number = (u16)coalesce_number; 1463 ihost->interrupt_coalesce_timeout = coalesce_timeout / 100; 1464 1465 return SCI_SUCCESS; 1466 } 1467 1468 1469 static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) 1470 { 1471 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1472 1473 /* set the default interrupt coalescence number and timeout value. */ 1474 sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); 1475 } 1476 1477 static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) 1478 { 1479 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1480 1481 /* disable interrupt coalescence. 
*/ 1482 sci_controller_set_interrupt_coalescence(ihost, 0, 0); 1483 } 1484 1485 static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) 1486 { 1487 u32 index; 1488 enum sci_status status; 1489 enum sci_status phy_status; 1490 1491 status = SCI_SUCCESS; 1492 1493 for (index = 0; index < SCI_MAX_PHYS; index++) { 1494 phy_status = sci_phy_stop(&ihost->phys[index]); 1495 1496 if (phy_status != SCI_SUCCESS && 1497 phy_status != SCI_FAILURE_INVALID_STATE) { 1498 status = SCI_FAILURE; 1499 1500 dev_warn(&ihost->pdev->dev, 1501 "%s: Controller stop operation failed to stop " 1502 "phy %d because of status %d.\n", 1503 __func__, 1504 ihost->phys[index].phy_index, phy_status); 1505 } 1506 } 1507 1508 return status; 1509 } 1510 1511 static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) 1512 { 1513 u32 index; 1514 enum sci_status port_status; 1515 enum sci_status status = SCI_SUCCESS; 1516 1517 for (index = 0; index < ihost->logical_port_entries; index++) { 1518 struct isci_port *iport = &ihost->ports[index]; 1519 1520 port_status = sci_port_stop(iport); 1521 1522 if ((port_status != SCI_SUCCESS) && 1523 (port_status != SCI_FAILURE_INVALID_STATE)) { 1524 status = SCI_FAILURE; 1525 1526 dev_warn(&ihost->pdev->dev, 1527 "%s: Controller stop operation failed to " 1528 "stop port %d because of status %d.\n", 1529 __func__, 1530 iport->logical_port_index, 1531 port_status); 1532 } 1533 } 1534 1535 return status; 1536 } 1537 1538 static enum sci_status sci_controller_stop_devices(struct isci_host *ihost) 1539 { 1540 u32 index; 1541 enum sci_status status; 1542 enum sci_status device_status; 1543 1544 status = SCI_SUCCESS; 1545 1546 for (index = 0; index < ihost->remote_node_entries; index++) { 1547 if (ihost->device_table[index] != NULL) { 1548 /* / @todo What timeout value do we want to provide to this request? */ 1549 device_status = sci_remote_device_stop(ihost->device_table[index], 0); 1550 1551 if ((device_status != SCI_SUCCESS) && 1552 (device_status != SCI_FAILURE_INVALID_STATE)) { 1553 dev_warn(&ihost->pdev->dev, 1554 "%s: Controller stop operation failed " 1555 "to stop device 0x%p because of " 1556 "status %d.\n", 1557 __func__, 1558 ihost->device_table[index], device_status); 1559 } 1560 } 1561 } 1562 1563 return status; 1564 } 1565 1566 static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm) 1567 { 1568 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1569 1570 /* Stop all of the components for this controller */ 1571 sci_controller_stop_phys(ihost); 1572 sci_controller_stop_ports(ihost); 1573 sci_controller_stop_devices(ihost); 1574 } 1575 1576 static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) 1577 { 1578 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1579 1580 sci_del_timer(&ihost->timer); 1581 } 1582 1583 static void sci_controller_reset_hardware(struct isci_host *ihost) 1584 { 1585 /* Disable interrupts so we dont take any spurious interrupts */ 1586 sci_controller_disable_interrupts(ihost); 1587 1588 /* Reset the SCU */ 1589 writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); 1590 1591 /* Delay for 1ms to before clearing the CQP and UFQPR. 
*/ 1592 udelay(1000); 1593 1594 /* The write to the CQGR clears the CQP */ 1595 writel(0x00000000, &ihost->smu_registers->completion_queue_get); 1596 1597 /* The write to the UFQGP clears the UFQPR */ 1598 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); 1599 } 1600 1601 static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) 1602 { 1603 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1604 1605 sci_controller_reset_hardware(ihost); 1606 sci_change_state(&ihost->sm, SCIC_RESET); 1607 } 1608 1609 static const struct sci_base_state sci_controller_state_table[] = { 1610 [SCIC_INITIAL] = { 1611 .enter_state = sci_controller_initial_state_enter, 1612 }, 1613 [SCIC_RESET] = {}, 1614 [SCIC_INITIALIZING] = {}, 1615 [SCIC_INITIALIZED] = {}, 1616 [SCIC_STARTING] = { 1617 .exit_state = sci_controller_starting_state_exit, 1618 }, 1619 [SCIC_READY] = { 1620 .enter_state = sci_controller_ready_state_enter, 1621 .exit_state = sci_controller_ready_state_exit, 1622 }, 1623 [SCIC_RESETTING] = { 1624 .enter_state = sci_controller_resetting_state_enter, 1625 }, 1626 [SCIC_STOPPING] = { 1627 .enter_state = sci_controller_stopping_state_enter, 1628 .exit_state = sci_controller_stopping_state_exit, 1629 }, 1630 [SCIC_STOPPED] = {}, 1631 [SCIC_FAILED] = {} 1632 }; 1633 1634 static void sci_controller_set_default_config_parameters(struct isci_host *ihost) 1635 { 1636 /* these defaults are overridden by the platform / firmware */ 1637 u16 index; 1638 1639 /* Default to APC mode. */ 1640 ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; 1641 1642 /* Default to APC mode. */ 1643 ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1; 1644 1645 /* Default to no SSC operation. */ 1646 ihost->oem_parameters.controller.do_enable_ssc = false; 1647 1648 /* Initialize all of the port parameter information to narrow ports. */ 1649 for (index = 0; index < SCI_MAX_PORTS; index++) { 1650 ihost->oem_parameters.ports[index].phy_mask = 0; 1651 } 1652 1653 /* Initialize all of the phy parameter information. */ 1654 for (index = 0; index < SCI_MAX_PHYS; index++) { 1655 /* Default to 6G (i.e. Gen 3) for now. */ 1656 ihost->user_parameters.phys[index].max_speed_generation = 3; 1657 1658 /* the frequencies cannot be 0 */ 1659 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; 1660 ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff; 1661 ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; 1662 1663 /* 1664 * Previous Vitesse based expanders had a arbitration issue that 1665 * is worked around by having the upper 32-bits of SAS address 1666 * with a value greater then the Vitesse company identifier. 1667 * Hence, usage of 0x5FCFFFFF. 
*/ 1668 ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id; 1669 ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF; 1670 } 1671 1672 ihost->user_parameters.stp_inactivity_timeout = 5; 1673 ihost->user_parameters.ssp_inactivity_timeout = 5; 1674 ihost->user_parameters.stp_max_occupancy_timeout = 5; 1675 ihost->user_parameters.ssp_max_occupancy_timeout = 20; 1676 ihost->user_parameters.no_outbound_task_timeout = 20; 1677 } 1678 1679 static void controller_timeout(unsigned long data) 1680 { 1681 struct sci_timer *tmr = (struct sci_timer *)data; 1682 struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer); 1683 struct sci_base_state_machine *sm = &ihost->sm; 1684 unsigned long flags; 1685 1686 spin_lock_irqsave(&ihost->scic_lock, flags); 1687 1688 if (tmr->cancel) 1689 goto done; 1690 1691 if (sm->current_state_id == SCIC_STARTING) 1692 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); 1693 else if (sm->current_state_id == SCIC_STOPPING) { 1694 sci_change_state(sm, SCIC_FAILED); 1695 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); 1696 } else /* / @todo Now what do we want to do in this case? */ 1697 dev_err(&ihost->pdev->dev, 1698 "%s: Controller timer fired when controller was not " 1699 "in a state being timed.\n", 1700 __func__); 1701 1702 done: 1703 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1704 } 1705 1706 static enum sci_status sci_controller_construct(struct isci_host *ihost, 1707 void __iomem *scu_base, 1708 void __iomem *smu_base) 1709 { 1710 u8 i; 1711 1712 sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL); 1713 1714 ihost->scu_registers = scu_base; 1715 ihost->smu_registers = smu_base; 1716 1717 sci_port_configuration_agent_construct(&ihost->port_agent); 1718 1719 /* Construct the ports for this controller */ 1720 for (i = 0; i < SCI_MAX_PORTS; i++) 1721 sci_port_construct(&ihost->ports[i], i, ihost); 1722 sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); 1723 1724 /* Construct the phys for this controller */ 1725 for (i = 0; i < SCI_MAX_PHYS; i++) { 1726 /* Add all the PHYs to the dummy port */ 1727 sci_phy_construct(&ihost->phys[i], 1728 &ihost->ports[SCI_MAX_PORTS], i); 1729 } 1730 1731 ihost->invalid_phy_mask = 0; 1732 1733 sci_init_timer(&ihost->timer, controller_timeout); 1734 1735 /* Initialize the User and OEM parameters to default values. 
*/ 1736 sci_controller_set_default_config_parameters(ihost); 1737 1738 return sci_controller_reset(ihost); 1739 } 1740 1741 int sci_oem_parameters_validate(struct sci_oem_params *oem) 1742 { 1743 int i; 1744 1745 for (i = 0; i < SCI_MAX_PORTS; i++) 1746 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX) 1747 return -EINVAL; 1748 1749 for (i = 0; i < SCI_MAX_PHYS; i++) 1750 if (oem->phys[i].sas_address.high == 0 && 1751 oem->phys[i].sas_address.low == 0) 1752 return -EINVAL; 1753 1754 if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) { 1755 for (i = 0; i < SCI_MAX_PHYS; i++) 1756 if (oem->ports[i].phy_mask != 0) 1757 return -EINVAL; 1758 } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { 1759 u8 phy_mask = 0; 1760 1761 for (i = 0; i < SCI_MAX_PHYS; i++) 1762 phy_mask |= oem->ports[i].phy_mask; 1763 1764 if (phy_mask == 0) 1765 return -EINVAL; 1766 } else 1767 return -EINVAL; 1768 1769 if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT) 1770 return -EINVAL; 1771 1772 return 0; 1773 } 1774 1775 static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) 1776 { 1777 u32 state = ihost->sm.current_state_id; 1778 1779 if (state == SCIC_RESET || 1780 state == SCIC_INITIALIZING || 1781 state == SCIC_INITIALIZED) { 1782 1783 if (sci_oem_parameters_validate(&ihost->oem_parameters)) 1784 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 1785 1786 return SCI_SUCCESS; 1787 } 1788 1789 return SCI_FAILURE_INVALID_STATE; 1790 } 1791 1792 static void power_control_timeout(unsigned long data) 1793 { 1794 struct sci_timer *tmr = (struct sci_timer *)data; 1795 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer); 1796 struct isci_phy *iphy; 1797 unsigned long flags; 1798 u8 i; 1799 1800 spin_lock_irqsave(&ihost->scic_lock, flags); 1801 1802 if (tmr->cancel) 1803 goto done; 1804 1805 ihost->power_control.phys_granted_power = 0; 1806 1807 if (ihost->power_control.phys_waiting == 0) { 1808 ihost->power_control.timer_started = false; 1809 goto done; 1810 } 1811 1812 for (i = 0; i < SCI_MAX_PHYS; i++) { 1813 1814 if (ihost->power_control.phys_waiting == 0) 1815 break; 1816 1817 iphy = ihost->power_control.requesters[i]; 1818 if (iphy == NULL) 1819 continue; 1820 1821 if (ihost->power_control.phys_granted_power >= 1822 ihost->oem_parameters.controller.max_concurrent_dev_spin_up) 1823 break; 1824 1825 ihost->power_control.requesters[i] = NULL; 1826 ihost->power_control.phys_waiting--; 1827 ihost->power_control.phys_granted_power++; 1828 sci_phy_consume_power_handler(iphy); 1829 } 1830 1831 /* 1832 * It doesn't matter if the power list is empty, we need to start the 1833 * timer in case another phy becomes ready. 1834 */ 1835 sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); 1836 ihost->power_control.timer_started = true; 1837 1838 done: 1839 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1840 } 1841 1842 void sci_controller_power_control_queue_insert(struct isci_host *ihost, 1843 struct isci_phy *iphy) 1844 { 1845 BUG_ON(iphy == NULL); 1846 1847 if (ihost->power_control.phys_granted_power < 1848 ihost->oem_parameters.controller.max_concurrent_dev_spin_up) { 1849 ihost->power_control.phys_granted_power++; 1850 sci_phy_consume_power_handler(iphy); 1851 1852 /* 1853 * stop and start the power_control timer. 
When the timer fires, the 1854 * phys_granted_power will be set to 0 1855 */ 1856 if (ihost->power_control.timer_started) 1857 sci_del_timer(&ihost->power_control.timer); 1858 1859 sci_mod_timer(&ihost->power_control.timer, 1860 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); 1861 ihost->power_control.timer_started = true; 1862 1863 } else { 1864 /* Add the phy to the waiting list */ 1865 ihost->power_control.requesters[iphy->phy_index] = iphy; 1866 ihost->power_control.phys_waiting++; 1867 } 1868 } 1869 1870 void sci_controller_power_control_queue_remove(struct isci_host *ihost, 1871 struct isci_phy *iphy) 1872 { 1873 BUG_ON(iphy == NULL); 1874 1875 if (ihost->power_control.requesters[iphy->phy_index]) 1876 ihost->power_control.phys_waiting--; 1877 1878 ihost->power_control.requesters[iphy->phy_index] = NULL; 1879 } 1880 1881 #define AFE_REGISTER_WRITE_DELAY 10 1882 1883 /* Initialize the AFE for this phy index. We need to read the AFE setup from 1884 * the OEM parameters 1885 */ 1886 static void sci_controller_afe_initialization(struct isci_host *ihost) 1887 { 1888 const struct sci_oem_params *oem = &ihost->oem_parameters; 1889 struct pci_dev *pdev = ihost->pdev; 1890 u32 afe_status; 1891 u32 phy_id; 1892 1893 /* Clear DFX Status registers */ 1894 writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0); 1895 udelay(AFE_REGISTER_WRITE_DELAY); 1896 1897 if (is_b0(pdev)) { 1898 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement 1899 * Timer, PM Stagger Timer */ 1900 writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2); 1901 udelay(AFE_REGISTER_WRITE_DELAY); 1902 } 1903 1904 /* Configure bias currents to normal */ 1905 if (is_a2(pdev)) 1906 writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control); 1907 else if (is_b0(pdev) || is_c0(pdev)) 1908 writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control); 1909 1910 udelay(AFE_REGISTER_WRITE_DELAY); 1911 1912 /* Enable PLL */ 1913 if (is_b0(pdev) || is_c0(pdev)) 1914 writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0); 1915 else 1916 writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0); 1917 1918 udelay(AFE_REGISTER_WRITE_DELAY); 1919 1920 /* Wait for the PLL to lock */ 1921 do { 1922 afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status); 1923 udelay(AFE_REGISTER_WRITE_DELAY); 1924 } while ((afe_status & 0x00001000) == 0); 1925 1926 if (is_a2(pdev)) { 1927 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ 1928 writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0); 1929 udelay(AFE_REGISTER_WRITE_DELAY); 1930 } 1931 1932 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { 1933 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; 1934 1935 if (is_b0(pdev)) { 1936 /* Configure transmitter SSC parameters */ 1937 writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); 1938 udelay(AFE_REGISTER_WRITE_DELAY); 1939 } else if (is_c0(pdev)) { 1940 /* Configure transmitter SSC parameters */ 1941 writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); 1942 udelay(AFE_REGISTER_WRITE_DELAY); 1943 1944 /* 1945 * All defaults, except the Receive Word Alignment/Comma Detect 1946 * Enable....(0xe800) */ 1947 writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 1948 udelay(AFE_REGISTER_WRITE_DELAY); 1949 } else { 1950 /* 1951 * All defaults, except the Receive Word Alignment/Comma Detect 1952 * Enable....(0xe800) */ 1953 
writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 1954 udelay(AFE_REGISTER_WRITE_DELAY); 1955 1956 writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); 1957 udelay(AFE_REGISTER_WRITE_DELAY); 1958 } 1959 1960 /* 1961 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 1962 * & increase TX int & ext bias 20%....(0xe85c) */ 1963 if (is_a2(pdev)) 1964 writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 1965 else if (is_b0(pdev)) { 1966 /* Power down TX and RX (PWRDNTX and PWRDNRX) */ 1967 writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 1968 udelay(AFE_REGISTER_WRITE_DELAY); 1969 1970 /* 1971 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 1972 * & increase TX int & ext bias 20%....(0xe85c) */ 1973 writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 1974 } else { 1975 writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 1976 udelay(AFE_REGISTER_WRITE_DELAY); 1977 1978 /* 1979 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 1980 * & increase TX int & ext bias 20%....(0xe85c) */ 1981 writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 1982 } 1983 udelay(AFE_REGISTER_WRITE_DELAY); 1984 1985 if (is_a2(pdev)) { 1986 /* Enable TX equalization (0xe824) */ 1987 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 1988 udelay(AFE_REGISTER_WRITE_DELAY); 1989 } 1990 1991 /* 1992 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), 1993 * RDD=0x0(RX Detect Enabled) ....(0xe800) */ 1994 writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 1995 udelay(AFE_REGISTER_WRITE_DELAY); 1996 1997 /* Leave DFE/FFE on */ 1998 if (is_a2(pdev)) 1999 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2000 else if (is_b0(pdev)) { 2001 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2002 udelay(AFE_REGISTER_WRITE_DELAY); 2003 /* Enable TX equalization (0xe824) */ 2004 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2005 } else { 2006 writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); 2007 udelay(AFE_REGISTER_WRITE_DELAY); 2008 2009 writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2010 udelay(AFE_REGISTER_WRITE_DELAY); 2011 2012 /* Enable TX equalization (0xe824) */ 2013 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2014 } 2015 2016 udelay(AFE_REGISTER_WRITE_DELAY); 2017 2018 writel(oem_phy->afe_tx_amp_control0, 2019 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0); 2020 udelay(AFE_REGISTER_WRITE_DELAY); 2021 2022 writel(oem_phy->afe_tx_amp_control1, 2023 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1); 2024 udelay(AFE_REGISTER_WRITE_DELAY); 2025 2026 writel(oem_phy->afe_tx_amp_control2, 2027 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2); 2028 udelay(AFE_REGISTER_WRITE_DELAY); 2029 2030 writel(oem_phy->afe_tx_amp_control3, 2031 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3); 2032 udelay(AFE_REGISTER_WRITE_DELAY); 2033 } 2034 2035 /* Transfer control to the PEs */ 2036 writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0); 2037 
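/* The loop above applied silicon-revision defaults plus the per-phy afe_tx_amp_control0-3 values from the OEM parameters; the final delay below lets the hand-off write settle like every other AFE register access. */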
udelay(AFE_REGISTER_WRITE_DELAY); 2038 } 2039 2040 static void sci_controller_initialize_power_control(struct isci_host *ihost) 2041 { 2042 sci_init_timer(&ihost->power_control.timer, power_control_timeout); 2043 2044 memset(ihost->power_control.requesters, 0, 2045 sizeof(ihost->power_control.requesters)); 2046 2047 ihost->power_control.phys_waiting = 0; 2048 ihost->power_control.phys_granted_power = 0; 2049 } 2050 2051 static enum sci_status sci_controller_initialize(struct isci_host *ihost) 2052 { 2053 struct sci_base_state_machine *sm = &ihost->sm; 2054 enum sci_status result = SCI_FAILURE; 2055 unsigned long i, state, val; 2056 2057 if (ihost->sm.current_state_id != SCIC_RESET) { 2058 dev_warn(&ihost->pdev->dev, 2059 "SCIC Controller initialize operation requested " 2060 "in invalid state\n"); 2061 return SCI_FAILURE_INVALID_STATE; 2062 } 2063 2064 sci_change_state(sm, SCIC_INITIALIZING); 2065 2066 sci_init_timer(&ihost->phy_timer, phy_startup_timeout); 2067 2068 ihost->next_phy_to_start = 0; 2069 ihost->phy_startup_timer_pending = false; 2070 2071 sci_controller_initialize_power_control(ihost); 2072 2073 /* 2074 * There is nothing to do here for B0 since we do not have to 2075 * program the AFE registers. 2076 * @todo The AFE settings are supposed to be correct for the B0 but 2077 * presently they seem to be wrong. */ 2078 sci_controller_afe_initialization(ihost); 2079 2080 2081 /* Take the hardware out of reset */ 2082 writel(0, &ihost->smu_registers->soft_reset_control); 2083 2084 /* 2085 * @todo Provide a meaningful error code for hardware failure 2086 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */ 2087 for (i = 100; i >= 1; i--) { 2088 u32 status; 2089 2090 /* Loop until the hardware reports success */ 2091 udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME); 2092 status = readl(&ihost->smu_registers->control_status); 2093 2094 if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED) 2095 break; 2096 } 2097 if (i == 0) 2098 goto out; 2099 2100 /* 2101 * Determine the actual device capacities that the 2102 * hardware will support */ 2103 val = readl(&ihost->smu_registers->device_context_capacity); 2104 2105 /* Record the smaller of the two capacity values */ 2106 ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS); 2107 ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS); 2108 ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES); 2109 2110 /* 2111 * Make all PEs that are unassigned match up with the 2112 * logical ports 2113 */ 2114 for (i = 0; i < ihost->logical_port_entries; i++) { 2115 struct scu_port_task_scheduler_group_registers __iomem 2116 *ptsg = &ihost->scu_registers->peg0.ptsg; 2117 2118 writel(i, &ptsg->protocol_engine[i]); 2119 } 2120 2121 /* Initialize hardware PCI Relaxed ordering in DMA engines */ 2122 val = readl(&ihost->scu_registers->sdma.pdma_configuration); 2123 val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); 2124 writel(val, &ihost->scu_registers->sdma.pdma_configuration); 2125 2126 val = readl(&ihost->scu_registers->sdma.cdma_configuration); 2127 val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); 2128 writel(val, &ihost->scu_registers->sdma.cdma_configuration); 2129 2130 /* 2131 * Initialize the PHYs before the PORTs because the PHY registers 2132 * are accessed during the port initialization. 
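 * Each phy is handed its transport layer (tl) and link layer (ll) register blocks from peg0 in the loop below.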
2133 */ 2134 for (i = 0; i < SCI_MAX_PHYS; i++) { 2135 result = sci_phy_initialize(&ihost->phys[i], 2136 &ihost->scu_registers->peg0.pe[i].tl, 2137 &ihost->scu_registers->peg0.pe[i].ll); 2138 if (result != SCI_SUCCESS) 2139 goto out; 2140 } 2141 2142 for (i = 0; i < ihost->logical_port_entries; i++) { 2143 struct isci_port *iport = &ihost->ports[i]; 2144 2145 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i]; 2146 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0]; 2147 iport->viit_registers = &ihost->scu_registers->peg0.viit[i]; 2148 } 2149 2150 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent); 2151 2152 out: 2153 /* Advance the controller state machine */ 2154 if (result == SCI_SUCCESS) 2155 state = SCIC_INITIALIZED; 2156 else 2157 state = SCIC_FAILED; 2158 sci_change_state(sm, state); 2159 2160 return result; 2161 } 2162 2163 static enum sci_status sci_user_parameters_set(struct isci_host *ihost, 2164 struct sci_user_parameters *sci_parms) 2165 { 2166 u32 state = ihost->sm.current_state_id; 2167 2168 if (state == SCIC_RESET || 2169 state == SCIC_INITIALIZING || 2170 state == SCIC_INITIALIZED) { 2171 u16 index; 2172 2173 /* 2174 * Validate the user parameters. If they are not legal, then 2175 * return a failure. 2176 */ 2177 for (index = 0; index < SCI_MAX_PHYS; index++) { 2178 struct sci_phy_user_params *user_phy; 2179 2180 user_phy = &sci_parms->phys[index]; 2181 2182 if (!((user_phy->max_speed_generation <= 2183 SCIC_SDS_PARM_MAX_SPEED) && 2184 (user_phy->max_speed_generation > 2185 SCIC_SDS_PARM_NO_SPEED))) 2186 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2187 2188 if (user_phy->in_connection_align_insertion_frequency < 2189 3) 2190 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2191 2192 if ((user_phy->in_connection_align_insertion_frequency < 2193 3) || 2194 (user_phy->align_insertion_frequency == 0) || 2195 (user_phy-> 2196 notify_enable_spin_up_insertion_frequency == 2197 0)) 2198 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2199 } 2200 2201 if ((sci_parms->stp_inactivity_timeout == 0) || 2202 (sci_parms->ssp_inactivity_timeout == 0) || 2203 (sci_parms->stp_max_occupancy_timeout == 0) || 2204 (sci_parms->ssp_max_occupancy_timeout == 0) || 2205 (sci_parms->no_outbound_task_timeout == 0)) 2206 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2207 2208 memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); 2209 2210 return SCI_SUCCESS; 2211 } 2212 2213 return SCI_FAILURE_INVALID_STATE; 2214 } 2215 2216 static int sci_controller_mem_init(struct isci_host *ihost) 2217 { 2218 struct device *dev = &ihost->pdev->dev; 2219 dma_addr_t dma; 2220 size_t size; 2221 int err; 2222 2223 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); 2224 ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); 2225 if (!ihost->completion_queue) 2226 return -ENOMEM; 2227 2228 writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower); 2229 writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper); 2230 2231 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); 2232 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, 2233 GFP_KERNEL); 2234 if (!ihost->remote_node_context_table) 2235 return -ENOMEM; 2236 2237 writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower); 2238 writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper); 2239 2240 size = ihost->task_context_entries * 
sizeof(struct scu_task_context); 2241 ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); 2242 if (!ihost->task_context_table) 2243 return -ENOMEM; 2244 2245 ihost->task_context_dma = dma; 2246 writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); 2247 writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); 2248 2249 err = sci_unsolicited_frame_control_construct(ihost); 2250 if (err) 2251 return err; 2252 2253 /* 2254 * Inform the silicon as to the location of the UF headers and 2255 * address table. 2256 */ 2257 writel(lower_32_bits(ihost->uf_control.headers.physical_address), 2258 &ihost->scu_registers->sdma.uf_header_base_address_lower); 2259 writel(upper_32_bits(ihost->uf_control.headers.physical_address), 2260 &ihost->scu_registers->sdma.uf_header_base_address_upper); 2261 2262 writel(lower_32_bits(ihost->uf_control.address_table.physical_address), 2263 &ihost->scu_registers->sdma.uf_address_table_lower); 2264 writel(upper_32_bits(ihost->uf_control.address_table.physical_address), 2265 &ihost->scu_registers->sdma.uf_address_table_upper); 2266 2267 return 0; 2268 } 2269 2270 int isci_host_init(struct isci_host *ihost) 2271 { 2272 int err = 0, i; 2273 enum sci_status status; 2274 struct sci_user_parameters sci_user_params; 2275 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); 2276 2277 spin_lock_init(&ihost->state_lock); 2278 spin_lock_init(&ihost->scic_lock); 2279 init_waitqueue_head(&ihost->eventq); 2280 2281 isci_host_change_state(ihost, isci_starting); 2282 2283 status = sci_controller_construct(ihost, scu_base(ihost), 2284 smu_base(ihost)); 2285 2286 if (status != SCI_SUCCESS) { 2287 dev_err(&ihost->pdev->dev, 2288 "%s: sci_controller_construct failed - status = %x\n", 2289 __func__, 2290 status); 2291 return -ENODEV; 2292 } 2293 2294 ihost->sas_ha.dev = &ihost->pdev->dev; 2295 ihost->sas_ha.lldd_ha = ihost; 2296 2297 /* 2298 * grab initial values stored in the controller object for OEM and USER 2299 * parameters 2300 */ 2301 isci_user_parameters_get(&sci_user_params); 2302 status = sci_user_parameters_set(ihost, &sci_user_params); 2303 if (status != SCI_SUCCESS) { 2304 dev_warn(&ihost->pdev->dev, 2305 "%s: sci_user_parameters_set failed\n", 2306 __func__); 2307 return -ENODEV; 2308 } 2309 2310 /* grab any OEM parameters specified in orom */ 2311 if (pci_info->orom) { 2312 status = isci_parse_oem_parameters(&ihost->oem_parameters, 2313 pci_info->orom, 2314 ihost->id); 2315 if (status != SCI_SUCCESS) { 2316 dev_warn(&ihost->pdev->dev, 2317 "parsing firmware oem parameters failed\n"); 2318 return -EINVAL; 2319 } 2320 } 2321 2322 status = sci_oem_parameters_set(ihost); 2323 if (status != SCI_SUCCESS) { 2324 dev_warn(&ihost->pdev->dev, 2325 "%s: sci_oem_parameters_set failed\n", 2326 __func__); 2327 return -ENODEV; 2328 } 2329 2330 tasklet_init(&ihost->completion_tasklet, 2331 isci_host_completion_routine, (unsigned long)ihost); 2332 2333 INIT_LIST_HEAD(&ihost->requests_to_complete); 2334 INIT_LIST_HEAD(&ihost->requests_to_errorback); 2335 2336 spin_lock_irq(&ihost->scic_lock); 2337 status = sci_controller_initialize(ihost); 2338 spin_unlock_irq(&ihost->scic_lock); 2339 if (status != SCI_SUCCESS) { 2340 dev_warn(&ihost->pdev->dev, 2341 "%s: sci_controller_initialize failed -" 2342 " status = 0x%x\n", 2343 __func__, status); 2344 return -ENODEV; 2345 } 2346 2347 err = sci_controller_mem_init(ihost); 2348 if (err) 2349 return err; 2350 2351 for (i = 0; i < SCI_MAX_PORTS; i++) 2352 isci_port_init(&ihost->ports[i], 
ihost, i); 2353 2354 for (i = 0; i < SCI_MAX_PHYS; i++) 2355 isci_phy_init(&ihost->phys[i], ihost, i); 2356 2357 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { 2358 struct isci_remote_device *idev = &ihost->devices[i]; 2359 2360 INIT_LIST_HEAD(&idev->reqs_in_process); 2361 INIT_LIST_HEAD(&idev->node); 2362 } 2363 2364 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { 2365 struct isci_request *ireq; 2366 dma_addr_t dma; 2367 2368 ireq = dmam_alloc_coherent(&ihost->pdev->dev, 2369 sizeof(struct isci_request), &dma, 2370 GFP_KERNEL); 2371 if (!ireq) 2372 return -ENOMEM; 2373 2374 ireq->tc = &ihost->task_context_table[i]; 2375 ireq->owning_controller = ihost; 2376 spin_lock_init(&ireq->state_lock); 2377 ireq->request_daddr = dma; 2378 ireq->isci_host = ihost; 2379 ihost->reqs[i] = ireq; 2380 } 2381 2382 return 0; 2383 } 2384 2385 void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, 2386 struct isci_phy *iphy) 2387 { 2388 switch (ihost->sm.current_state_id) { 2389 case SCIC_STARTING: 2390 sci_del_timer(&ihost->phy_timer); 2391 ihost->phy_startup_timer_pending = false; 2392 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, 2393 iport, iphy); 2394 sci_controller_start_next_phy(ihost); 2395 break; 2396 case SCIC_READY: 2397 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, 2398 iport, iphy); 2399 break; 2400 default: 2401 dev_dbg(&ihost->pdev->dev, 2402 "%s: SCIC Controller linkup event from phy %d in " 2403 "unexpected state %d\n", __func__, iphy->phy_index, 2404 ihost->sm.current_state_id); 2405 } 2406 } 2407 2408 void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, 2409 struct isci_phy *iphy) 2410 { 2411 switch (ihost->sm.current_state_id) { 2412 case SCIC_STARTING: 2413 case SCIC_READY: 2414 ihost->port_agent.link_down_handler(ihost, &ihost->port_agent, 2415 iport, iphy); 2416 break; 2417 default: 2418 dev_dbg(&ihost->pdev->dev, 2419 "%s: SCIC Controller linkdown event from phy %d in " 2420 "unexpected state %d\n", 2421 __func__, 2422 iphy->phy_index, 2423 ihost->sm.current_state_id); 2424 } 2425 } 2426 2427 static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) 2428 { 2429 u32 index; 2430 2431 for (index = 0; index < ihost->remote_node_entries; index++) { 2432 if ((ihost->device_table[index] != NULL) && 2433 (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING)) 2434 return true; 2435 } 2436 2437 return false; 2438 } 2439 2440 void sci_controller_remote_device_stopped(struct isci_host *ihost, 2441 struct isci_remote_device *idev) 2442 { 2443 if (ihost->sm.current_state_id != SCIC_STOPPING) { 2444 dev_dbg(&ihost->pdev->dev, 2445 "SCIC Controller 0x%p remote device stopped event " 2446 "from device 0x%p in unexpected state %d\n", 2447 ihost, idev, 2448 ihost->sm.current_state_id); 2449 return; 2450 } 2451 2452 if (!sci_controller_has_remote_devices_stopping(ihost)) 2453 sci_change_state(&ihost->sm, SCIC_STOPPED); 2454 } 2455 2456 void sci_controller_post_request(struct isci_host *ihost, u32 request) 2457 { 2458 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n", 2459 __func__, ihost->id, request); 2460 2461 writel(request, &ihost->smu_registers->post_context_port); 2462 } 2463 2464 struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag) 2465 { 2466 u16 task_index; 2467 u16 task_sequence; 2468 2469 task_index = ISCI_TAG_TCI(io_tag); 2470 2471 if (task_index < ihost->task_context_entries) { 2472 struct isci_request *ireq = ihost->reqs[task_index]; 2473 2474 if 
(test_bit(IREQ_ACTIVE, &ireq->flags)) { 2475 task_sequence = ISCI_TAG_SEQ(io_tag); 2476 2477 if (task_sequence == ihost->io_request_sequence[task_index]) 2478 return ireq; 2479 } 2480 } 2481 2482 return NULL; 2483 } 2484 2485 /** 2486 * This method allocates a remote node index and reserves the remote node 2487 * context space for use. This method can fail if there are no more remote 2488 * node indexes available. 2489 * @ihost: This is the controller object which contains the set of 2490 * free remote node ids 2491 * @idev: This is the device object which is requesting a remote node 2492 * id 2493 * @node_id: This is the remote node id that is assigned to the device if one 2494 * is available 2495 * 2496 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there is no remote 2497 * node index available. 2498 */ 2499 enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, 2500 struct isci_remote_device *idev, 2501 u16 *node_id) 2502 { 2503 u16 node_index; 2504 u32 remote_node_count = sci_remote_device_node_count(idev); 2505 2506 node_index = sci_remote_node_table_allocate_remote_node( 2507 &ihost->available_remote_nodes, remote_node_count 2508 ); 2509 2510 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { 2511 ihost->device_table[node_index] = idev; 2512 2513 *node_id = node_index; 2514 2515 return SCI_SUCCESS; 2516 } 2517 2518 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 2519 } 2520 2521 void sci_controller_free_remote_node_context(struct isci_host *ihost, 2522 struct isci_remote_device *idev, 2523 u16 node_id) 2524 { 2525 u32 remote_node_count = sci_remote_device_node_count(idev); 2526 2527 if (ihost->device_table[node_id] == idev) { 2528 ihost->device_table[node_id] = NULL; 2529 2530 sci_remote_node_table_release_remote_node_index( 2531 &ihost->available_remote_nodes, remote_node_count, node_id 2532 ); 2533 } 2534 } 2535 2536 void sci_controller_copy_sata_response(void *response_buffer, 2537 void *frame_header, 2538 void *frame_buffer) 2539 { 2540 /* XXX type safety? 
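 * The caller's response_buffer is assumed to hold at least sizeof(struct dev_to_host_fis) bytes: the first u32 is copied from the frame header and the remainder from the frame buffer.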
*/ 2541 memcpy(response_buffer, frame_header, sizeof(u32)); 2542 2543 memcpy(response_buffer + sizeof(u32), 2544 frame_buffer, 2545 sizeof(struct dev_to_host_fis) - sizeof(u32)); 2546 } 2547 2548 void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index) 2549 { 2550 if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index)) 2551 writel(ihost->uf_control.get, 2552 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); 2553 } 2554 2555 void isci_tci_free(struct isci_host *ihost, u16 tci) 2556 { 2557 u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1); 2558 2559 ihost->tci_pool[tail] = tci; 2560 ihost->tci_tail = tail + 1; 2561 } 2562 2563 static u16 isci_tci_alloc(struct isci_host *ihost) 2564 { 2565 u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1); 2566 u16 tci = ihost->tci_pool[head]; 2567 2568 ihost->tci_head = head + 1; 2569 return tci; 2570 } 2571 2572 static u16 isci_tci_space(struct isci_host *ihost) 2573 { 2574 return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); 2575 } 2576 2577 u16 isci_alloc_tag(struct isci_host *ihost) 2578 { 2579 if (isci_tci_space(ihost)) { 2580 u16 tci = isci_tci_alloc(ihost); 2581 u8 seq = ihost->io_request_sequence[tci]; 2582 2583 return ISCI_TAG(seq, tci); 2584 } 2585 2586 return SCI_CONTROLLER_INVALID_IO_TAG; 2587 } 2588 2589 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) 2590 { 2591 u16 tci = ISCI_TAG_TCI(io_tag); 2592 u16 seq = ISCI_TAG_SEQ(io_tag); 2593 2594 /* prevent tail from passing head */ 2595 if (isci_tci_active(ihost) == 0) 2596 return SCI_FAILURE_INVALID_IO_TAG; 2597 2598 if (seq == ihost->io_request_sequence[tci]) { 2599 ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); 2600 2601 isci_tci_free(ihost, tci); 2602 2603 return SCI_SUCCESS; 2604 } 2605 return SCI_FAILURE_INVALID_IO_TAG; 2606 } 2607 2608 enum sci_status sci_controller_start_io(struct isci_host *ihost, 2609 struct isci_remote_device *idev, 2610 struct isci_request *ireq) 2611 { 2612 enum sci_status status; 2613 2614 if (ihost->sm.current_state_id != SCIC_READY) { 2615 dev_warn(&ihost->pdev->dev, "invalid state to start I/O"); 2616 return SCI_FAILURE_INVALID_STATE; 2617 } 2618 2619 status = sci_remote_device_start_io(ihost, idev, ireq); 2620 if (status != SCI_SUCCESS) 2621 return status; 2622 2623 set_bit(IREQ_ACTIVE, &ireq->flags); 2624 sci_controller_post_request(ihost, ireq->post_context); 2625 return SCI_SUCCESS; 2626 } 2627 2628 enum sci_status sci_controller_terminate_request(struct isci_host *ihost, 2629 struct isci_remote_device *idev, 2630 struct isci_request *ireq) 2631 { 2632 /* terminate an ongoing (i.e. started) core IO request. This does not 2633 * abort the IO request at the target, but rather removes the IO 2634 * request from the host controller. 2635 */ 2636 enum sci_status status; 2637 2638 if (ihost->sm.current_state_id != SCIC_READY) { 2639 dev_warn(&ihost->pdev->dev, 2640 "invalid state to terminate request\n"); 2641 return SCI_FAILURE_INVALID_STATE; 2642 } 2643 2644 status = sci_io_request_terminate(ireq); 2645 if (status != SCI_SUCCESS) 2646 return status; 2647 2648 /* 2649 * Utilize the original post context command and or in the POST_TC_ABORT 2650 * request sub-type. 2651 */ 2652 sci_controller_post_request(ihost, 2653 ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); 2654 return SCI_SUCCESS; 2655 } 2656 2657 /** 2658 * sci_controller_complete_io() - This method will perform core specific 2659 * completion operations for an IO request. 
After this method is invoked, 2660 * the user should consider the IO request as invalid until it is properly 2661 * reused (i.e. re-constructed). 2662 * @ihost: The handle to the controller object for which to complete the 2663 * IO request. 2664 * @idev: The handle to the remote device object for which to complete 2665 * the IO request. 2666 * @ireq: the handle to the io request object to complete. 2667 */ 2668 enum sci_status sci_controller_complete_io(struct isci_host *ihost, 2669 struct isci_remote_device *idev, 2670 struct isci_request *ireq) 2671 { 2672 enum sci_status status; 2673 u16 index; 2674 2675 switch (ihost->sm.current_state_id) { 2676 case SCIC_STOPPING: 2677 /* XXX: Implement this function */ 2678 return SCI_FAILURE; 2679 case SCIC_READY: 2680 status = sci_remote_device_complete_io(ihost, idev, ireq); 2681 if (status != SCI_SUCCESS) 2682 return status; 2683 2684 index = ISCI_TAG_TCI(ireq->io_tag); 2685 clear_bit(IREQ_ACTIVE, &ireq->flags); 2686 return SCI_SUCCESS; 2687 default: 2688 dev_warn(&ihost->pdev->dev, "invalid state to complete I/O"); 2689 return SCI_FAILURE_INVALID_STATE; 2690 } 2691 2692 } 2693 2694 enum sci_status sci_controller_continue_io(struct isci_request *ireq) 2695 { 2696 struct isci_host *ihost = ireq->owning_controller; 2697 2698 if (ihost->sm.current_state_id != SCIC_READY) { 2699 dev_warn(&ihost->pdev->dev, "invalid state to continue I/O"); 2700 return SCI_FAILURE_INVALID_STATE; 2701 } 2702 2703 set_bit(IREQ_ACTIVE, &ireq->flags); 2704 sci_controller_post_request(ihost, ireq->post_context); 2705 return SCI_SUCCESS; 2706 } 2707 2708 /** 2709 * sci_controller_start_task() - This method is called by the SCIC user to 2710 * send/start a framework task management request. 2711 * @ihost: the handle to the controller object for which to start the task 2712 * management request. 2713 * @idev: the handle to the remote device object for which to start 2714 * the task management request. 2715 * @ireq: the handle to the task request object to start. 2716 */ 2717 enum sci_task_status sci_controller_start_task(struct isci_host *ihost, 2718 struct isci_remote_device *idev, 2719 struct isci_request *ireq) 2720 { 2721 enum sci_status status; 2722 2723 if (ihost->sm.current_state_id != SCIC_READY) { 2724 dev_warn(&ihost->pdev->dev, 2725 "%s: SCIC Controller starting task from invalid " 2726 "state\n", 2727 __func__); 2728 return SCI_TASK_FAILURE_INVALID_STATE; 2729 } 2730 2731 status = sci_remote_device_start_task(ihost, idev, ireq); 2732 switch (status) { 2733 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: 2734 set_bit(IREQ_ACTIVE, &ireq->flags); 2735 2736 /* 2737 * We will let the framework know this task request started successfully, 2738 * although the core is still working on starting the request (it will post 2739 * the TC when the RNC is resumed). 2740 */ 2741 return SCI_SUCCESS; 2742 case SCI_SUCCESS: 2743 set_bit(IREQ_ACTIVE, &ireq->flags); 2744 sci_controller_post_request(ihost, ireq->post_context); 2745 break; 2746 default: 2747 break; 2748 } 2749 2750 return status; 2751 } 2752
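/*
 * Note on io_tag lifetime (summary of the helpers above): an io_tag packs a
 * rolling sequence number together with a task context index (TCI).
 * isci_alloc_tag() returns ISCI_TAG(seq, tci) using the current
 * io_request_sequence[tci]; isci_free_tag() only recycles the TCI (and bumps
 * the sequence) when the caller's sequence still matches, otherwise it
 * returns SCI_FAILURE_INVALID_IO_TAG.  sci_request_by_tag() applies the same
 * sequence check before handing back an active request, so a stale tag that
 * refers to an already-reused task context is never honored.
 */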