/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "scic_io_request.h"
#include "scic_remote_device.h"
#include "scic_port.h"

#include "port.h"
#include "request.h"
#include "host.h"
#include "probe_roms.h"
#include "core/scic_sds_controller.h"

irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = ihost->core_controller;

	if (scic_sds_controller_isr(scic))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}

irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = ihost->core_controller;

	if (scic_sds_controller_isr(scic)) {
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (scic_sds_controller_error_isr(scic)) {
		spin_lock(&ihost->scic_lock);
		scic_sds_controller_error_handler(scic);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = ihost->core_controller;

	if (scic_sds_controller_error_isr(scic))
		scic_sds_controller_error_handler(scic);

	return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @ihost: This parameter specifies the ISCI host object.
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			 "controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		__func__, isci_host_get_state(ihost), time);

	return 1;
}

void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	struct scic_sds_controller *scic = ihost->core_controller;
	unsigned long tmo = scic_controller_get_suggested_start_timeout(scic);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_start(scic, tmo);
	scic_controller_enable_interrupts(scic);
	spin_unlock_irq(&ihost->scic_lock);
}

void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	scic_controller_disable_interrupts(ihost->core_controller);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler.
 *    It's scheduled as a tasklet from the interrupt service routine when
 *    interrupts are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object.
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *isci_host = (struct isci_host *)data;
	struct list_head completed_request_list;
	struct list_head errored_request_list;
	struct list_head *current_position;
	struct list_head *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task *task;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&isci_host->scic_lock);

	scic_sds_controller_completion_handler(isci_host->core_controller);

	/* Take the list of completed I/Os from the host. */
	list_splice_init(&isci_host->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&isci_host->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&isci_host->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&isci_host->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__, request, task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				 * the task_done callback cannot be called.
				 */
				task->task_done(task);
			}
		}
		/* Free the request object. */
		isci_request_free(isci_host, request);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&isci_host->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__, request, task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request.  Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&isci_host->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			spin_unlock_irq(&isci_host->scic_lock);

			/* Free the request object.
			 */
			isci_request_free(isci_host, request);
		}
	}
}

void isci_host_deinit(struct isci_host *ihost)
{
	struct scic_sds_controller *scic = ihost->core_controller;
	int i;

	isci_host_change_state(ihost, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *port = &ihost->isci_ports[i];
		struct isci_remote_device *idev, *d;

		list_for_each_entry_safe(idev, d, &port->remote_dev_list, node) {
			isci_remote_device_change_state(idev, isci_stopping);
			isci_remote_device_stop(ihost, idev);
		}
	}

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_stop(scic, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);
	scic_controller_reset(scic);
	isci_timer_list_destroy(ihost);
}

static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

static void isci_user_parameters_get(
	struct isci_host *isci_host,
	union scic_user_parameters *scic_user_params)
{
	struct scic_sds_user_parameters *u = &scic_user_params->sds1;
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}

int isci_host_init(struct isci_host *isci_host)
{
	int err = 0, i;
	enum sci_status status;
	struct scic_sds_controller *controller;
	union scic_oem_parameters oem;
	union scic_user_parameters scic_user_params;
	struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev);

	isci_timer_list_construct(isci_host);

	controller = scic_controller_alloc(&isci_host->pdev->dev);

	if (!controller) {
		dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_alloc failed\n",
			__func__);
		return -ENOMEM;
	}

	isci_host->core_controller = controller;
	sci_object_set_association(isci_host->core_controller, isci_host);
	spin_lock_init(&isci_host->state_lock);
	spin_lock_init(&isci_host->scic_lock);
	spin_lock_init(&isci_host->queue_lock);
	init_waitqueue_head(&isci_host->eventq);

	isci_host_change_state(isci_host, isci_starting);
	isci_host->can_queue = ISCI_CAN_QUEUE_VAL;

	status = scic_controller_construct(controller, scu_base(isci_host),
					   smu_base(isci_host));

	if (status != SCI_SUCCESS) {
		dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_construct failed - status = %x\n",
			__func__,
			status);
		return -ENODEV;
	}

	isci_host->sas_ha.dev = &isci_host->pdev->dev;
	isci_host->sas_ha.lldd_ha = isci_host;

	/*
	 * grab initial values stored in the controller object for OEM and USER
	 * parameters
	 */
	isci_user_parameters_get(isci_host, &scic_user_params);
	status = scic_user_parameters_set(isci_host->core_controller,
					  &scic_user_params);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_user_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	scic_oem_parameters_get(controller, &oem);

	/* grab any OEM parameters specified in orom */
	if (pci_info->orom) {
		status = isci_parse_oem_parameters(&oem,
						   pci_info->orom,
						   isci_host->id);
		if (status != SCI_SUCCESS) {
			dev_warn(&isci_host->pdev->dev,
				 "parsing firmware oem parameters failed\n");
			return -EINVAL;
		}
	}

	status = scic_oem_parameters_set(isci_host->core_controller, &oem);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_oem_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	tasklet_init(&isci_host->completion_tasklet,
		     isci_host_completion_routine, (unsigned long)isci_host);

	INIT_LIST_HEAD(&isci_host->requests_to_complete);
	INIT_LIST_HEAD(&isci_host->requests_to_errorback);

	spin_lock_irq(&isci_host->scic_lock);
	status = scic_controller_initialize(isci_host->core_controller);
	spin_unlock_irq(&isci_host->scic_lock);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		return -ENODEV;
	}

	err = scic_controller_mem_init(isci_host->core_controller);
	if (err)
		return err;

	/*
	 * keep the pool alloc size around; we will use it for bounds checking
	 * when trying to convert virtual addresses to physical addresses
	 */
	isci_host->dma_pool_alloc_size = sizeof(struct isci_request) +
					 scic_io_request_get_object_size();
	isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
					       isci_host->dma_pool_alloc_size,
					       SLAB_HWCACHE_ALIGN, 0);

	if (!isci_host->dma_pool)
		return -ENOMEM;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		isci_port_init(&isci_host->isci_ports[i], isci_host, i);

	for (i = 0; i < SCI_MAX_PHYS; i++)
		isci_phy_init(&isci_host->phys[i], isci_host, i);

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct isci_remote_device *idev = &isci_host->devices[i];

		INIT_LIST_HEAD(&idev->reqs_in_process);
		INIT_LIST_HEAD(&idev->node);
		spin_lock_init(&idev->state_lock);
	}

	return 0;
}