/*-
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/isci/isci.h>

#include <sys/sysctl.h>
#include <sys/malloc.h>

#include <cam/cam_periph.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/isci/scil/scic_logger.h>
#include <dev/isci/scil/scic_library.h>
#include <dev/isci/scil/scic_user_callback.h>

#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_library.h>
#include <dev/isci/scil/scif_logger.h>
#include <dev/isci/scil/scif_user_callback.h>

MALLOC_DEFINE(M_ISCI, "isci", "isci driver memory allocations");

struct isci_softc *g_isci;
uint32_t g_isci_debug_level = 0;

static int isci_probe(device_t);
static int isci_attach(device_t);
static int isci_detach(device_t);

int isci_initialize(struct isci_softc *isci);

void isci_allocate_dma_buffer_callback(void *arg, bus_dma_segment_t *seg,
    int nseg, int error);

static devclass_t isci_devclass;

static device_method_t isci_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, isci_probe),
	DEVMETHOD(device_attach, isci_attach),
	DEVMETHOD(device_detach, isci_detach),
	{ 0, 0 }
};

static driver_t isci_pci_driver = {
	"isci",
	isci_pci_methods,
	sizeof(struct isci_softc),
};

DRIVER_MODULE(isci, pci, isci_pci_driver, isci_devclass, 0, 0);

static struct _pcsid
{
	u_int32_t	type;
	const char	*desc;
} pci_ids[] = {
	{ 0x1d608086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d618086, "Intel(R) C600 Series Chipset SAS Controller (SATA mode)" },
	{ 0x1d628086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d638086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d648086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d658086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d668086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d678086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d688086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d698086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d6a8086, "Intel(R) C600 Series Chipset SAS Controller (SATA mode)" },
	{ 0x1d6b8086, "Intel(R) C600 Series Chipset SAS Controller (SATA mode)" },
	{ 0x1d6c8086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d6d8086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d6e8086, "Intel(R) C600 Series Chipset SAS Controller" },
	{ 0x1d6f8086, "Intel(R) C600 Series Chipset SAS Controller (SATA mode)" },
	{ 0x00000000, NULL }
};

static int
isci_probe(device_t device)
{
	u_int32_t	type = pci_get_devid(device);
	struct _pcsid	*ep = pci_ids;

	while (ep->type && ep->type != type)
		++ep;

	if (ep->desc)
	{
		device_set_desc(device, ep->desc);
		return (BUS_PROBE_DEFAULT);
	}
	else
		return (ENXIO);
}

static int
isci_allocate_pci_memory(struct isci_softc *isci)
{
	int i;

	for (i = 0; i < ISCI_NUM_PCI_BARS; i++)
	{
		struct ISCI_PCI_BAR *pci_bar = &isci->pci_bar[i];

		pci_bar->resource_id = PCIR_BAR(i*2);
		pci_bar->resource = bus_alloc_resource(isci->device,
		    SYS_RES_MEMORY, &pci_bar->resource_id, 0, ~0, 1,
		    RF_ACTIVE);

		if (pci_bar->resource == NULL)
			isci_log_message(0, "ISCI",
			    "unable to allocate pci resource\n");
		else {
			pci_bar->bus_tag = rman_get_bustag(pci_bar->resource);
			pci_bar->bus_handle =
			    rman_get_bushandle(pci_bar->resource);
		}
	}

	return (0);
}

static int
isci_attach(device_t device)
{
	int error;
	struct isci_softc *isci = DEVICE2SOFTC(device);

	g_isci = isci;
	isci->device = device;

	isci_allocate_pci_memory(isci);

	error = isci_initialize(isci);

	if (error)
	{
		isci_detach(device);
		return (error);
	}

	isci_interrupt_setup(isci);
	isci_sysctl_initialize(isci);

	return (0);
}

static int
isci_detach(device_t device)
{
	struct isci_softc *isci = DEVICE2SOFTC(device);
	int i;

	for (i = 0; i < isci->controller_count; i++) {
		struct ISCI_CONTROLLER *controller = &isci->controllers[i];
		SCI_STATUS status;

		if (controller->scif_controller_handle != NULL) {
			scic_controller_disable_interrupts(
			    scif_controller_get_scic_handle(controller->scif_controller_handle));

			mtx_lock(&controller->lock);
			status = scif_controller_stop(controller->scif_controller_handle, 0);
			mtx_unlock(&controller->lock);

			while (controller->is_started == TRUE) {
				/* Now poll for interrupts until the controller stop
				 *  complete callback is received.
				 */
				mtx_lock(&controller->lock);
				isci_interrupt_poll_handler(controller);
				mtx_unlock(&controller->lock);
				pause("isci", 1);
			}

			if (controller->sim != NULL) {
				mtx_lock(&controller->lock);
				xpt_free_path(controller->path);
				xpt_bus_deregister(cam_sim_path(controller->sim));
				cam_sim_free(controller->sim, TRUE);
				mtx_unlock(&controller->lock);
			}
		}

		if (controller->timer_memory != NULL)
			free(controller->timer_memory, M_ISCI);

		if (controller->remote_device_memory != NULL)
			free(controller->remote_device_memory, M_ISCI);
	}

	/* The SCIF controllers have been stopped, so we can now
	 *  free the SCI library memory.
	 */
	if (isci->sci_library_memory != NULL)
		free(isci->sci_library_memory, M_ISCI);

	for (i = 0; i < ISCI_NUM_PCI_BARS; i++)
	{
		struct ISCI_PCI_BAR *pci_bar = &isci->pci_bar[i];

		if (pci_bar->resource != NULL)
			bus_release_resource(device, SYS_RES_MEMORY,
			    pci_bar->resource_id, pci_bar->resource);
	}

	for (i = 0; i < isci->num_interrupts; i++)
	{
		struct ISCI_INTERRUPT_INFO *interrupt_info;

		interrupt_info = &isci->interrupt_info[i];

		if (interrupt_info->tag != NULL)
			bus_teardown_intr(device, interrupt_info->res,
			    interrupt_info->tag);

		if (interrupt_info->res != NULL)
			bus_release_resource(device, SYS_RES_IRQ,
			    rman_get_rid(interrupt_info->res),
			    interrupt_info->res);

		pci_release_msi(device);
	}

	return (0);
}

int
isci_initialize(struct isci_softc *isci)
{
	int error;
	uint32_t status = 0;
	uint32_t library_object_size;
	uint32_t verbosity_mask;
	uint32_t scic_log_object_mask;
	uint32_t scif_log_object_mask;
	uint8_t *header_buffer;

	library_object_size = scif_library_get_object_size(SCI_MAX_CONTROLLERS);

	isci->sci_library_memory =
	    malloc(library_object_size, M_ISCI, M_NOWAIT | M_ZERO);

	isci->sci_library_handle = scif_library_construct(
	    isci->sci_library_memory, SCI_MAX_CONTROLLERS);

	sci_object_set_association(isci->sci_library_handle, (void *)isci);

	verbosity_mask = (1<<SCI_LOG_VERBOSITY_ERROR) |
	    (1<<SCI_LOG_VERBOSITY_WARNING) | (1<<SCI_LOG_VERBOSITY_INFO) |
	    (1<<SCI_LOG_VERBOSITY_TRACE);

	scic_log_object_mask = 0xFFFFFFFF;
	scic_log_object_mask &= ~SCIC_LOG_OBJECT_COMPLETION_QUEUE;
	scic_log_object_mask &= ~SCIC_LOG_OBJECT_SSP_IO_REQUEST;
	scic_log_object_mask &= ~SCIC_LOG_OBJECT_STP_IO_REQUEST;
	scic_log_object_mask &= ~SCIC_LOG_OBJECT_SMP_IO_REQUEST;
	scic_log_object_mask &= ~SCIC_LOG_OBJECT_CONTROLLER;

	scif_log_object_mask = 0xFFFFFFFF;
	scif_log_object_mask &= ~SCIF_LOG_OBJECT_CONTROLLER;
	scif_log_object_mask &= ~SCIF_LOG_OBJECT_IO_REQUEST;

	TUNABLE_INT_FETCH("hw.isci.debug_level", &g_isci_debug_level);

	sci_logger_enable(sci_object_get_logger(isci->sci_library_handle),
	    scif_log_object_mask, verbosity_mask);

	sci_logger_enable(sci_object_get_logger(
	    scif_library_get_scic_handle(isci->sci_library_handle)),
	    scic_log_object_mask, verbosity_mask);

	header_buffer = (uint8_t *)&isci->pci_common_header;
	for (uint8_t i = 0; i < sizeof(isci->pci_common_header); i++)
		header_buffer[i] = pci_read_config(isci->device, i, 1);

	scic_library_set_pci_info(
	    scif_library_get_scic_handle(isci->sci_library_handle),
	    &isci->pci_common_header);

	isci->oem_parameters_found = FALSE;

	isci_get_oem_parameters(isci);

	/* Trigger an interrupt if 32 completions occur before the timeout expires. */
	isci->coalesce_number = 32;

	/* Trigger an interrupt if 2 microseconds elapse after a completion occurs,
	 *  regardless of whether "coalesce_number" completions have occurred.
	 */
	isci->coalesce_timeout = 2;

	isci->controller_count = scic_library_get_pci_device_controller_count(
	    scif_library_get_scic_handle(isci->sci_library_handle));

	for (int index = 0; index < isci->controller_count; index++) {
		struct ISCI_CONTROLLER *controller = &isci->controllers[index];
		SCI_CONTROLLER_HANDLE_T scif_controller_handle;

		controller->index = index;

		isci_controller_construct(controller, isci);

		scif_controller_handle = controller->scif_controller_handle;

		status = isci_controller_initialize(controller);

		if (status != SCI_SUCCESS) {
			isci_log_message(0, "ISCI",
			    "isci_controller_initialize FAILED: %x\n",
			    status);
			return (status);
		}

		error = isci_controller_allocate_memory(controller);

		if (error != 0)
			return (error);

		scif_controller_set_interrupt_coalescence(
		    scif_controller_handle, isci->coalesce_number,
		    isci->coalesce_timeout);
	}

	/* FreeBSD provides us a hook to ensure we get a chance to start
	 *  our controllers and complete initial domain discovery before
	 *  it searches for the boot device.  Once we're done, we'll
	 *  disestablish the hook, signaling the kernel that it can proceed
	 *  with the boot process.
	 */
	isci->config_hook.ich_func = &isci_controller_start;
	isci->config_hook.ich_arg = &isci->controllers[0];

	if (config_intrhook_establish(&isci->config_hook) != 0)
		isci_log_message(0, "ISCI",
		    "config_intrhook_establish failed!\n");

	return (status);
}

void
isci_allocate_dma_buffer_callback(void *arg, bus_dma_segment_t *seg,
    int nseg, int error)
{
	struct ISCI_MEMORY *memory = (struct ISCI_MEMORY *)arg;

	memory->error = error;

	if (nseg != 1 || error != 0)
		isci_log_message(0, "ISCI",
		    "Failed to allocate physically contiguous memory!\n");
	else
		memory->physical_address = seg->ds_addr;
}

int
isci_allocate_dma_buffer(device_t device, struct ISCI_MEMORY *memory)
{
	uint32_t status;

	status = bus_dma_tag_create(bus_get_dma_tag(device),
	    0x40 /* cacheline alignment */, 0x0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, memory->size,
	    0x1 /* we want physically contiguous */,
	    memory->size, 0, NULL, NULL, &memory->dma_tag);

	if (status == ENOMEM) {
		isci_log_message(0, "ISCI", "bus_dma_tag_create failed\n");
		return (status);
	}

	status = bus_dmamem_alloc(memory->dma_tag,
	    (void **)&memory->virtual_address, BUS_DMA_ZERO, &memory->dma_map);

	if (status == ENOMEM)
	{
		isci_log_message(0, "ISCI", "bus_dmamem_alloc failed\n");
		return (status);
	}

	status = bus_dmamap_load(memory->dma_tag, memory->dma_map,
	    (void *)memory->virtual_address, memory->size,
	    isci_allocate_dma_buffer_callback, memory, 0);

	if (status == EINVAL)
	{
		isci_log_message(0, "ISCI", "bus_dmamap_load failed\n");
		return (status);
	}

	return (0);
}

/**
 * @brief This callback method asks the user to associate the supplied
 *        lock with an operating environment specific locking construct.
 *
 * @param[in] controller This parameter specifies the controller with
 *            which this lock is to be associated.
 * @param[in] lock This parameter specifies the lock for which the
 *            user should associate an operating environment specific
 *            locking object.
 *
 * @see The SCI_LOCK_LEVEL enumeration for more information.
 *
 * @return none.
 */
void
scif_cb_lock_associate(SCI_CONTROLLER_HANDLE_T controller,
    SCI_LOCK_HANDLE_T lock)
{

}

/**
 * @brief This callback method asks the user to de-associate the supplied
 *        lock with an operating environment specific locking construct.
 *
 * @param[in] controller This parameter specifies the controller with
 *            which this lock is to be de-associated.
 * @param[in] lock This parameter specifies the lock for which the
 *            user should de-associate an operating environment specific
 *            locking object.
 *
 * @see The SCI_LOCK_LEVEL enumeration for more information.
 *
 * @return none.
 */
void
scif_cb_lock_disassociate(SCI_CONTROLLER_HANDLE_T controller,
    SCI_LOCK_HANDLE_T lock)
{

}


/**
 * @brief This callback method asks the user to acquire/get the lock.
 *        This method should pend until the lock has been acquired.
 *
 * @param[in] controller This parameter specifies the controller with
 *            which this lock is associated.
 * @param[in] lock This parameter specifies the lock to be acquired.
 *
 * @return none
 */
void
scif_cb_lock_acquire(SCI_CONTROLLER_HANDLE_T controller,
    SCI_LOCK_HANDLE_T lock)
{

}

/**
 * @brief This callback method asks the user to release a lock.
 *
 * @param[in] controller This parameter specifies the controller with
 *            which this lock is associated.
 * @param[in] lock This parameter specifies the lock to be released.
 *
 * @return none
 */
void
scif_cb_lock_release(SCI_CONTROLLER_HANDLE_T controller,
    SCI_LOCK_HANDLE_T lock)
{
}

/**
 * @brief This callback method creates an OS specific deferred task
 *        for internal usage.  The handle to the deferred task is stored
 *        by the OS driver.
 *
 * @param[in] controller This parameter specifies the controller object
 *            with which this callback is associated.
 *
 * @return none
 */
void
scif_cb_start_internal_io_task_create(SCI_CONTROLLER_HANDLE_T controller)
{

}

/**
 * @brief This callback method schedules an OS specific deferred task.
 *
 * @param[in] controller This parameter specifies the controller
 *            object with which this callback is associated.
 * @param[in] start_internal_io_task_routine This parameter specifies the
 *            sci start_internal_io routine.
 * @param[in] context This parameter specifies a handle to a parameter
 *            that will be passed into the "start_internal_io_task_routine"
 *            when it is invoked.
 *
 * @return none
 */
void
scif_cb_start_internal_io_task_schedule(SCI_CONTROLLER_HANDLE_T scif_controller,
    FUNCPTR start_internal_io_task_routine, void *context)
{
	/** @todo Use a FreeBSD tasklet to defer this routine to a later time,
	 *  rather than calling the routine inline.
	 */
	SCI_START_INTERNAL_IO_ROUTINE sci_start_internal_io_routine =
	    (SCI_START_INTERNAL_IO_ROUTINE)start_internal_io_task_routine;

	sci_start_internal_io_routine(context);
}

/**
 * @brief In this method the user must write the supplied DWORD to PCI
 *        memory.  This method is used for access to both memory space
 *        and I/O space.
 *
 * @param[in]  controller The controller for which to write a DWORD.
 * @param[in]  address This parameter depicts the address into
 *             which to write.
 * @param[out] write_value This parameter depicts the value being written
 *             into the PCI memory location.
 *
 * @todo These PCI memory access calls likely need to be optimized into macros.
 */
void
scic_cb_pci_write_dword(SCI_CONTROLLER_HANDLE_T scic_controller,
    void *address, uint32_t write_value)
{
	SCI_CONTROLLER_HANDLE_T scif_controller =
	    (SCI_CONTROLLER_HANDLE_T)sci_object_get_association(scic_controller);
	struct ISCI_CONTROLLER *isci_controller =
	    (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
	struct isci_softc *isci = isci_controller->isci;
	uint32_t bar = (uint32_t)(((POINTER_UINT)address & 0xF0000000) >> 28);
	bus_size_t offset = (bus_size_t)((POINTER_UINT)address & 0x0FFFFFFF);

	bus_space_write_4(isci->pci_bar[bar].bus_tag,
	    isci->pci_bar[bar].bus_handle, offset, write_value);
}

/**
 * @brief In this method the user must read a DWORD from PCI memory.
 *        This method is used for access to both memory space and I/O space.
 *
 * @param[in] controller The controller for which to read a DWORD.
 * @param[in] address This parameter depicts the address from
 *            which to read.
 *
 * @return The value being returned from the PCI memory location.
 *
 * @todo These PCI memory access calls likely need to be optimized into macros.
 */
uint32_t
scic_cb_pci_read_dword(SCI_CONTROLLER_HANDLE_T scic_controller, void *address)
{
	SCI_CONTROLLER_HANDLE_T scif_controller =
	    (SCI_CONTROLLER_HANDLE_T)sci_object_get_association(scic_controller);
	struct ISCI_CONTROLLER *isci_controller =
	    (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
	struct isci_softc *isci = isci_controller->isci;
	uint32_t bar = (uint32_t)(((POINTER_UINT)address & 0xF0000000) >> 28);
	bus_size_t offset = (bus_size_t)((POINTER_UINT)address & 0x0FFFFFFF);

	return (bus_space_read_4(isci->pci_bar[bar].bus_tag,
	    isci->pci_bar[bar].bus_handle, offset));
}

/**
 * @brief This method is called when the core requires the OS driver
 *        to stall execution.  This method is utilized during initialization
 *        or non-performance paths only.
 *
 * @param[in] microseconds This parameter specifies the number of
 *            microseconds for which to stall.  The operating system driver
 *            is allowed to round this value up where necessary.
 *
 * @return none.
 */
void
scic_cb_stall_execution(uint32_t microseconds)
{

	DELAY(microseconds);
}

/**
 * @brief In this method the user must return the base address register (BAR)
 *        value for the supplied base address register number.
 *
 * @param[in] controller The controller for which to retrieve the bar number.
 * @param[in] bar_number This parameter depicts the BAR index/number to be read.
 *
 * @return Return a pointer value indicating the contents of the BAR.
 * @retval NULL indicates an invalid BAR index/number was specified.
 * @retval All other values indicate a valid VIRTUAL address from the BAR.
 */
void *
scic_cb_pci_get_bar(SCI_CONTROLLER_HANDLE_T controller,
    uint16_t bar_number)
{

	/* Rather than a real virtual address, encode the BAR index in the
	 *  upper nibble of the returned pointer.  scic_cb_pci_read_dword()
	 *  and scic_cb_pci_write_dword() above decode this index and the
	 *  28-bit offset to select the proper bus space tag and handle.
	 */
	return ((void *)(POINTER_UINT)((uint32_t)bar_number << 28));
}

/**
 * @brief This method informs the SCI Core user that a phy/link became
 *        ready, but the phy is not allowed in the port.  In some
 *        situations the underlying hardware only allows for certain phy
 *        to port mappings.  If these mappings are violated, then this
 *        API is invoked.
 *
 * @param[in] controller This parameter represents the controller which
 *            contains the port.
 * @param[in] port This parameter specifies the SCI port object for which
 *            the callback is being invoked.
 * @param[in] phy This parameter specifies the phy that came ready, but the
 *            phy can't be a valid member of the port.
 *
 * @return none
 */
void
scic_cb_port_invalid_link_up(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PORT_HANDLE_T port, SCI_PHY_HANDLE_T phy)
{

}