1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2010, LSI Corp. 5 * All rights reserved. 6 * Author : Manjunath Ranganathaiah 7 * Support: freebsdraid@lsi.com 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of the <ORGANIZATION> nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 31 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 33 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include <dev/tws/tws.h> 41 #include <dev/tws/tws_services.h> 42 #include <dev/tws/tws_hdm.h> 43 44 #include <cam/cam.h> 45 #include <cam/cam_ccb.h> 46 47 MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver"); 48 int tws_queue_depth = TWS_MAX_REQS; 49 int tws_enable_msi = 0; 50 int tws_enable_msix = 0; 51 52 /* externs */ 53 extern int tws_cam_attach(struct tws_softc *sc); 54 extern void tws_cam_detach(struct tws_softc *sc); 55 extern int tws_init_ctlr(struct tws_softc *sc); 56 extern boolean tws_ctlr_ready(struct tws_softc *sc); 57 extern void tws_turn_off_interrupts(struct tws_softc *sc); 58 extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req, 59 u_int8_t q_type ); 60 extern struct tws_request *tws_q_remove_request(struct tws_softc *sc, 61 struct tws_request *req, u_int8_t q_type ); 62 extern struct tws_request *tws_q_remove_head(struct tws_softc *sc, 63 u_int8_t q_type ); 64 extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id); 65 extern boolean tws_ctlr_reset(struct tws_softc *sc); 66 extern void tws_intr(void *arg); 67 extern int tws_use_32bit_sgls; 68 69 struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type); 70 int tws_init_connect(struct tws_softc *sc, u_int16_t mc); 71 void tws_send_event(struct tws_softc *sc, u_int8_t event); 72 uint8_t tws_get_state(struct tws_softc *sc); 73 void tws_release_request(struct tws_request *req); 74 75 /* Function prototypes */ 76 static d_open_t tws_open; 77 static d_close_t tws_close; 78 static d_read_t tws_read; 79 static d_write_t tws_write; 80 extern d_ioctl_t tws_ioctl; 81 82 static int tws_init(struct tws_softc *sc); 83 static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs, 84 int nseg, int error); 85 86 static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size); 87 static int tws_init_aen_q(struct tws_softc *sc); 88 static int tws_init_trace_q(struct 
tws_softc *sc); 89 static int tws_setup_irq(struct tws_softc *sc); 90 int tws_setup_intr(struct tws_softc *sc, int irqs); 91 int tws_teardown_intr(struct tws_softc *sc); 92 93 /* Character device entry points */ 94 95 static struct cdevsw tws_cdevsw = { 96 .d_version = D_VERSION, 97 .d_open = tws_open, 98 .d_close = tws_close, 99 .d_read = tws_read, 100 .d_write = tws_write, 101 .d_ioctl = tws_ioctl, 102 .d_name = "tws", 103 }; 104 105 /* 106 * In the cdevsw routines, we find our softc by using the si_drv1 member 107 * of struct cdev. We set this variable to point to our softc in our 108 * attach routine when we create the /dev entry. 109 */ 110 111 int 112 tws_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 113 { 114 struct tws_softc *sc = dev->si_drv1; 115 116 if ( sc ) 117 TWS_TRACE_DEBUG(sc, "entry", dev, oflags); 118 return (0); 119 } 120 121 int 122 tws_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 123 { 124 struct tws_softc *sc = dev->si_drv1; 125 126 if ( sc ) 127 TWS_TRACE_DEBUG(sc, "entry", dev, fflag); 128 return (0); 129 } 130 131 int 132 tws_read(struct cdev *dev, struct uio *uio, int ioflag) 133 { 134 struct tws_softc *sc = dev->si_drv1; 135 136 if ( sc ) 137 TWS_TRACE_DEBUG(sc, "entry", dev, ioflag); 138 return (0); 139 } 140 141 int 142 tws_write(struct cdev *dev, struct uio *uio, int ioflag) 143 { 144 struct tws_softc *sc = dev->si_drv1; 145 146 if ( sc ) 147 TWS_TRACE_DEBUG(sc, "entry", dev, ioflag); 148 return (0); 149 } 150 151 /* PCI Support Functions */ 152 153 /* 154 * Compare the device ID of this device against the IDs that this driver 155 * supports. If there is a match, set the description and return success. 
 */
static int
tws_probe(device_t dev)
{
    /* Print the banner only once, for the first controller found. */
    static u_int8_t first_ctlr = 1;

    if ((pci_get_vendor(dev) == TWS_VENDOR_ID) &&
        (pci_get_device(dev) == TWS_DEVICE_ID)) {
        device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller");
        if (first_ctlr) {
            printf("LSI 3ware device driver for SAS/SATA storage "
                   "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING);
            first_ctlr = 0;
        }

        return(BUS_PROBE_DEFAULT);
    }
    return (ENXIO);
}

/*
 * Attach function is only called if the probe is successful.
 *
 * Acquires, in order: mutexes and trace queue, sysctl tree, PCI BARs,
 * interrupt resources, the /dev node, DMA memory (via tws_init), the
 * controller (tws_init_ctlr) and CAM.  Failure unwinds through the
 * attach_fail_* labels in reverse order of acquisition; on any failure
 * ENXIO is returned.
 */

static int
tws_attach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    u_int32_t bar;
    int error=0,i;

    /* no tracing yet */
    /* Look up our softc and initialize its fields. */
    sc->tws_dev = dev;
    sc->device_id = pci_get_device(dev);
    sc->subvendor_id = pci_get_subvendor(dev);
    sc->subdevice_id = pci_get_subdevice(dev);

    /* Initialize mutexes; io_lock is recursive because the busdma
     * lock function may re-enter it — TODO confirm against callers. */
    mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF);
    mtx_init( &sc->sim_lock,  "tws_sim_lock", NULL, MTX_DEF);
    mtx_init( &sc->gen_lock,  "tws_gen_lock", NULL, MTX_DEF);
    mtx_init( &sc->io_lock, "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE);
    callout_init(&sc->stats_timer, 1);

    /* A trace-queue failure is non-fatal; tracing is best-effort. */
    if ( tws_init_trace_q(sc) == FAILURE )
        printf("trace init failure\n");
    /* send init event (gen_lock required by tws_send_event) */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_START);
    mtx_unlock(&sc->gen_lock);

#if _BYTE_ORDER == _BIG_ENDIAN
    TWS_TRACE(sc, "BIG endian", 0, 0);
#endif
    /* sysctl context setup: hw.tws<unit> node plus a driver_version leaf */
    sysctl_ctx_init(&sc->tws_clist);
    sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
        SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
        device_get_nameunit(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
    if ( sc->tws_oidp == NULL ) {
        tws_log(sc, SYSCTL_TREE_NODE_ADD);
        goto attach_fail_1;
    }
    SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
        OID_AUTO, "driver_version", CTLFLAG_RD,
        TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");

    pci_enable_busmaster(dev);

    /* BAR reads below are trace-only; the values are not used further. */
    bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
    TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
    bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
    bar = bar & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);

    /* MFA base address is the BAR2 register, used for
     * push mode.  Firmware will eventually move to
     * pull mode, at which point this needs to change.
     */
#ifndef TWS_PULL_MODE_ENABLE
    sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
    sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
#endif

    /* allocate MMIO register space */
    sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
    if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                &(sc->reg_res_id), RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_1;
    }
    sc->bus_tag = rman_get_bustag(sc->reg_res);
    sc->bus_handle = rman_get_bushandle(sc->reg_res);

#ifndef TWS_PULL_MODE_ENABLE
    /* Allocate bus space for inbound mfa (push-mode message area, BAR2) */
    sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
    if ((sc->mfa_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                          &(sc->mfa_res_id), RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_2;
    }
    sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
    sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
#endif

    /* Allocate and register our interrupt. */
    sc->intr_type = TWS_INTx; /* default */

    if ( tws_enable_msi )
        sc->intr_type = TWS_MSI;
    if ( tws_setup_irq(sc) == FAILURE ) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_3;
    }

    /*
     * Create a /dev entry for this device.  The kernel will assign us
     * a major number automatically.  We use the unit number of this
     * device as the minor number and name the character device
     * "tws<unit>".
     */
    sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev),
        UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
        device_get_unit(dev));
    sc->tws_cdev->si_drv1 = sc;

    if ( tws_init(sc) == FAILURE ) {
        tws_log(sc, TWS_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ( tws_init_ctlr(sc) == FAILURE ) {
        tws_log(sc, TWS_CTLR_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ((error = tws_cam_attach(sc))) {
        tws_log(sc, TWS_CAM_ATTACH);
        goto attach_fail_4;
    }
    /* send init complete event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
    return(0);

    /*
     * Error unwind.  Each label releases everything acquired after the
     * corresponding point of failure; fall-through releases the rest.
     * NOTE(review): tws_init() allocates more than the cmd DMA memory
     * freed here (reqs/sense_bufs/scan_ccb, data_tag) — a mid-tws_init
     * failure presumably leaks those on this path; verify.
     */
attach_fail_4:
    tws_teardown_intr(sc);
    destroy_dev(sc->tws_cdev);
    if (sc->dma_mem_phys)
        bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
    if (sc->dma_mem)
        bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
    if (sc->cmd_tag)
        bus_dma_tag_destroy(sc->cmd_tag);
attach_fail_3:
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                 SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus irq res", 0, 0);
        }
    }
#ifndef TWS_PULL_MODE_ENABLE
attach_fail_2:
#endif
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
    }
attach_fail_1:
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    sysctl_ctx_free(&sc->tws_clist);
    return (ENXIO);
}

/*
Detach device. */

static int
tws_detach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    int i;
    u_int32_t reg;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_UNINIT_START);
    mtx_unlock(&sc->gen_lock);

    /* needs to disable interrupt before detaching from cam */
    tws_turn_off_interrupts(sc);
    /* clear door bell (write-1-to-clear the outbound doorbell register) */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
    TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
    sc->obfl_q_overrun = false;
    /* re-issue init-connect; presumably quiesces the firmware — TODO confirm */
    tws_init_connect(sc, 1);

    /* Teardown the state in our softc created in our attach routine. */
    /* Disconnect the interrupt handler. */
    tws_teardown_intr(sc);

    /* Release irq resource */
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                     SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus release irq resource",
                                   i, sc->irq_res_id[i]);
        }
    }
    if ( sc->intr_type == TWS_MSI ) {
        pci_release_msi(sc->tws_dev);
    }

    tws_cam_detach(sc);

    /* Free command-packet DMA memory allocated by tws_init(). */
    if (sc->dma_mem_phys)
        bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
    if (sc->dma_mem)
        bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
    if (sc->cmd_tag)
        bus_dma_tag_destroy(sc->cmd_tag);

    /* Release memory resource */
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
    }

    /* Per-request data maps and timeout callouts. */
    for ( i=0; i< tws_queue_depth; i++) {
        if (sc->reqs[i].dma_map)
            bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map);
        callout_drain(&sc->reqs[i].timeout);
    }

    callout_drain(&sc->stats_timer);
    free(sc->reqs, M_TWS);
    free(sc->sense_bufs, M_TWS);
    free(sc->scan_ccb, M_TWS);
    if (sc->ioctl_data_mem)
        bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map);
    if (sc->data_tag)
        bus_dma_tag_destroy(sc->data_tag);
    free(sc->aen_q.q, M_TWS);
    free(sc->trace_q.q, M_TWS);
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    destroy_dev(sc->tws_cdev);
    sysctl_ctx_free(&sc->tws_clist);
    return (0);
}

/*
 * Hook tws_intr() to each allocated IRQ resource that does not already
 * have a handler.  Returns SUCCESS/FAILURE.
 */
int
tws_setup_intr(struct tws_softc *sc, int irqs)
{
    int i, error;

    for(i=0;i<irqs;i++) {
        if (!(sc->intr_handle[i])) {
            if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i],
                                    INTR_TYPE_CAM | INTR_MPSAFE,
                                    NULL,
                                    tws_intr, sc, &sc->intr_handle[i]))) {
                tws_log(sc, SETUP_INTR_RES);
                return(FAILURE);
            }
        }
    }
    return(SUCCESS);

}

/*
 * Undo tws_setup_intr().  Always returns SUCCESS.
 * NOTE(review): the bus_teardown_intr() return value is stored in
 * 'error' but never checked.
 */
int
tws_teardown_intr(struct tws_softc *sc)
{
    int i, error;

    for(i=0;i<sc->irqs;i++) {
        if (sc->intr_handle[i]) {
            error = bus_teardown_intr(sc->tws_dev,
                                      sc->irq_res[i], sc->intr_handle[i]);
            sc->intr_handle[i] = NULL;
        }
    }
    return(SUCCESS);
}

/*
 * Allocate IRQ resources and install the handler, according to
 * sc->intr_type (legacy INTx or single-message MSI).  Both paths use a
 * single vector.  Returns SUCCESS/FAILURE.
 */
static int
tws_setup_irq(struct tws_softc *sc)
{
    int messages;

    switch(sc->intr_type) {
        case TWS_INTx :
            sc->irqs = 1;
            sc->irq_res_id[0] = 0;   /* rid 0 = legacy interrupt */
            sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
                          &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
            if ( ! sc->irq_res[0] )
                return(FAILURE);
            if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
                return(FAILURE);
            device_printf(sc->tws_dev, "Using legacy INTx\n");
            break;
        case TWS_MSI :
            sc->irqs = 1;
            sc->irq_res_id[0] = 1;   /* rid 1 = first MSI vector */
            messages = 1;
            if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) {
                TWS_TRACE(sc, "pci alloc msi fail", 0, messages);
                return(FAILURE);
            }
            sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
                          &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);

            if ( !sc->irq_res[0]  )
                return(FAILURE);
            if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
                return(FAILURE);
            device_printf(sc->tws_dev, "Using MSI\n");
            break;
    }

    return(SUCCESS);
}

/*
 * One-time software initialization: clamp the queue depth, create the
 * DMA tags (parent, command-packet, data), allocate and map the command
 * DMA memory, allocate the request/sense/scan-ccb arrays and ioctl data
 * buffer, make sure the controller is ready (resetting it if not), and
 * initialize the queues and request pool.
 * Returns SUCCESS, or ENOMEM/FAILURE on error (callers only compare
 * against FAILURE).
 */
static int
tws_init(struct tws_softc *sc)
{

    u_int32_t max_sg_elements;
    u_int32_t dma_mem_size;
    int error;
    u_int32_t reg;

    sc->seq_id = 0;
    /* Clamp tunable queue depth into [TWS_RESERVED_REQS+1, TWS_MAX_REQS]. */
    if ( tws_queue_depth > TWS_MAX_REQS )
        tws_queue_depth = TWS_MAX_REQS;
    if (tws_queue_depth < TWS_RESERVED_REQS+1)
        tws_queue_depth = TWS_RESERVED_REQS+1;
    sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
    max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ?
                                 TWS_MAX_64BIT_SG_ELEMENTS :
                                 TWS_MAX_32BIT_SG_ELEMENTS;
    /* One command packet per request plus one sector of slack. */
    dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
                             (TWS_SECTOR_SIZE) ;
    if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */
                            TWS_ALIGNMENT,           /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            BUS_SPACE_MAXSIZE,       /* maxsize */
                            max_sg_elements,         /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            NULL, NULL,              /* lockfunc, lockfuncarg */
                            &sc->parent_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements,
                                                    sc->is64bit);
        return(ENOMEM);
    }
    /* In bound message frame requires 16byte alignment.
     * Outbound MF's can live with 4byte alignment - for now just
     * use 16 for both.
     */
    if ( bus_dma_tag_create(sc->parent_tag,          /* parent */
                            TWS_IN_MF_ALIGNMENT,     /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            dma_mem_size,            /* maxsize */
                            1,                       /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            NULL, NULL,              /* lockfunc, lockfuncarg */
                            &sc->cmd_tag             /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
                    BUS_DMA_NOWAIT, &sc->cmd_map)) {
        TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
    /* NOTE(review): 'error' is nevertheless never checked here. */
    sc->dma_mem_phys=0;
    error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
                    dma_mem_size, tws_dmamap_cmds_load_cbfn,
                    &sc->dma_mem_phys, 0);

    /*
     * Create a dma tag for data buffers; size will be the maximum
     * possible I/O size (128kB).
     */
    if (bus_dma_tag_create(sc->parent_tag,         /* parent */
                           TWS_ALIGNMENT,          /* alignment */
                           0,                      /* boundary */
                           BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                           BUS_SPACE_MAXADDR,      /* highaddr */
                           NULL, NULL,             /* filter, filterarg */
                           TWS_MAX_IO_SIZE,        /* maxsize */
                           max_sg_elements,        /* nsegments */
                           TWS_MAX_IO_SIZE,        /* maxsegsize */
                           BUS_DMA_ALLOCNOW,       /* flags */
                           busdma_lock_mutex,      /* lockfunc */
                           &sc->io_lock,           /* lockfuncarg */
                           &sc->data_tag           /* tag */)) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO);
    if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem,
            (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) {
        device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n");
        return(ENOMEM);
    }

    /* Try a reset only if the controller is not already ready. */
    if ( !tws_ctlr_ready(sc) )
        if( !tws_ctlr_reset(sc) )
            return(FAILURE);

    bzero(&sc->stats, sizeof(struct tws_stats));
    tws_init_qs(sc);
    tws_turn_off_interrupts(sc);

    /*
     * enable pull mode by setting bit1 .
     * setting bit0 to 1 will enable interrupt coalesing
     * will revisit.
     */

#ifdef TWS_PULL_MODE_ENABLE

    reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
    TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
    tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);

#endif

    TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
    if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
        return(FAILURE);
    if ( tws_init_aen_q(sc) == FAILURE )
        return(FAILURE);

    return(SUCCESS);

}

/* Allocate the 256-entry AEN (asynchronous event) circular queue. */
static int
tws_init_aen_q(struct tws_softc *sc)
{
    sc->aen_q.head=0;
    sc->aen_q.tail=0;
    sc->aen_q.depth=256;
    sc->aen_q.overflow=0;
    sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    return(SUCCESS);
}

/* Allocate the 256-entry debug trace circular queue. */
static int
tws_init_trace_q(struct tws_softc *sc)
{
    sc->trace_q.head=0;
    sc->trace_q.tail=0;
    sc->trace_q.depth=256;
    sc->trace_q.overflow=0;
    sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    return(SUCCESS);
}

/*
 * Carve the command DMA memory into per-request command packets, wire
 * each request to its packet/sense buffer/physical address, and put the
 * non-reserved requests (index >= TWS_RESERVED_REQS) on the free queue.
 * The first TWS_RESERVED_REQS slots are handed out by index via
 * tws_get_request() for internal/ioctl use.
 */
static int
tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
{

    struct tws_command_packet *cmd_buf;
    cmd_buf = (struct tws_command_packet *)sc->dma_mem;
    int i;

    bzero(cmd_buf, dma_mem_size);
    TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
    mtx_lock(&sc->q_lock);
    for ( i=0; i< tws_queue_depth; i++)
    {
        if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
            /* log a ENOMEM failure msg here */
            mtx_unlock(&sc->q_lock);
            return(FAILURE);
        }
        sc->reqs[i].cmd_pkt =  &cmd_buf[i];

        /* Sense buffer aliases the header of the same packet slot. */
        sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ;
        sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys +
                              (i * sizeof(struct tws_command_packet));

        /* Physical address of the command body (header skipped). */
        sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys +
                              sizeof(struct tws_command_header) +
                              (i * sizeof(struct tws_command_packet));
        sc->reqs[i].request_id = i;
        sc->reqs[i].sc = sc;

        sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128;

        callout_init(&sc->reqs[i].timeout, 1);
        sc->reqs[i].state = TWS_REQ_STATE_FREE;
        if ( i >= TWS_RESERVED_REQS )
            tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q);
    }
    mtx_unlock(&sc->q_lock);
    return(SUCCESS);
}

/*
 * busdma load callback: record the single segment's bus address into the
 * bus_addr_t the caller passed as arg (sc->dma_mem_phys).
 */
static void
tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                           int nseg, int error)
{

    /* printf("command load done \n"); */

    *((bus_addr_t *)arg) = segs[0].ds_addr;
}

/*
 * Driver state machine.  Must be called with gen_lock held.  Invalid
 * transitions are logged and ignored (the state is left unchanged),
 * except RESET_START which is accepted from any state but RESET.
 */
void
tws_send_event(struct tws_softc *sc, u_int8_t event)
{
    mtx_assert(&sc->gen_lock, MA_OWNED);
    TWS_TRACE_DEBUG(sc, "received event ", 0, event);
    switch (event) {
        case TWS_INIT_START:
            sc->tws_state = TWS_INIT;
            break;

        case TWS_INIT_COMPLETE:
            if (sc->tws_state != TWS_INIT) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_ONLINE\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_ONLINE;
            }
            break;

        case TWS_RESET_START:
            /* We can transition to reset state from any state except reset*/
            if (sc->tws_state != TWS_RESET) {
                sc->tws_prev_state = sc->tws_state;
                sc->tws_state = TWS_RESET;
            }
            break;

        case TWS_RESET_COMPLETE:
            if (sc->tws_state != TWS_RESET) {
                device_printf(sc->tws_dev, "invalid state transition %d => %d (previous state)\n", sc->tws_state, sc->tws_prev_state);
            } else {
                sc->tws_state = sc->tws_prev_state;
            }
            break;

        case TWS_SCAN_FAILURE:
            if (sc->tws_state != TWS_ONLINE) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_OFFLINE\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_OFFLINE;
            }
            break;

        case TWS_UNINIT_START:
            if ((sc->tws_state != TWS_ONLINE) && (sc->tws_state != TWS_OFFLINE)) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_UNINIT\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_UNINIT;
            }
            break;
    }

}

uint8_t 768 tws_get_state(struct tws_softc *sc) 769 { 770 771 return((u_int8_t)sc->tws_state); 772 773 } 774 775 /* Called during system shutdown after sync. */ 776 777 static int 778 tws_shutdown(device_t dev) 779 { 780 781 struct tws_softc *sc = device_get_softc(dev); 782 783 TWS_TRACE_DEBUG(sc, "entry", 0, 0); 784 785 tws_turn_off_interrupts(sc); 786 tws_init_connect(sc, 1); 787 788 return (0); 789 } 790 791 /* 792 * Device suspend routine. 793 */ 794 static int 795 tws_suspend(device_t dev) 796 { 797 struct tws_softc *sc = device_get_softc(dev); 798 799 if ( sc ) 800 TWS_TRACE_DEBUG(sc, "entry", 0, 0); 801 return (0); 802 } 803 804 /* 805 * Device resume routine. 806 */ 807 static int 808 tws_resume(device_t dev) 809 { 810 811 struct tws_softc *sc = device_get_softc(dev); 812 813 if ( sc ) 814 TWS_TRACE_DEBUG(sc, "entry", 0, 0); 815 return (0); 816 } 817 818 struct tws_request * 819 tws_get_request(struct tws_softc *sc, u_int16_t type) 820 { 821 struct mtx *my_mutex = ((type == TWS_REQ_TYPE_SCSI_IO) ? &sc->q_lock : &sc->gen_lock); 822 struct tws_request *r = NULL; 823 824 mtx_lock(my_mutex); 825 826 if (type == TWS_REQ_TYPE_SCSI_IO) { 827 r = tws_q_remove_head(sc, TWS_FREE_Q); 828 } else { 829 if ( sc->reqs[type].state == TWS_REQ_STATE_FREE ) { 830 r = &sc->reqs[type]; 831 } 832 } 833 834 if ( r ) { 835 bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache)); 836 r->data = NULL; 837 r->length = 0; 838 r->type = type; 839 r->flags = TWS_DIR_UNKNOWN; 840 r->error_code = TWS_REQ_RET_INVALID; 841 r->cb = NULL; 842 r->ccb_ptr = NULL; 843 callout_stop(&r->timeout); 844 r->next = r->prev = NULL; 845 846 r->state = ((type == TWS_REQ_TYPE_SCSI_IO) ? 
TWS_REQ_STATE_TRAN : TWS_REQ_STATE_BUSY); 847 } 848 849 mtx_unlock(my_mutex); 850 851 return(r); 852 } 853 854 void 855 tws_release_request(struct tws_request *req) 856 { 857 858 struct tws_softc *sc = req->sc; 859 860 TWS_TRACE_DEBUG(sc, "entry", sc, 0); 861 mtx_lock(&sc->q_lock); 862 tws_q_insert_tail(sc, req, TWS_FREE_Q); 863 mtx_unlock(&sc->q_lock); 864 } 865 866 static device_method_t tws_methods[] = { 867 /* Device interface */ 868 DEVMETHOD(device_probe, tws_probe), 869 DEVMETHOD(device_attach, tws_attach), 870 DEVMETHOD(device_detach, tws_detach), 871 DEVMETHOD(device_shutdown, tws_shutdown), 872 DEVMETHOD(device_suspend, tws_suspend), 873 DEVMETHOD(device_resume, tws_resume), 874 875 DEVMETHOD_END 876 }; 877 878 static driver_t tws_driver = { 879 "tws", 880 tws_methods, 881 sizeof(struct tws_softc) 882 }; 883 884 static devclass_t tws_devclass; 885 886 /* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */ 887 DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, 0, 0); 888 MODULE_DEPEND(tws, cam, 1, 1, 1); 889 MODULE_DEPEND(tws, pci, 1, 1, 1); 890 891 TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth); 892 TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi); 893