1 /*- 2 * Copyright (c) 2000 Michael Smith 3 * Copyright (c) 2001 Scott Long 4 * Copyright (c) 2000 BSDi 5 * Copyright (c) 2001 Adaptec, Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters. 35 */ 36 #define AAC_DRIVER_VERSION 0x02000000 37 #define AAC_DRIVERNAME "aac" 38 39 #include "opt_aac.h" 40 41 /* #include <stddef.h> */ 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/malloc.h> 45 #include <sys/kernel.h> 46 #include <sys/kthread.h> 47 #include <sys/sysctl.h> 48 #include <sys/poll.h> 49 #include <sys/ioccom.h> 50 51 #include <sys/bus.h> 52 #include <sys/conf.h> 53 #include <sys/signalvar.h> 54 #include <sys/time.h> 55 #include <sys/eventhandler.h> 56 #include <sys/rman.h> 57 58 #include <machine/bus.h> 59 #include <sys/bus_dma.h> 60 #include <machine/resource.h> 61 62 #include <dev/pci/pcireg.h> 63 #include <dev/pci/pcivar.h> 64 65 #include <dev/aac/aacreg.h> 66 #include <sys/aac_ioctl.h> 67 #include <dev/aac/aacvar.h> 68 #include <dev/aac/aac_tables.h> 69 70 static void aac_startup(void *arg); 71 static void aac_add_container(struct aac_softc *sc, 72 struct aac_mntinforesp *mir, int f); 73 static void aac_get_bus_info(struct aac_softc *sc); 74 75 /* Command Processing */ 76 static void aac_timeout(struct aac_softc *sc); 77 static void aac_complete(void *context, int pending); 78 static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp); 79 static void aac_bio_complete(struct aac_command *cm); 80 static int aac_wait_command(struct aac_command *cm); 81 static void aac_command_thread(struct aac_softc *sc); 82 83 /* Command Buffer Management */ 84 static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs, 85 int nseg, int error); 86 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, 87 int nseg, int error); 88 static int aac_alloc_commands(struct aac_softc *sc); 89 static void aac_free_commands(struct aac_softc *sc); 90 static void aac_unmap_command(struct aac_command *cm); 91 92 /* Hardware Interface */ 93 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, 94 int error); 95 static int 
aac_check_firmware(struct aac_softc *sc); 96 static int aac_init(struct aac_softc *sc); 97 static int aac_sync_command(struct aac_softc *sc, u_int32_t command, 98 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, 99 u_int32_t arg3, u_int32_t *sp); 100 static int aac_enqueue_fib(struct aac_softc *sc, int queue, 101 struct aac_command *cm); 102 static int aac_dequeue_fib(struct aac_softc *sc, int queue, 103 u_int32_t *fib_size, struct aac_fib **fib_addr); 104 static int aac_enqueue_response(struct aac_softc *sc, int queue, 105 struct aac_fib *fib); 106 107 /* Falcon/PPC interface */ 108 static int aac_fa_get_fwstatus(struct aac_softc *sc); 109 static void aac_fa_qnotify(struct aac_softc *sc, int qbit); 110 static int aac_fa_get_istatus(struct aac_softc *sc); 111 static void aac_fa_clear_istatus(struct aac_softc *sc, int mask); 112 static void aac_fa_set_mailbox(struct aac_softc *sc, u_int32_t command, 113 u_int32_t arg0, u_int32_t arg1, 114 u_int32_t arg2, u_int32_t arg3); 115 static int aac_fa_get_mailbox(struct aac_softc *sc, int mb); 116 static void aac_fa_set_interrupts(struct aac_softc *sc, int enable); 117 118 struct aac_interface aac_fa_interface = { 119 aac_fa_get_fwstatus, 120 aac_fa_qnotify, 121 aac_fa_get_istatus, 122 aac_fa_clear_istatus, 123 aac_fa_set_mailbox, 124 aac_fa_get_mailbox, 125 aac_fa_set_interrupts, 126 NULL, NULL, NULL 127 }; 128 129 /* StrongARM interface */ 130 static int aac_sa_get_fwstatus(struct aac_softc *sc); 131 static void aac_sa_qnotify(struct aac_softc *sc, int qbit); 132 static int aac_sa_get_istatus(struct aac_softc *sc); 133 static void aac_sa_clear_istatus(struct aac_softc *sc, int mask); 134 static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, 135 u_int32_t arg0, u_int32_t arg1, 136 u_int32_t arg2, u_int32_t arg3); 137 static int aac_sa_get_mailbox(struct aac_softc *sc, int mb); 138 static void aac_sa_set_interrupts(struct aac_softc *sc, int enable); 139 140 struct aac_interface aac_sa_interface = { 141 aac_sa_get_fwstatus, 142 aac_sa_qnotify, 143 aac_sa_get_istatus, 144 aac_sa_clear_istatus, 145 aac_sa_set_mailbox, 146 aac_sa_get_mailbox, 147 aac_sa_set_interrupts, 148 NULL, NULL, NULL 149 }; 150 151 /* i960Rx interface */ 152 static int aac_rx_get_fwstatus(struct aac_softc *sc); 153 static void aac_rx_qnotify(struct aac_softc *sc, int qbit); 154 static int aac_rx_get_istatus(struct aac_softc *sc); 155 static void aac_rx_clear_istatus(struct aac_softc *sc, int mask); 156 static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, 157 u_int32_t arg0, u_int32_t arg1, 158 u_int32_t arg2, u_int32_t arg3); 159 static int aac_rx_get_mailbox(struct aac_softc *sc, int mb); 160 static void aac_rx_set_interrupts(struct aac_softc *sc, int enable); 161 static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm); 162 static int aac_rx_get_outb_queue(struct aac_softc *sc); 163 static void aac_rx_set_outb_queue(struct aac_softc *sc, int index); 164 165 struct aac_interface aac_rx_interface = { 166 aac_rx_get_fwstatus, 167 aac_rx_qnotify, 168 aac_rx_get_istatus, 169 aac_rx_clear_istatus, 170 aac_rx_set_mailbox, 171 aac_rx_get_mailbox, 172 aac_rx_set_interrupts, 173 aac_rx_send_command, 174 aac_rx_get_outb_queue, 175 aac_rx_set_outb_queue 176 }; 177 178 /* Rocket/MIPS interface */ 179 static int aac_rkt_get_fwstatus(struct aac_softc *sc); 180 static void aac_rkt_qnotify(struct aac_softc *sc, int qbit); 181 static int aac_rkt_get_istatus(struct aac_softc *sc); 182 static void aac_rkt_clear_istatus(struct aac_softc *sc, 
int mask); 183 static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, 184 u_int32_t arg0, u_int32_t arg1, 185 u_int32_t arg2, u_int32_t arg3); 186 static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb); 187 static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable); 188 static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm); 189 static int aac_rkt_get_outb_queue(struct aac_softc *sc); 190 static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index); 191 192 struct aac_interface aac_rkt_interface = { 193 aac_rkt_get_fwstatus, 194 aac_rkt_qnotify, 195 aac_rkt_get_istatus, 196 aac_rkt_clear_istatus, 197 aac_rkt_set_mailbox, 198 aac_rkt_get_mailbox, 199 aac_rkt_set_interrupts, 200 aac_rkt_send_command, 201 aac_rkt_get_outb_queue, 202 aac_rkt_set_outb_queue 203 }; 204 205 /* Debugging and Diagnostics */ 206 static void aac_describe_controller(struct aac_softc *sc); 207 static char *aac_describe_code(struct aac_code_lookup *table, 208 u_int32_t code); 209 210 /* Management Interface */ 211 static d_open_t aac_open; 212 static d_close_t aac_close; 213 static d_ioctl_t aac_ioctl; 214 static d_poll_t aac_poll; 215 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib); 216 static void aac_handle_aif(struct aac_softc *sc, 217 struct aac_fib *fib); 218 static int aac_rev_check(struct aac_softc *sc, caddr_t udata); 219 static int aac_open_aif(struct aac_softc *sc, caddr_t arg); 220 static int aac_close_aif(struct aac_softc *sc, caddr_t arg); 221 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg); 222 static int aac_return_aif(struct aac_softc *sc, 223 struct aac_fib_context *ctx, caddr_t uptr); 224 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr); 225 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr); 226 static void aac_ioctl_event(struct aac_softc *sc, 227 struct aac_event *event, void *arg); 228 229 static struct cdevsw aac_cdevsw = { 230 .d_version = D_VERSION, 231 .d_flags = D_NEEDGIANT, 232 .d_open = aac_open, 233 .d_close = aac_close, 234 .d_ioctl = aac_ioctl, 235 .d_poll = aac_poll, 236 .d_name = "aac", 237 }; 238 239 MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver"); 240 241 /* sysctl node */ 242 SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters"); 243 244 /* 245 * Device Interface 246 */ 247 248 /* 249 * Initialise the controller and softc 250 */ 251 int 252 aac_attach(struct aac_softc *sc) 253 { 254 int error, unit; 255 256 debug_called(1); 257 258 /* 259 * Initialise per-controller queues. 260 */ 261 aac_initq_free(sc); 262 aac_initq_ready(sc); 263 aac_initq_busy(sc); 264 aac_initq_bio(sc); 265 266 /* 267 * Initialise command-completion task. 268 */ 269 TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc); 270 271 /* mark controller as suspended until we get ourselves organised */ 272 sc->aac_state |= AAC_STATE_SUSPEND; 273 274 /* 275 * Check that the firmware on the card is supported. 276 */ 277 if ((error = aac_check_firmware(sc)) != 0) 278 return(error); 279 280 /* 281 * Initialize locks 282 */ 283 mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF); 284 mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF); 285 mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF); 286 TAILQ_INIT(&sc->aac_container_tqh); 287 TAILQ_INIT(&sc->aac_ev_cmfree); 288 289 /* 290 * Initialise the adapter. 291 */ 292 if ((error = aac_init(sc)) != 0) 293 return(error); 294 295 /* 296 * Allocate and connect our interrupt. 
297 */ 298 sc->aac_irq_rid = 0; 299 if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, 300 &sc->aac_irq_rid, 301 RF_SHAREABLE | 302 RF_ACTIVE)) == NULL) { 303 device_printf(sc->aac_dev, "can't allocate interrupt\n"); 304 return (EINVAL); 305 } 306 if (sc->flags & AAC_FLAGS_NEW_COMM) { 307 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 308 INTR_MPSAFE|INTR_TYPE_BIO, NULL, 309 aac_new_intr, sc, &sc->aac_intr)) { 310 device_printf(sc->aac_dev, "can't set up interrupt\n"); 311 return (EINVAL); 312 } 313 } else { 314 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 315 INTR_TYPE_BIO, aac_fast_intr, NULL, 316 sc, &sc->aac_intr)) { 317 device_printf(sc->aac_dev, 318 "can't set up FAST interrupt\n"); 319 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 320 INTR_MPSAFE|INTR_TYPE_BIO, 321 NULL, (driver_intr_t *)aac_fast_intr, 322 sc, &sc->aac_intr)) { 323 device_printf(sc->aac_dev, 324 "can't set up MPSAFE interrupt\n"); 325 return (EINVAL); 326 } 327 } 328 } 329 330 /* 331 * Print a little information about the controller. 332 */ 333 aac_describe_controller(sc); 334 335 /* 336 * Register to probe our containers later. 337 */ 338 sc->aac_ich.ich_func = aac_startup; 339 sc->aac_ich.ich_arg = sc; 340 if (config_intrhook_establish(&sc->aac_ich) != 0) { 341 device_printf(sc->aac_dev, 342 "can't establish configuration hook\n"); 343 return(ENXIO); 344 } 345 346 /* 347 * Make the control device. 348 */ 349 unit = device_get_unit(sc->aac_dev); 350 sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR, 351 0640, "aac%d", unit); 352 (void)make_dev_alias(sc->aac_dev_t, "afa%d", unit); 353 (void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit); 354 sc->aac_dev_t->si_drv1 = sc; 355 356 /* Create the AIF thread */ 357 if (kproc_create((void(*)(void *))aac_command_thread, sc, 358 &sc->aifthread, 0, 0, "aac%daif", unit)) 359 panic("Could not create AIF thread\n"); 360 361 /* Register the shutdown method to only be called post-dump */ 362 if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown, 363 sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL) 364 device_printf(sc->aac_dev, 365 "shutdown event registration failed\n"); 366 367 /* Register with CAM for the non-DASD devices */ 368 if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) { 369 TAILQ_INIT(&sc->aac_sim_tqh); 370 aac_get_bus_info(sc); 371 } 372 373 return(0); 374 } 375 376 void 377 aac_add_event(struct aac_softc *sc, struct aac_event *event) 378 { 379 380 switch (event->ev_type & AAC_EVENT_MASK) { 381 case AAC_EVENT_CMFREE: 382 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links); 383 break; 384 default: 385 device_printf(sc->aac_dev, "aac_add event: unknown event %d\n", 386 event->ev_type); 387 break; 388 } 389 390 return; 391 } 392 393 /* 394 * Probe for containers, create disks. 
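 *
 * Each pass of the probe loop below sends a synchronous ContainerCommand
 * FIB carrying a VM_NameServe request for container index 'i'; the
 * adapter's response (aac_mntinforesp) reports the current container
 * count in MntRespCount, and each valid entry is handed to
 * aac_add_container() to create an "aacd" child device.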
 */
static void
aac_startup(void *arg)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_mntinfo *mi;
	struct aac_mntinforesp *mir = NULL;
	int count = 0, i = 0;

	debug_called(1);

	sc = (struct aac_softc *)arg;

	/* disconnect ourselves from the intrhook chain */
	config_intrhook_disestablish(&sc->aac_ich);

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	mi = (struct aac_mntinfo *)&fib->data[0];

	/* loop over possible containers */
	do {
		/* request information on this container */
		bzero(mi, sizeof(struct aac_mntinfo));
		mi->Command = VM_NameServe;
		mi->MntType = FT_FILESYS;
		mi->MntCount = i;
		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
				 sizeof(struct aac_mntinfo))) {
			printf("error probing container %d\n", i);
			continue;
		}

		mir = (struct aac_mntinforesp *)&fib->data[0];
		/* XXX Need to check if count changed */
		count = mir->MntRespCount;
		aac_add_container(sc, mir, 0);
		i++;
	} while ((i < count) && (i < AAC_MAX_CONTAINERS));

	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	/* poke the bus to actually attach the child devices */
	if (bus_generic_attach(sc->aac_dev))
		device_printf(sc->aac_dev, "bus_generic_attach failed\n");

	/* mark the controller up */
	sc->aac_state &= ~AAC_STATE_SUSPEND;

	/* enable interrupts now */
	AAC_UNMASK_INTERRUPTS(sc);
}

/*
 * Create a device to represent a new container
 */
static void
aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
{
	struct aac_container *co;
	device_t child;

	/*
	 * Check container volume type for validity.  Note that many of
	 * the possible types may never show up.
	 */
	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
		co = (struct aac_container *)malloc(sizeof *co, M_AACBUF,
		       M_NOWAIT | M_ZERO);
		if (co == NULL)
			panic("Out of memory?!\n");
		debug(1, "id %x name '%.16s' size %u type %d",
		      mir->MntTable[0].ObjectId,
		      mir->MntTable[0].FileSystemName,
		      mir->MntTable[0].Capacity, mir->MntTable[0].VolType);

		if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
			device_printf(sc->aac_dev, "device_add_child failed\n");
		else
			device_set_ivars(child, co);
		device_set_desc(child, aac_describe_code(aac_container_types,
				mir->MntTable[0].VolType));
		co->co_disk = child;
		co->co_found = f;
		bcopy(&mir->MntTable[0], &co->co_mntobj,
		      sizeof(struct aac_mntobj));
		mtx_lock(&sc->aac_container_lock);
		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
		mtx_unlock(&sc->aac_container_lock);
	}
}

/*
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.
493 */ 494 void 495 aac_free(struct aac_softc *sc) 496 { 497 498 debug_called(1); 499 500 /* remove the control device */ 501 if (sc->aac_dev_t != NULL) 502 destroy_dev(sc->aac_dev_t); 503 504 /* throw away any FIB buffers, discard the FIB DMA tag */ 505 aac_free_commands(sc); 506 if (sc->aac_fib_dmat) 507 bus_dma_tag_destroy(sc->aac_fib_dmat); 508 509 free(sc->aac_commands, M_AACBUF); 510 511 /* destroy the common area */ 512 if (sc->aac_common) { 513 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap); 514 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common, 515 sc->aac_common_dmamap); 516 } 517 if (sc->aac_common_dmat) 518 bus_dma_tag_destroy(sc->aac_common_dmat); 519 520 /* disconnect the interrupt handler */ 521 if (sc->aac_intr) 522 bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr); 523 if (sc->aac_irq != NULL) 524 bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid, 525 sc->aac_irq); 526 527 /* destroy data-transfer DMA tag */ 528 if (sc->aac_buffer_dmat) 529 bus_dma_tag_destroy(sc->aac_buffer_dmat); 530 531 /* destroy the parent DMA tag */ 532 if (sc->aac_parent_dmat) 533 bus_dma_tag_destroy(sc->aac_parent_dmat); 534 535 /* release the register window mapping */ 536 if (sc->aac_regs_resource != NULL) 537 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, 538 sc->aac_regs_rid, sc->aac_regs_resource); 539 } 540 541 /* 542 * Disconnect from the controller completely, in preparation for unload. 543 */ 544 int 545 aac_detach(device_t dev) 546 { 547 struct aac_softc *sc; 548 struct aac_container *co; 549 struct aac_sim *sim; 550 int error; 551 552 debug_called(1); 553 554 sc = device_get_softc(dev); 555 556 if (sc->aac_state & AAC_STATE_OPEN) 557 return(EBUSY); 558 559 /* Remove the child containers */ 560 while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) { 561 error = device_delete_child(dev, co->co_disk); 562 if (error) 563 return (error); 564 TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); 565 free(co, M_AACBUF); 566 } 567 568 /* Remove the CAM SIMs */ 569 while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) { 570 TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link); 571 error = device_delete_child(dev, sim->sim_dev); 572 if (error) 573 return (error); 574 free(sim, M_AACBUF); 575 } 576 577 if (sc->aifflags & AAC_AIFFLAGS_RUNNING) { 578 sc->aifflags |= AAC_AIFFLAGS_EXIT; 579 wakeup(sc->aifthread); 580 tsleep(sc->aac_dev, PUSER | PCATCH, "aacdch", 30 * hz); 581 } 582 583 if (sc->aifflags & AAC_AIFFLAGS_RUNNING) 584 panic("Cannot shutdown AIF thread\n"); 585 586 if ((error = aac_shutdown(dev))) 587 return(error); 588 589 EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh); 590 591 aac_free(sc); 592 593 mtx_destroy(&sc->aac_aifq_lock); 594 mtx_destroy(&sc->aac_io_lock); 595 mtx_destroy(&sc->aac_container_lock); 596 597 return(0); 598 } 599 600 /* 601 * Bring the controller down to a dormant state and detach all child devices. 602 * 603 * This function is called before detach or system shutdown. 604 * 605 * Note that we can assume that the bioq on the controller is empty, as we won't 606 * allow shutdown if any device is open. 607 */ 608 int 609 aac_shutdown(device_t dev) 610 { 611 struct aac_softc *sc; 612 struct aac_fib *fib; 613 struct aac_close_command *cc; 614 615 debug_called(1); 616 617 sc = device_get_softc(dev); 618 619 sc->aac_state |= AAC_STATE_SUSPEND; 620 621 /* 622 * Send a Container shutdown followed by a HostShutdown FIB to the 623 * controller to convince it that we don't want to talk to it anymore. 
624 * We've been closed and all I/O completed already 625 */ 626 device_printf(sc->aac_dev, "shutting down controller..."); 627 628 mtx_lock(&sc->aac_io_lock); 629 aac_alloc_sync_fib(sc, &fib); 630 cc = (struct aac_close_command *)&fib->data[0]; 631 632 bzero(cc, sizeof(struct aac_close_command)); 633 cc->Command = VM_CloseAll; 634 cc->ContainerId = 0xffffffff; 635 if (aac_sync_fib(sc, ContainerCommand, 0, fib, 636 sizeof(struct aac_close_command))) 637 printf("FAILED.\n"); 638 else 639 printf("done\n"); 640 #if 0 641 else { 642 fib->data[0] = 0; 643 /* 644 * XXX Issuing this command to the controller makes it shut down 645 * but also keeps it from coming back up without a reset of the 646 * PCI bus. This is not desirable if you are just unloading the 647 * driver module with the intent to reload it later. 648 */ 649 if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN, 650 fib, 1)) { 651 printf("FAILED.\n"); 652 } else { 653 printf("done.\n"); 654 } 655 } 656 #endif 657 658 AAC_MASK_INTERRUPTS(sc); 659 aac_release_sync_fib(sc); 660 mtx_unlock(&sc->aac_io_lock); 661 662 return(0); 663 } 664 665 /* 666 * Bring the controller to a quiescent state, ready for system suspend. 667 */ 668 int 669 aac_suspend(device_t dev) 670 { 671 struct aac_softc *sc; 672 673 debug_called(1); 674 675 sc = device_get_softc(dev); 676 677 sc->aac_state |= AAC_STATE_SUSPEND; 678 679 AAC_MASK_INTERRUPTS(sc); 680 return(0); 681 } 682 683 /* 684 * Bring the controller back to a state ready for operation. 685 */ 686 int 687 aac_resume(device_t dev) 688 { 689 struct aac_softc *sc; 690 691 debug_called(1); 692 693 sc = device_get_softc(dev); 694 695 sc->aac_state &= ~AAC_STATE_SUSPEND; 696 AAC_UNMASK_INTERRUPTS(sc); 697 return(0); 698 } 699 700 /* 701 * Interrupt handler for NEW_COMM interface. 702 */ 703 void 704 aac_new_intr(void *arg) 705 { 706 struct aac_softc *sc; 707 u_int32_t index, fast; 708 struct aac_command *cm; 709 struct aac_fib *fib; 710 int i; 711 712 debug_called(2); 713 714 sc = (struct aac_softc *)arg; 715 716 mtx_lock(&sc->aac_io_lock); 717 while (1) { 718 index = AAC_GET_OUTB_QUEUE(sc); 719 if (index == 0xffffffff) 720 index = AAC_GET_OUTB_QUEUE(sc); 721 if (index == 0xffffffff) 722 break; 723 if (index & 2) { 724 if (index == 0xfffffffe) { 725 /* XXX This means that the controller wants 726 * more work. Ignore it for now. 727 */ 728 continue; 729 } 730 /* AIF */ 731 fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF, 732 M_NOWAIT | M_ZERO); 733 if (fib == NULL) { 734 /* If we're really this short on memory, 735 * hopefully breaking out of the handler will 736 * allow something to get freed. This 737 * actually sucks a whole lot. 738 */ 739 break; 740 } 741 index &= ~2; 742 for (i = 0; i < sizeof(struct aac_fib)/4; ++i) 743 ((u_int32_t *)fib)[i] = AAC_GETREG4(sc, index + i*4); 744 aac_handle_aif(sc, fib); 745 free(fib, M_AACBUF); 746 747 /* 748 * AIF memory is owned by the adapter, so let it 749 * know that we are done with it. 750 */ 751 AAC_SET_OUTB_QUEUE(sc, index); 752 AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY); 753 } else { 754 fast = index & 1; 755 cm = sc->aac_commands + (index >> 2); 756 fib = cm->cm_fib; 757 if (fast) { 758 fib->Header.XferState |= AAC_FIBSTATE_DONEADAP; 759 *((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL; 760 } 761 aac_remove_busy(cm); 762 aac_unmap_command(cm); 763 cm->cm_flags |= AAC_CMD_COMPLETED; 764 765 /* is there a completion handler? 
*/ 766 if (cm->cm_complete != NULL) { 767 cm->cm_complete(cm); 768 } else { 769 /* assume that someone is sleeping on this 770 * command 771 */ 772 wakeup(cm); 773 } 774 sc->flags &= ~AAC_QUEUE_FRZN; 775 } 776 } 777 /* see if we can start some more I/O */ 778 if ((sc->flags & AAC_QUEUE_FRZN) == 0) 779 aac_startio(sc); 780 781 mtx_unlock(&sc->aac_io_lock); 782 } 783 784 int 785 aac_fast_intr(void *arg) 786 { 787 struct aac_softc *sc; 788 u_int16_t reason; 789 790 debug_called(2); 791 792 sc = (struct aac_softc *)arg; 793 794 /* 795 * Read the status register directly. This is faster than taking the 796 * driver lock and reading the queues directly. It also saves having 797 * to turn parts of the driver lock into a spin mutex, which would be 798 * ugly. 799 */ 800 reason = AAC_GET_ISTATUS(sc); 801 AAC_CLEAR_ISTATUS(sc, reason); 802 803 /* handle completion processing */ 804 if (reason & AAC_DB_RESPONSE_READY) 805 taskqueue_enqueue_fast(taskqueue_fast, &sc->aac_task_complete); 806 807 /* controller wants to talk to us */ 808 if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) { 809 /* 810 * XXX Make sure that we don't get fooled by strange messages 811 * that start with a NULL. 812 */ 813 if ((reason & AAC_DB_PRINTF) && 814 (sc->aac_common->ac_printf[0] == 0)) 815 sc->aac_common->ac_printf[0] = 32; 816 817 /* 818 * This might miss doing the actual wakeup. However, the 819 * msleep that this is waking up has a timeout, so it will 820 * wake up eventually. AIFs and printfs are low enough 821 * priority that they can handle hanging out for a few seconds 822 * if needed. 823 */ 824 wakeup(sc->aifthread); 825 } 826 return (FILTER_HANDLED); 827 } 828 829 /* 830 * Command Processing 831 */ 832 833 /* 834 * Start as much queued I/O as possible on the controller 835 */ 836 void 837 aac_startio(struct aac_softc *sc) 838 { 839 struct aac_command *cm; 840 int error; 841 842 debug_called(2); 843 844 for (;;) { 845 /* 846 * This flag might be set if the card is out of resources. 847 * Checking it here prevents an infinite loop of deferrals. 848 */ 849 if (sc->flags & AAC_QUEUE_FRZN) 850 break; 851 852 /* 853 * Try to get a command that's been put off for lack of 854 * resources 855 */ 856 cm = aac_dequeue_ready(sc); 857 858 /* 859 * Try to build a command off the bio queue (ignore error 860 * return) 861 */ 862 if (cm == NULL) 863 aac_bio_command(sc, &cm); 864 865 /* nothing to do? */ 866 if (cm == NULL) 867 break; 868 869 /* don't map more than once */ 870 if (cm->cm_flags & AAC_CMD_MAPPED) 871 panic("aac: command %p already mapped", cm); 872 873 /* 874 * Set up the command to go to the controller. If there are no 875 * data buffers associated with the command then it can bypass 876 * busdma. 877 */ 878 if (cm->cm_datalen != 0) { 879 error = bus_dmamap_load(sc->aac_buffer_dmat, 880 cm->cm_datamap, cm->cm_data, 881 cm->cm_datalen, 882 aac_map_command_sg, cm, 0); 883 if (error == EINPROGRESS) { 884 debug(1, "freezing queue\n"); 885 sc->flags |= AAC_QUEUE_FRZN; 886 error = 0; 887 } else if (error != 0) 888 panic("aac_startio: unexpected error %d from " 889 "busdma\n", error); 890 } else 891 aac_map_command_sg(cm, NULL, 0, 0); 892 } 893 } 894 895 /* 896 * Handle notification of one or more FIBs coming from the controller. 
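 *
 * This is the body of the "aac%daif" kernel thread created in aac_attach().
 * It sleeps for AAC_PERIODIC_INTERVAL seconds at a time (or until woken by
 * the interrupt handler), allocates more FIBs when asked to, checks for
 * timed-out commands, drains the adapter's printf buffer, and services AIFs
 * arriving on the host normal command queue.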
897 */ 898 static void 899 aac_command_thread(struct aac_softc *sc) 900 { 901 struct aac_fib *fib; 902 u_int32_t fib_size; 903 int size, retval; 904 905 debug_called(2); 906 907 mtx_lock(&sc->aac_io_lock); 908 sc->aifflags = AAC_AIFFLAGS_RUNNING; 909 910 while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) { 911 912 retval = 0; 913 if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0) 914 retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO, 915 "aifthd", AAC_PERIODIC_INTERVAL * hz); 916 917 /* 918 * First see if any FIBs need to be allocated. This needs 919 * to be called without the driver lock because contigmalloc 920 * will grab Giant, and would result in an LOR. 921 */ 922 if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) { 923 mtx_unlock(&sc->aac_io_lock); 924 aac_alloc_commands(sc); 925 mtx_lock(&sc->aac_io_lock); 926 sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS; 927 aac_startio(sc); 928 } 929 930 /* 931 * While we're here, check to see if any commands are stuck. 932 * This is pretty low-priority, so it's ok if it doesn't 933 * always fire. 934 */ 935 if (retval == EWOULDBLOCK) 936 aac_timeout(sc); 937 938 /* Check the hardware printf message buffer */ 939 if (sc->aac_common->ac_printf[0] != 0) 940 aac_print_printf(sc); 941 942 /* Also check to see if the adapter has a command for us. */ 943 if (sc->flags & AAC_FLAGS_NEW_COMM) 944 continue; 945 for (;;) { 946 if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE, 947 &fib_size, &fib)) 948 break; 949 950 AAC_PRINT_FIB(sc, fib); 951 952 switch (fib->Header.Command) { 953 case AifRequest: 954 aac_handle_aif(sc, fib); 955 break; 956 default: 957 device_printf(sc->aac_dev, "unknown command " 958 "from controller\n"); 959 break; 960 } 961 962 if ((fib->Header.XferState == 0) || 963 (fib->Header.StructType != AAC_FIBTYPE_TFIB)) { 964 break; 965 } 966 967 /* Return the AIF to the controller. */ 968 if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) { 969 fib->Header.XferState |= AAC_FIBSTATE_DONEHOST; 970 *(AAC_FSAStatus*)fib->data = ST_OK; 971 972 /* XXX Compute the Size field? */ 973 size = fib->Header.Size; 974 if (size > sizeof(struct aac_fib)) { 975 size = sizeof(struct aac_fib); 976 fib->Header.Size = size; 977 } 978 /* 979 * Since we did not generate this command, it 980 * cannot go through the normal 981 * enqueue->startio chain. 982 */ 983 aac_enqueue_response(sc, 984 AAC_ADAP_NORM_RESP_QUEUE, 985 fib); 986 } 987 } 988 } 989 sc->aifflags &= ~AAC_AIFFLAGS_RUNNING; 990 mtx_unlock(&sc->aac_io_lock); 991 wakeup(sc->aac_dev); 992 993 kproc_exit(0); 994 } 995 996 /* 997 * Process completed commands. 998 */ 999 static void 1000 aac_complete(void *context, int pending) 1001 { 1002 struct aac_softc *sc; 1003 struct aac_command *cm; 1004 struct aac_fib *fib; 1005 u_int32_t fib_size; 1006 1007 debug_called(2); 1008 1009 sc = (struct aac_softc *)context; 1010 1011 mtx_lock(&sc->aac_io_lock); 1012 1013 /* pull completed commands off the queue */ 1014 for (;;) { 1015 /* look for completed FIBs on our queue */ 1016 if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size, 1017 &fib)) 1018 break; /* nothing to do */ 1019 1020 /* get the command, unmap and hand off for processing */ 1021 cm = sc->aac_commands + fib->Header.SenderData; 1022 if (cm == NULL) { 1023 AAC_PRINT_FIB(sc, fib); 1024 break; 1025 } 1026 aac_remove_busy(cm); 1027 1028 aac_unmap_command(cm); 1029 cm->cm_flags |= AAC_CMD_COMPLETED; 1030 1031 /* is there a completion handler? 
*/ 1032 if (cm->cm_complete != NULL) { 1033 cm->cm_complete(cm); 1034 } else { 1035 /* assume that someone is sleeping on this command */ 1036 wakeup(cm); 1037 } 1038 } 1039 1040 /* see if we can start some more I/O */ 1041 sc->flags &= ~AAC_QUEUE_FRZN; 1042 aac_startio(sc); 1043 1044 mtx_unlock(&sc->aac_io_lock); 1045 } 1046 1047 /* 1048 * Handle a bio submitted from a disk device. 1049 */ 1050 void 1051 aac_submit_bio(struct bio *bp) 1052 { 1053 struct aac_disk *ad; 1054 struct aac_softc *sc; 1055 1056 debug_called(2); 1057 1058 ad = (struct aac_disk *)bp->bio_disk->d_drv1; 1059 sc = ad->ad_controller; 1060 1061 /* queue the BIO and try to get some work done */ 1062 aac_enqueue_bio(sc, bp); 1063 aac_startio(sc); 1064 } 1065 1066 /* 1067 * Get a bio and build a command to go with it. 1068 */ 1069 static int 1070 aac_bio_command(struct aac_softc *sc, struct aac_command **cmp) 1071 { 1072 struct aac_command *cm; 1073 struct aac_fib *fib; 1074 struct aac_disk *ad; 1075 struct bio *bp; 1076 1077 debug_called(2); 1078 1079 /* get the resources we will need */ 1080 cm = NULL; 1081 bp = NULL; 1082 if (aac_alloc_command(sc, &cm)) /* get a command */ 1083 goto fail; 1084 if ((bp = aac_dequeue_bio(sc)) == NULL) 1085 goto fail; 1086 1087 /* fill out the command */ 1088 cm->cm_data = (void *)bp->bio_data; 1089 cm->cm_datalen = bp->bio_bcount; 1090 cm->cm_complete = aac_bio_complete; 1091 cm->cm_private = bp; 1092 cm->cm_timestamp = time_uptime; 1093 cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE; 1094 1095 /* build the FIB */ 1096 fib = cm->cm_fib; 1097 fib->Header.Size = sizeof(struct aac_fib_header); 1098 fib->Header.XferState = 1099 AAC_FIBSTATE_HOSTOWNED | 1100 AAC_FIBSTATE_INITIALISED | 1101 AAC_FIBSTATE_EMPTY | 1102 AAC_FIBSTATE_FROMHOST | 1103 AAC_FIBSTATE_REXPECTED | 1104 AAC_FIBSTATE_NORM | 1105 AAC_FIBSTATE_ASYNC | 1106 AAC_FIBSTATE_FAST_RESPONSE; 1107 1108 /* build the read/write request */ 1109 ad = (struct aac_disk *)bp->bio_disk->d_drv1; 1110 1111 if (sc->flags & AAC_FLAGS_RAW_IO) { 1112 struct aac_raw_io *raw; 1113 raw = (struct aac_raw_io *)&fib->data[0]; 1114 fib->Header.Command = RawIo; 1115 raw->BlockNumber = (u_int64_t)bp->bio_pblkno; 1116 raw->ByteCount = bp->bio_bcount; 1117 raw->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1118 raw->BpTotal = 0; 1119 raw->BpComplete = 0; 1120 fib->Header.Size += sizeof(struct aac_raw_io); 1121 cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw; 1122 if (bp->bio_cmd == BIO_READ) { 1123 raw->Flags = 1; 1124 cm->cm_flags |= AAC_CMD_DATAIN; 1125 } else { 1126 raw->Flags = 0; 1127 cm->cm_flags |= AAC_CMD_DATAOUT; 1128 } 1129 } else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 1130 fib->Header.Command = ContainerCommand; 1131 if (bp->bio_cmd == BIO_READ) { 1132 struct aac_blockread *br; 1133 br = (struct aac_blockread *)&fib->data[0]; 1134 br->Command = VM_CtBlockRead; 1135 br->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1136 br->BlockNumber = bp->bio_pblkno; 1137 br->ByteCount = bp->bio_bcount; 1138 fib->Header.Size += sizeof(struct aac_blockread); 1139 cm->cm_sgtable = &br->SgMap; 1140 cm->cm_flags |= AAC_CMD_DATAIN; 1141 } else { 1142 struct aac_blockwrite *bw; 1143 bw = (struct aac_blockwrite *)&fib->data[0]; 1144 bw->Command = VM_CtBlockWrite; 1145 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1146 bw->BlockNumber = bp->bio_pblkno; 1147 bw->ByteCount = bp->bio_bcount; 1148 bw->Stable = CUNSTABLE; 1149 fib->Header.Size += sizeof(struct aac_blockwrite); 1150 cm->cm_flags |= AAC_CMD_DATAOUT; 1151 cm->cm_sgtable = &bw->SgMap; 
		}
	} else {
		fib->Header.Command = ContainerCommand64;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread64 *br;
			br = (struct aac_blockread64 *)&fib->data[0];
			br->Command = VM_CtHostRead64;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			br->BlockNumber = bp->bio_pblkno;
			br->Pad = 0;
			br->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockread64);
			cm->cm_flags |= AAC_CMD_DATAIN;
			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
		} else {
			struct aac_blockwrite64 *bw;
			bw = (struct aac_blockwrite64 *)&fib->data[0];
			bw->Command = VM_CtHostWrite64;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			bw->BlockNumber = bp->bio_pblkno;
			bw->Pad = 0;
			bw->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockwrite64);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
		}
	}

	*cmp = cm;
	return(0);

fail:
	if (bp != NULL)
		aac_enqueue_bio(sc, bp);
	if (cm != NULL)
		aac_release_command(cm);
	return(ENOMEM);
}

/*
 * Handle a bio-instigated command that has been completed.
 */
static void
aac_bio_complete(struct aac_command *cm)
{
	struct aac_blockread_response *brr;
	struct aac_blockwrite_response *bwr;
	struct bio *bp;
	AAC_FSAStatus status;

	/* fetch relevant status and then release the command */
	bp = (struct bio *)cm->cm_private;
	if (bp->bio_cmd == BIO_READ) {
		brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
		status = brr->Status;
	} else {
		bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
		status = bwr->Status;
	}
	aac_release_command(cm);

	/* fix up the bio based on status */
	if (status == ST_OK) {
		bp->bio_resid = 0;
	} else {
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		/* pass an error string out to the disk layer */
		bp->bio_driver1 = aac_describe_code(aac_command_status_table,
						    status);
	}
	aac_biodone(bp);
}

/*
 * Submit a command to the controller, return when it completes.
 * XXX This is very dangerous!  If the card has gone out to lunch, we could
 *     be stuck here forever.  At the same time, signals are not caught
 *     because there is a risk that a signal could wakeup the sleep before
 *     the card has a chance to complete the command.  Since there is no way
 *     to cancel a command that is in progress, we can't protect against the
 *     card completing a command late and spamming the command and data
 *     memory.  So, we are held hostage until the command completes.
 */
static int
aac_wait_command(struct aac_command *cm)
{
	struct aac_softc *sc;
	int error;

	debug_called(2);

	sc = cm->cm_sc;

	/* Put the command on the ready queue and get things going */
	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
	aac_enqueue_ready(cm);
	aac_startio(sc);
	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0);
	return(error);
}

/*
 * Command Buffer Management
 */

/*
 * Allocate a command.
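 *
 * Commands come from the per-controller free list.  If the list is empty we
 * return EBUSY; if fewer than aac_max_fibs FIBs have been allocated so far,
 * the AIF thread is also poked (AAC_AIFFLAGS_ALLOCFIBS) so that it can grow
 * the pool outside the I/O lock.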
1262 */ 1263 int 1264 aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp) 1265 { 1266 struct aac_command *cm; 1267 1268 debug_called(3); 1269 1270 if ((cm = aac_dequeue_free(sc)) == NULL) { 1271 if (sc->total_fibs < sc->aac_max_fibs) { 1272 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS; 1273 wakeup(sc->aifthread); 1274 } 1275 return (EBUSY); 1276 } 1277 1278 *cmp = cm; 1279 return(0); 1280 } 1281 1282 /* 1283 * Release a command back to the freelist. 1284 */ 1285 void 1286 aac_release_command(struct aac_command *cm) 1287 { 1288 struct aac_event *event; 1289 struct aac_softc *sc; 1290 1291 debug_called(3); 1292 1293 /* (re)initialise the command/FIB */ 1294 cm->cm_sgtable = NULL; 1295 cm->cm_flags = 0; 1296 cm->cm_complete = NULL; 1297 cm->cm_private = NULL; 1298 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY; 1299 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB; 1300 cm->cm_fib->Header.Flags = 0; 1301 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size; 1302 1303 /* 1304 * These are duplicated in aac_start to cover the case where an 1305 * intermediate stage may have destroyed them. They're left 1306 * initialised here for debugging purposes only. 1307 */ 1308 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; 1309 cm->cm_fib->Header.SenderData = 0; 1310 1311 aac_enqueue_free(cm); 1312 1313 /* 1314 * Dequeue all events so that there's no risk of events getting 1315 * stranded. 1316 */ 1317 sc = cm->cm_sc; 1318 while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) { 1319 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links); 1320 event->ev_callback(sc, event, event->ev_arg); 1321 } 1322 } 1323 1324 /* 1325 * Map helper for command/FIB allocation. 1326 */ 1327 static void 1328 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1329 { 1330 uint64_t *fibphys; 1331 1332 fibphys = (uint64_t *)arg; 1333 1334 debug_called(3); 1335 1336 *fibphys = segs[0].ds_addr; 1337 } 1338 1339 /* 1340 * Allocate and initialise commands/FIBs for this adapter. 
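 *
 * Each call allocates one contiguous DMA-able block holding
 * aac_max_fibs_alloc FIBs (at most a page's worth, see aac_check_firmware())
 * and binds each FIB to an entry in the sc->aac_commands array.  Callers
 * (aac_init() and the AIF thread) invoke this without the I/O lock held,
 * since the underlying contigmalloc would otherwise cause a lock-order
 * reversal with Giant.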
1341 */ 1342 static int 1343 aac_alloc_commands(struct aac_softc *sc) 1344 { 1345 struct aac_command *cm; 1346 struct aac_fibmap *fm; 1347 uint64_t fibphys; 1348 int i, error; 1349 1350 debug_called(2); 1351 1352 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs) 1353 return (ENOMEM); 1354 1355 fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO); 1356 if (fm == NULL) 1357 return (ENOMEM); 1358 1359 /* allocate the FIBs in DMAable memory and load them */ 1360 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs, 1361 BUS_DMA_NOWAIT, &fm->aac_fibmap)) { 1362 device_printf(sc->aac_dev, 1363 "Not enough contiguous memory available.\n"); 1364 free(fm, M_AACBUF); 1365 return (ENOMEM); 1366 } 1367 1368 /* Ignore errors since this doesn't bounce */ 1369 (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs, 1370 sc->aac_max_fibs_alloc * sc->aac_max_fib_size, 1371 aac_map_command_helper, &fibphys, 0); 1372 1373 /* initialise constant fields in the command structure */ 1374 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size); 1375 for (i = 0; i < sc->aac_max_fibs_alloc; i++) { 1376 cm = sc->aac_commands + sc->total_fibs; 1377 fm->aac_commands = cm; 1378 cm->cm_sc = sc; 1379 cm->cm_fib = (struct aac_fib *) 1380 ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size); 1381 cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size; 1382 cm->cm_index = sc->total_fibs; 1383 1384 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0, 1385 &cm->cm_datamap)) != 0) 1386 break; 1387 mtx_lock(&sc->aac_io_lock); 1388 aac_release_command(cm); 1389 sc->total_fibs++; 1390 mtx_unlock(&sc->aac_io_lock); 1391 } 1392 1393 if (i > 0) { 1394 mtx_lock(&sc->aac_io_lock); 1395 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link); 1396 debug(1, "total_fibs= %d\n", sc->total_fibs); 1397 mtx_unlock(&sc->aac_io_lock); 1398 return (0); 1399 } 1400 1401 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); 1402 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); 1403 free(fm, M_AACBUF); 1404 return (ENOMEM); 1405 } 1406 1407 /* 1408 * Free FIBs owned by this adapter. 1409 */ 1410 static void 1411 aac_free_commands(struct aac_softc *sc) 1412 { 1413 struct aac_fibmap *fm; 1414 struct aac_command *cm; 1415 int i; 1416 1417 debug_called(1); 1418 1419 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) { 1420 1421 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link); 1422 /* 1423 * We check against total_fibs to handle partially 1424 * allocated blocks. 1425 */ 1426 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) { 1427 cm = fm->aac_commands + i; 1428 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap); 1429 } 1430 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); 1431 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); 1432 free(fm, M_AACBUF); 1433 } 1434 } 1435 1436 /* 1437 * Command-mapping helper function - populate this command's s/g table. 
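 *
 * This routine also encodes the command's identity for the adapter, matching
 * the decode in aac_new_intr() and aac_complete().  Roughly:
 *
 *	SenderFibAddress = cm_index << 2   (bit 0: fast response, bit 1: AIF)
 *	SenderData       = cm_index        (reverse lookup on completion)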
1438 */ 1439 static void 1440 aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1441 { 1442 struct aac_softc *sc; 1443 struct aac_command *cm; 1444 struct aac_fib *fib; 1445 int i; 1446 1447 debug_called(3); 1448 1449 cm = (struct aac_command *)arg; 1450 sc = cm->cm_sc; 1451 fib = cm->cm_fib; 1452 1453 /* copy into the FIB */ 1454 if (cm->cm_sgtable != NULL) { 1455 if (fib->Header.Command == RawIo) { 1456 struct aac_sg_tableraw *sg; 1457 sg = (struct aac_sg_tableraw *)cm->cm_sgtable; 1458 sg->SgCount = nseg; 1459 for (i = 0; i < nseg; i++) { 1460 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr; 1461 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len; 1462 sg->SgEntryRaw[i].Next = 0; 1463 sg->SgEntryRaw[i].Prev = 0; 1464 sg->SgEntryRaw[i].Flags = 0; 1465 } 1466 /* update the FIB size for the s/g count */ 1467 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw); 1468 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 1469 struct aac_sg_table *sg; 1470 sg = cm->cm_sgtable; 1471 sg->SgCount = nseg; 1472 for (i = 0; i < nseg; i++) { 1473 sg->SgEntry[i].SgAddress = segs[i].ds_addr; 1474 sg->SgEntry[i].SgByteCount = segs[i].ds_len; 1475 } 1476 /* update the FIB size for the s/g count */ 1477 fib->Header.Size += nseg*sizeof(struct aac_sg_entry); 1478 } else { 1479 struct aac_sg_table64 *sg; 1480 sg = (struct aac_sg_table64 *)cm->cm_sgtable; 1481 sg->SgCount = nseg; 1482 for (i = 0; i < nseg; i++) { 1483 sg->SgEntry64[i].SgAddress = segs[i].ds_addr; 1484 sg->SgEntry64[i].SgByteCount = segs[i].ds_len; 1485 } 1486 /* update the FIB size for the s/g count */ 1487 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64); 1488 } 1489 } 1490 1491 /* Fix up the address values in the FIB. Use the command array index 1492 * instead of a pointer since these fields are only 32 bits. Shift 1493 * the SenderFibAddress over to make room for the fast response bit 1494 * and for the AIF bit 1495 */ 1496 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2); 1497 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; 1498 1499 /* save a pointer to the command for speedy reverse-lookup */ 1500 cm->cm_fib->Header.SenderData = cm->cm_index; 1501 1502 if (cm->cm_flags & AAC_CMD_DATAIN) 1503 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1504 BUS_DMASYNC_PREREAD); 1505 if (cm->cm_flags & AAC_CMD_DATAOUT) 1506 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1507 BUS_DMASYNC_PREWRITE); 1508 cm->cm_flags |= AAC_CMD_MAPPED; 1509 1510 if (sc->flags & AAC_FLAGS_NEW_COMM) { 1511 int count = 10000000L; 1512 while (AAC_SEND_COMMAND(sc, cm) != 0) { 1513 if (--count == 0) { 1514 aac_unmap_command(cm); 1515 sc->flags |= AAC_QUEUE_FRZN; 1516 aac_requeue_ready(cm); 1517 } 1518 DELAY(5); /* wait 5 usec. */ 1519 } 1520 } else { 1521 /* Put the FIB on the outbound queue */ 1522 if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) { 1523 aac_unmap_command(cm); 1524 sc->flags |= AAC_QUEUE_FRZN; 1525 aac_requeue_ready(cm); 1526 } 1527 } 1528 1529 return; 1530 } 1531 1532 /* 1533 * Unmap a command from controller-visible space. 
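 *
 * The data map is synced (POSTREAD/POSTWRITE according to the command's
 * DATAIN/DATAOUT flags) and unloaded, and AAC_CMD_MAPPED is cleared.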
1534 */ 1535 static void 1536 aac_unmap_command(struct aac_command *cm) 1537 { 1538 struct aac_softc *sc; 1539 1540 debug_called(2); 1541 1542 sc = cm->cm_sc; 1543 1544 if (!(cm->cm_flags & AAC_CMD_MAPPED)) 1545 return; 1546 1547 if (cm->cm_datalen != 0) { 1548 if (cm->cm_flags & AAC_CMD_DATAIN) 1549 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1550 BUS_DMASYNC_POSTREAD); 1551 if (cm->cm_flags & AAC_CMD_DATAOUT) 1552 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1553 BUS_DMASYNC_POSTWRITE); 1554 1555 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); 1556 } 1557 cm->cm_flags &= ~AAC_CMD_MAPPED; 1558 } 1559 1560 /* 1561 * Hardware Interface 1562 */ 1563 1564 /* 1565 * Initialise the adapter. 1566 */ 1567 static void 1568 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1569 { 1570 struct aac_softc *sc; 1571 1572 debug_called(1); 1573 1574 sc = (struct aac_softc *)arg; 1575 1576 sc->aac_common_busaddr = segs[0].ds_addr; 1577 } 1578 1579 static int 1580 aac_check_firmware(struct aac_softc *sc) 1581 { 1582 u_int32_t major, minor, options = 0, atu_size = 0; 1583 int status; 1584 1585 debug_called(1); 1586 1587 /* 1588 * Retrieve the firmware version numbers. Dell PERC2/QC cards with 1589 * firmware version 1.x are not compatible with this driver. 1590 */ 1591 if (sc->flags & AAC_FLAGS_PERC2QC) { 1592 if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0, 1593 NULL)) { 1594 device_printf(sc->aac_dev, 1595 "Error reading firmware version\n"); 1596 return (EIO); 1597 } 1598 1599 /* These numbers are stored as ASCII! */ 1600 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30; 1601 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30; 1602 if (major == 1) { 1603 device_printf(sc->aac_dev, 1604 "Firmware version %d.%d is not supported.\n", 1605 major, minor); 1606 return (EINVAL); 1607 } 1608 } 1609 1610 /* 1611 * Retrieve the capabilities/supported options word so we know what 1612 * work-arounds to enable. Some firmware revs don't support this 1613 * command. 1614 */ 1615 if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) { 1616 if (status != AAC_SRB_STS_INVALID_REQUEST) { 1617 device_printf(sc->aac_dev, 1618 "RequestAdapterInfo failed\n"); 1619 return (EIO); 1620 } 1621 } else { 1622 options = AAC_GET_MAILBOX(sc, 1); 1623 atu_size = AAC_GET_MAILBOX(sc, 2); 1624 sc->supported_options = options; 1625 1626 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 1627 (sc->flags & AAC_FLAGS_NO4GB) == 0) 1628 sc->flags |= AAC_FLAGS_4GB_WINDOW; 1629 if (options & AAC_SUPPORTED_NONDASD) 1630 sc->flags |= AAC_FLAGS_ENABLE_CAM; 1631 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0 1632 && (sizeof(bus_addr_t) > 4)) { 1633 device_printf(sc->aac_dev, 1634 "Enabling 64-bit address support\n"); 1635 sc->flags |= AAC_FLAGS_SG_64BIT; 1636 } 1637 if ((options & AAC_SUPPORTED_NEW_COMM) 1638 && sc->aac_if.aif_send_command) 1639 sc->flags |= AAC_FLAGS_NEW_COMM; 1640 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) 1641 sc->flags |= AAC_FLAGS_ARRAY_64BIT; 1642 } 1643 1644 /* Check for broken hardware that does a lower number of commands */ 1645 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512); 1646 1647 /* Remap mem. 
resource, if required */ 1648 if ((sc->flags & AAC_FLAGS_NEW_COMM) && 1649 atu_size > rman_get_size(sc->aac_regs_resource)) { 1650 bus_release_resource( 1651 sc->aac_dev, SYS_RES_MEMORY, 1652 sc->aac_regs_rid, sc->aac_regs_resource); 1653 sc->aac_regs_resource = bus_alloc_resource( 1654 sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid, 1655 0ul, ~0ul, atu_size, RF_ACTIVE); 1656 if (sc->aac_regs_resource == NULL) { 1657 sc->aac_regs_resource = bus_alloc_resource_any( 1658 sc->aac_dev, SYS_RES_MEMORY, 1659 &sc->aac_regs_rid, RF_ACTIVE); 1660 if (sc->aac_regs_resource == NULL) { 1661 device_printf(sc->aac_dev, 1662 "couldn't allocate register window\n"); 1663 return (ENXIO); 1664 } 1665 sc->flags &= ~AAC_FLAGS_NEW_COMM; 1666 } 1667 sc->aac_btag = rman_get_bustag(sc->aac_regs_resource); 1668 sc->aac_bhandle = rman_get_bushandle(sc->aac_regs_resource); 1669 } 1670 1671 /* Read preferred settings */ 1672 sc->aac_max_fib_size = sizeof(struct aac_fib); 1673 sc->aac_max_sectors = 128; /* 64KB */ 1674 if (sc->flags & AAC_FLAGS_SG_64BIT) 1675 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE 1676 - sizeof(struct aac_blockwrite64)) 1677 / sizeof(struct aac_sg_entry64); 1678 else 1679 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE 1680 - sizeof(struct aac_blockwrite)) 1681 / sizeof(struct aac_sg_entry); 1682 1683 if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) { 1684 options = AAC_GET_MAILBOX(sc, 1); 1685 sc->aac_max_fib_size = (options & 0xFFFF); 1686 sc->aac_max_sectors = (options >> 16) << 1; 1687 options = AAC_GET_MAILBOX(sc, 2); 1688 sc->aac_sg_tablesize = (options >> 16); 1689 options = AAC_GET_MAILBOX(sc, 3); 1690 sc->aac_max_fibs = (options & 0xFFFF); 1691 } 1692 if (sc->aac_max_fib_size > PAGE_SIZE) 1693 sc->aac_max_fib_size = PAGE_SIZE; 1694 sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size; 1695 1696 return (0); 1697 } 1698 1699 static int 1700 aac_init(struct aac_softc *sc) 1701 { 1702 struct aac_adapter_init *ip; 1703 time_t then; 1704 u_int32_t code, qoffset; 1705 int error; 1706 1707 debug_called(1); 1708 1709 /* 1710 * First wait for the adapter to come ready. 1711 */ 1712 then = time_uptime; 1713 do { 1714 code = AAC_GET_FWSTATUS(sc); 1715 if (code & AAC_SELF_TEST_FAILED) { 1716 device_printf(sc->aac_dev, "FATAL: selftest failed\n"); 1717 return(ENXIO); 1718 } 1719 if (code & AAC_KERNEL_PANIC) { 1720 device_printf(sc->aac_dev, 1721 "FATAL: controller kernel panic\n"); 1722 return(ENXIO); 1723 } 1724 if (time_uptime > (then + AAC_BOOT_TIMEOUT)) { 1725 device_printf(sc->aac_dev, 1726 "FATAL: controller not coming ready, " 1727 "status %x\n", code); 1728 return(ENXIO); 1729 } 1730 } while (!(code & AAC_UP_AND_RUNNING)); 1731 1732 error = ENOMEM; 1733 /* 1734 * Create DMA tag for mapping buffers into controller-addressable space. 1735 */ 1736 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1737 1, 0, /* algnmnt, boundary */ 1738 (sc->flags & AAC_FLAGS_SG_64BIT) ? 1739 BUS_SPACE_MAXADDR : 1740 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1741 BUS_SPACE_MAXADDR, /* highaddr */ 1742 NULL, NULL, /* filter, filterarg */ 1743 MAXBSIZE, /* maxsize */ 1744 sc->aac_sg_tablesize, /* nsegments */ 1745 MAXBSIZE, /* maxsegsize */ 1746 BUS_DMA_ALLOCNOW, /* flags */ 1747 busdma_lock_mutex, /* lockfunc */ 1748 &sc->aac_io_lock, /* lockfuncarg */ 1749 &sc->aac_buffer_dmat)) { 1750 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n"); 1751 goto out; 1752 } 1753 1754 /* 1755 * Create DMA tag for mapping FIBs into controller-addressable space.. 
1756 */ 1757 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1758 1, 0, /* algnmnt, boundary */ 1759 (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 1760 BUS_SPACE_MAXADDR_32BIT : 1761 0x7fffffff, /* lowaddr */ 1762 BUS_SPACE_MAXADDR, /* highaddr */ 1763 NULL, NULL, /* filter, filterarg */ 1764 sc->aac_max_fibs_alloc * 1765 sc->aac_max_fib_size, /* maxsize */ 1766 1, /* nsegments */ 1767 sc->aac_max_fibs_alloc * 1768 sc->aac_max_fib_size, /* maxsegsize */ 1769 0, /* flags */ 1770 NULL, NULL, /* No locking needed */ 1771 &sc->aac_fib_dmat)) { 1772 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");; 1773 goto out; 1774 } 1775 1776 /* 1777 * Create DMA tag for the common structure and allocate it. 1778 */ 1779 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1780 1, 0, /* algnmnt, boundary */ 1781 (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 1782 BUS_SPACE_MAXADDR_32BIT : 1783 0x7fffffff, /* lowaddr */ 1784 BUS_SPACE_MAXADDR, /* highaddr */ 1785 NULL, NULL, /* filter, filterarg */ 1786 8192 + sizeof(struct aac_common), /* maxsize */ 1787 1, /* nsegments */ 1788 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1789 0, /* flags */ 1790 NULL, NULL, /* No locking needed */ 1791 &sc->aac_common_dmat)) { 1792 device_printf(sc->aac_dev, 1793 "can't allocate common structure DMA tag\n"); 1794 goto out; 1795 } 1796 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, 1797 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { 1798 device_printf(sc->aac_dev, "can't allocate common structure\n"); 1799 goto out; 1800 } 1801 1802 /* 1803 * Work around a bug in the 2120 and 2200 that cannot DMA commands 1804 * below address 8192 in physical memory. 1805 * XXX If the padding is not needed, can it be put to use instead 1806 * of ignored? 1807 */ 1808 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, 1809 sc->aac_common, 8192 + sizeof(*sc->aac_common), 1810 aac_common_map, sc, 0); 1811 1812 if (sc->aac_common_busaddr < 8192) { 1813 sc->aac_common = (struct aac_common *) 1814 ((uint8_t *)sc->aac_common + 8192); 1815 sc->aac_common_busaddr += 8192; 1816 } 1817 bzero(sc->aac_common, sizeof(*sc->aac_common)); 1818 1819 /* Allocate some FIBs and associated command structs */ 1820 TAILQ_INIT(&sc->aac_fibmap_tqh); 1821 sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command), 1822 M_AACBUF, M_WAITOK|M_ZERO); 1823 while (sc->total_fibs < AAC_PREALLOCATE_FIBS) { 1824 if (aac_alloc_commands(sc) != 0) 1825 break; 1826 } 1827 if (sc->total_fibs == 0) 1828 goto out; 1829 1830 /* 1831 * Fill in the init structure. This tells the adapter about the 1832 * physical location of various important shared data structures. 
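 *
 * In particular: the bus addresses of the adapter FIB area and the printf
 * buffer inside aac_common, the host memory size in (nominally 4K) pages,
 * the maximum FIB size and command count negotiated above and, further
 * down, the address of the communication queue header.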
1833 */ 1834 ip = &sc->aac_common->ac_init; 1835 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION; 1836 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { 1837 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4; 1838 sc->flags |= AAC_FLAGS_RAW_IO; 1839 } 1840 ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION; 1841 1842 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr + 1843 offsetof(struct aac_common, ac_fibs); 1844 ip->AdapterFibsVirtualAddress = 0; 1845 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib); 1846 ip->AdapterFibAlign = sizeof(struct aac_fib); 1847 1848 ip->PrintfBufferAddress = sc->aac_common_busaddr + 1849 offsetof(struct aac_common, ac_printf); 1850 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE; 1851 1852 /* 1853 * The adapter assumes that pages are 4K in size, except on some 1854 * broken firmware versions that do the page->byte conversion twice, 1855 * therefore 'assuming' that this value is in 16MB units (2^24). 1856 * Round up since the granularity is so high. 1857 */ 1858 ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE; 1859 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) { 1860 ip->HostPhysMemPages = 1861 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE; 1862 } 1863 ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */ 1864 1865 ip->InitFlags = 0; 1866 if (sc->flags & AAC_FLAGS_NEW_COMM) { 1867 ip->InitFlags = INITFLAGS_NEW_COMM_SUPPORTED; 1868 device_printf(sc->aac_dev, "New comm. interface enabled\n"); 1869 } 1870 1871 ip->MaxIoCommands = sc->aac_max_fibs; 1872 ip->MaxIoSize = sc->aac_max_sectors << 9; 1873 ip->MaxFibSize = sc->aac_max_fib_size; 1874 1875 /* 1876 * Initialise FIB queues. Note that it appears that the layout of the 1877 * indexes and the segmentation of the entries may be mandated by the 1878 * adapter, which is only told about the base of the queue index fields. 1879 * 1880 * The initial values of the indices are assumed to inform the adapter 1881 * of the sizes of the respective queues, and theoretically it could 1882 * work out the entire layout of the queue structures from this. We 1883 * take the easy route and just lay this area out like everyone else 1884 * does. 1885 * 1886 * The Linux driver uses a much more complex scheme whereby several 1887 * header records are kept for each queue. We use a couple of generic 1888 * list manipulation functions which 'know' the size of each list by 1889 * virtue of a table. 
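 *
 * Concretely: the queue index block lives at the first AAC_QUEUE_ALIGN-
 * aligned offset inside ac_qbuf, its bus address is handed to the adapter
 * through ip->CommHeaderAddress, and every producer/consumer index pair is
 * initialised to the size of its queue, which (given the wrap logic in
 * aac_enqueue_fib()/aac_dequeue_fib()) represents an empty queue.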
1890 */ 1891 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN; 1892 qoffset &= ~(AAC_QUEUE_ALIGN - 1); 1893 sc->aac_queues = 1894 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset); 1895 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset; 1896 1897 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1898 AAC_HOST_NORM_CMD_ENTRIES; 1899 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1900 AAC_HOST_NORM_CMD_ENTRIES; 1901 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1902 AAC_HOST_HIGH_CMD_ENTRIES; 1903 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1904 AAC_HOST_HIGH_CMD_ENTRIES; 1905 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1906 AAC_ADAP_NORM_CMD_ENTRIES; 1907 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1908 AAC_ADAP_NORM_CMD_ENTRIES; 1909 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1910 AAC_ADAP_HIGH_CMD_ENTRIES; 1911 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1912 AAC_ADAP_HIGH_CMD_ENTRIES; 1913 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1914 AAC_HOST_NORM_RESP_ENTRIES; 1915 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1916 AAC_HOST_NORM_RESP_ENTRIES; 1917 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1918 AAC_HOST_HIGH_RESP_ENTRIES; 1919 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1920 AAC_HOST_HIGH_RESP_ENTRIES; 1921 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1922 AAC_ADAP_NORM_RESP_ENTRIES; 1923 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1924 AAC_ADAP_NORM_RESP_ENTRIES; 1925 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1926 AAC_ADAP_HIGH_RESP_ENTRIES; 1927 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1928 AAC_ADAP_HIGH_RESP_ENTRIES; 1929 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = 1930 &sc->aac_queues->qt_HostNormCmdQueue[0]; 1931 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = 1932 &sc->aac_queues->qt_HostHighCmdQueue[0]; 1933 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = 1934 &sc->aac_queues->qt_AdapNormCmdQueue[0]; 1935 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = 1936 &sc->aac_queues->qt_AdapHighCmdQueue[0]; 1937 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = 1938 &sc->aac_queues->qt_HostNormRespQueue[0]; 1939 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = 1940 &sc->aac_queues->qt_HostHighRespQueue[0]; 1941 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = 1942 &sc->aac_queues->qt_AdapNormRespQueue[0]; 1943 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = 1944 &sc->aac_queues->qt_AdapHighRespQueue[0]; 1945 1946 /* 1947 * Do controller-type-specific initialisation 1948 */ 1949 switch (sc->aac_hwif) { 1950 case AAC_HWIF_I960RX: 1951 AAC_SETREG4(sc, AAC_RX_ODBR, ~0); 1952 break; 1953 case AAC_HWIF_RKT: 1954 AAC_SETREG4(sc, AAC_RKT_ODBR, ~0); 1955 break; 1956 default: 1957 break; 1958 } 1959 1960 /* 1961 * Give the init structure to the controller. 
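 *
 * This uses the AAC_MONKER_INITSTRUCT synchronous (mailbox) command with the
 * bus address of ac_init within the common area as its argument.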
1962 */ 1963 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, 1964 sc->aac_common_busaddr + 1965 offsetof(struct aac_common, ac_init), 0, 0, 0, 1966 NULL)) { 1967 device_printf(sc->aac_dev, 1968 "error establishing init structure\n"); 1969 error = EIO; 1970 goto out; 1971 } 1972 1973 error = 0; 1974 out: 1975 return(error); 1976 } 1977 1978 /* 1979 * Send a synchronous command to the controller and wait for a result. 1980 * Indicate if the controller completed the command with an error status. 1981 */ 1982 static int 1983 aac_sync_command(struct aac_softc *sc, u_int32_t command, 1984 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, 1985 u_int32_t *sp) 1986 { 1987 time_t then; 1988 u_int32_t status; 1989 1990 debug_called(3); 1991 1992 /* populate the mailbox */ 1993 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); 1994 1995 /* ensure the sync command doorbell flag is cleared */ 1996 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 1997 1998 /* then set it to signal the adapter */ 1999 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); 2000 2001 /* spin waiting for the command to complete */ 2002 then = time_uptime; 2003 do { 2004 if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) { 2005 debug(1, "timed out"); 2006 return(EIO); 2007 } 2008 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); 2009 2010 /* clear the completion flag */ 2011 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2012 2013 /* get the command status */ 2014 status = AAC_GET_MAILBOX(sc, 0); 2015 if (sp != NULL) 2016 *sp = status; 2017 2018 if (status != AAC_SRB_STS_SUCCESS) 2019 return (-1); 2020 return(0); 2021 } 2022 2023 int 2024 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, 2025 struct aac_fib *fib, u_int16_t datasize) 2026 { 2027 debug_called(3); 2028 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2029 2030 if (datasize > AAC_FIB_DATASIZE) 2031 return(EINVAL); 2032 2033 /* 2034 * Set up the sync FIB 2035 */ 2036 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | 2037 AAC_FIBSTATE_INITIALISED | 2038 AAC_FIBSTATE_EMPTY; 2039 fib->Header.XferState |= xferstate; 2040 fib->Header.Command = command; 2041 fib->Header.StructType = AAC_FIBTYPE_TFIB; 2042 fib->Header.Size = sizeof(struct aac_fib) + datasize; 2043 fib->Header.SenderSize = sizeof(struct aac_fib); 2044 fib->Header.SenderFibAddress = 0; /* Not needed */ 2045 fib->Header.ReceiverFibAddress = sc->aac_common_busaddr + 2046 offsetof(struct aac_common, 2047 ac_sync_fib); 2048 2049 /* 2050 * Give the FIB to the controller, wait for a response. 2051 */ 2052 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, 2053 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) { 2054 debug(2, "IO error"); 2055 return(EIO); 2056 } 2057 2058 return (0); 2059 } 2060 2061 /* 2062 * Adapter-space FIB queue manipulation 2063 * 2064 * Note that the queue implementation here is a little funky; neither the PI or 2065 * CI will ever be zero. This behaviour is a controller feature. 2066 */ 2067 static struct { 2068 int size; 2069 int notify; 2070 } aac_qinfo[] = { 2071 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 2072 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 2073 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 2074 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 2075 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 2076 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 2077 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 2078 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 2079 }; 2080 2081 /* 2082 * Atomically insert an entry into the nominated queue, returns 0 on success or 2083 * EBUSY if the queue is full. 
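 *
 * The queue is treated as a ring: the producer index wraps back to zero
 * once it reaches the queue size, and the queue is considered full when
 * the slot after the producer index is the one the consumer is about to
 * read.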
2084 * 2085 * Note: it would be more efficient to defer notifying the controller in 2086 * the case where we may be inserting several entries in rapid succession, 2087 * but implementing this usefully may be difficult (it would involve a 2088 * separate queue/notify interface). 2089 */ 2090 static int 2091 aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm) 2092 { 2093 u_int32_t pi, ci; 2094 int error; 2095 u_int32_t fib_size; 2096 u_int32_t fib_addr; 2097 2098 debug_called(3); 2099 2100 fib_size = cm->cm_fib->Header.Size; 2101 fib_addr = cm->cm_fib->Header.ReceiverFibAddress; 2102 2103 /* get the producer/consumer indices */ 2104 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2105 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2106 2107 /* wrap the queue? */ 2108 if (pi >= aac_qinfo[queue].size) 2109 pi = 0; 2110 2111 /* check for queue full */ 2112 if ((pi + 1) == ci) { 2113 error = EBUSY; 2114 goto out; 2115 } 2116 2117 /* 2118 * To avoid a race with its completion interrupt, place this command on 2119 * the busy queue prior to advertising it to the controller. 2120 */ 2121 aac_enqueue_busy(cm); 2122 2123 /* populate queue entry */ 2124 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2125 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2126 2127 /* update producer index */ 2128 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2129 2130 /* notify the adapter if we know how */ 2131 if (aac_qinfo[queue].notify != 0) 2132 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2133 2134 error = 0; 2135 2136 out: 2137 return(error); 2138 } 2139 2140 /* 2141 * Atomically remove one entry from the nominated queue, returns 0 on 2142 * success or ENOENT if the queue is empty. 2143 */ 2144 static int 2145 aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, 2146 struct aac_fib **fib_addr) 2147 { 2148 u_int32_t pi, ci; 2149 u_int32_t fib_index; 2150 int error; 2151 int notify; 2152 2153 debug_called(3); 2154 2155 /* get the producer/consumer indices */ 2156 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2157 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2158 2159 /* check for queue empty */ 2160 if (ci == pi) { 2161 error = ENOENT; 2162 goto out; 2163 } 2164 2165 /* wrap the pi so the following test works */ 2166 if (pi >= aac_qinfo[queue].size) 2167 pi = 0; 2168 2169 notify = 0; 2170 if (ci == pi + 1) 2171 notify++; 2172 2173 /* wrap the queue? */ 2174 if (ci >= aac_qinfo[queue].size) 2175 ci = 0; 2176 2177 /* fetch the entry */ 2178 *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size; 2179 2180 switch (queue) { 2181 case AAC_HOST_NORM_CMD_QUEUE: 2182 case AAC_HOST_HIGH_CMD_QUEUE: 2183 /* 2184 * The aq_fib_addr is only 32 bits wide so it can't be counted 2185 * on to hold an address. For AIF's, the adapter assumes 2186 * that it's giving us an address into the array of AIF fibs. 2187 * Therefore, we have to convert it to an index. 2188 */ 2189 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr / 2190 sizeof(struct aac_fib); 2191 *fib_addr = &sc->aac_common->ac_fibs[fib_index]; 2192 break; 2193 2194 case AAC_HOST_NORM_RESP_QUEUE: 2195 case AAC_HOST_HIGH_RESP_QUEUE: 2196 { 2197 struct aac_command *cm; 2198 2199 /* 2200 * As above, an index is used instead of an actual address. 2201 * Gotta shift the index to account for the fast response 2202 * bit. No other correction is needed since this value was 2203 * originally provided by the driver via the SenderFibAddress 2204 * field. 
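 * The low bits of the value are used as flags (bit 0 marks a fast
 * response), which is why the command index is recovered by shifting the
 * value right by two before using it to locate the owning command.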
 */
		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
		cm = sc->aac_commands + (fib_index >> 2);
		*fib_addr = cm->cm_fib;

		/*
		 * Is this a fast response? If it is, update the fib fields in
		 * local memory since the whole fib isn't DMA'd back up.
		 */
		if (fib_index & 0x01) {
			(*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
			*((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
		}
		break;
	}
	default:
		panic("Invalid queue in aac_dequeue_fib()");
		break;
	}

	/* update consumer index */
	sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;

	/* if we have made the queue un-full, notify the adapter */
	if (notify && (aac_qinfo[queue].notify != 0))
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
	error = 0;

out:
	return(error);
}

/*
 * Put our response to an Adapter Initiated FIB (AIF) on the response queue
 */
static int
aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
{
	u_int32_t pi, ci;
	int error;
	u_int32_t fib_size;
	u_int32_t fib_addr;

	debug_called(1);

	/* Tell the adapter where the FIB is */
	fib_size = fib->Header.Size;
	fib_addr = fib->Header.SenderFibAddress;
	fib->Header.ReceiverFibAddress = fib_addr;

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* wrap the queue? */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* check for queue full */
	if ((pi + 1) == ci) {
		error = EBUSY;
		goto out;
	}

	/* populate queue entry */
	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;

	/* update producer index */
	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;

	/* notify the adapter if we know how */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;

out:
	return(error);
}

/*
 * Check for commands that have been outstanding for a suspiciously long time,
 * and complain about them.
 */
static void
aac_timeout(struct aac_softc *sc)
{
	struct aac_command *cm;
	time_t deadline;
	int timedout, code;

	/*
	 * Traverse the busy command list, complain about late commands once
	 * only.
	 */
	timedout = 0;
	deadline = time_uptime - AAC_CMD_TIMEOUT;
	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
		if ((cm->cm_timestamp < deadline)
		    /* && !(cm->cm_flags & AAC_CMD_TIMEDOUT) */) {
			cm->cm_flags |= AAC_CMD_TIMEDOUT;
			device_printf(sc->aac_dev,
				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				      cm, (int)(time_uptime-cm->cm_timestamp));
			AAC_PRINT_FIB(sc, cm->cm_fib);
			timedout++;
		}
	}

	if (timedout) {
		code = AAC_GET_FWSTATUS(sc);
		if (code != AAC_UP_AND_RUNNING) {
			device_printf(sc->aac_dev, "WARNING! Controller is no "
				      "longer running! code= 0x%x\n", code);
		}
	}
	return;
}

/*
 * Interface Function Vectors
 */

/*
 * Read the current firmware status word.
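 * Each supported bridge (StrongARM, i960Rx, Falcon and Rocket) exposes
 * this word in a different register, so there is one accessor per
 * hardware interface.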
2331 */ 2332 static int 2333 aac_sa_get_fwstatus(struct aac_softc *sc) 2334 { 2335 debug_called(3); 2336 2337 return(AAC_GETREG4(sc, AAC_SA_FWSTATUS)); 2338 } 2339 2340 static int 2341 aac_rx_get_fwstatus(struct aac_softc *sc) 2342 { 2343 debug_called(3); 2344 2345 return(AAC_GETREG4(sc, AAC_RX_FWSTATUS)); 2346 } 2347 2348 static int 2349 aac_fa_get_fwstatus(struct aac_softc *sc) 2350 { 2351 int val; 2352 2353 debug_called(3); 2354 2355 val = AAC_GETREG4(sc, AAC_FA_FWSTATUS); 2356 return (val); 2357 } 2358 2359 static int 2360 aac_rkt_get_fwstatus(struct aac_softc *sc) 2361 { 2362 debug_called(3); 2363 2364 return(AAC_GETREG4(sc, AAC_RKT_FWSTATUS)); 2365 } 2366 2367 /* 2368 * Notify the controller of a change in a given queue 2369 */ 2370 2371 static void 2372 aac_sa_qnotify(struct aac_softc *sc, int qbit) 2373 { 2374 debug_called(3); 2375 2376 AAC_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); 2377 } 2378 2379 static void 2380 aac_rx_qnotify(struct aac_softc *sc, int qbit) 2381 { 2382 debug_called(3); 2383 2384 AAC_SETREG4(sc, AAC_RX_IDBR, qbit); 2385 } 2386 2387 static void 2388 aac_fa_qnotify(struct aac_softc *sc, int qbit) 2389 { 2390 debug_called(3); 2391 2392 AAC_SETREG2(sc, AAC_FA_DOORBELL1, qbit); 2393 AAC_FA_HACK(sc); 2394 } 2395 2396 static void 2397 aac_rkt_qnotify(struct aac_softc *sc, int qbit) 2398 { 2399 debug_called(3); 2400 2401 AAC_SETREG4(sc, AAC_RKT_IDBR, qbit); 2402 } 2403 2404 /* 2405 * Get the interrupt reason bits 2406 */ 2407 static int 2408 aac_sa_get_istatus(struct aac_softc *sc) 2409 { 2410 debug_called(3); 2411 2412 return(AAC_GETREG2(sc, AAC_SA_DOORBELL0)); 2413 } 2414 2415 static int 2416 aac_rx_get_istatus(struct aac_softc *sc) 2417 { 2418 debug_called(3); 2419 2420 return(AAC_GETREG4(sc, AAC_RX_ODBR)); 2421 } 2422 2423 static int 2424 aac_fa_get_istatus(struct aac_softc *sc) 2425 { 2426 int val; 2427 2428 debug_called(3); 2429 2430 val = AAC_GETREG2(sc, AAC_FA_DOORBELL0); 2431 return (val); 2432 } 2433 2434 static int 2435 aac_rkt_get_istatus(struct aac_softc *sc) 2436 { 2437 debug_called(3); 2438 2439 return(AAC_GETREG4(sc, AAC_RKT_ODBR)); 2440 } 2441 2442 /* 2443 * Clear some interrupt reason bits 2444 */ 2445 static void 2446 aac_sa_clear_istatus(struct aac_softc *sc, int mask) 2447 { 2448 debug_called(3); 2449 2450 AAC_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask); 2451 } 2452 2453 static void 2454 aac_rx_clear_istatus(struct aac_softc *sc, int mask) 2455 { 2456 debug_called(3); 2457 2458 AAC_SETREG4(sc, AAC_RX_ODBR, mask); 2459 } 2460 2461 static void 2462 aac_fa_clear_istatus(struct aac_softc *sc, int mask) 2463 { 2464 debug_called(3); 2465 2466 AAC_SETREG2(sc, AAC_FA_DOORBELL0_CLEAR, mask); 2467 AAC_FA_HACK(sc); 2468 } 2469 2470 static void 2471 aac_rkt_clear_istatus(struct aac_softc *sc, int mask) 2472 { 2473 debug_called(3); 2474 2475 AAC_SETREG4(sc, AAC_RKT_ODBR, mask); 2476 } 2477 2478 /* 2479 * Populate the mailbox and set the command word 2480 */ 2481 static void 2482 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, 2483 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2484 { 2485 debug_called(4); 2486 2487 AAC_SETREG4(sc, AAC_SA_MAILBOX, command); 2488 AAC_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0); 2489 AAC_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1); 2490 AAC_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2); 2491 AAC_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3); 2492 } 2493 2494 static void 2495 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, 2496 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2497 { 2498 debug_called(4); 
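	/*
	 * The mailbox is five consecutive 32-bit registers: the command word
	 * followed by the four arguments.
	 */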
2499 2500 AAC_SETREG4(sc, AAC_RX_MAILBOX, command); 2501 AAC_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0); 2502 AAC_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1); 2503 AAC_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2); 2504 AAC_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3); 2505 } 2506 2507 static void 2508 aac_fa_set_mailbox(struct aac_softc *sc, u_int32_t command, 2509 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2510 { 2511 debug_called(4); 2512 2513 AAC_SETREG4(sc, AAC_FA_MAILBOX, command); 2514 AAC_FA_HACK(sc); 2515 AAC_SETREG4(sc, AAC_FA_MAILBOX + 4, arg0); 2516 AAC_FA_HACK(sc); 2517 AAC_SETREG4(sc, AAC_FA_MAILBOX + 8, arg1); 2518 AAC_FA_HACK(sc); 2519 AAC_SETREG4(sc, AAC_FA_MAILBOX + 12, arg2); 2520 AAC_FA_HACK(sc); 2521 AAC_SETREG4(sc, AAC_FA_MAILBOX + 16, arg3); 2522 AAC_FA_HACK(sc); 2523 } 2524 2525 static void 2526 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, 2527 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2528 { 2529 debug_called(4); 2530 2531 AAC_SETREG4(sc, AAC_RKT_MAILBOX, command); 2532 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0); 2533 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1); 2534 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2); 2535 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3); 2536 } 2537 2538 /* 2539 * Fetch the immediate command status word 2540 */ 2541 static int 2542 aac_sa_get_mailbox(struct aac_softc *sc, int mb) 2543 { 2544 debug_called(4); 2545 2546 return(AAC_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4))); 2547 } 2548 2549 static int 2550 aac_rx_get_mailbox(struct aac_softc *sc, int mb) 2551 { 2552 debug_called(4); 2553 2554 return(AAC_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4))); 2555 } 2556 2557 static int 2558 aac_fa_get_mailbox(struct aac_softc *sc, int mb) 2559 { 2560 int val; 2561 2562 debug_called(4); 2563 2564 val = AAC_GETREG4(sc, AAC_FA_MAILBOX + (mb * 4)); 2565 return (val); 2566 } 2567 2568 static int 2569 aac_rkt_get_mailbox(struct aac_softc *sc, int mb) 2570 { 2571 debug_called(4); 2572 2573 return(AAC_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4))); 2574 } 2575 2576 /* 2577 * Set/clear interrupt masks 2578 */ 2579 static void 2580 aac_sa_set_interrupts(struct aac_softc *sc, int enable) 2581 { 2582 debug(2, "%sable interrupts", enable ? "en" : "dis"); 2583 2584 if (enable) { 2585 AAC_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS); 2586 } else { 2587 AAC_SETREG2((sc), AAC_SA_MASK0_SET, ~0); 2588 } 2589 } 2590 2591 static void 2592 aac_rx_set_interrupts(struct aac_softc *sc, int enable) 2593 { 2594 debug(2, "%sable interrupts", enable ? "en" : "dis"); 2595 2596 if (enable) { 2597 if (sc->flags & AAC_FLAGS_NEW_COMM) 2598 AAC_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM); 2599 else 2600 AAC_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS); 2601 } else { 2602 AAC_SETREG4(sc, AAC_RX_OIMR, ~0); 2603 } 2604 } 2605 2606 static void 2607 aac_fa_set_interrupts(struct aac_softc *sc, int enable) 2608 { 2609 debug(2, "%sable interrupts", enable ? "en" : "dis"); 2610 2611 if (enable) { 2612 AAC_SETREG2((sc), AAC_FA_MASK0_CLEAR, AAC_DB_INTERRUPTS); 2613 AAC_FA_HACK(sc); 2614 } else { 2615 AAC_SETREG2((sc), AAC_FA_MASK0, ~0); 2616 AAC_FA_HACK(sc); 2617 } 2618 } 2619 2620 static void 2621 aac_rkt_set_interrupts(struct aac_softc *sc, int enable) 2622 { 2623 debug(2, "%sable interrupts", enable ? 
"en" : "dis"); 2624 2625 if (enable) { 2626 if (sc->flags & AAC_FLAGS_NEW_COMM) 2627 AAC_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM); 2628 else 2629 AAC_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS); 2630 } else { 2631 AAC_SETREG4(sc, AAC_RKT_OIMR, ~0); 2632 } 2633 } 2634 2635 /* 2636 * New comm. interface: Send command functions 2637 */ 2638 static int 2639 aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm) 2640 { 2641 u_int32_t index, device; 2642 2643 debug(2, "send command (new comm.)"); 2644 2645 index = AAC_GETREG4(sc, AAC_RX_IQUE); 2646 if (index == 0xffffffffL) 2647 index = AAC_GETREG4(sc, AAC_RX_IQUE); 2648 if (index == 0xffffffffL) 2649 return index; 2650 aac_enqueue_busy(cm); 2651 device = index; 2652 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2653 device += 4; 2654 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2655 device += 4; 2656 AAC_SETREG4(sc, device, cm->cm_fib->Header.Size); 2657 AAC_SETREG4(sc, AAC_RX_IQUE, index); 2658 return 0; 2659 } 2660 2661 static int 2662 aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm) 2663 { 2664 u_int32_t index, device; 2665 2666 debug(2, "send command (new comm.)"); 2667 2668 index = AAC_GETREG4(sc, AAC_RKT_IQUE); 2669 if (index == 0xffffffffL) 2670 index = AAC_GETREG4(sc, AAC_RKT_IQUE); 2671 if (index == 0xffffffffL) 2672 return index; 2673 aac_enqueue_busy(cm); 2674 device = index; 2675 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2676 device += 4; 2677 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2678 device += 4; 2679 AAC_SETREG4(sc, device, cm->cm_fib->Header.Size); 2680 AAC_SETREG4(sc, AAC_RKT_IQUE, index); 2681 return 0; 2682 } 2683 2684 /* 2685 * New comm. interface: get, set outbound queue index 2686 */ 2687 static int 2688 aac_rx_get_outb_queue(struct aac_softc *sc) 2689 { 2690 debug_called(3); 2691 2692 return(AAC_GETREG4(sc, AAC_RX_OQUE)); 2693 } 2694 2695 static int 2696 aac_rkt_get_outb_queue(struct aac_softc *sc) 2697 { 2698 debug_called(3); 2699 2700 return(AAC_GETREG4(sc, AAC_RKT_OQUE)); 2701 } 2702 2703 static void 2704 aac_rx_set_outb_queue(struct aac_softc *sc, int index) 2705 { 2706 debug_called(3); 2707 2708 AAC_SETREG4(sc, AAC_RX_OQUE, index); 2709 } 2710 2711 static void 2712 aac_rkt_set_outb_queue(struct aac_softc *sc, int index) 2713 { 2714 debug_called(3); 2715 2716 AAC_SETREG4(sc, AAC_RKT_OQUE, index); 2717 } 2718 2719 /* 2720 * Debugging and Diagnostics 2721 */ 2722 2723 /* 2724 * Print some information about the controller. 
2725 */ 2726 static void 2727 aac_describe_controller(struct aac_softc *sc) 2728 { 2729 struct aac_fib *fib; 2730 struct aac_adapter_info *info; 2731 char *adapter_type = "Adaptec RAID controller"; 2732 2733 debug_called(2); 2734 2735 mtx_lock(&sc->aac_io_lock); 2736 aac_alloc_sync_fib(sc, &fib); 2737 2738 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { 2739 fib->data[0] = 0; 2740 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) 2741 device_printf(sc->aac_dev, 2742 "RequestSupplementAdapterInfo failed\n"); 2743 else 2744 adapter_type = ((struct aac_supplement_adapter_info *) 2745 &fib->data[0])->AdapterTypeText; 2746 } 2747 device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n", 2748 adapter_type, 2749 AAC_DRIVER_VERSION >> 24, 2750 (AAC_DRIVER_VERSION >> 16) & 0xFF, 2751 AAC_DRIVER_VERSION & 0xFF, 2752 AAC_DRIVER_BUILD); 2753 2754 fib->data[0] = 0; 2755 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { 2756 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); 2757 aac_release_sync_fib(sc); 2758 mtx_unlock(&sc->aac_io_lock); 2759 return; 2760 } 2761 2762 /* save the kernel revision structure for later use */ 2763 info = (struct aac_adapter_info *)&fib->data[0]; 2764 sc->aac_revision = info->KernelRevision; 2765 2766 2767 if (bootverbose) { 2768 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " 2769 "(%dMB cache, %dMB execution), %s\n", 2770 aac_describe_code(aac_cpu_variant, info->CpuVariant), 2771 info->ClockSpeed, info->TotalMem / (1024 * 1024), 2772 info->BufferMem / (1024 * 1024), 2773 info->ExecutionMem / (1024 * 1024), 2774 aac_describe_code(aac_battery_platform, 2775 info->batteryPlatform)); 2776 2777 device_printf(sc->aac_dev, 2778 "Kernel %d.%d-%d, Build %d, S/N %6X\n", 2779 info->KernelRevision.external.comp.major, 2780 info->KernelRevision.external.comp.minor, 2781 info->KernelRevision.external.comp.dash, 2782 info->KernelRevision.buildNumber, 2783 (u_int32_t)(info->SerialNumber & 0xffffff)); 2784 2785 device_printf(sc->aac_dev, "Supported Options=%b\n", 2786 sc->supported_options, 2787 "\20" 2788 "\1SNAPSHOT" 2789 "\2CLUSTERS" 2790 "\3WCACHE" 2791 "\4DATA64" 2792 "\5HOSTTIME" 2793 "\6RAID50" 2794 "\7WINDOW4GB" 2795 "\10SCSIUPGD" 2796 "\11SOFTERR" 2797 "\12NORECOND" 2798 "\13SGMAP64" 2799 "\14ALARM" 2800 "\15NONDASD" 2801 "\16SCSIMGT" 2802 "\17RAIDSCSI" 2803 "\21ADPTINFO" 2804 "\22NEWCOMM" 2805 "\23ARRAY64BIT" 2806 "\24HEATSENSOR"); 2807 } 2808 aac_release_sync_fib(sc); 2809 mtx_unlock(&sc->aac_io_lock); 2810 } 2811 2812 /* 2813 * Look up a text description of a numeric error code and return a pointer to 2814 * same. 
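 * Lookup tables are expected to end with a NULL-string sentinel followed
 * by a catch-all entry, which is what gets returned when no code matches.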
 */
static char *
aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
{
	int i;

	for (i = 0; table[i].string != NULL; i++)
		if (table[i].code == code)
			return(table[i].string);
	return(table[i + 1].string);
}

/*
 * Management Interface
 */

static int
aac_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{
	struct aac_softc *sc;

	debug_called(2);

	sc = dev->si_drv1;
	sc->aac_open_cnt++;
	sc->aac_state |= AAC_STATE_OPEN;

	return 0;
}

static int
aac_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{
	struct aac_softc *sc;

	debug_called(2);

	sc = dev->si_drv1;
	sc->aac_open_cnt--;
	/* Mark this unit as no longer open */
	if (sc->aac_open_cnt == 0)
		sc->aac_state &= ~AAC_STATE_OPEN;

	return 0;
}

static int
aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
	union aac_statrequest *as;
	struct aac_softc *sc;
	int error = 0;

	debug_called(2);

	as = (union aac_statrequest *)arg;
	sc = dev->si_drv1;

	switch (cmd) {
	case AACIO_STATS:
		switch (as->as_item) {
		case AACQ_FREE:
		case AACQ_BIO:
		case AACQ_READY:
		case AACQ_BUSY:
			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
			      sizeof(struct aac_qstat));
			break;
		default:
			error = ENOENT;
			break;
		}
		break;

	case FSACTL_SENDFIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_SENDFIB:
		debug(1, "FSACTL_SENDFIB");
		error = aac_ioctl_sendfib(sc, arg);
		break;
	case FSACTL_AIF_THREAD:
	case FSACTL_LNX_AIF_THREAD:
		debug(1, "FSACTL_AIF_THREAD");
		error = EINVAL;
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
		debug(1, "FSACTL_OPEN_GET_ADAPTER_FIB");
		error = aac_open_aif(sc, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
		debug(1, "FSACTL_GET_NEXT_ADAPTER_FIB");
		error = aac_getnext_aif(sc, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
		debug(1, "FSACTL_CLOSE_GET_ADAPTER_FIB");
		error = aac_close_aif(sc, arg);
		break;
	case FSACTL_MINIPORT_REV_CHECK:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_MINIPORT_REV_CHECK:
		debug(1, "FSACTL_MINIPORT_REV_CHECK");
		error = aac_rev_check(sc, arg);
		break;
	case FSACTL_QUERY_DISK:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_QUERY_DISK:
		debug(1, "FSACTL_QUERY_DISK");
		error = aac_query_disk(sc, arg);
		break;
	case FSACTL_DELETE_DISK:
	case FSACTL_LNX_DELETE_DISK:
		/*
		 * We don't trust userland to tell us when to delete a
		 * container, rather we rely on an AIF coming from the
		 * controller
		 */
		error = 0;
		break;
	case FSACTL_GET_PCI_INFO:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_GET_PCI_INFO:
		debug(1, "FSACTL_GET_PCI_INFO");
		error = aac_get_pci_info(sc, arg);
		break;
	default:
		debug(1, "unsupported cmd 0x%lx\n", cmd);
		error = EINVAL;
		break;
	}
	return(error);
}

static int
aac_poll(struct cdev *dev, int poll_events, d_thread_t *td)
{
	struct aac_softc *sc;
	int revents;

	sc =
dev->si_drv1; 2960 revents = 0; 2961 2962 mtx_lock(&sc->aac_aifq_lock); 2963 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) { 2964 if (sc->aifq_idx != 0 || sc->aifq_filled) 2965 revents |= poll_events & (POLLIN | POLLRDNORM); 2966 } 2967 mtx_unlock(&sc->aac_aifq_lock); 2968 2969 if (revents == 0) { 2970 if (poll_events & (POLLIN | POLLRDNORM)) 2971 selrecord(td, &sc->rcv_select); 2972 } 2973 2974 return (revents); 2975 } 2976 2977 static void 2978 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) 2979 { 2980 2981 switch (event->ev_type) { 2982 case AAC_EVENT_CMFREE: 2983 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2984 if (aac_alloc_command(sc, (struct aac_command **)arg)) { 2985 aac_add_event(sc, event); 2986 return; 2987 } 2988 free(event, M_AACBUF); 2989 wakeup(arg); 2990 break; 2991 default: 2992 break; 2993 } 2994 } 2995 2996 /* 2997 * Send a FIB supplied from userspace 2998 */ 2999 static int 3000 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) 3001 { 3002 struct aac_command *cm; 3003 int size, error; 3004 3005 debug_called(2); 3006 3007 cm = NULL; 3008 3009 /* 3010 * Get a command 3011 */ 3012 mtx_lock(&sc->aac_io_lock); 3013 if (aac_alloc_command(sc, &cm)) { 3014 struct aac_event *event; 3015 3016 event = malloc(sizeof(struct aac_event), M_AACBUF, 3017 M_NOWAIT | M_ZERO); 3018 if (event == NULL) { 3019 error = EBUSY; 3020 mtx_unlock(&sc->aac_io_lock); 3021 goto out; 3022 } 3023 event->ev_type = AAC_EVENT_CMFREE; 3024 event->ev_callback = aac_ioctl_event; 3025 event->ev_arg = &cm; 3026 aac_add_event(sc, event); 3027 msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0); 3028 } 3029 mtx_unlock(&sc->aac_io_lock); 3030 3031 /* 3032 * Fetch the FIB header, then re-copy to get data as well. 3033 */ 3034 if ((error = copyin(ufib, cm->cm_fib, 3035 sizeof(struct aac_fib_header))) != 0) 3036 goto out; 3037 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); 3038 if (size > sizeof(struct aac_fib)) { 3039 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %zd)\n", 3040 size, sizeof(struct aac_fib)); 3041 size = sizeof(struct aac_fib); 3042 } 3043 if ((error = copyin(ufib, cm->cm_fib, size)) != 0) 3044 goto out; 3045 cm->cm_fib->Header.Size = size; 3046 cm->cm_timestamp = time_uptime; 3047 3048 /* 3049 * Pass the FIB to the controller, wait for it to complete. 3050 */ 3051 mtx_lock(&sc->aac_io_lock); 3052 error = aac_wait_command(cm); 3053 mtx_unlock(&sc->aac_io_lock); 3054 if (error != 0) { 3055 device_printf(sc->aac_dev, 3056 "aac_wait_command return %d\n", error); 3057 goto out; 3058 } 3059 3060 /* 3061 * Copy the FIB and data back out to the caller. 3062 */ 3063 size = cm->cm_fib->Header.Size; 3064 if (size > sizeof(struct aac_fib)) { 3065 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %zd)\n", 3066 size, sizeof(struct aac_fib)); 3067 size = sizeof(struct aac_fib); 3068 } 3069 error = copyout(cm->cm_fib, ufib, size); 3070 3071 out: 3072 if (cm != NULL) { 3073 mtx_lock(&sc->aac_io_lock); 3074 aac_release_command(cm); 3075 mtx_unlock(&sc->aac_io_lock); 3076 } 3077 return(error); 3078 } 3079 3080 /* 3081 * Handle an AIF sent to us by the controller; queue it for later reference. 3082 * If the queue fills up, then drop the older entries. 
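 * AIFs are copied into the aac_aifq ring (AAC_AIFQ_LENGTH entries); each
 * userland reader tracks its own position in the ring via a struct
 * aac_fib_context and drains it through the get-next-adapter-fib ioctl.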
 */
static void
aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
{
	struct aac_aif_command *aif;
	struct aac_container *co, *co_next;
	struct aac_fib_context *ctx;
	struct aac_mntinfo *mi;
	struct aac_mntinforesp *mir = NULL;
	u_int16_t rsize;
	int next, current, found;
	int count = 0, added = 0, i = 0;

	debug_called(2);

	aif = (struct aac_aif_command*)&fib->data[0];
	aac_print_aif(sc, aif);

	/* Is it an event that we should care about? */
	switch (aif->command) {
	case AifCmdEventNotify:
		switch (aif->data.EN.type) {
		case AifEnAddContainer:
		case AifEnDeleteContainer:
			/*
			 * A container was added or deleted, but the message
			 * doesn't tell us anything else!  Re-enumerate the
			 * containers and sort things out.
			 */
			aac_alloc_sync_fib(sc, &fib);
			mi = (struct aac_mntinfo *)&fib->data[0];
			do {
				/*
				 * Ask the controller for its containers one at
				 * a time.
				 * XXX What if the controller's list changes
				 * midway through this enumeration?
				 * XXX This should be done async.
				 */
				bzero(mi, sizeof(struct aac_mntinfo));
				mi->Command = VM_NameServe;
				mi->MntType = FT_FILESYS;
				mi->MntCount = i;
				rsize = sizeof(mir);
				if (aac_sync_fib(sc, ContainerCommand, 0, fib,
						 sizeof(struct aac_mntinfo))) {
					printf("Error probing container %d\n",
					       i);
					continue;
				}
				mir = (struct aac_mntinforesp *)&fib->data[0];
				/* XXX Need to check if count changed */
				count = mir->MntRespCount;
				/*
				 * Check the container against our list.
				 * co->co_found was already set to 0 in a
				 * previous run.
				 */
				if ((mir->Status == ST_OK) &&
				    (mir->MntTable[0].VolType != CT_NONE)) {
					found = 0;
					TAILQ_FOREACH(co,
						      &sc->aac_container_tqh,
						      co_link) {
						if (co->co_mntobj.ObjectId ==
						    mir->MntTable[0].ObjectId) {
							co->co_found = 1;
							found = 1;
							break;
						}
					}
					/*
					 * If the container matched, continue
					 * in the list.
					 */
					if (found) {
						i++;
						continue;
					}

					/*
					 * This is a new container.  Do all the
					 * appropriate things to set it up.
					 */
					aac_add_container(sc, mir, 1);
					added = 1;
				}
				i++;
			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
			aac_release_sync_fib(sc);

			/*
			 * Go through our list of containers and see which ones
			 * were not marked 'found'.  Since the controller didn't
			 * list them they must have been deleted.  Do the
			 * appropriate steps to destroy the device.  Also reset
			 * the co->co_found field.
3180 */ 3181 co = TAILQ_FIRST(&sc->aac_container_tqh); 3182 while (co != NULL) { 3183 if (co->co_found == 0) { 3184 mtx_unlock(&sc->aac_io_lock); 3185 mtx_lock(&Giant); 3186 device_delete_child(sc->aac_dev, 3187 co->co_disk); 3188 mtx_unlock(&Giant); 3189 mtx_lock(&sc->aac_io_lock); 3190 co_next = TAILQ_NEXT(co, co_link); 3191 mtx_lock(&sc->aac_container_lock); 3192 TAILQ_REMOVE(&sc->aac_container_tqh, co, 3193 co_link); 3194 mtx_unlock(&sc->aac_container_lock); 3195 free(co, M_AACBUF); 3196 co = co_next; 3197 } else { 3198 co->co_found = 0; 3199 co = TAILQ_NEXT(co, co_link); 3200 } 3201 } 3202 3203 /* Attach the newly created containers */ 3204 if (added) { 3205 mtx_unlock(&sc->aac_io_lock); 3206 mtx_lock(&Giant); 3207 bus_generic_attach(sc->aac_dev); 3208 mtx_unlock(&Giant); 3209 mtx_lock(&sc->aac_io_lock); 3210 } 3211 3212 break; 3213 3214 default: 3215 break; 3216 } 3217 3218 default: 3219 break; 3220 } 3221 3222 /* Copy the AIF data to the AIF queue for ioctl retrieval */ 3223 mtx_lock(&sc->aac_aifq_lock); 3224 current = sc->aifq_idx; 3225 next = (current + 1) % AAC_AIFQ_LENGTH; 3226 if (next == 0) 3227 sc->aifq_filled = 1; 3228 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); 3229 /* modify AIF contexts */ 3230 if (sc->aifq_filled) { 3231 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3232 if (next == ctx->ctx_idx) 3233 ctx->ctx_wrap = 1; 3234 else if (current == ctx->ctx_idx && ctx->ctx_wrap) 3235 ctx->ctx_idx = next; 3236 } 3237 } 3238 sc->aifq_idx = next; 3239 /* On the off chance that someone is sleeping for an aif... */ 3240 if (sc->aac_state & AAC_STATE_AIF_SLEEPER) 3241 wakeup(sc->aac_aifq); 3242 /* Wakeup any poll()ers */ 3243 selwakeuppri(&sc->rcv_select, PRIBIO); 3244 mtx_unlock(&sc->aac_aifq_lock); 3245 3246 return; 3247 } 3248 3249 /* 3250 * Return the Revision of the driver to userspace and check to see if the 3251 * userspace app is possibly compatible. This is extremely bogus since 3252 * our driver doesn't follow Adaptec's versioning system. Cheat by just 3253 * returning what the card reported. 3254 */ 3255 static int 3256 aac_rev_check(struct aac_softc *sc, caddr_t udata) 3257 { 3258 struct aac_rev_check rev_check; 3259 struct aac_rev_check_resp rev_check_resp; 3260 int error = 0; 3261 3262 debug_called(2); 3263 3264 /* 3265 * Copyin the revision struct from userspace 3266 */ 3267 if ((error = copyin(udata, (caddr_t)&rev_check, 3268 sizeof(struct aac_rev_check))) != 0) { 3269 return error; 3270 } 3271 3272 debug(2, "Userland revision= %d\n", 3273 rev_check.callingRevision.buildNumber); 3274 3275 /* 3276 * Doctor up the response struct. 
3277 */ 3278 rev_check_resp.possiblyCompatible = 1; 3279 rev_check_resp.adapterSWRevision.external.ul = 3280 sc->aac_revision.external.ul; 3281 rev_check_resp.adapterSWRevision.buildNumber = 3282 sc->aac_revision.buildNumber; 3283 3284 return(copyout((caddr_t)&rev_check_resp, udata, 3285 sizeof(struct aac_rev_check_resp))); 3286 } 3287 3288 /* 3289 * Pass the fib context to the caller 3290 */ 3291 static int 3292 aac_open_aif(struct aac_softc *sc, caddr_t arg) 3293 { 3294 struct aac_fib_context *fibctx, *ctx; 3295 int error = 0; 3296 3297 debug_called(2); 3298 3299 fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO); 3300 if (fibctx == NULL) 3301 return (ENOMEM); 3302 3303 mtx_lock(&sc->aac_aifq_lock); 3304 /* all elements are already 0, add to queue */ 3305 if (sc->fibctx == NULL) 3306 sc->fibctx = fibctx; 3307 else { 3308 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) 3309 ; 3310 ctx->next = fibctx; 3311 fibctx->prev = ctx; 3312 } 3313 3314 /* evaluate unique value */ 3315 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); 3316 ctx = sc->fibctx; 3317 while (ctx != fibctx) { 3318 if (ctx->unique == fibctx->unique) { 3319 fibctx->unique++; 3320 ctx = sc->fibctx; 3321 } else { 3322 ctx = ctx->next; 3323 } 3324 } 3325 mtx_unlock(&sc->aac_aifq_lock); 3326 3327 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); 3328 if (error) 3329 aac_close_aif(sc, (caddr_t)ctx); 3330 return error; 3331 } 3332 3333 /* 3334 * Close the caller's fib context 3335 */ 3336 static int 3337 aac_close_aif(struct aac_softc *sc, caddr_t arg) 3338 { 3339 struct aac_fib_context *ctx; 3340 3341 debug_called(2); 3342 3343 mtx_lock(&sc->aac_aifq_lock); 3344 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3345 if (ctx->unique == *(uint32_t *)&arg) { 3346 if (ctx == sc->fibctx) 3347 sc->fibctx = NULL; 3348 else { 3349 ctx->prev->next = ctx->next; 3350 if (ctx->next) 3351 ctx->next->prev = ctx->prev; 3352 } 3353 break; 3354 } 3355 } 3356 mtx_unlock(&sc->aac_aifq_lock); 3357 if (ctx) 3358 free(ctx, M_AACBUF); 3359 3360 return 0; 3361 } 3362 3363 /* 3364 * Pass the caller the next AIF in their queue 3365 */ 3366 static int 3367 aac_getnext_aif(struct aac_softc *sc, caddr_t arg) 3368 { 3369 struct get_adapter_fib_ioctl agf; 3370 struct aac_fib_context *ctx; 3371 int error; 3372 3373 debug_called(2); 3374 3375 if ((error = copyin(arg, &agf, sizeof(agf))) == 0) { 3376 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3377 if (agf.AdapterFibContext == ctx->unique) 3378 break; 3379 } 3380 if (!ctx) 3381 return (EFAULT); 3382 3383 error = aac_return_aif(sc, ctx, agf.AifFib); 3384 if (error == EAGAIN && agf.Wait) { 3385 debug(2, "aac_getnext_aif(): waiting for AIF"); 3386 sc->aac_state |= AAC_STATE_AIF_SLEEPER; 3387 while (error == EAGAIN) { 3388 error = tsleep(sc->aac_aifq, PRIBIO | 3389 PCATCH, "aacaif", 0); 3390 if (error == 0) 3391 error = aac_return_aif(sc, ctx, agf.AifFib); 3392 } 3393 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; 3394 } 3395 } 3396 return(error); 3397 } 3398 3399 /* 3400 * Hand the next AIF off the top of the queue out to userspace. 
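 * The context is considered empty when its read index has caught up with
 * the driver's write index and the writer has not lapped it (ctx_wrap).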
3401 */ 3402 static int 3403 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) 3404 { 3405 int current, error; 3406 3407 debug_called(2); 3408 3409 mtx_lock(&sc->aac_aifq_lock); 3410 current = ctx->ctx_idx; 3411 if (current == sc->aifq_idx && !ctx->ctx_wrap) { 3412 /* empty */ 3413 mtx_unlock(&sc->aac_aifq_lock); 3414 return (EAGAIN); 3415 } 3416 error = 3417 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); 3418 if (error) 3419 device_printf(sc->aac_dev, 3420 "aac_return_aif: copyout returned %d\n", error); 3421 else { 3422 ctx->ctx_wrap = 0; 3423 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 3424 } 3425 mtx_unlock(&sc->aac_aifq_lock); 3426 return(error); 3427 } 3428 3429 static int 3430 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) 3431 { 3432 struct aac_pci_info { 3433 u_int32_t bus; 3434 u_int32_t slot; 3435 } pciinf; 3436 int error; 3437 3438 debug_called(2); 3439 3440 pciinf.bus = pci_get_bus(sc->aac_dev); 3441 pciinf.slot = pci_get_slot(sc->aac_dev); 3442 3443 error = copyout((caddr_t)&pciinf, uptr, 3444 sizeof(struct aac_pci_info)); 3445 3446 return (error); 3447 } 3448 3449 /* 3450 * Give the userland some information about the container. The AAC arch 3451 * expects the driver to be a SCSI passthrough type driver, so it expects 3452 * the containers to have b:t:l numbers. Fake it. 3453 */ 3454 static int 3455 aac_query_disk(struct aac_softc *sc, caddr_t uptr) 3456 { 3457 struct aac_query_disk query_disk; 3458 struct aac_container *co; 3459 struct aac_disk *disk; 3460 int error, id; 3461 3462 debug_called(2); 3463 3464 disk = NULL; 3465 3466 error = copyin(uptr, (caddr_t)&query_disk, 3467 sizeof(struct aac_query_disk)); 3468 if (error) 3469 return (error); 3470 3471 id = query_disk.ContainerNumber; 3472 if (id == -1) 3473 return (EINVAL); 3474 3475 mtx_lock(&sc->aac_container_lock); 3476 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { 3477 if (co->co_mntobj.ObjectId == id) 3478 break; 3479 } 3480 3481 if (co == NULL) { 3482 query_disk.Valid = 0; 3483 query_disk.Locked = 0; 3484 query_disk.Deleted = 1; /* XXX is this right? */ 3485 } else { 3486 disk = device_get_softc(co->co_disk); 3487 query_disk.Valid = 1; 3488 query_disk.Locked = 3489 (disk->ad_flags & AAC_DISK_OPEN) ? 
1 : 0; 3490 query_disk.Deleted = 0; 3491 query_disk.Bus = device_get_unit(sc->aac_dev); 3492 query_disk.Target = disk->unit; 3493 query_disk.Lun = 0; 3494 query_disk.UnMapped = 0; 3495 sprintf(&query_disk.diskDeviceName[0], "%s%d", 3496 disk->ad_disk->d_name, disk->ad_disk->d_unit); 3497 } 3498 mtx_unlock(&sc->aac_container_lock); 3499 3500 error = copyout((caddr_t)&query_disk, uptr, 3501 sizeof(struct aac_query_disk)); 3502 3503 return (error); 3504 } 3505 3506 static void 3507 aac_get_bus_info(struct aac_softc *sc) 3508 { 3509 struct aac_fib *fib; 3510 struct aac_ctcfg *c_cmd; 3511 struct aac_ctcfg_resp *c_resp; 3512 struct aac_vmioctl *vmi; 3513 struct aac_vmi_businf_resp *vmi_resp; 3514 struct aac_getbusinf businfo; 3515 struct aac_sim *caminf; 3516 device_t child; 3517 int i, found, error; 3518 3519 mtx_lock(&sc->aac_io_lock); 3520 aac_alloc_sync_fib(sc, &fib); 3521 c_cmd = (struct aac_ctcfg *)&fib->data[0]; 3522 bzero(c_cmd, sizeof(struct aac_ctcfg)); 3523 3524 c_cmd->Command = VM_ContainerConfig; 3525 c_cmd->cmd = CT_GET_SCSI_METHOD; 3526 c_cmd->param = 0; 3527 3528 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3529 sizeof(struct aac_ctcfg)); 3530 if (error) { 3531 device_printf(sc->aac_dev, "Error %d sending " 3532 "VM_ContainerConfig command\n", error); 3533 aac_release_sync_fib(sc); 3534 mtx_unlock(&sc->aac_io_lock); 3535 return; 3536 } 3537 3538 c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; 3539 if (c_resp->Status != ST_OK) { 3540 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", 3541 c_resp->Status); 3542 aac_release_sync_fib(sc); 3543 mtx_unlock(&sc->aac_io_lock); 3544 return; 3545 } 3546 3547 sc->scsi_method_id = c_resp->param; 3548 3549 vmi = (struct aac_vmioctl *)&fib->data[0]; 3550 bzero(vmi, sizeof(struct aac_vmioctl)); 3551 3552 vmi->Command = VM_Ioctl; 3553 vmi->ObjType = FT_DRIVE; 3554 vmi->MethId = sc->scsi_method_id; 3555 vmi->ObjId = 0; 3556 vmi->IoctlCmd = GetBusInfo; 3557 3558 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3559 sizeof(struct aac_vmioctl)); 3560 if (error) { 3561 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", 3562 error); 3563 aac_release_sync_fib(sc); 3564 mtx_unlock(&sc->aac_io_lock); 3565 return; 3566 } 3567 3568 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; 3569 if (vmi_resp->Status != ST_OK) { 3570 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", 3571 vmi_resp->Status); 3572 aac_release_sync_fib(sc); 3573 mtx_unlock(&sc->aac_io_lock); 3574 return; 3575 } 3576 3577 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); 3578 aac_release_sync_fib(sc); 3579 mtx_unlock(&sc->aac_io_lock); 3580 3581 found = 0; 3582 for (i = 0; i < businfo.BusCount; i++) { 3583 if (businfo.BusValid[i] != AAC_BUS_VALID) 3584 continue; 3585 3586 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), 3587 M_AACBUF, M_NOWAIT | M_ZERO); 3588 if (caminf == NULL) { 3589 device_printf(sc->aac_dev, 3590 "No memory to add passthrough bus %d\n", i); 3591 break; 3592 }; 3593 3594 child = device_add_child(sc->aac_dev, "aacp", -1); 3595 if (child == NULL) { 3596 device_printf(sc->aac_dev, 3597 "device_add_child failed for passthrough bus %d\n", 3598 i); 3599 free(caminf, M_AACBUF); 3600 break; 3601 } 3602 3603 caminf->TargetsPerBus = businfo.TargetsPerBus; 3604 caminf->BusNumber = i; 3605 caminf->InitiatorBusId = businfo.InitiatorBusId[i]; 3606 caminf->aac_sc = sc; 3607 caminf->sim_dev = child; 3608 3609 device_set_ivars(child, caminf); 3610 device_set_desc(child, "SCSI Passthrough Bus"); 3611 
TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); 3612 3613 found = 1; 3614 } 3615 3616 if (found) 3617 bus_generic_attach(sc->aac_dev); 3618 3619 return; 3620 } 3621