/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2001 Adaptec, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
 */
#define AAC_DRIVER_VERSION		0x02000000
#define AAC_DRIVERNAME			"aac"

#include "opt_aac.h"

/* #include <stddef.h> */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/poll.h>
#include <sys/ioccom.h>

#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <sys/bus_dma.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/aac/aacreg.h>
#include <sys/aac_ioctl.h>
#include <dev/aac/aacvar.h>
#include <dev/aac/aac_tables.h>

static void	aac_startup(void *arg);
static void	aac_add_container(struct aac_softc *sc,
				  struct aac_mntinforesp *mir, int f);
static void	aac_get_bus_info(struct aac_softc *sc);

/* Command Processing */
static void	aac_timeout(struct aac_softc *sc);
static void	aac_complete(void *context, int pending);
static int	aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
static void	aac_bio_complete(struct aac_command *cm);
static int	aac_wait_command(struct aac_command *cm);
static void	aac_command_thread(struct aac_softc *sc);

/* Command Buffer Management */
static void	aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
				   int nseg, int error);
static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
				       int nseg, int error);
static int	aac_alloc_commands(struct aac_softc *sc);
static void	aac_free_commands(struct aac_softc *sc);
static void	aac_unmap_command(struct aac_command *cm);

/* Hardware Interface */
static int	aac_alloc(struct aac_softc *sc);
static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
			       int error);
static int	aac_check_firmware(struct aac_softc *sc);
static int	aac_init(struct aac_softc *sc);
static int	aac_sync_command(struct aac_softc *sc, u_int32_t command,
				 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
				 u_int32_t arg3, u_int32_t *sp);
static int	aac_setup_intr(struct aac_softc *sc);
static int	aac_enqueue_fib(struct aac_softc *sc, int queue,
				struct aac_command *cm);
static int	aac_dequeue_fib(struct aac_softc *sc, int queue,
				u_int32_t *fib_size, struct aac_fib **fib_addr);
static int	aac_enqueue_response(struct aac_softc *sc, int queue,
				     struct aac_fib *fib);

/* Falcon/PPC interface */
static int	aac_fa_get_fwstatus(struct aac_softc *sc);
static void	aac_fa_qnotify(struct aac_softc *sc, int qbit);
static int	aac_fa_get_istatus(struct aac_softc *sc);
static void	aac_fa_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_fa_set_mailbox(struct aac_softc *sc, u_int32_t command,
				   u_int32_t arg0, u_int32_t arg1,
				   u_int32_t arg2, u_int32_t arg3);
static int	aac_fa_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_fa_set_interrupts(struct aac_softc *sc, int enable);

struct aac_interface aac_fa_interface = {
	aac_fa_get_fwstatus,
	aac_fa_qnotify,
	aac_fa_get_istatus,
	aac_fa_clear_istatus,
	aac_fa_set_mailbox,
	aac_fa_get_mailbox,
	aac_fa_set_interrupts,
	NULL, NULL, NULL
};

/* StrongARM interface */
static int	aac_sa_get_fwstatus(struct aac_softc *sc);
static void	aac_sa_qnotify(struct aac_softc *sc, int qbit);
static int	aac_sa_get_istatus(struct aac_softc *sc);
static void	aac_sa_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
				   u_int32_t arg0, u_int32_t arg1,
				   u_int32_t arg2, u_int32_t arg3);
static int	aac_sa_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_sa_set_interrupts(struct aac_softc *sc, int enable);

struct aac_interface aac_sa_interface = {
	aac_sa_get_fwstatus,
	aac_sa_qnotify,
	aac_sa_get_istatus,
	aac_sa_clear_istatus,
	aac_sa_set_mailbox,
	aac_sa_get_mailbox,
	aac_sa_set_interrupts,
	NULL, NULL, NULL
};

/* i960Rx interface */
static int	aac_rx_get_fwstatus(struct aac_softc *sc);
static void	aac_rx_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rx_get_istatus(struct aac_softc *sc);
static void	aac_rx_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
				   u_int32_t arg0, u_int32_t arg1,
				   u_int32_t arg2, u_int32_t arg3);
static int	aac_rx_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rx_set_interrupts(struct aac_softc *sc, int enable);
static int	aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_rx_get_outb_queue(struct aac_softc *sc);
static void	aac_rx_set_outb_queue(struct aac_softc *sc, int index);

struct aac_interface aac_rx_interface = {
	aac_rx_get_fwstatus,
	aac_rx_qnotify,
	aac_rx_get_istatus,
	aac_rx_clear_istatus,
	aac_rx_set_mailbox,
	aac_rx_get_mailbox,
	aac_rx_set_interrupts,
	aac_rx_send_command,
	aac_rx_get_outb_queue,
	aac_rx_set_outb_queue
};

/* Rocket/MIPS interface */
static int	aac_rkt_get_fwstatus(struct aac_softc *sc);
static void	aac_rkt_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rkt_get_istatus(struct aac_softc *sc);
static void	aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
				    u_int32_t arg0, u_int32_t arg1,
				    u_int32_t arg2, u_int32_t arg3);
static int	aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
static int	aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_rkt_get_outb_queue(struct aac_softc *sc);
static void	aac_rkt_set_outb_queue(struct aac_softc *sc, int index);

struct aac_interface aac_rkt_interface = {
	aac_rkt_get_fwstatus,
	aac_rkt_qnotify,
	aac_rkt_get_istatus,
	aac_rkt_clear_istatus,
	aac_rkt_set_mailbox,
	aac_rkt_get_mailbox,
	aac_rkt_set_interrupts,
	aac_rkt_send_command,
	aac_rkt_get_outb_queue,
	aac_rkt_set_outb_queue
};

/* Debugging and Diagnostics */
static void	aac_describe_controller(struct aac_softc *sc);
static char	*aac_describe_code(struct aac_code_lookup *table,
				   u_int32_t code);

/* Management Interface */
static d_open_t		aac_open;
static d_close_t	aac_close;
static d_ioctl_t	aac_ioctl;
static d_poll_t		aac_poll;
static int		aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
static int		aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
static void		aac_handle_aif(struct aac_softc *sc,
				       struct aac_fib *fib);
static int		aac_rev_check(struct aac_softc *sc, caddr_t udata);
static int		aac_open_aif(struct aac_softc *sc, caddr_t arg);
static int		aac_close_aif(struct aac_softc *sc, caddr_t arg);
static int		aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
static int		aac_return_aif(struct aac_softc *sc,
				       struct aac_fib_context *ctx, caddr_t uptr);
static int		aac_query_disk(struct aac_softc *sc, caddr_t uptr);
static int		aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
static void		aac_ioctl_event(struct aac_softc *sc,
					struct aac_event *event, void *arg);
static struct aac_mntinforesp *
	aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);

static struct cdevsw aac_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	aac_open,
	.d_close =	aac_close,
	.d_ioctl =	aac_ioctl,
	.d_poll =	aac_poll,
	.d_name =	"aac",
};

MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");

/* sysctl node */
SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");

/*
 * Device Interface
 */

/*
 * Initialize the controller and softc
 */
int
aac_attach(struct aac_softc *sc)
{
	int error, unit;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialize per-controller queues.
	 */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);
	aac_initq_bio(sc);

	/*
	 * Initialize command-completion task.
	 */
	TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);

	/* mark controller as suspended until we get ourselves organised */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Check that the firmware on the card is supported.
	 */
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/*
	 * Initialize locks
	 */
	mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

	/*
	 * Initialize the adapter.
	 */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if ((error = aac_init(sc)) != 0)
		return(error);

	/*
	 * Allocate and connect our interrupt.
	 */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/*
	 * Print a little information about the controller.
	 */
	aac_describe_controller(sc);

	/*
	 * Register to probe our containers later.
	 */
	sc->aac_ich.ich_func = aac_startup;
	sc->aac_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->aac_ich) != 0) {
		device_printf(sc->aac_dev,
			      "can't establish configuration hook\n");
		return(ENXIO);
	}

	/*
	 * Make the control device.
	 */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR,
				 0640, "aac%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread */
	if (kproc_create((void(*)(void *))aac_command_thread, sc,
			 &sc->aifthread, 0, 0, "aac%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
			      "shutdown event registration failed\n");

	/* Register with CAM for the non-DASD devices */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
		TAILQ_INIT(&sc->aac_sim_tqh);
		aac_get_bus_info(sc);
	}

	return(0);
}

void
aac_add_event(struct aac_softc *sc, struct aac_event *event)
{

	switch (event->ev_type & AAC_EVENT_MASK) {
	case AAC_EVENT_CMFREE:
		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
		break;
	default:
		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
			      event->ev_type);
		break;
	}

	return;
}

/*
 * Request information of container #cid
 */
static struct aac_mntinforesp *
aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
{
	struct aac_mntinfo *mi;

	mi = (struct aac_mntinfo *)&fib->data[0];
	/* use 64-bit LBA if enabled */
	mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
	    VM_NameServe64 : VM_NameServe;
	mi->MntType = FT_FILESYS;
	mi->MntCount = cid;

	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
			 sizeof(struct aac_mntinfo))) {
		printf("Error probing container %d\n", cid);
		return (NULL);
	}

	return ((struct aac_mntinforesp *)&fib->data[0]);
}
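
/*
 * Note: the response above is returned in place in the sync FIB's data
 * area, so it is only valid until the FIB is reused or released; callers
 * should copy out anything they need first.  Callers are also expected to
 * hold aac_io_lock around the sync FIB, roughly (sketch, as aac_startup()
 * does below):
 *
 *	mtx_lock(&sc->aac_io_lock);
 *	aac_alloc_sync_fib(sc, &fib);
 *	if ((mir = aac_get_container_info(sc, fib, cid)) != NULL)
 *		... use or copy *mir before releasing the FIB ...
 *	aac_release_sync_fib(sc);
 *	mtx_unlock(&sc->aac_io_lock);
 */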

/*
 * Probe for containers, create disks.
 */
static void
aac_startup(void *arg)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_mntinforesp *mir;
	int count = 0, i = 0;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* disconnect ourselves from the intrhook chain */
	config_intrhook_disestablish(&sc->aac_ich);

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);

	/* loop over possible containers */
	do {
		if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
			continue;
		if (i == 0)
			count = mir->MntRespCount;
		aac_add_container(sc, mir, 0);
		i++;
	} while ((i < count) && (i < AAC_MAX_CONTAINERS));

	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	/* poke the bus to actually attach the child devices */
	if (bus_generic_attach(sc->aac_dev))
		device_printf(sc->aac_dev, "bus_generic_attach failed\n");

	/* mark the controller up */
	sc->aac_state &= ~AAC_STATE_SUSPEND;

	/* enable interrupts now */
	AAC_UNMASK_INTERRUPTS(sc);
}

/*
 * Create a device to represent a new container
 */
static void
aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
{
	struct aac_container *co;
	device_t child;

	/*
	 * Check container volume type for validity.  Note that many of
	 * the possible types may never show up.
	 */
	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
		co = (struct aac_container *)malloc(sizeof *co, M_AACBUF,
		    M_NOWAIT | M_ZERO);
		if (co == NULL)
			panic("Out of memory?!");
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d",
			 mir->MntTable[0].ObjectId,
			 mir->MntTable[0].FileSystemName,
			 mir->MntTable[0].Capacity, mir->MntTable[0].VolType);

		if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
			device_printf(sc->aac_dev, "device_add_child failed\n");
		else
			device_set_ivars(child, co);
		device_set_desc(child, aac_describe_code(aac_container_types,
				mir->MntTable[0].VolType));
		co->co_disk = child;
		co->co_found = f;
		bcopy(&mir->MntTable[0], &co->co_mntobj,
		      sizeof(struct aac_mntobj));
		mtx_lock(&sc->aac_container_lock);
		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
		mtx_unlock(&sc->aac_container_lock);
	}
}

/*
 * Allocate resources associated with (sc)
 */
static int
aac_alloc(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Create DMA tag for mapping buffers into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
			       BUS_SPACE_MAXADDR :
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       MAXBSIZE,		/* maxsize */
			       sc->aac_sg_tablesize,	/* nsegments */
			       MAXBSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       busdma_lock_mutex,	/* lockfunc */
			       &sc->aac_io_lock,	/* lockfuncarg */
			       &sc->aac_buffer_dmat)) {
		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for mapping FIBs into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsize */
			       1,			/* nsegments */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_fib_dmat)) {
		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for the common structure and allocate it.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       8192 + sizeof(struct aac_common), /* maxsize */
			       1,			/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_common_dmat)) {
		device_printf(sc->aac_dev,
			      "can't allocate common structure DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
		device_printf(sc->aac_dev, "can't allocate common structure\n");
		return (ENOMEM);
	}

	/*
	 * Work around a bug in the 2120 and 2200 that cannot DMA commands
	 * below address 8192 in physical memory.
	 * XXX If the padding is not needed, can it be put to use instead
	 * of ignored?
	 */
	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
			      sc->aac_common, 8192 + sizeof(*sc->aac_common),
			      aac_common_map, sc, 0);

	if (sc->aac_common_busaddr < 8192) {
		sc->aac_common = (struct aac_common *)
		    ((uint8_t *)sc->aac_common + 8192);
		sc->aac_common_busaddr += 8192;
	}
	bzero(sc->aac_common, sizeof(*sc->aac_common));

	/* Allocate some FIBs and associated command structs */
	TAILQ_INIT(&sc->aac_fibmap_tqh);
	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
				  M_AACBUF, M_WAITOK|M_ZERO);
	while (sc->total_fibs < AAC_PREALLOCATE_FIBS) {
		if (aac_alloc_commands(sc) != 0)
			break;
	}
	if (sc->total_fibs == 0)
		return (ENOMEM);

	return (0);
}
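
/*
 * Note on FIB/command allocation: aac_alloc() only preallocates enough
 * command/FIB blocks to reach AAC_PREALLOCATE_FIBS.  If the free list
 * later runs dry, aac_alloc_command() sets AAC_AIFFLAGS_ALLOCFIBS and
 * wakes the command thread, which calls aac_alloc_commands() (outside
 * the driver lock) to grow the pool up to aac_max_fibs.
 */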

/*
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.
 */
void
aac_free(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	free(sc->aac_commands, M_AACBUF);

	/* destroy the common area */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
				sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/* disconnect the interrupt handler */
	if (sc->aac_intr)
		bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
	if (sc->aac_irq != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid,
				     sc->aac_irq);

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_resource != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     sc->aac_regs_rid, sc->aac_regs_resource);
}

/*
 * Disconnect from the controller completely, in preparation for unload.
 */
int
aac_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim *sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (sc->aac_state & AAC_STATE_OPEN)
		return(EBUSY);

	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
		error = device_delete_child(dev, co->co_disk);
		if (error)
			return (error);
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		free(co, M_AACBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
		error = device_delete_child(dev, sim->sim_dev);
		if (error)
			return (error);
		free(sim, M_AACBUF);
	}

	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		tsleep(sc->aac_dev, PUSER | PCATCH, "aacdch", 30 * hz);
	}

	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
		panic("Cannot shutdown AIF thread");

	if ((error = aac_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aac_free(sc);

	mtx_destroy(&sc->aac_aifq_lock);
	mtx_destroy(&sc->aac_io_lock);
	mtx_destroy(&sc->aac_container_lock);

	return(0);
}
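
/*
 * Note: the AIF-thread shutdown handshake above relies on
 * aac_command_thread() clearing AAC_AIFFLAGS_RUNNING and calling
 * wakeup(sc->aac_dev) on its way out; the 30 second tsleep() merely
 * bounds the wait in case the thread never exits.
 */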

/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
 *
 * Note that we can assume that the bioq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
int
aac_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O completed already
	 */
	device_printf(sc->aac_dev, "shutting down controller...");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	cc = (struct aac_close_command *)&fib->data[0];

	bzero(cc, sizeof(struct aac_close_command));
	cc->Command = VM_CloseAll;
	cc->ContainerId = 0xffffffff;
	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_close_command)))
		printf("FAILED.\n");
	else
		printf("done\n");
#if 0
	else {
		fib->data[0] = 0;
		/*
		 * XXX Issuing this command to the controller makes it shut down
		 * but also keeps it from coming back up without a reset of the
		 * PCI bus.  This is not desirable if you are just unloading the
		 * driver module with the intent to reload it later.
		 */
		if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
		    fib, 1)) {
			printf("FAILED.\n");
		} else {
			printf("done.\n");
		}
	}
#endif

	AAC_MASK_INTERRUPTS(sc);
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}

/*
 * Bring the controller to a quiescent state, ready for system suspend.
 */
int
aac_suspend(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state |= AAC_STATE_SUSPEND;

	AAC_MASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Bring the controller back to a state ready for operation.
 */
int
aac_resume(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state &= ~AAC_STATE_SUSPEND;
	AAC_UNMASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Interrupt handler for NEW_COMM interface.
 */
void
aac_new_intr(void *arg)
{
	struct aac_softc *sc;
	u_int32_t index, fast;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_lock(&sc->aac_io_lock);
	while (1) {
		index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			break;
		if (index & 2) {
			if (index == 0xfffffffe) {
				/*
				 * XXX This means that the controller wants
				 * more work.  Ignore it for now.
				 */
				continue;
			}
			/* AIF */
			fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF,
			    M_NOWAIT | M_ZERO);
			if (fib == NULL) {
				/*
				 * If we're really this short on memory,
				 * hopefully breaking out of the handler will
				 * allow something to get freed.  This
				 * actually sucks a whole lot.
				 */
				break;
			}
			index &= ~2;
			for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
				((u_int32_t *)fib)[i] = AAC_GETREG4(sc, index + i*4);
			aac_handle_aif(sc, fib);
			free(fib, M_AACBUF);

			/*
			 * AIF memory is owned by the adapter, so let it
			 * know that we are done with it.
			 */
			AAC_SET_OUTB_QUEUE(sc, index);
			AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
		} else {
			fast = index & 1;
			cm = sc->aac_commands + (index >> 2);
			fib = cm->cm_fib;
			if (fast) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
				*((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
			}
			aac_remove_busy(cm);
			aac_unmap_command(cm);
			cm->cm_flags |= AAC_CMD_COMPLETED;

			/* is there a completion handler? */
			if (cm->cm_complete != NULL) {
				cm->cm_complete(cm);
			} else {
				/*
				 * assume that someone is sleeping on this
				 * command
				 */
				wakeup(cm);
			}
			sc->flags &= ~AAC_QUEUE_FRZN;
		}
	}
	/* see if we can start some more I/O */
	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
		aac_startio(sc);

	mtx_unlock(&sc->aac_io_lock);
}

int
aac_fast_intr(void *arg)
{
	struct aac_softc *sc;
	u_int16_t reason;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Read the status register directly.  This is faster than taking the
	 * driver lock and reading the queues directly.  It also saves having
	 * to turn parts of the driver lock into a spin mutex, which would be
	 * ugly.
	 */
	reason = AAC_GET_ISTATUS(sc);
	AAC_CLEAR_ISTATUS(sc, reason);

	/* handle completion processing */
	if (reason & AAC_DB_RESPONSE_READY)
		taskqueue_enqueue_fast(taskqueue_fast, &sc->aac_task_complete);

	/* controller wants to talk to us */
	if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
		/*
		 * XXX Make sure that we don't get fooled by strange messages
		 * that start with a NULL.
		 */
		if ((reason & AAC_DB_PRINTF) &&
		    (sc->aac_common->ac_printf[0] == 0))
			sc->aac_common->ac_printf[0] = 32;

		/*
		 * This might miss doing the actual wakeup.  However, the
		 * msleep that this is waking up has a timeout, so it will
		 * wake up eventually.  AIFs and printfs are low enough
		 * priority that they can handle hanging out for a few seconds
		 * if needed.
		 */
		wakeup(sc->aifthread);
	}
	return (FILTER_HANDLED);
}

/*
 * Command Processing
 */

/*
 * Start as much queued I/O as possible on the controller
 */
void
aac_startio(struct aac_softc *sc)
{
	struct aac_command *cm;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	for (;;) {
		/*
		 * This flag might be set if the card is out of resources.
		 * Checking it here prevents an infinite loop of deferrals.
		 */
		if (sc->flags & AAC_QUEUE_FRZN)
			break;

		/*
		 * Try to get a command that's been put off for lack of
		 * resources
		 */
		cm = aac_dequeue_ready(sc);

		/*
		 * Try to build a command off the bio queue (ignore error
		 * return)
		 */
		if (cm == NULL)
			aac_bio_command(sc, &cm);

		/* nothing to do? */
		if (cm == NULL)
			break;

		/* don't map more than once */
		if (cm->cm_flags & AAC_CMD_MAPPED)
			panic("aac: command %p already mapped", cm);

		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		if (cm->cm_datalen != 0) {
			error = bus_dmamap_load(sc->aac_buffer_dmat,
						cm->cm_datamap, cm->cm_data,
						cm->cm_datalen,
						aac_map_command_sg, cm, 0);
			if (error == EINPROGRESS) {
				fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
				sc->flags |= AAC_QUEUE_FRZN;
				error = 0;
			} else if (error != 0)
				panic("aac_startio: unexpected error %d from "
				      "busdma", error);
		} else
			aac_map_command_sg(cm, NULL, 0, 0);
	}
}

/*
 * Handle notification of one or more FIBs coming from the controller.
 */
static void
aac_command_thread(struct aac_softc *sc)
{
	struct aac_fib *fib;
	u_int32_t fib_size;
	int size, retval;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	sc->aifflags = AAC_AIFFLAGS_RUNNING;

	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {

		retval = 0;
		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
					"aifthd", AAC_PERIODIC_INTERVAL * hz);

		/*
		 * First see if any FIBs need to be allocated.  This needs
		 * to be called without the driver lock because contigmalloc
		 * will grab Giant, and would result in an LOR.
		 */
		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
			mtx_unlock(&sc->aac_io_lock);
			aac_alloc_commands(sc);
			mtx_lock(&sc->aac_io_lock);
			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
			aac_startio(sc);
		}

		/*
		 * While we're here, check to see if any commands are stuck.
		 * This is pretty low-priority, so it's ok if it doesn't
		 * always fire.
		 */
		if (retval == EWOULDBLOCK)
			aac_timeout(sc);

		/* Check the hardware printf message buffer */
		if (sc->aac_common->ac_printf[0] != 0)
			aac_print_printf(sc);

		/* Also check to see if the adapter has a command for us. */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			continue;
		for (;;) {
			if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
					    &fib_size, &fib))
				break;

			AAC_PRINT_FIB(sc, fib);

			switch (fib->Header.Command) {
			case AifRequest:
				aac_handle_aif(sc, fib);
				break;
			default:
				device_printf(sc->aac_dev, "unknown command "
					      "from controller\n");
				break;
			}

			if ((fib->Header.XferState == 0) ||
			    (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
				break;
			}

			/* Return the AIF to the controller. */
			if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
				*(AAC_FSAStatus*)fib->data = ST_OK;

				/* XXX Compute the Size field? */
				size = fib->Header.Size;
				if (size > sizeof(struct aac_fib)) {
					size = sizeof(struct aac_fib);
					fib->Header.Size = size;
				}
				/*
				 * Since we did not generate this command, it
				 * cannot go through the normal
				 * enqueue->startio chain.
				 */
				aac_enqueue_response(sc,
						     AAC_ADAP_NORM_RESP_QUEUE,
						     fib);
			}
		}
	}
	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
	mtx_unlock(&sc->aac_io_lock);
	wakeup(sc->aac_dev);

	kproc_exit(0);
}
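
/*
 * Note: on NEW_COMM adapters the loop above deliberately skips the host
 * command queue; AIFs and command completions arrive through aac_new_intr()
 * instead.  On the legacy interface, completions are drained from the host
 * response queue by aac_complete() below, which runs from the fast-interrupt
 * taskqueue.
 */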

/*
 * Process completed commands.
 */
static void
aac_complete(void *context, int pending)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	u_int32_t fib_size;

	sc = (struct aac_softc *)context;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);

	/* pull completed commands off the queue */
	for (;;) {
		/* look for completed FIBs on our queue */
		if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
				    &fib))
			break;	/* nothing to do */

		/* get the command, unmap and hand off for processing */
		cm = sc->aac_commands + fib->Header.SenderData;
		if (cm == NULL) {
			AAC_PRINT_FIB(sc, fib);
			break;
		}
		aac_remove_busy(cm);

		aac_unmap_command(cm);
		cm->cm_flags |= AAC_CMD_COMPLETED;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this command */
			wakeup(cm);
		}
	}

	/* see if we can start some more I/O */
	sc->flags &= ~AAC_QUEUE_FRZN;
	aac_startio(sc);

	mtx_unlock(&sc->aac_io_lock);
}

/*
 * Handle a bio submitted from a disk device.
 */
void
aac_submit_bio(struct bio *bp)
{
	struct aac_disk *ad;
	struct aac_softc *sc;

	ad = (struct aac_disk *)bp->bio_disk->d_drv1;
	sc = ad->ad_controller;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* queue the BIO and try to get some work done */
	aac_enqueue_bio(sc, bp);
	aac_startio(sc);
}

/*
 * Get a bio and build a command to go with it.
 */
static int
aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_disk *ad;
	struct bio *bp;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the resources we will need */
	cm = NULL;
	bp = NULL;
	if (aac_alloc_command(sc, &cm))		/* get a command */
		goto fail;
	if ((bp = aac_dequeue_bio(sc)) == NULL)
		goto fail;

	/* fill out the command */
	cm->cm_data = (void *)bp->bio_data;
	cm->cm_datalen = bp->bio_bcount;
	cm->cm_complete = aac_bio_complete;
	cm->cm_private = bp;
	cm->cm_timestamp = time_uptime;
	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;

	/* build the FIB */
	fib = cm->cm_fib;
	fib->Header.Size = sizeof(struct aac_fib_header);
	fib->Header.XferState =
		AAC_FIBSTATE_HOSTOWNED   |
		AAC_FIBSTATE_INITIALISED |
		AAC_FIBSTATE_EMPTY	 |
		AAC_FIBSTATE_FROMHOST	 |
		AAC_FIBSTATE_REXPECTED   |
		AAC_FIBSTATE_NORM	 |
		AAC_FIBSTATE_ASYNC	 |
		AAC_FIBSTATE_FAST_RESPONSE;

	/* build the read/write request */
	ad = (struct aac_disk *)bp->bio_disk->d_drv1;

	if (sc->flags & AAC_FLAGS_RAW_IO) {
		struct aac_raw_io *raw;
		raw = (struct aac_raw_io *)&fib->data[0];
		fib->Header.Command = RawIo;
		raw->BlockNumber = (u_int64_t)bp->bio_pblkno;
		raw->ByteCount = bp->bio_bcount;
		raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
		raw->BpTotal = 0;
		raw->BpComplete = 0;
		fib->Header.Size += sizeof(struct aac_raw_io);
		cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
		if (bp->bio_cmd == BIO_READ) {
			raw->Flags = 1;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			raw->Flags = 0;
			cm->cm_flags |= AAC_CMD_DATAOUT;
		}
	} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
		fib->Header.Command = ContainerCommand;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread *br;
			br = (struct aac_blockread *)&fib->data[0];
			br->Command = VM_CtBlockRead;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->BlockNumber = bp->bio_pblkno;
			br->ByteCount = bp->bio_bcount;
			fib->Header.Size += sizeof(struct aac_blockread);
			cm->cm_sgtable = &br->SgMap;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			struct aac_blockwrite *bw;
			bw = (struct aac_blockwrite *)&fib->data[0];
			bw->Command = VM_CtBlockWrite;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->BlockNumber = bp->bio_pblkno;
			bw->ByteCount = bp->bio_bcount;
			bw->Stable = CUNSTABLE;
			fib->Header.Size += sizeof(struct aac_blockwrite);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = &bw->SgMap;
		}
	} else {
		fib->Header.Command = ContainerCommand64;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread64 *br;
			br = (struct aac_blockread64 *)&fib->data[0];
			br->Command = VM_CtHostRead64;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			br->BlockNumber = bp->bio_pblkno;
			br->Pad = 0;
			br->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockread64);
			cm->cm_flags |= AAC_CMD_DATAIN;
			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
		} else {
			struct aac_blockwrite64 *bw;
			bw = (struct aac_blockwrite64 *)&fib->data[0];
			bw->Command = VM_CtHostWrite64;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			bw->BlockNumber = bp->bio_pblkno;
			bw->Pad = 0;
			bw->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockwrite64);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
		}
	}

	*cmp = cm;
	return(0);

fail:
	if (bp != NULL)
		aac_enqueue_bio(sc, bp);
	if (cm != NULL)
		aac_release_command(cm);
	return(ENOMEM);
}

/*
 * Handle a bio-instigated command that has been completed.
 */
static void
aac_bio_complete(struct aac_command *cm)
{
	struct aac_blockread_response *brr;
	struct aac_blockwrite_response *bwr;
	struct bio *bp;
	AAC_FSAStatus status;

	/* fetch relevant status and then release the command */
	bp = (struct bio *)cm->cm_private;
	if (bp->bio_cmd == BIO_READ) {
		brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
		status = brr->Status;
	} else {
		bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
		status = bwr->Status;
	}
	aac_release_command(cm);

	/* fix up the bio based on status */
	if (status == ST_OK) {
		bp->bio_resid = 0;
	} else {
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		/* pass an error string out to the disk layer */
		bp->bio_driver1 = aac_describe_code(aac_command_status_table,
						    status);
	}
	aac_biodone(bp);
}

/*
 * Submit a command to the controller, return when it completes.
 * XXX This is very dangerous!  If the card has gone out to lunch, we could
 *     be stuck here forever.  At the same time, signals are not caught
 *     because there is a risk that a signal could wakeup the sleep before
 *     the card has a chance to complete the command.  Since there is no way
 *     to cancel a command that is in progress, we can't protect against the
 *     card completing a command late and spamming the command and data
 *     memory.  So, we are held hostage until the command completes.
 */
static int
aac_wait_command(struct aac_command *cm)
{
	struct aac_softc *sc;
	int error;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Put the command on the ready queue and get things going */
	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
	aac_enqueue_ready(cm);
	aac_startio(sc);
	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0);
	return(error);
}

/*
 * Command Buffer Management
 */

/*
 * Allocate a command.
 */
int
aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((cm = aac_dequeue_free(sc)) == NULL) {
		if (sc->total_fibs < sc->aac_max_fibs) {
			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
			wakeup(sc->aifthread);
		}
		return (EBUSY);
	}

	*cmp = cm;
	return(0);
}

/*
 * Release a command back to the freelist.
 */
void
aac_release_command(struct aac_command *cm)
{
	struct aac_event *event;
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* (re)initialize the command/FIB */
	cm->cm_sgtable = NULL;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
	cm->cm_fib->Header.Flags = 0;
	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;

	/*
	 * These are duplicated in aac_start to cover the case where an
	 * intermediate stage may have destroyed them.  They're left
	 * initialized here for debugging purposes only.
	 */
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
	cm->cm_fib->Header.SenderData = 0;

	aac_enqueue_free(cm);

	/*
	 * Dequeue all events so that there's no risk of events getting
	 * stranded.
	 */
	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
		event->ev_callback(sc, event, event->ev_arg);
	}
}

/*
 * Map helper for command/FIB allocation.
 */
static void
aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint64_t *fibphys;

	fibphys = (uint64_t *)arg;

	*fibphys = segs[0].ds_addr;
}
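
/*
 * FIBs are allocated in contiguous blocks of aac_max_fibs_alloc entries,
 * each block tracked by a struct aac_fibmap.  Since aac_check_firmware()
 * clamps aac_max_fib_size to PAGE_SIZE and sets aac_max_fibs_alloc to
 * PAGE_SIZE / aac_max_fib_size, a block is expected to fit in one page.
 */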

/*
 * Allocate and initialize commands/FIBs for this adapter.
 */
static int
aac_alloc_commands(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fibmap *fm;
	uint64_t fibphys;
	int i, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
		return (ENOMEM);

	fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO);
	if (fm == NULL)
		return (ENOMEM);

	/* allocate the FIBs in DMAable memory and load them */
	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
		device_printf(sc->aac_dev,
			      "Not enough contiguous memory available.\n");
		free(fm, M_AACBUF);
		return (ENOMEM);
	}

	/* Ignore errors since this doesn't bounce */
	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
			      sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
			      aac_map_command_helper, &fibphys, 0);

	/* initialize constant fields in the command structure */
	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
		cm = sc->aac_commands + sc->total_fibs;
		fm->aac_commands = cm;
		cm->cm_sc = sc;
		cm->cm_fib = (struct aac_fib *)
			((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
		cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
		cm->cm_index = sc->total_fibs;

		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
					       &cm->cm_datamap)) != 0)
			break;
		mtx_lock(&sc->aac_io_lock);
		aac_release_command(cm);
		sc->total_fibs++;
		mtx_unlock(&sc->aac_io_lock);
	}

	if (i > 0) {
		mtx_lock(&sc->aac_io_lock);
		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
		mtx_unlock(&sc->aac_io_lock);
		return (0);
	}

	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
	free(fm, M_AACBUF);
	return (ENOMEM);
}

/*
 * Free FIBs owned by this adapter.
 */
static void
aac_free_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_command *cm;
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {

		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
		/*
		 * We check against total_fibs to handle partially
		 * allocated blocks.
		 */
		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
			cm = fm->aac_commands + i;
			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
		}
		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
		free(fm, M_AACBUF);
	}
}
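
/*
 * Three scatter/gather formats are in use below: the raw format for RawIo
 * FIBs, the 32-bit format, and the 64-bit format selected by
 * AAC_FLAGS_SG_64BIT.  In each case the FIB header's Size field is grown
 * by one s/g entry per DMA segment.
 */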

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	cm = (struct aac_command *)arg;
	sc = cm->cm_sc;
	fib = cm->cm_fib;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* copy into the FIB */
	if (cm->cm_sgtable != NULL) {
		if (fib->Header.Command == RawIo) {
			struct aac_sg_tableraw *sg;
			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
				sg->SgEntryRaw[i].Next = 0;
				sg->SgEntryRaw[i].Prev = 0;
				sg->SgEntryRaw[i].Flags = 0;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
			struct aac_sg_table *sg;
			sg = cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
		} else {
			struct aac_sg_table64 *sg;
			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
		}
	}

	/*
	 * Fix up the address values in the FIB.  Use the command array index
	 * instead of a pointer since these fields are only 32 bits.  Shift
	 * the SenderFibAddress over to make room for the fast response bit
	 * and for the AIF bit
	 */
	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;

	/* save a pointer to the command for speedy reverse-lookup */
	cm->cm_fib->Header.SenderData = cm->cm_index;

	if (cm->cm_flags & AAC_CMD_DATAIN)
		bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
				BUS_DMASYNC_PREREAD);
	if (cm->cm_flags & AAC_CMD_DATAOUT)
		bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
				BUS_DMASYNC_PREWRITE);
	cm->cm_flags |= AAC_CMD_MAPPED;

	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		int count = 10000000L;
		while (AAC_SEND_COMMAND(sc, cm) != 0) {
			if (--count == 0) {
				aac_unmap_command(cm);
				sc->flags |= AAC_QUEUE_FRZN;
				aac_requeue_ready(cm);
				/*
				 * Give up; the command has been unmapped and
				 * requeued, so it must not be touched again
				 * here.
				 */
				break;
			}
			DELAY(5);			/* wait 5 usec. */
		}
	} else {
		/* Put the FIB on the outbound queue */
		if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
			aac_unmap_command(cm);
			sc->flags |= AAC_QUEUE_FRZN;
			aac_requeue_ready(cm);
		}
	}

	return;
}
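
/*
 * The PREREAD/PREWRITE syncs issued above are paired with the
 * POSTREAD/POSTWRITE syncs in aac_unmap_command() below; AAC_CMD_MAPPED
 * is what keeps the two sides balanced.
 */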

/*
 * Unmap a command from controller-visible space.
 */
static void
aac_unmap_command(struct aac_command *cm)
{
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (!(cm->cm_flags & AAC_CMD_MAPPED))
		return;

	if (cm->cm_datalen != 0) {
		if (cm->cm_flags & AAC_CMD_DATAIN)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (cm->cm_flags & AAC_CMD_DATAOUT)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
	}
	cm->cm_flags &= ~AAC_CMD_MAPPED;
}

/*
 * Hardware Interface
 */

/*
 * Initialize the adapter.
 */
static void
aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_common_busaddr = segs[0].ds_addr;
}

static int
aac_check_firmware(struct aac_softc *sc)
{
	u_int32_t code, major, minor, options = 0, atu_size = 0;
	int status;
	time_t then;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Wait for the adapter to come ready.
	 */
	then = time_uptime;
	do {
		code = AAC_GET_FWSTATUS(sc);
		if (code & AAC_SELF_TEST_FAILED) {
			device_printf(sc->aac_dev, "FATAL: selftest failed\n");
			return(ENXIO);
		}
		if (code & AAC_KERNEL_PANIC) {
			device_printf(sc->aac_dev,
				      "FATAL: controller kernel panic");
			return(ENXIO);
		}
		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
			device_printf(sc->aac_dev,
				      "FATAL: controller not coming ready, "
				      "status %x\n", code);
			return(ENXIO);
		}
	} while (!(code & AAC_UP_AND_RUNNING));

	/*
	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
	 * firmware version 1.x are not compatible with this driver.
	 */
	if (sc->flags & AAC_FLAGS_PERC2QC) {
		if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
				     NULL)) {
			device_printf(sc->aac_dev,
				      "Error reading firmware version\n");
			return (EIO);
		}

		/* These numbers are stored as ASCII! */
		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
		if (major == 1) {
			device_printf(sc->aac_dev,
			    "Firmware version %d.%d is not supported.\n",
			    major, minor);
			return (EINVAL);
		}
	}

	/*
	 * Retrieve the capabilities/supported options word so we know what
	 * work-arounds to enable.  Some firmware revs don't support this
	 * command.
	 */
	if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
		if (status != AAC_SRB_STS_INVALID_REQUEST) {
			device_printf(sc->aac_dev,
				      "RequestAdapterInfo failed\n");
			return (EIO);
		}
	} else {
		options = AAC_GET_MAILBOX(sc, 1);
		atu_size = AAC_GET_MAILBOX(sc, 2);
		sc->supported_options = options;

		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
			sc->flags |= AAC_FLAGS_4GB_WINDOW;
		if (options & AAC_SUPPORTED_NONDASD)
			sc->flags |= AAC_FLAGS_ENABLE_CAM;
		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
		    && (sizeof(bus_addr_t) > 4)) {
			device_printf(sc->aac_dev,
				      "Enabling 64-bit address support\n");
			sc->flags |= AAC_FLAGS_SG_64BIT;
		}
		if ((options & AAC_SUPPORTED_NEW_COMM)
		    && sc->aac_if.aif_send_command)
			sc->flags |= AAC_FLAGS_NEW_COMM;
		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
	}

	/* Check for broken hardware that does a lower number of commands */
	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);

	/* Remap mem. resource, if required */
	if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
	    atu_size > rman_get_size(sc->aac_regs_resource)) {
		bus_release_resource(
			sc->aac_dev, SYS_RES_MEMORY,
			sc->aac_regs_rid, sc->aac_regs_resource);
		sc->aac_regs_resource = bus_alloc_resource(
			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid,
			0ul, ~0ul, atu_size, RF_ACTIVE);
		if (sc->aac_regs_resource == NULL) {
			sc->aac_regs_resource = bus_alloc_resource_any(
				sc->aac_dev, SYS_RES_MEMORY,
				&sc->aac_regs_rid, RF_ACTIVE);
			if (sc->aac_regs_resource == NULL) {
				device_printf(sc->aac_dev,
				    "couldn't allocate register window\n");
				return (ENXIO);
			}
			sc->flags &= ~AAC_FLAGS_NEW_COMM;
		}
		sc->aac_btag = rman_get_bustag(sc->aac_regs_resource);
		sc->aac_bhandle = rman_get_bushandle(sc->aac_regs_resource);
	}

	/* Read preferred settings */
	sc->aac_max_fib_size = sizeof(struct aac_fib);
	sc->aac_max_sectors = 128;				/* 64KB */
	if (sc->flags & AAC_FLAGS_SG_64BIT)
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
					- sizeof(struct aac_blockwrite64))
					/ sizeof(struct aac_sg_entry64);
	else
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
					- sizeof(struct aac_blockwrite))
					/ sizeof(struct aac_sg_entry);

	if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
		options = AAC_GET_MAILBOX(sc, 1);
		sc->aac_max_fib_size = (options & 0xFFFF);
		sc->aac_max_sectors = (options >> 16) << 1;
		options = AAC_GET_MAILBOX(sc, 2);
		sc->aac_sg_tablesize = (options >> 16);
		options = AAC_GET_MAILBOX(sc, 3);
		sc->aac_max_fibs = (options & 0xFFFF);
	}
	if (sc->aac_max_fib_size > PAGE_SIZE)
		sc->aac_max_fib_size = PAGE_SIZE;
	sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;

	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		sc->flags |= AAC_FLAGS_RAW_IO;
		device_printf(sc->aac_dev, "Enable Raw I/O\n");
	}
	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
		sc->flags |= AAC_FLAGS_LBA_64BIT;
		device_printf(sc->aac_dev, "Enable 64-bit array\n");
	}

	return (0);
}

static int
aac_init(struct aac_softc *sc)
{
	struct aac_adapter_init	*ip;
	u_int32_t qoffset;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Fill in the init structure.  This tells the adapter about the
	 * physical location of various important shared data structures.
	 */
	ip = &sc->aac_common->ac_init;
	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
		sc->flags |= AAC_FLAGS_RAW_IO;
	}
	ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;

	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
					 offsetof(struct aac_common, ac_fibs);
	ip->AdapterFibsVirtualAddress = 0;
	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
	ip->AdapterFibAlign = sizeof(struct aac_fib);

	ip->PrintfBufferAddress = sc->aac_common_busaddr +
				  offsetof(struct aac_common, ac_printf);
	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;

	/*
	 * The adapter assumes that pages are 4K in size, except on some
	 * broken firmware versions that do the page->byte conversion twice,
	 * therefore 'assuming' that this value is in 16MB units (2^24).
	 * Round up since the granularity is so high.
	 */
	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
		ip->HostPhysMemPages =
		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
	}
	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */

	ip->InitFlags = 0;
	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		ip->InitFlags = INITFLAGS_NEW_COMM_SUPPORTED;
		device_printf(sc->aac_dev, "New comm. interface enabled\n");
	}

	ip->MaxIoCommands = sc->aac_max_fibs;
	ip->MaxIoSize = sc->aac_max_sectors << 9;
	ip->MaxFibSize = sc->aac_max_fib_size;

	/*
	 * Initialize FIB queues.  Note that it appears that the layout of the
	 * indexes and the segmentation of the entries may be mandated by the
	 * adapter, which is only told about the base of the queue index fields.
	 *
	 * The initial values of the indices are assumed to inform the adapter
	 * of the sizes of the respective queues, and theoretically it could
	 * work out the entire layout of the queue structures from this.  We
	 * take the easy route and just lay this area out like everyone else
	 * does.
	 *
	 * The Linux driver uses a much more complex scheme whereby several
	 * header records are kept for each queue.  We use a couple of generic
	 * list manipulation functions which 'know' the size of each list by
	 * virtue of a table.
	 */
1885 */ 1886 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN; 1887 qoffset &= ~(AAC_QUEUE_ALIGN - 1); 1888 sc->aac_queues = 1889 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset); 1890 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset; 1891 1892 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1893 AAC_HOST_NORM_CMD_ENTRIES; 1894 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1895 AAC_HOST_NORM_CMD_ENTRIES; 1896 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1897 AAC_HOST_HIGH_CMD_ENTRIES; 1898 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1899 AAC_HOST_HIGH_CMD_ENTRIES; 1900 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1901 AAC_ADAP_NORM_CMD_ENTRIES; 1902 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1903 AAC_ADAP_NORM_CMD_ENTRIES; 1904 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1905 AAC_ADAP_HIGH_CMD_ENTRIES; 1906 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1907 AAC_ADAP_HIGH_CMD_ENTRIES; 1908 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1909 AAC_HOST_NORM_RESP_ENTRIES; 1910 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1911 AAC_HOST_NORM_RESP_ENTRIES; 1912 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1913 AAC_HOST_HIGH_RESP_ENTRIES; 1914 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1915 AAC_HOST_HIGH_RESP_ENTRIES; 1916 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1917 AAC_ADAP_NORM_RESP_ENTRIES; 1918 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1919 AAC_ADAP_NORM_RESP_ENTRIES; 1920 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1921 AAC_ADAP_HIGH_RESP_ENTRIES; 1922 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1923 AAC_ADAP_HIGH_RESP_ENTRIES; 1924 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = 1925 &sc->aac_queues->qt_HostNormCmdQueue[0]; 1926 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = 1927 &sc->aac_queues->qt_HostHighCmdQueue[0]; 1928 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = 1929 &sc->aac_queues->qt_AdapNormCmdQueue[0]; 1930 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = 1931 &sc->aac_queues->qt_AdapHighCmdQueue[0]; 1932 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = 1933 &sc->aac_queues->qt_HostNormRespQueue[0]; 1934 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = 1935 &sc->aac_queues->qt_HostHighRespQueue[0]; 1936 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = 1937 &sc->aac_queues->qt_AdapNormRespQueue[0]; 1938 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = 1939 &sc->aac_queues->qt_AdapHighRespQueue[0]; 1940 1941 /* 1942 * Do controller-type-specific initialisation 1943 */ 1944 switch (sc->aac_hwif) { 1945 case AAC_HWIF_I960RX: 1946 AAC_SETREG4(sc, AAC_RX_ODBR, ~0); 1947 break; 1948 case AAC_HWIF_RKT: 1949 AAC_SETREG4(sc, AAC_RKT_ODBR, ~0); 1950 break; 1951 default: 1952 break; 1953 } 1954 1955 /* 1956 * Give the init structure to the controller. 
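 * The bus address of ac_init is passed as the first mailbox argument of the
 * AAC_MONKER_INITSTRUCT synchronous command below; the adapter is expected
 * to fetch the structure from host memory itself, so nothing beyond that
 * address is transferred here.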
1957 */ 1958 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, 1959 sc->aac_common_busaddr + 1960 offsetof(struct aac_common, ac_init), 0, 0, 0, 1961 NULL)) { 1962 device_printf(sc->aac_dev, 1963 "error establishing init structure\n"); 1964 error = EIO; 1965 goto out; 1966 } 1967 1968 error = 0; 1969 out: 1970 return(error); 1971 } 1972 1973 static int 1974 aac_setup_intr(struct aac_softc *sc) 1975 { 1976 sc->aac_irq_rid = 0; 1977 if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, 1978 &sc->aac_irq_rid, 1979 RF_SHAREABLE | 1980 RF_ACTIVE)) == NULL) { 1981 device_printf(sc->aac_dev, "can't allocate interrupt\n"); 1982 return (EINVAL); 1983 } 1984 if (sc->flags & AAC_FLAGS_NEW_COMM) { 1985 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 1986 INTR_MPSAFE|INTR_TYPE_BIO, NULL, 1987 aac_new_intr, sc, &sc->aac_intr)) { 1988 device_printf(sc->aac_dev, "can't set up interrupt\n"); 1989 return (EINVAL); 1990 } 1991 } else { 1992 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 1993 INTR_TYPE_BIO, aac_fast_intr, NULL, 1994 sc, &sc->aac_intr)) { 1995 device_printf(sc->aac_dev, 1996 "can't set up FAST interrupt\n"); 1997 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 1998 INTR_MPSAFE|INTR_TYPE_BIO, 1999 NULL, (driver_intr_t *)aac_fast_intr, 2000 sc, &sc->aac_intr)) { 2001 device_printf(sc->aac_dev, 2002 "can't set up MPSAFE interrupt\n"); 2003 return (EINVAL); 2004 } 2005 } 2006 } 2007 return (0); 2008 } 2009 2010 /* 2011 * Send a synchronous command to the controller and wait for a result. 2012 * Indicate if the controller completed the command with an error status. 2013 */ 2014 static int 2015 aac_sync_command(struct aac_softc *sc, u_int32_t command, 2016 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, 2017 u_int32_t *sp) 2018 { 2019 time_t then; 2020 u_int32_t status; 2021 2022 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2023 2024 /* populate the mailbox */ 2025 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); 2026 2027 /* ensure the sync command doorbell flag is cleared */ 2028 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2029 2030 /* then set it to signal the adapter */ 2031 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); 2032 2033 /* spin waiting for the command to complete */ 2034 then = time_uptime; 2035 do { 2036 if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) { 2037 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); 2038 return(EIO); 2039 } 2040 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); 2041 2042 /* clear the completion flag */ 2043 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2044 2045 /* get the command status */ 2046 status = AAC_GET_MAILBOX(sc, 0); 2047 if (sp != NULL) 2048 *sp = status; 2049 2050 if (status != AAC_SRB_STS_SUCCESS) 2051 return (-1); 2052 return(0); 2053 } 2054 2055 int 2056 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, 2057 struct aac_fib *fib, u_int16_t datasize) 2058 { 2059 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2060 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2061 2062 if (datasize > AAC_FIB_DATASIZE) 2063 return(EINVAL); 2064 2065 /* 2066 * Set up the sync FIB 2067 */ 2068 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | 2069 AAC_FIBSTATE_INITIALISED | 2070 AAC_FIBSTATE_EMPTY; 2071 fib->Header.XferState |= xferstate; 2072 fib->Header.Command = command; 2073 fib->Header.StructType = AAC_FIBTYPE_TFIB; 2074 fib->Header.Size = sizeof(struct aac_fib_header) + datasize; 2075 fib->Header.SenderSize = sizeof(struct aac_fib); 2076 fib->Header.SenderFibAddress = 0; /* Not needed */ 2077 
fib->Header.ReceiverFibAddress = sc->aac_common_busaddr + 2078 offsetof(struct aac_common, 2079 ac_sync_fib); 2080 2081 /* 2082 * Give the FIB to the controller, wait for a response. 2083 */ 2084 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, 2085 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) { 2086 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); 2087 return(EIO); 2088 } 2089 2090 return (0); 2091 } 2092 2093 /* 2094 * Adapter-space FIB queue manipulation 2095 * 2096 * Note that the queue implementation here is a little funky; neither the PI or 2097 * CI will ever be zero. This behaviour is a controller feature. 2098 */ 2099 static struct { 2100 int size; 2101 int notify; 2102 } aac_qinfo[] = { 2103 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 2104 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 2105 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 2106 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 2107 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 2108 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 2109 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 2110 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 2111 }; 2112 2113 /* 2114 * Atomically insert an entry into the nominated queue, returns 0 on success or 2115 * EBUSY if the queue is full. 2116 * 2117 * Note: it would be more efficient to defer notifying the controller in 2118 * the case where we may be inserting several entries in rapid succession, 2119 * but implementing this usefully may be difficult (it would involve a 2120 * separate queue/notify interface). 2121 */ 2122 static int 2123 aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm) 2124 { 2125 u_int32_t pi, ci; 2126 int error; 2127 u_int32_t fib_size; 2128 u_int32_t fib_addr; 2129 2130 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2131 2132 fib_size = cm->cm_fib->Header.Size; 2133 fib_addr = cm->cm_fib->Header.ReceiverFibAddress; 2134 2135 /* get the producer/consumer indices */ 2136 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2137 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2138 2139 /* wrap the queue? */ 2140 if (pi >= aac_qinfo[queue].size) 2141 pi = 0; 2142 2143 /* check for queue full */ 2144 if ((pi + 1) == ci) { 2145 error = EBUSY; 2146 goto out; 2147 } 2148 2149 /* 2150 * To avoid a race with its completion interrupt, place this command on 2151 * the busy queue prior to advertising it to the controller. 2152 */ 2153 aac_enqueue_busy(cm); 2154 2155 /* populate queue entry */ 2156 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2157 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2158 2159 /* update producer index */ 2160 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2161 2162 /* notify the adapter if we know how */ 2163 if (aac_qinfo[queue].notify != 0) 2164 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2165 2166 error = 0; 2167 2168 out: 2169 return(error); 2170 } 2171 2172 /* 2173 * Atomically remove one entry from the nominated queue, returns 0 on 2174 * success or ENOENT if the queue is empty. 
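 *
 * For the host response queues the 32-bit aq_fib_addr in each entry is not
 * a bus address at all; it is the SenderFibAddress cookie the driver placed
 * in the FIB (the command index shifted left by two), with bit 0 apparently
 * doubling as the adapter's "fast response" hint.  The decoding below
 * depends on this.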
2175 */ 2176 static int 2177 aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, 2178 struct aac_fib **fib_addr) 2179 { 2180 u_int32_t pi, ci; 2181 u_int32_t fib_index; 2182 int error; 2183 int notify; 2184 2185 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2186 2187 /* get the producer/consumer indices */ 2188 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2189 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2190 2191 /* check for queue empty */ 2192 if (ci == pi) { 2193 error = ENOENT; 2194 goto out; 2195 } 2196 2197 /* wrap the pi so the following test works */ 2198 if (pi >= aac_qinfo[queue].size) 2199 pi = 0; 2200 2201 notify = 0; 2202 if (ci == pi + 1) 2203 notify++; 2204 2205 /* wrap the queue? */ 2206 if (ci >= aac_qinfo[queue].size) 2207 ci = 0; 2208 2209 /* fetch the entry */ 2210 *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size; 2211 2212 switch (queue) { 2213 case AAC_HOST_NORM_CMD_QUEUE: 2214 case AAC_HOST_HIGH_CMD_QUEUE: 2215 /* 2216 * The aq_fib_addr is only 32 bits wide so it can't be counted 2217 * on to hold an address. For AIF's, the adapter assumes 2218 * that it's giving us an address into the array of AIF fibs. 2219 * Therefore, we have to convert it to an index. 2220 */ 2221 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr / 2222 sizeof(struct aac_fib); 2223 *fib_addr = &sc->aac_common->ac_fibs[fib_index]; 2224 break; 2225 2226 case AAC_HOST_NORM_RESP_QUEUE: 2227 case AAC_HOST_HIGH_RESP_QUEUE: 2228 { 2229 struct aac_command *cm; 2230 2231 /* 2232 * As above, an index is used instead of an actual address. 2233 * Gotta shift the index to account for the fast response 2234 * bit. No other correction is needed since this value was 2235 * originally provided by the driver via the SenderFibAddress 2236 * field. 2237 */ 2238 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr; 2239 cm = sc->aac_commands + (fib_index >> 2); 2240 *fib_addr = cm->cm_fib; 2241 2242 /* 2243 * Is this a fast response? If it is, update the fib fields in 2244 * local memory since the whole fib isn't DMA'd back up. 2245 */ 2246 if (fib_index & 0x01) { 2247 (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP; 2248 *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL; 2249 } 2250 break; 2251 } 2252 default: 2253 panic("Invalid queue in aac_dequeue_fib()"); 2254 break; 2255 } 2256 2257 /* update consumer index */ 2258 sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1; 2259 2260 /* if we have made the queue un-full, notify the adapter */ 2261 if (notify && (aac_qinfo[queue].notify != 0)) 2262 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2263 error = 0; 2264 2265 out: 2266 return(error); 2267 } 2268 2269 /* 2270 * Put our response to an Adapter Initialed Fib on the response queue 2271 */ 2272 static int 2273 aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib) 2274 { 2275 u_int32_t pi, ci; 2276 int error; 2277 u_int32_t fib_size; 2278 u_int32_t fib_addr; 2279 2280 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2281 2282 /* Tell the adapter where the FIB is */ 2283 fib_size = fib->Header.Size; 2284 fib_addr = fib->Header.SenderFibAddress; 2285 fib->Header.ReceiverFibAddress = fib_addr; 2286 2287 /* get the producer/consumer indices */ 2288 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2289 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2290 2291 /* wrap the queue? 
*/ 2292 if (pi >= aac_qinfo[queue].size) 2293 pi = 0; 2294 2295 /* check for queue full */ 2296 if ((pi + 1) == ci) { 2297 error = EBUSY; 2298 goto out; 2299 } 2300 2301 /* populate queue entry */ 2302 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2303 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2304 2305 /* update producer index */ 2306 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2307 2308 /* notify the adapter if we know how */ 2309 if (aac_qinfo[queue].notify != 0) 2310 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2311 2312 error = 0; 2313 2314 out: 2315 return(error); 2316 } 2317 2318 /* 2319 * Check for commands that have been outstanding for a suspiciously long time, 2320 * and complain about them. 2321 */ 2322 static void 2323 aac_timeout(struct aac_softc *sc) 2324 { 2325 struct aac_command *cm; 2326 time_t deadline; 2327 int timedout, code; 2328 2329 /* 2330 * Traverse the busy command list, bitch about late commands once 2331 * only. 2332 */ 2333 timedout = 0; 2334 deadline = time_uptime - AAC_CMD_TIMEOUT; 2335 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { 2336 if ((cm->cm_timestamp < deadline) 2337 /* && !(cm->cm_flags & AAC_CMD_TIMEDOUT) */) { 2338 cm->cm_flags |= AAC_CMD_TIMEDOUT; 2339 device_printf(sc->aac_dev, 2340 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", 2341 cm, (int)(time_uptime-cm->cm_timestamp)); 2342 AAC_PRINT_FIB(sc, cm->cm_fib); 2343 timedout++; 2344 } 2345 } 2346 2347 if (timedout) { 2348 code = AAC_GET_FWSTATUS(sc); 2349 if (code != AAC_UP_AND_RUNNING) { 2350 device_printf(sc->aac_dev, "WARNING! Controller is no " 2351 "longer running! code= 0x%x\n", code); 2352 } 2353 } 2354 return; 2355 } 2356 2357 /* 2358 * Interface Function Vectors 2359 */ 2360 2361 /* 2362 * Read the current firmware status word. 
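 *
 * The same pattern repeats for all four bridge families below (StrongARM,
 * i960Rx, Falcon and Rocket): the accessors differ essentially only in the
 * register offsets handed to AAC_GETREG2()/AAC_GETREG4() and friends, plus
 * the Falcon's AAC_FA_HACK() workaround, and are wired into the per-family
 * aac_interface vectors.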
2363 */ 2364 static int 2365 aac_sa_get_fwstatus(struct aac_softc *sc) 2366 { 2367 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2368 2369 return(AAC_GETREG4(sc, AAC_SA_FWSTATUS)); 2370 } 2371 2372 static int 2373 aac_rx_get_fwstatus(struct aac_softc *sc) 2374 { 2375 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2376 2377 return(AAC_GETREG4(sc, AAC_RX_FWSTATUS)); 2378 } 2379 2380 static int 2381 aac_fa_get_fwstatus(struct aac_softc *sc) 2382 { 2383 int val; 2384 2385 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2386 2387 val = AAC_GETREG4(sc, AAC_FA_FWSTATUS); 2388 return (val); 2389 } 2390 2391 static int 2392 aac_rkt_get_fwstatus(struct aac_softc *sc) 2393 { 2394 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2395 2396 return(AAC_GETREG4(sc, AAC_RKT_FWSTATUS)); 2397 } 2398 2399 /* 2400 * Notify the controller of a change in a given queue 2401 */ 2402 2403 static void 2404 aac_sa_qnotify(struct aac_softc *sc, int qbit) 2405 { 2406 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2407 2408 AAC_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); 2409 } 2410 2411 static void 2412 aac_rx_qnotify(struct aac_softc *sc, int qbit) 2413 { 2414 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2415 2416 AAC_SETREG4(sc, AAC_RX_IDBR, qbit); 2417 } 2418 2419 static void 2420 aac_fa_qnotify(struct aac_softc *sc, int qbit) 2421 { 2422 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2423 2424 AAC_SETREG2(sc, AAC_FA_DOORBELL1, qbit); 2425 AAC_FA_HACK(sc); 2426 } 2427 2428 static void 2429 aac_rkt_qnotify(struct aac_softc *sc, int qbit) 2430 { 2431 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2432 2433 AAC_SETREG4(sc, AAC_RKT_IDBR, qbit); 2434 } 2435 2436 /* 2437 * Get the interrupt reason bits 2438 */ 2439 static int 2440 aac_sa_get_istatus(struct aac_softc *sc) 2441 { 2442 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2443 2444 return(AAC_GETREG2(sc, AAC_SA_DOORBELL0)); 2445 } 2446 2447 static int 2448 aac_rx_get_istatus(struct aac_softc *sc) 2449 { 2450 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2451 2452 return(AAC_GETREG4(sc, AAC_RX_ODBR)); 2453 } 2454 2455 static int 2456 aac_fa_get_istatus(struct aac_softc *sc) 2457 { 2458 int val; 2459 2460 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2461 2462 val = AAC_GETREG2(sc, AAC_FA_DOORBELL0); 2463 return (val); 2464 } 2465 2466 static int 2467 aac_rkt_get_istatus(struct aac_softc *sc) 2468 { 2469 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2470 2471 return(AAC_GETREG4(sc, AAC_RKT_ODBR)); 2472 } 2473 2474 /* 2475 * Clear some interrupt reason bits 2476 */ 2477 static void 2478 aac_sa_clear_istatus(struct aac_softc *sc, int mask) 2479 { 2480 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2481 2482 AAC_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask); 2483 } 2484 2485 static void 2486 aac_rx_clear_istatus(struct aac_softc *sc, int mask) 2487 { 2488 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2489 2490 AAC_SETREG4(sc, AAC_RX_ODBR, mask); 2491 } 2492 2493 static void 2494 aac_fa_clear_istatus(struct aac_softc *sc, int mask) 2495 { 2496 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2497 2498 AAC_SETREG2(sc, AAC_FA_DOORBELL0_CLEAR, mask); 2499 AAC_FA_HACK(sc); 2500 } 2501 2502 static void 2503 aac_rkt_clear_istatus(struct aac_softc *sc, int mask) 2504 { 2505 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2506 2507 AAC_SETREG4(sc, AAC_RKT_ODBR, mask); 2508 } 2509 2510 /* 2511 * Populate the mailbox and set the command word 2512 */ 2513 static void 2514 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, 2515 
u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2516 { 2517 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2518 2519 AAC_SETREG4(sc, AAC_SA_MAILBOX, command); 2520 AAC_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0); 2521 AAC_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1); 2522 AAC_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2); 2523 AAC_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3); 2524 } 2525 2526 static void 2527 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, 2528 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2529 { 2530 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2531 2532 AAC_SETREG4(sc, AAC_RX_MAILBOX, command); 2533 AAC_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0); 2534 AAC_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1); 2535 AAC_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2); 2536 AAC_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3); 2537 } 2538 2539 static void 2540 aac_fa_set_mailbox(struct aac_softc *sc, u_int32_t command, 2541 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2542 { 2543 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2544 2545 AAC_SETREG4(sc, AAC_FA_MAILBOX, command); 2546 AAC_FA_HACK(sc); 2547 AAC_SETREG4(sc, AAC_FA_MAILBOX + 4, arg0); 2548 AAC_FA_HACK(sc); 2549 AAC_SETREG4(sc, AAC_FA_MAILBOX + 8, arg1); 2550 AAC_FA_HACK(sc); 2551 AAC_SETREG4(sc, AAC_FA_MAILBOX + 12, arg2); 2552 AAC_FA_HACK(sc); 2553 AAC_SETREG4(sc, AAC_FA_MAILBOX + 16, arg3); 2554 AAC_FA_HACK(sc); 2555 } 2556 2557 static void 2558 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, 2559 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2560 { 2561 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2562 2563 AAC_SETREG4(sc, AAC_RKT_MAILBOX, command); 2564 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0); 2565 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1); 2566 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2); 2567 AAC_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3); 2568 } 2569 2570 /* 2571 * Fetch the immediate command status word 2572 */ 2573 static int 2574 aac_sa_get_mailbox(struct aac_softc *sc, int mb) 2575 { 2576 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2577 2578 return(AAC_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4))); 2579 } 2580 2581 static int 2582 aac_rx_get_mailbox(struct aac_softc *sc, int mb) 2583 { 2584 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2585 2586 return(AAC_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4))); 2587 } 2588 2589 static int 2590 aac_fa_get_mailbox(struct aac_softc *sc, int mb) 2591 { 2592 int val; 2593 2594 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2595 2596 val = AAC_GETREG4(sc, AAC_FA_MAILBOX + (mb * 4)); 2597 return (val); 2598 } 2599 2600 static int 2601 aac_rkt_get_mailbox(struct aac_softc *sc, int mb) 2602 { 2603 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2604 2605 return(AAC_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4))); 2606 } 2607 2608 /* 2609 * Set/clear interrupt masks 2610 */ 2611 static void 2612 aac_sa_set_interrupts(struct aac_softc *sc, int enable) 2613 { 2614 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2615 2616 if (enable) { 2617 AAC_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS); 2618 } else { 2619 AAC_SETREG2((sc), AAC_SA_MASK0_SET, ~0); 2620 } 2621 } 2622 2623 static void 2624 aac_rx_set_interrupts(struct aac_softc *sc, int enable) 2625 { 2626 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? 
"en" : "dis"); 2627 2628 if (enable) { 2629 if (sc->flags & AAC_FLAGS_NEW_COMM) 2630 AAC_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM); 2631 else 2632 AAC_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS); 2633 } else { 2634 AAC_SETREG4(sc, AAC_RX_OIMR, ~0); 2635 } 2636 } 2637 2638 static void 2639 aac_fa_set_interrupts(struct aac_softc *sc, int enable) 2640 { 2641 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2642 2643 if (enable) { 2644 AAC_SETREG2((sc), AAC_FA_MASK0_CLEAR, AAC_DB_INTERRUPTS); 2645 AAC_FA_HACK(sc); 2646 } else { 2647 AAC_SETREG2((sc), AAC_FA_MASK0, ~0); 2648 AAC_FA_HACK(sc); 2649 } 2650 } 2651 2652 static void 2653 aac_rkt_set_interrupts(struct aac_softc *sc, int enable) 2654 { 2655 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2656 2657 if (enable) { 2658 if (sc->flags & AAC_FLAGS_NEW_COMM) 2659 AAC_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM); 2660 else 2661 AAC_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS); 2662 } else { 2663 AAC_SETREG4(sc, AAC_RKT_OIMR, ~0); 2664 } 2665 } 2666 2667 /* 2668 * New comm. interface: Send command functions 2669 */ 2670 static int 2671 aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm) 2672 { 2673 u_int32_t index, device; 2674 2675 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); 2676 2677 index = AAC_GETREG4(sc, AAC_RX_IQUE); 2678 if (index == 0xffffffffL) 2679 index = AAC_GETREG4(sc, AAC_RX_IQUE); 2680 if (index == 0xffffffffL) 2681 return index; 2682 aac_enqueue_busy(cm); 2683 device = index; 2684 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2685 device += 4; 2686 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2687 device += 4; 2688 AAC_SETREG4(sc, device, cm->cm_fib->Header.Size); 2689 AAC_SETREG4(sc, AAC_RX_IQUE, index); 2690 return 0; 2691 } 2692 2693 static int 2694 aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm) 2695 { 2696 u_int32_t index, device; 2697 2698 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); 2699 2700 index = AAC_GETREG4(sc, AAC_RKT_IQUE); 2701 if (index == 0xffffffffL) 2702 index = AAC_GETREG4(sc, AAC_RKT_IQUE); 2703 if (index == 0xffffffffL) 2704 return index; 2705 aac_enqueue_busy(cm); 2706 device = index; 2707 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2708 device += 4; 2709 AAC_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2710 device += 4; 2711 AAC_SETREG4(sc, device, cm->cm_fib->Header.Size); 2712 AAC_SETREG4(sc, AAC_RKT_IQUE, index); 2713 return 0; 2714 } 2715 2716 /* 2717 * New comm. 
interface: get, set outbound queue index 2718 */ 2719 static int 2720 aac_rx_get_outb_queue(struct aac_softc *sc) 2721 { 2722 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2723 2724 return(AAC_GETREG4(sc, AAC_RX_OQUE)); 2725 } 2726 2727 static int 2728 aac_rkt_get_outb_queue(struct aac_softc *sc) 2729 { 2730 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2731 2732 return(AAC_GETREG4(sc, AAC_RKT_OQUE)); 2733 } 2734 2735 static void 2736 aac_rx_set_outb_queue(struct aac_softc *sc, int index) 2737 { 2738 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2739 2740 AAC_SETREG4(sc, AAC_RX_OQUE, index); 2741 } 2742 2743 static void 2744 aac_rkt_set_outb_queue(struct aac_softc *sc, int index) 2745 { 2746 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2747 2748 AAC_SETREG4(sc, AAC_RKT_OQUE, index); 2749 } 2750 2751 /* 2752 * Debugging and Diagnostics 2753 */ 2754 2755 /* 2756 * Print some information about the controller. 2757 */ 2758 static void 2759 aac_describe_controller(struct aac_softc *sc) 2760 { 2761 struct aac_fib *fib; 2762 struct aac_adapter_info *info; 2763 char *adapter_type = "Adaptec RAID controller"; 2764 2765 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2766 2767 mtx_lock(&sc->aac_io_lock); 2768 aac_alloc_sync_fib(sc, &fib); 2769 2770 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { 2771 fib->data[0] = 0; 2772 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) 2773 device_printf(sc->aac_dev, 2774 "RequestSupplementAdapterInfo failed\n"); 2775 else 2776 adapter_type = ((struct aac_supplement_adapter_info *) 2777 &fib->data[0])->AdapterTypeText; 2778 } 2779 device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n", 2780 adapter_type, 2781 AAC_DRIVER_VERSION >> 24, 2782 (AAC_DRIVER_VERSION >> 16) & 0xFF, 2783 AAC_DRIVER_VERSION & 0xFF, 2784 AAC_DRIVER_BUILD); 2785 2786 fib->data[0] = 0; 2787 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { 2788 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); 2789 aac_release_sync_fib(sc); 2790 mtx_unlock(&sc->aac_io_lock); 2791 return; 2792 } 2793 2794 /* save the kernel revision structure for later use */ 2795 info = (struct aac_adapter_info *)&fib->data[0]; 2796 sc->aac_revision = info->KernelRevision; 2797 2798 2799 if (bootverbose) { 2800 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " 2801 "(%dMB cache, %dMB execution), %s\n", 2802 aac_describe_code(aac_cpu_variant, info->CpuVariant), 2803 info->ClockSpeed, info->TotalMem / (1024 * 1024), 2804 info->BufferMem / (1024 * 1024), 2805 info->ExecutionMem / (1024 * 1024), 2806 aac_describe_code(aac_battery_platform, 2807 info->batteryPlatform)); 2808 2809 device_printf(sc->aac_dev, 2810 "Kernel %d.%d-%d, Build %d, S/N %6X\n", 2811 info->KernelRevision.external.comp.major, 2812 info->KernelRevision.external.comp.minor, 2813 info->KernelRevision.external.comp.dash, 2814 info->KernelRevision.buildNumber, 2815 (u_int32_t)(info->SerialNumber & 0xffffff)); 2816 2817 device_printf(sc->aac_dev, "Supported Options=%b\n", 2818 sc->supported_options, 2819 "\20" 2820 "\1SNAPSHOT" 2821 "\2CLUSTERS" 2822 "\3WCACHE" 2823 "\4DATA64" 2824 "\5HOSTTIME" 2825 "\6RAID50" 2826 "\7WINDOW4GB" 2827 "\10SCSIUPGD" 2828 "\11SOFTERR" 2829 "\12NORECOND" 2830 "\13SGMAP64" 2831 "\14ALARM" 2832 "\15NONDASD" 2833 "\16SCSIMGT" 2834 "\17RAIDSCSI" 2835 "\21ADPTINFO" 2836 "\22NEWCOMM" 2837 "\23ARRAY64BIT" 2838 "\24HEATSENSOR"); 2839 } 2840 aac_release_sync_fib(sc); 2841 mtx_unlock(&sc->aac_io_lock); 2842 } 2843 2844 /* 2845 * Look up a text description of a numeric error 
code and return a pointer to 2846 * same. 2847 */ 2848 static char * 2849 aac_describe_code(struct aac_code_lookup *table, u_int32_t code) 2850 { 2851 int i; 2852 2853 for (i = 0; table[i].string != NULL; i++) 2854 if (table[i].code == code) 2855 return(table[i].string); 2856 return(table[i + 1].string); 2857 } 2858 2859 /* 2860 * Management Interface 2861 */ 2862 2863 static int 2864 aac_open(struct cdev *dev, int flags, int fmt, d_thread_t *td) 2865 { 2866 struct aac_softc *sc; 2867 2868 sc = dev->si_drv1; 2869 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2870 sc->aac_open_cnt++; 2871 sc->aac_state |= AAC_STATE_OPEN; 2872 2873 return 0; 2874 } 2875 2876 static int 2877 aac_close(struct cdev *dev, int flags, int fmt, d_thread_t *td) 2878 { 2879 struct aac_softc *sc; 2880 2881 sc = dev->si_drv1; 2882 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2883 sc->aac_open_cnt--; 2884 /* Mark this unit as no longer open */ 2885 if (sc->aac_open_cnt == 0) 2886 sc->aac_state &= ~AAC_STATE_OPEN; 2887 2888 return 0; 2889 } 2890 2891 static int 2892 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td) 2893 { 2894 union aac_statrequest *as; 2895 struct aac_softc *sc; 2896 int error = 0; 2897 2898 as = (union aac_statrequest *)arg; 2899 sc = dev->si_drv1; 2900 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2901 2902 switch (cmd) { 2903 case AACIO_STATS: 2904 switch (as->as_item) { 2905 case AACQ_FREE: 2906 case AACQ_BIO: 2907 case AACQ_READY: 2908 case AACQ_BUSY: 2909 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, 2910 sizeof(struct aac_qstat)); 2911 break; 2912 default: 2913 error = ENOENT; 2914 break; 2915 } 2916 break; 2917 2918 case FSACTL_SENDFIB: 2919 case FSACTL_SEND_LARGE_FIB: 2920 arg = *(caddr_t*)arg; 2921 case FSACTL_LNX_SENDFIB: 2922 case FSACTL_LNX_SEND_LARGE_FIB: 2923 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); 2924 error = aac_ioctl_sendfib(sc, arg); 2925 break; 2926 case FSACTL_SEND_RAW_SRB: 2927 arg = *(caddr_t*)arg; 2928 case FSACTL_LNX_SEND_RAW_SRB: 2929 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); 2930 error = aac_ioctl_send_raw_srb(sc, arg); 2931 break; 2932 case FSACTL_AIF_THREAD: 2933 case FSACTL_LNX_AIF_THREAD: 2934 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); 2935 error = EINVAL; 2936 break; 2937 case FSACTL_OPEN_GET_ADAPTER_FIB: 2938 arg = *(caddr_t*)arg; 2939 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: 2940 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); 2941 error = aac_open_aif(sc, arg); 2942 break; 2943 case FSACTL_GET_NEXT_ADAPTER_FIB: 2944 arg = *(caddr_t*)arg; 2945 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: 2946 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); 2947 error = aac_getnext_aif(sc, arg); 2948 break; 2949 case FSACTL_CLOSE_GET_ADAPTER_FIB: 2950 arg = *(caddr_t*)arg; 2951 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: 2952 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); 2953 error = aac_close_aif(sc, arg); 2954 break; 2955 case FSACTL_MINIPORT_REV_CHECK: 2956 arg = *(caddr_t*)arg; 2957 case FSACTL_LNX_MINIPORT_REV_CHECK: 2958 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); 2959 error = aac_rev_check(sc, arg); 2960 break; 2961 case FSACTL_QUERY_DISK: 2962 arg = *(caddr_t*)arg; 2963 case FSACTL_LNX_QUERY_DISK: 2964 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); 2965 error = aac_query_disk(sc, arg); 2966 break; 2967 case FSACTL_DELETE_DISK: 2968 case 
FSACTL_LNX_DELETE_DISK: 2969 /* 2970 * We don't trust the underland to tell us when to delete a 2971 * container, rather we rely on an AIF coming from the 2972 * controller 2973 */ 2974 error = 0; 2975 break; 2976 case FSACTL_GET_PCI_INFO: 2977 arg = *(caddr_t*)arg; 2978 case FSACTL_LNX_GET_PCI_INFO: 2979 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO"); 2980 error = aac_get_pci_info(sc, arg); 2981 break; 2982 default: 2983 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); 2984 error = EINVAL; 2985 break; 2986 } 2987 return(error); 2988 } 2989 2990 static int 2991 aac_poll(struct cdev *dev, int poll_events, d_thread_t *td) 2992 { 2993 struct aac_softc *sc; 2994 int revents; 2995 2996 sc = dev->si_drv1; 2997 revents = 0; 2998 2999 mtx_lock(&sc->aac_aifq_lock); 3000 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) { 3001 if (sc->aifq_idx != 0 || sc->aifq_filled) 3002 revents |= poll_events & (POLLIN | POLLRDNORM); 3003 } 3004 mtx_unlock(&sc->aac_aifq_lock); 3005 3006 if (revents == 0) { 3007 if (poll_events & (POLLIN | POLLRDNORM)) 3008 selrecord(td, &sc->rcv_select); 3009 } 3010 3011 return (revents); 3012 } 3013 3014 static void 3015 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) 3016 { 3017 3018 switch (event->ev_type) { 3019 case AAC_EVENT_CMFREE: 3020 mtx_assert(&sc->aac_io_lock, MA_OWNED); 3021 if (aac_alloc_command(sc, (struct aac_command **)arg)) { 3022 aac_add_event(sc, event); 3023 return; 3024 } 3025 free(event, M_AACBUF); 3026 wakeup(arg); 3027 break; 3028 default: 3029 break; 3030 } 3031 } 3032 3033 /* 3034 * Send a FIB supplied from userspace 3035 */ 3036 static int 3037 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) 3038 { 3039 struct aac_command *cm; 3040 int size, error; 3041 3042 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3043 3044 cm = NULL; 3045 3046 /* 3047 * Get a command 3048 */ 3049 mtx_lock(&sc->aac_io_lock); 3050 if (aac_alloc_command(sc, &cm)) { 3051 struct aac_event *event; 3052 3053 event = malloc(sizeof(struct aac_event), M_AACBUF, 3054 M_NOWAIT | M_ZERO); 3055 if (event == NULL) { 3056 error = EBUSY; 3057 mtx_unlock(&sc->aac_io_lock); 3058 goto out; 3059 } 3060 event->ev_type = AAC_EVENT_CMFREE; 3061 event->ev_callback = aac_ioctl_event; 3062 event->ev_arg = &cm; 3063 aac_add_event(sc, event); 3064 msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0); 3065 } 3066 mtx_unlock(&sc->aac_io_lock); 3067 3068 /* 3069 * Fetch the FIB header, then re-copy to get data as well. 3070 */ 3071 if ((error = copyin(ufib, cm->cm_fib, 3072 sizeof(struct aac_fib_header))) != 0) 3073 goto out; 3074 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); 3075 if (size > sc->aac_max_fib_size) { 3076 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", 3077 size, sc->aac_max_fib_size); 3078 size = sc->aac_max_fib_size; 3079 } 3080 if ((error = copyin(ufib, cm->cm_fib, size)) != 0) 3081 goto out; 3082 cm->cm_fib->Header.Size = size; 3083 cm->cm_timestamp = time_uptime; 3084 3085 /* 3086 * Pass the FIB to the controller, wait for it to complete. 3087 */ 3088 mtx_lock(&sc->aac_io_lock); 3089 error = aac_wait_command(cm); 3090 mtx_unlock(&sc->aac_io_lock); 3091 if (error != 0) { 3092 device_printf(sc->aac_dev, 3093 "aac_wait_command return %d\n", error); 3094 goto out; 3095 } 3096 3097 /* 3098 * Copy the FIB and data back out to the caller. 
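 * As on the way in, the size claimed in the returned FIB header is not
 * trusted; it is clamped to aac_max_fib_size before the copyout below.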
3099 */ 3100 size = cm->cm_fib->Header.Size; 3101 if (size > sc->aac_max_fib_size) { 3102 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", 3103 size, sc->aac_max_fib_size); 3104 size = sc->aac_max_fib_size; 3105 } 3106 error = copyout(cm->cm_fib, ufib, size); 3107 3108 out: 3109 if (cm != NULL) { 3110 mtx_lock(&sc->aac_io_lock); 3111 aac_release_command(cm); 3112 mtx_unlock(&sc->aac_io_lock); 3113 } 3114 return(error); 3115 } 3116 3117 /* 3118 * Send a passthrough FIB supplied from userspace 3119 */ 3120 static int 3121 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) 3122 { 3123 return (EINVAL); 3124 } 3125 3126 /* 3127 * Handle an AIF sent to us by the controller; queue it for later reference. 3128 * If the queue fills up, then drop the older entries. 3129 */ 3130 static void 3131 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) 3132 { 3133 struct aac_aif_command *aif; 3134 struct aac_container *co, *co_next; 3135 struct aac_fib_context *ctx; 3136 struct aac_mntinforesp *mir; 3137 int next, current, found; 3138 int count = 0, added = 0, i = 0; 3139 3140 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3141 3142 aif = (struct aac_aif_command*)&fib->data[0]; 3143 aac_print_aif(sc, aif); 3144 3145 /* Is it an event that we should care about? */ 3146 switch (aif->command) { 3147 case AifCmdEventNotify: 3148 switch (aif->data.EN.type) { 3149 case AifEnAddContainer: 3150 case AifEnDeleteContainer: 3151 /* 3152 * A container was added or deleted, but the message 3153 * doesn't tell us anything else! Re-enumerate the 3154 * containers and sort things out. 3155 */ 3156 aac_alloc_sync_fib(sc, &fib); 3157 do { 3158 /* 3159 * Ask the controller for its containers one at 3160 * a time. 3161 * XXX What if the controller's list changes 3162 * midway through this enumaration? 3163 * XXX This should be done async. 3164 */ 3165 if ((mir = aac_get_container_info(sc, fib, i)) == NULL) 3166 continue; 3167 if (i == 0) 3168 count = mir->MntRespCount; 3169 /* 3170 * Check the container against our list. 3171 * co->co_found was already set to 0 in a 3172 * previous run. 3173 */ 3174 if ((mir->Status == ST_OK) && 3175 (mir->MntTable[0].VolType != CT_NONE)) { 3176 found = 0; 3177 TAILQ_FOREACH(co, 3178 &sc->aac_container_tqh, 3179 co_link) { 3180 if (co->co_mntobj.ObjectId == 3181 mir->MntTable[0].ObjectId) { 3182 co->co_found = 1; 3183 found = 1; 3184 break; 3185 } 3186 } 3187 /* 3188 * If the container matched, continue 3189 * in the list. 3190 */ 3191 if (found) { 3192 i++; 3193 continue; 3194 } 3195 3196 /* 3197 * This is a new container. Do all the 3198 * appropriate things to set it up. 3199 */ 3200 aac_add_container(sc, mir, 1); 3201 added = 1; 3202 } 3203 i++; 3204 } while ((i < count) && (i < AAC_MAX_CONTAINERS)); 3205 aac_release_sync_fib(sc); 3206 3207 /* 3208 * Go through our list of containers and see which ones 3209 * were not marked 'found'. Since the controller didn't 3210 * list them they must have been deleted. Do the 3211 * appropriate steps to destroy the device. Also reset 3212 * the co->co_found field. 
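 *
 * Note the locking dance in the loop below: aac_io_lock is dropped and
 * Giant taken around device_delete_child() (and later bus_generic_attach()
 * for any newly added containers), so the newbus calls are never made with
 * the driver lock held.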
3213 */ 3214 co = TAILQ_FIRST(&sc->aac_container_tqh); 3215 while (co != NULL) { 3216 if (co->co_found == 0) { 3217 mtx_unlock(&sc->aac_io_lock); 3218 mtx_lock(&Giant); 3219 device_delete_child(sc->aac_dev, 3220 co->co_disk); 3221 mtx_unlock(&Giant); 3222 mtx_lock(&sc->aac_io_lock); 3223 co_next = TAILQ_NEXT(co, co_link); 3224 mtx_lock(&sc->aac_container_lock); 3225 TAILQ_REMOVE(&sc->aac_container_tqh, co, 3226 co_link); 3227 mtx_unlock(&sc->aac_container_lock); 3228 free(co, M_AACBUF); 3229 co = co_next; 3230 } else { 3231 co->co_found = 0; 3232 co = TAILQ_NEXT(co, co_link); 3233 } 3234 } 3235 3236 /* Attach the newly created containers */ 3237 if (added) { 3238 mtx_unlock(&sc->aac_io_lock); 3239 mtx_lock(&Giant); 3240 bus_generic_attach(sc->aac_dev); 3241 mtx_unlock(&Giant); 3242 mtx_lock(&sc->aac_io_lock); 3243 } 3244 3245 break; 3246 3247 default: 3248 break; 3249 } 3250 3251 default: 3252 break; 3253 } 3254 3255 /* Copy the AIF data to the AIF queue for ioctl retrieval */ 3256 mtx_lock(&sc->aac_aifq_lock); 3257 current = sc->aifq_idx; 3258 next = (current + 1) % AAC_AIFQ_LENGTH; 3259 if (next == 0) 3260 sc->aifq_filled = 1; 3261 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); 3262 /* modify AIF contexts */ 3263 if (sc->aifq_filled) { 3264 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3265 if (next == ctx->ctx_idx) 3266 ctx->ctx_wrap = 1; 3267 else if (current == ctx->ctx_idx && ctx->ctx_wrap) 3268 ctx->ctx_idx = next; 3269 } 3270 } 3271 sc->aifq_idx = next; 3272 /* On the off chance that someone is sleeping for an aif... */ 3273 if (sc->aac_state & AAC_STATE_AIF_SLEEPER) 3274 wakeup(sc->aac_aifq); 3275 /* Wakeup any poll()ers */ 3276 selwakeuppri(&sc->rcv_select, PRIBIO); 3277 mtx_unlock(&sc->aac_aifq_lock); 3278 3279 return; 3280 } 3281 3282 /* 3283 * Return the Revision of the driver to userspace and check to see if the 3284 * userspace app is possibly compatible. This is extremely bogus since 3285 * our driver doesn't follow Adaptec's versioning system. Cheat by just 3286 * returning what the card reported. 3287 */ 3288 static int 3289 aac_rev_check(struct aac_softc *sc, caddr_t udata) 3290 { 3291 struct aac_rev_check rev_check; 3292 struct aac_rev_check_resp rev_check_resp; 3293 int error = 0; 3294 3295 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3296 3297 /* 3298 * Copyin the revision struct from userspace 3299 */ 3300 if ((error = copyin(udata, (caddr_t)&rev_check, 3301 sizeof(struct aac_rev_check))) != 0) { 3302 return error; 3303 } 3304 3305 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", 3306 rev_check.callingRevision.buildNumber); 3307 3308 /* 3309 * Doctor up the response struct. 
3310 */ 3311 rev_check_resp.possiblyCompatible = 1; 3312 rev_check_resp.adapterSWRevision.external.ul = 3313 sc->aac_revision.external.ul; 3314 rev_check_resp.adapterSWRevision.buildNumber = 3315 sc->aac_revision.buildNumber; 3316 3317 return(copyout((caddr_t)&rev_check_resp, udata, 3318 sizeof(struct aac_rev_check_resp))); 3319 } 3320 3321 /* 3322 * Pass the fib context to the caller 3323 */ 3324 static int 3325 aac_open_aif(struct aac_softc *sc, caddr_t arg) 3326 { 3327 struct aac_fib_context *fibctx, *ctx; 3328 int error = 0; 3329 3330 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3331 3332 fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO); 3333 if (fibctx == NULL) 3334 return (ENOMEM); 3335 3336 mtx_lock(&sc->aac_aifq_lock); 3337 /* all elements are already 0, add to queue */ 3338 if (sc->fibctx == NULL) 3339 sc->fibctx = fibctx; 3340 else { 3341 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) 3342 ; 3343 ctx->next = fibctx; 3344 fibctx->prev = ctx; 3345 } 3346 3347 /* evaluate unique value */ 3348 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); 3349 ctx = sc->fibctx; 3350 while (ctx != fibctx) { 3351 if (ctx->unique == fibctx->unique) { 3352 fibctx->unique++; 3353 ctx = sc->fibctx; 3354 } else { 3355 ctx = ctx->next; 3356 } 3357 } 3358 mtx_unlock(&sc->aac_aifq_lock); 3359 3360 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); 3361 if (error) 3362 aac_close_aif(sc, (caddr_t)ctx); 3363 return error; 3364 } 3365 3366 /* 3367 * Close the caller's fib context 3368 */ 3369 static int 3370 aac_close_aif(struct aac_softc *sc, caddr_t arg) 3371 { 3372 struct aac_fib_context *ctx; 3373 3374 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3375 3376 mtx_lock(&sc->aac_aifq_lock); 3377 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3378 if (ctx->unique == *(uint32_t *)&arg) { 3379 if (ctx == sc->fibctx) 3380 sc->fibctx = NULL; 3381 else { 3382 ctx->prev->next = ctx->next; 3383 if (ctx->next) 3384 ctx->next->prev = ctx->prev; 3385 } 3386 break; 3387 } 3388 } 3389 mtx_unlock(&sc->aac_aifq_lock); 3390 if (ctx) 3391 free(ctx, M_AACBUF); 3392 3393 return 0; 3394 } 3395 3396 /* 3397 * Pass the caller the next AIF in their queue 3398 */ 3399 static int 3400 aac_getnext_aif(struct aac_softc *sc, caddr_t arg) 3401 { 3402 struct get_adapter_fib_ioctl agf; 3403 struct aac_fib_context *ctx; 3404 int error; 3405 3406 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3407 3408 if ((error = copyin(arg, &agf, sizeof(agf))) == 0) { 3409 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3410 if (agf.AdapterFibContext == ctx->unique) 3411 break; 3412 } 3413 if (!ctx) 3414 return (EFAULT); 3415 3416 error = aac_return_aif(sc, ctx, agf.AifFib); 3417 if (error == EAGAIN && agf.Wait) { 3418 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); 3419 sc->aac_state |= AAC_STATE_AIF_SLEEPER; 3420 while (error == EAGAIN) { 3421 error = tsleep(sc->aac_aifq, PRIBIO | 3422 PCATCH, "aacaif", 0); 3423 if (error == 0) 3424 error = aac_return_aif(sc, ctx, agf.AifFib); 3425 } 3426 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; 3427 } 3428 } 3429 return(error); 3430 } 3431 3432 /* 3433 * Hand the next AIF off the top of the queue out to userspace. 
3434 */ 3435 static int 3436 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) 3437 { 3438 int current, error; 3439 3440 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3441 3442 mtx_lock(&sc->aac_aifq_lock); 3443 current = ctx->ctx_idx; 3444 if (current == sc->aifq_idx && !ctx->ctx_wrap) { 3445 /* empty */ 3446 mtx_unlock(&sc->aac_aifq_lock); 3447 return (EAGAIN); 3448 } 3449 error = 3450 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); 3451 if (error) 3452 device_printf(sc->aac_dev, 3453 "aac_return_aif: copyout returned %d\n", error); 3454 else { 3455 ctx->ctx_wrap = 0; 3456 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 3457 } 3458 mtx_unlock(&sc->aac_aifq_lock); 3459 return(error); 3460 } 3461 3462 static int 3463 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) 3464 { 3465 struct aac_pci_info { 3466 u_int32_t bus; 3467 u_int32_t slot; 3468 } pciinf; 3469 int error; 3470 3471 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3472 3473 pciinf.bus = pci_get_bus(sc->aac_dev); 3474 pciinf.slot = pci_get_slot(sc->aac_dev); 3475 3476 error = copyout((caddr_t)&pciinf, uptr, 3477 sizeof(struct aac_pci_info)); 3478 3479 return (error); 3480 } 3481 3482 /* 3483 * Give the userland some information about the container. The AAC arch 3484 * expects the driver to be a SCSI passthrough type driver, so it expects 3485 * the containers to have b:t:l numbers. Fake it. 3486 */ 3487 static int 3488 aac_query_disk(struct aac_softc *sc, caddr_t uptr) 3489 { 3490 struct aac_query_disk query_disk; 3491 struct aac_container *co; 3492 struct aac_disk *disk; 3493 int error, id; 3494 3495 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3496 3497 disk = NULL; 3498 3499 error = copyin(uptr, (caddr_t)&query_disk, 3500 sizeof(struct aac_query_disk)); 3501 if (error) 3502 return (error); 3503 3504 id = query_disk.ContainerNumber; 3505 if (id == -1) 3506 return (EINVAL); 3507 3508 mtx_lock(&sc->aac_container_lock); 3509 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { 3510 if (co->co_mntobj.ObjectId == id) 3511 break; 3512 } 3513 3514 if (co == NULL) { 3515 query_disk.Valid = 0; 3516 query_disk.Locked = 0; 3517 query_disk.Deleted = 1; /* XXX is this right? */ 3518 } else { 3519 disk = device_get_softc(co->co_disk); 3520 query_disk.Valid = 1; 3521 query_disk.Locked = 3522 (disk->ad_flags & AAC_DISK_OPEN) ? 
1 : 0; 3523 query_disk.Deleted = 0; 3524 query_disk.Bus = device_get_unit(sc->aac_dev); 3525 query_disk.Target = disk->unit; 3526 query_disk.Lun = 0; 3527 query_disk.UnMapped = 0; 3528 sprintf(&query_disk.diskDeviceName[0], "%s%d", 3529 disk->ad_disk->d_name, disk->ad_disk->d_unit); 3530 } 3531 mtx_unlock(&sc->aac_container_lock); 3532 3533 error = copyout((caddr_t)&query_disk, uptr, 3534 sizeof(struct aac_query_disk)); 3535 3536 return (error); 3537 } 3538 3539 static void 3540 aac_get_bus_info(struct aac_softc *sc) 3541 { 3542 struct aac_fib *fib; 3543 struct aac_ctcfg *c_cmd; 3544 struct aac_ctcfg_resp *c_resp; 3545 struct aac_vmioctl *vmi; 3546 struct aac_vmi_businf_resp *vmi_resp; 3547 struct aac_getbusinf businfo; 3548 struct aac_sim *caminf; 3549 device_t child; 3550 int i, found, error; 3551 3552 mtx_lock(&sc->aac_io_lock); 3553 aac_alloc_sync_fib(sc, &fib); 3554 c_cmd = (struct aac_ctcfg *)&fib->data[0]; 3555 bzero(c_cmd, sizeof(struct aac_ctcfg)); 3556 3557 c_cmd->Command = VM_ContainerConfig; 3558 c_cmd->cmd = CT_GET_SCSI_METHOD; 3559 c_cmd->param = 0; 3560 3561 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3562 sizeof(struct aac_ctcfg)); 3563 if (error) { 3564 device_printf(sc->aac_dev, "Error %d sending " 3565 "VM_ContainerConfig command\n", error); 3566 aac_release_sync_fib(sc); 3567 mtx_unlock(&sc->aac_io_lock); 3568 return; 3569 } 3570 3571 c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; 3572 if (c_resp->Status != ST_OK) { 3573 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", 3574 c_resp->Status); 3575 aac_release_sync_fib(sc); 3576 mtx_unlock(&sc->aac_io_lock); 3577 return; 3578 } 3579 3580 sc->scsi_method_id = c_resp->param; 3581 3582 vmi = (struct aac_vmioctl *)&fib->data[0]; 3583 bzero(vmi, sizeof(struct aac_vmioctl)); 3584 3585 vmi->Command = VM_Ioctl; 3586 vmi->ObjType = FT_DRIVE; 3587 vmi->MethId = sc->scsi_method_id; 3588 vmi->ObjId = 0; 3589 vmi->IoctlCmd = GetBusInfo; 3590 3591 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3592 sizeof(struct aac_vmi_businf_resp)); 3593 if (error) { 3594 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", 3595 error); 3596 aac_release_sync_fib(sc); 3597 mtx_unlock(&sc->aac_io_lock); 3598 return; 3599 } 3600 3601 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; 3602 if (vmi_resp->Status != ST_OK) { 3603 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", 3604 vmi_resp->Status); 3605 aac_release_sync_fib(sc); 3606 mtx_unlock(&sc->aac_io_lock); 3607 return; 3608 } 3609 3610 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); 3611 aac_release_sync_fib(sc); 3612 mtx_unlock(&sc->aac_io_lock); 3613 3614 found = 0; 3615 for (i = 0; i < businfo.BusCount; i++) { 3616 if (businfo.BusValid[i] != AAC_BUS_VALID) 3617 continue; 3618 3619 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), 3620 M_AACBUF, M_NOWAIT | M_ZERO); 3621 if (caminf == NULL) { 3622 device_printf(sc->aac_dev, 3623 "No memory to add passthrough bus %d\n", i); 3624 break; 3625 }; 3626 3627 child = device_add_child(sc->aac_dev, "aacp", -1); 3628 if (child == NULL) { 3629 device_printf(sc->aac_dev, 3630 "device_add_child failed for passthrough bus %d\n", 3631 i); 3632 free(caminf, M_AACBUF); 3633 break; 3634 } 3635 3636 caminf->TargetsPerBus = businfo.TargetsPerBus; 3637 caminf->BusNumber = i; 3638 caminf->InitiatorBusId = businfo.InitiatorBusId[i]; 3639 caminf->aac_sc = sc; 3640 caminf->sim_dev = child; 3641 3642 device_set_ivars(child, caminf); 3643 device_set_desc(child, "SCSI Passthrough Bus"); 3644 
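		/*
		 * The aac_sim was attached to the child via
		 * device_set_ivars() above; the "aacp" passthrough driver
		 * is expected to retrieve it with device_get_ivars() when
		 * it attaches.
		 */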
TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); 3645 3646 found = 1; 3647 } 3648 3649 if (found) 3650 bus_generic_attach(sc->aac_dev); 3651 3652 return; 3653 } 3654
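/*
 * Illustrative only, not part of the driver: the AACIO_STATS ioctl handled
 * in aac_ioctl() above can be exercised from userland roughly as follows,
 * assuming the usual /dev/aac0 node and the AACIO_STATS, AACQ_* and
 * union aac_statrequest definitions from the driver's headers (plus
 * <fcntl.h> and <sys/ioctl.h>); error handling is elided.
 *
 *	union aac_statrequest as;
 *	int fd;
 *
 *	fd = open("/dev/aac0", O_RDWR);
 *	as.as_item = AACQ_BUSY;
 *	if (ioctl(fd, AACIO_STATS, &as) == 0)
 *		... as.as_qstat now holds the busy-queue statistics ...
 *	close(fd);
 */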