1 /*- 2 * Copyright (c) 2000 Michael Smith 3 * Copyright (c) 2001 Scott Long 4 * Copyright (c) 2000 BSDi 5 * Copyright (c) 2001 Adaptec, Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters. 35 */ 36 #define AAC_DRIVERNAME "aac" 37 38 #include "opt_aac.h" 39 40 /* #include <stddef.h> */ 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/malloc.h> 44 #include <sys/kernel.h> 45 #include <sys/kthread.h> 46 #include <sys/sysctl.h> 47 #include <sys/poll.h> 48 #include <sys/ioccom.h> 49 50 #include <sys/bus.h> 51 #include <sys/conf.h> 52 #include <sys/signalvar.h> 53 #include <sys/time.h> 54 #include <sys/eventhandler.h> 55 #include <sys/rman.h> 56 57 #include <machine/bus.h> 58 #include <sys/bus_dma.h> 59 #include <machine/resource.h> 60 61 #include <dev/pci/pcireg.h> 62 #include <dev/pci/pcivar.h> 63 64 #include <dev/aac/aacreg.h> 65 #include <sys/aac_ioctl.h> 66 #include <dev/aac/aacvar.h> 67 #include <dev/aac/aac_tables.h> 68 69 static void aac_startup(void *arg); 70 static void aac_add_container(struct aac_softc *sc, 71 struct aac_mntinforesp *mir, int f); 72 static void aac_get_bus_info(struct aac_softc *sc); 73 static void aac_daemon(void *arg); 74 75 /* Command Processing */ 76 static void aac_timeout(struct aac_softc *sc); 77 static void aac_complete(void *context, int pending); 78 static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp); 79 static void aac_bio_complete(struct aac_command *cm); 80 static int aac_wait_command(struct aac_command *cm); 81 static void aac_command_thread(struct aac_softc *sc); 82 83 /* Command Buffer Management */ 84 static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs, 85 int nseg, int error); 86 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, 87 int nseg, int error); 88 static int aac_alloc_commands(struct aac_softc *sc); 89 static void aac_free_commands(struct aac_softc *sc); 90 static void aac_unmap_command(struct aac_command *cm); 91 92 /* Hardware Interface */ 93 static int aac_alloc(struct aac_softc *sc); 94 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int 
nseg, 95 int error); 96 static int aac_check_firmware(struct aac_softc *sc); 97 static int aac_init(struct aac_softc *sc); 98 static int aac_sync_command(struct aac_softc *sc, u_int32_t command, 99 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, 100 u_int32_t arg3, u_int32_t *sp); 101 static int aac_setup_intr(struct aac_softc *sc); 102 static int aac_enqueue_fib(struct aac_softc *sc, int queue, 103 struct aac_command *cm); 104 static int aac_dequeue_fib(struct aac_softc *sc, int queue, 105 u_int32_t *fib_size, struct aac_fib **fib_addr); 106 static int aac_enqueue_response(struct aac_softc *sc, int queue, 107 struct aac_fib *fib); 108 109 /* StrongARM interface */ 110 static int aac_sa_get_fwstatus(struct aac_softc *sc); 111 static void aac_sa_qnotify(struct aac_softc *sc, int qbit); 112 static int aac_sa_get_istatus(struct aac_softc *sc); 113 static void aac_sa_clear_istatus(struct aac_softc *sc, int mask); 114 static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, 115 u_int32_t arg0, u_int32_t arg1, 116 u_int32_t arg2, u_int32_t arg3); 117 static int aac_sa_get_mailbox(struct aac_softc *sc, int mb); 118 static void aac_sa_set_interrupts(struct aac_softc *sc, int enable); 119 120 struct aac_interface aac_sa_interface = { 121 aac_sa_get_fwstatus, 122 aac_sa_qnotify, 123 aac_sa_get_istatus, 124 aac_sa_clear_istatus, 125 aac_sa_set_mailbox, 126 aac_sa_get_mailbox, 127 aac_sa_set_interrupts, 128 NULL, NULL, NULL 129 }; 130 131 /* i960Rx interface */ 132 static int aac_rx_get_fwstatus(struct aac_softc *sc); 133 static void aac_rx_qnotify(struct aac_softc *sc, int qbit); 134 static int aac_rx_get_istatus(struct aac_softc *sc); 135 static void aac_rx_clear_istatus(struct aac_softc *sc, int mask); 136 static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, 137 u_int32_t arg0, u_int32_t arg1, 138 u_int32_t arg2, u_int32_t arg3); 139 static int aac_rx_get_mailbox(struct aac_softc *sc, int mb); 140 static void aac_rx_set_interrupts(struct aac_softc *sc, int enable); 141 static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm); 142 static int aac_rx_get_outb_queue(struct aac_softc *sc); 143 static void aac_rx_set_outb_queue(struct aac_softc *sc, int index); 144 145 struct aac_interface aac_rx_interface = { 146 aac_rx_get_fwstatus, 147 aac_rx_qnotify, 148 aac_rx_get_istatus, 149 aac_rx_clear_istatus, 150 aac_rx_set_mailbox, 151 aac_rx_get_mailbox, 152 aac_rx_set_interrupts, 153 aac_rx_send_command, 154 aac_rx_get_outb_queue, 155 aac_rx_set_outb_queue 156 }; 157 158 /* Rocket/MIPS interface */ 159 static int aac_rkt_get_fwstatus(struct aac_softc *sc); 160 static void aac_rkt_qnotify(struct aac_softc *sc, int qbit); 161 static int aac_rkt_get_istatus(struct aac_softc *sc); 162 static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask); 163 static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, 164 u_int32_t arg0, u_int32_t arg1, 165 u_int32_t arg2, u_int32_t arg3); 166 static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb); 167 static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable); 168 static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm); 169 static int aac_rkt_get_outb_queue(struct aac_softc *sc); 170 static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index); 171 172 struct aac_interface aac_rkt_interface = { 173 aac_rkt_get_fwstatus, 174 aac_rkt_qnotify, 175 aac_rkt_get_istatus, 176 aac_rkt_clear_istatus, 177 aac_rkt_set_mailbox, 178 aac_rkt_get_mailbox, 179 
aac_rkt_set_interrupts, 180 aac_rkt_send_command, 181 aac_rkt_get_outb_queue, 182 aac_rkt_set_outb_queue 183 }; 184 185 /* Debugging and Diagnostics */ 186 static void aac_describe_controller(struct aac_softc *sc); 187 static char *aac_describe_code(struct aac_code_lookup *table, 188 u_int32_t code); 189 190 /* Management Interface */ 191 static d_open_t aac_open; 192 static d_close_t aac_close; 193 static d_ioctl_t aac_ioctl; 194 static d_poll_t aac_poll; 195 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib); 196 static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg); 197 static void aac_handle_aif(struct aac_softc *sc, 198 struct aac_fib *fib); 199 static int aac_rev_check(struct aac_softc *sc, caddr_t udata); 200 static int aac_open_aif(struct aac_softc *sc, caddr_t arg); 201 static int aac_close_aif(struct aac_softc *sc, caddr_t arg); 202 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg); 203 static int aac_return_aif(struct aac_softc *sc, 204 struct aac_fib_context *ctx, caddr_t uptr); 205 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr); 206 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr); 207 static int aac_supported_features(struct aac_softc *sc, caddr_t uptr); 208 static void aac_ioctl_event(struct aac_softc *sc, 209 struct aac_event *event, void *arg); 210 static struct aac_mntinforesp * 211 aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid); 212 213 static struct cdevsw aac_cdevsw = { 214 .d_version = D_VERSION, 215 .d_flags = D_NEEDGIANT, 216 .d_open = aac_open, 217 .d_close = aac_close, 218 .d_ioctl = aac_ioctl, 219 .d_poll = aac_poll, 220 .d_name = "aac", 221 }; 222 223 MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver"); 224 225 /* sysctl node */ 226 SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters"); 227 228 /* 229 * Device Interface 230 */ 231 232 /* 233 * Initialize the controller and softc 234 */ 235 int 236 aac_attach(struct aac_softc *sc) 237 { 238 int error, unit; 239 240 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 241 242 /* 243 * Initialize per-controller queues. 244 */ 245 aac_initq_free(sc); 246 aac_initq_ready(sc); 247 aac_initq_busy(sc); 248 aac_initq_bio(sc); 249 250 /* 251 * Initialize command-completion task. 252 */ 253 TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc); 254 255 /* mark controller as suspended until we get ourselves organised */ 256 sc->aac_state |= AAC_STATE_SUSPEND; 257 258 /* 259 * Check that the firmware on the card is supported. 260 */ 261 if ((error = aac_check_firmware(sc)) != 0) 262 return(error); 263 264 /* 265 * Initialize locks 266 */ 267 mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF); 268 mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF); 269 mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF); 270 TAILQ_INIT(&sc->aac_container_tqh); 271 TAILQ_INIT(&sc->aac_ev_cmfree); 272 273 /* Initialize the clock daemon callout. */ 274 callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0); 275 276 /* 277 * Initialize the adapter. 278 */ 279 if ((error = aac_alloc(sc)) != 0) 280 return(error); 281 if ((error = aac_init(sc)) != 0) 282 return(error); 283 284 /* 285 * Allocate and connect our interrupt. 286 */ 287 if ((error = aac_setup_intr(sc)) != 0) 288 return(error); 289 290 /* 291 * Print a little information about the controller. 292 */ 293 aac_describe_controller(sc); 294 295 /* 296 * Register to probe our containers later. 
297 */ 298 sc->aac_ich.ich_func = aac_startup; 299 sc->aac_ich.ich_arg = sc; 300 if (config_intrhook_establish(&sc->aac_ich) != 0) { 301 device_printf(sc->aac_dev, 302 "can't establish configuration hook\n"); 303 return(ENXIO); 304 } 305 306 /* 307 * Make the control device. 308 */ 309 unit = device_get_unit(sc->aac_dev); 310 sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR, 311 0640, "aac%d", unit); 312 (void)make_dev_alias(sc->aac_dev_t, "afa%d", unit); 313 (void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit); 314 sc->aac_dev_t->si_drv1 = sc; 315 316 /* Create the AIF thread */ 317 if (kproc_create((void(*)(void *))aac_command_thread, sc, 318 &sc->aifthread, 0, 0, "aac%daif", unit)) 319 panic("Could not create AIF thread"); 320 321 /* Register the shutdown method to only be called post-dump */ 322 if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown, 323 sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL) 324 device_printf(sc->aac_dev, 325 "shutdown event registration failed\n"); 326 327 /* Register with CAM for the non-DASD devices */ 328 if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) { 329 TAILQ_INIT(&sc->aac_sim_tqh); 330 aac_get_bus_info(sc); 331 } 332 333 mtx_lock(&sc->aac_io_lock); 334 callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc); 335 mtx_unlock(&sc->aac_io_lock); 336 337 return(0); 338 } 339 340 static void 341 aac_daemon(void *arg) 342 { 343 struct timeval tv; 344 struct aac_softc *sc; 345 struct aac_fib *fib; 346 347 sc = arg; 348 mtx_assert(&sc->aac_io_lock, MA_OWNED); 349 350 if (callout_pending(&sc->aac_daemontime) || 351 callout_active(&sc->aac_daemontime) == 0) 352 return; 353 getmicrotime(&tv); 354 aac_alloc_sync_fib(sc, &fib); 355 *(uint32_t *)fib->data = tv.tv_sec; 356 aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t)); 357 aac_release_sync_fib(sc); 358 callout_schedule(&sc->aac_daemontime, 30 * 60 * hz); 359 } 360 361 void 362 aac_add_event(struct aac_softc *sc, struct aac_event *event) 363 { 364 365 switch (event->ev_type & AAC_EVENT_MASK) { 366 case AAC_EVENT_CMFREE: 367 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links); 368 break; 369 default: 370 device_printf(sc->aac_dev, "aac_add event: unknown event %d\n", 371 event->ev_type); 372 break; 373 } 374 375 return; 376 } 377 378 /* 379 * Request information of container #cid 380 */ 381 static struct aac_mntinforesp * 382 aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid) 383 { 384 struct aac_mntinfo *mi; 385 386 mi = (struct aac_mntinfo *)&fib->data[0]; 387 /* use 64-bit LBA if enabled */ 388 mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 389 VM_NameServe64 : VM_NameServe; 390 mi->MntType = FT_FILESYS; 391 mi->MntCount = cid; 392 393 if (aac_sync_fib(sc, ContainerCommand, 0, fib, 394 sizeof(struct aac_mntinfo))) { 395 printf("Error probing container %d\n", cid); 396 return (NULL); 397 } 398 399 return ((struct aac_mntinforesp *)&fib->data[0]); 400 } 401 402 /* 403 * Probe for containers, create disks. 
404 */ 405 static void 406 aac_startup(void *arg) 407 { 408 struct aac_softc *sc; 409 struct aac_fib *fib; 410 struct aac_mntinforesp *mir; 411 int count = 0, i = 0; 412 413 sc = (struct aac_softc *)arg; 414 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 415 416 /* disconnect ourselves from the intrhook chain */ 417 config_intrhook_disestablish(&sc->aac_ich); 418 419 mtx_lock(&sc->aac_io_lock); 420 aac_alloc_sync_fib(sc, &fib); 421 422 /* loop over possible containers */ 423 do { 424 if ((mir = aac_get_container_info(sc, fib, i)) == NULL) 425 continue; 426 if (i == 0) 427 count = mir->MntRespCount; 428 aac_add_container(sc, mir, 0); 429 i++; 430 } while ((i < count) && (i < AAC_MAX_CONTAINERS)); 431 432 aac_release_sync_fib(sc); 433 mtx_unlock(&sc->aac_io_lock); 434 435 /* poke the bus to actually attach the child devices */ 436 if (bus_generic_attach(sc->aac_dev)) 437 device_printf(sc->aac_dev, "bus_generic_attach failed\n"); 438 439 /* mark the controller up */ 440 sc->aac_state &= ~AAC_STATE_SUSPEND; 441 442 /* enable interrupts now */ 443 AAC_UNMASK_INTERRUPTS(sc); 444 } 445 446 /* 447 * Create a device to represent a new container 448 */ 449 static void 450 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f) 451 { 452 struct aac_container *co; 453 device_t child; 454 455 /* 456 * Check container volume type for validity. Note that many of 457 * the possible types may never show up. 458 */ 459 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { 460 co = (struct aac_container *)malloc(sizeof *co, M_AACBUF, 461 M_NOWAIT | M_ZERO); 462 if (co == NULL) 463 panic("Out of memory?!"); 464 fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d", 465 mir->MntTable[0].ObjectId, 466 mir->MntTable[0].FileSystemName, 467 mir->MntTable[0].Capacity, mir->MntTable[0].VolType); 468 469 if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL) 470 device_printf(sc->aac_dev, "device_add_child failed\n"); 471 else 472 device_set_ivars(child, co); 473 device_set_desc(child, aac_describe_code(aac_container_types, 474 mir->MntTable[0].VolType)); 475 co->co_disk = child; 476 co->co_found = f; 477 bcopy(&mir->MntTable[0], &co->co_mntobj, 478 sizeof(struct aac_mntobj)); 479 mtx_lock(&sc->aac_container_lock); 480 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link); 481 mtx_unlock(&sc->aac_container_lock); 482 } 483 } 484 485 /* 486 * Allocate resources associated with (sc) 487 */ 488 static int 489 aac_alloc(struct aac_softc *sc) 490 { 491 492 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 493 494 /* 495 * Create DMA tag for mapping buffers into controller-addressable space. 496 */ 497 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 498 1, 0, /* algnmnt, boundary */ 499 (sc->flags & AAC_FLAGS_SG_64BIT) ? 500 BUS_SPACE_MAXADDR : 501 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 502 BUS_SPACE_MAXADDR, /* highaddr */ 503 NULL, NULL, /* filter, filterarg */ 504 MAXBSIZE, /* maxsize */ 505 sc->aac_sg_tablesize, /* nsegments */ 506 MAXBSIZE, /* maxsegsize */ 507 BUS_DMA_ALLOCNOW, /* flags */ 508 busdma_lock_mutex, /* lockfunc */ 509 &sc->aac_io_lock, /* lockfuncarg */ 510 &sc->aac_buffer_dmat)) { 511 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n"); 512 return (ENOMEM); 513 } 514 515 /* 516 * Create DMA tag for mapping FIBs into controller-addressable space.. 517 */ 518 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 519 1, 0, /* algnmnt, boundary */ 520 (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 
521 BUS_SPACE_MAXADDR_32BIT : 522 0x7fffffff, /* lowaddr */ 523 BUS_SPACE_MAXADDR, /* highaddr */ 524 NULL, NULL, /* filter, filterarg */ 525 sc->aac_max_fibs_alloc * 526 sc->aac_max_fib_size, /* maxsize */ 527 1, /* nsegments */ 528 sc->aac_max_fibs_alloc * 529 sc->aac_max_fib_size, /* maxsize */ 530 0, /* flags */ 531 NULL, NULL, /* No locking needed */ 532 &sc->aac_fib_dmat)) { 533 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n"); 534 return (ENOMEM); 535 } 536 537 /* 538 * Create DMA tag for the common structure and allocate it. 539 */ 540 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 541 1, 0, /* algnmnt, boundary */ 542 (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 543 BUS_SPACE_MAXADDR_32BIT : 544 0x7fffffff, /* lowaddr */ 545 BUS_SPACE_MAXADDR, /* highaddr */ 546 NULL, NULL, /* filter, filterarg */ 547 8192 + sizeof(struct aac_common), /* maxsize */ 548 1, /* nsegments */ 549 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 550 0, /* flags */ 551 NULL, NULL, /* No locking needed */ 552 &sc->aac_common_dmat)) { 553 device_printf(sc->aac_dev, 554 "can't allocate common structure DMA tag\n"); 555 return (ENOMEM); 556 } 557 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, 558 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { 559 device_printf(sc->aac_dev, "can't allocate common structure\n"); 560 return (ENOMEM); 561 } 562 563 /* 564 * Work around a bug in the 2120 and 2200 that cannot DMA commands 565 * below address 8192 in physical memory. 566 * XXX If the padding is not needed, can it be put to use instead 567 * of ignored? 568 */ 569 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, 570 sc->aac_common, 8192 + sizeof(*sc->aac_common), 571 aac_common_map, sc, 0); 572 573 if (sc->aac_common_busaddr < 8192) { 574 sc->aac_common = (struct aac_common *) 575 ((uint8_t *)sc->aac_common + 8192); 576 sc->aac_common_busaddr += 8192; 577 } 578 bzero(sc->aac_common, sizeof(*sc->aac_common)); 579 580 /* Allocate some FIBs and associated command structs */ 581 TAILQ_INIT(&sc->aac_fibmap_tqh); 582 sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command), 583 M_AACBUF, M_WAITOK|M_ZERO); 584 while (sc->total_fibs < sc->aac_max_fibs) { 585 if (aac_alloc_commands(sc) != 0) 586 break; 587 } 588 if (sc->total_fibs == 0) 589 return (ENOMEM); 590 591 return (0); 592 } 593 594 /* 595 * Free all of the resources associated with (sc) 596 * 597 * Should not be called if the controller is active. 
598 */ 599 void 600 aac_free(struct aac_softc *sc) 601 { 602 603 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 604 605 /* remove the control device */ 606 if (sc->aac_dev_t != NULL) 607 destroy_dev(sc->aac_dev_t); 608 609 /* throw away any FIB buffers, discard the FIB DMA tag */ 610 aac_free_commands(sc); 611 if (sc->aac_fib_dmat) 612 bus_dma_tag_destroy(sc->aac_fib_dmat); 613 614 free(sc->aac_commands, M_AACBUF); 615 616 /* destroy the common area */ 617 if (sc->aac_common) { 618 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap); 619 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common, 620 sc->aac_common_dmamap); 621 } 622 if (sc->aac_common_dmat) 623 bus_dma_tag_destroy(sc->aac_common_dmat); 624 625 /* disconnect the interrupt handler */ 626 if (sc->aac_intr) 627 bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr); 628 if (sc->aac_irq != NULL) 629 bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid, 630 sc->aac_irq); 631 632 /* destroy data-transfer DMA tag */ 633 if (sc->aac_buffer_dmat) 634 bus_dma_tag_destroy(sc->aac_buffer_dmat); 635 636 /* destroy the parent DMA tag */ 637 if (sc->aac_parent_dmat) 638 bus_dma_tag_destroy(sc->aac_parent_dmat); 639 640 /* release the register window mapping */ 641 if (sc->aac_regs_res0 != NULL) 642 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, 643 sc->aac_regs_rid0, sc->aac_regs_res0); 644 if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL) 645 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, 646 sc->aac_regs_rid1, sc->aac_regs_res1); 647 } 648 649 /* 650 * Disconnect from the controller completely, in preparation for unload. 651 */ 652 int 653 aac_detach(device_t dev) 654 { 655 struct aac_softc *sc; 656 struct aac_container *co; 657 struct aac_sim *sim; 658 int error; 659 660 sc = device_get_softc(dev); 661 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 662 663 if (sc->aac_state & AAC_STATE_OPEN) 664 return(EBUSY); 665 666 callout_drain(&sc->aac_daemontime); 667 668 /* Remove the child containers */ 669 while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) { 670 error = device_delete_child(dev, co->co_disk); 671 if (error) 672 return (error); 673 TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); 674 free(co, M_AACBUF); 675 } 676 677 /* Remove the CAM SIMs */ 678 while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) { 679 TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link); 680 error = device_delete_child(dev, sim->sim_dev); 681 if (error) 682 return (error); 683 free(sim, M_AACBUF); 684 } 685 686 if (sc->aifflags & AAC_AIFFLAGS_RUNNING) { 687 sc->aifflags |= AAC_AIFFLAGS_EXIT; 688 wakeup(sc->aifthread); 689 tsleep(sc->aac_dev, PUSER | PCATCH, "aacdch", 30 * hz); 690 } 691 692 if (sc->aifflags & AAC_AIFFLAGS_RUNNING) 693 panic("Cannot shutdown AIF thread"); 694 695 if ((error = aac_shutdown(dev))) 696 return(error); 697 698 EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh); 699 700 aac_free(sc); 701 702 mtx_destroy(&sc->aac_aifq_lock); 703 mtx_destroy(&sc->aac_io_lock); 704 mtx_destroy(&sc->aac_container_lock); 705 706 return(0); 707 } 708 709 /* 710 * Bring the controller down to a dormant state and detach all child devices. 711 * 712 * This function is called before detach or system shutdown. 713 * 714 * Note that we can assume that the bioq on the controller is empty, as we won't 715 * allow shutdown if any device is open. 
716 */ 717 int 718 aac_shutdown(device_t dev) 719 { 720 struct aac_softc *sc; 721 struct aac_fib *fib; 722 struct aac_close_command *cc; 723 724 sc = device_get_softc(dev); 725 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 726 727 sc->aac_state |= AAC_STATE_SUSPEND; 728 729 /* 730 * Send a Container shutdown followed by a HostShutdown FIB to the 731 * controller to convince it that we don't want to talk to it anymore. 732 * We've been closed and all I/O completed already 733 */ 734 device_printf(sc->aac_dev, "shutting down controller..."); 735 736 mtx_lock(&sc->aac_io_lock); 737 aac_alloc_sync_fib(sc, &fib); 738 cc = (struct aac_close_command *)&fib->data[0]; 739 740 bzero(cc, sizeof(struct aac_close_command)); 741 cc->Command = VM_CloseAll; 742 cc->ContainerId = 0xffffffff; 743 if (aac_sync_fib(sc, ContainerCommand, 0, fib, 744 sizeof(struct aac_close_command))) 745 printf("FAILED.\n"); 746 else 747 printf("done\n"); 748 #if 0 749 else { 750 fib->data[0] = 0; 751 /* 752 * XXX Issuing this command to the controller makes it shut down 753 * but also keeps it from coming back up without a reset of the 754 * PCI bus. This is not desirable if you are just unloading the 755 * driver module with the intent to reload it later. 756 */ 757 if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN, 758 fib, 1)) { 759 printf("FAILED.\n"); 760 } else { 761 printf("done.\n"); 762 } 763 } 764 #endif 765 766 AAC_MASK_INTERRUPTS(sc); 767 aac_release_sync_fib(sc); 768 mtx_unlock(&sc->aac_io_lock); 769 770 return(0); 771 } 772 773 /* 774 * Bring the controller to a quiescent state, ready for system suspend. 775 */ 776 int 777 aac_suspend(device_t dev) 778 { 779 struct aac_softc *sc; 780 781 sc = device_get_softc(dev); 782 783 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 784 sc->aac_state |= AAC_STATE_SUSPEND; 785 786 AAC_MASK_INTERRUPTS(sc); 787 return(0); 788 } 789 790 /* 791 * Bring the controller back to a state ready for operation. 792 */ 793 int 794 aac_resume(device_t dev) 795 { 796 struct aac_softc *sc; 797 798 sc = device_get_softc(dev); 799 800 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 801 sc->aac_state &= ~AAC_STATE_SUSPEND; 802 AAC_UNMASK_INTERRUPTS(sc); 803 return(0); 804 } 805 806 /* 807 * Interrupt handler for NEW_COMM interface. 808 */ 809 void 810 aac_new_intr(void *arg) 811 { 812 struct aac_softc *sc; 813 u_int32_t index, fast; 814 struct aac_command *cm; 815 struct aac_fib *fib; 816 int i; 817 818 sc = (struct aac_softc *)arg; 819 820 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 821 mtx_lock(&sc->aac_io_lock); 822 while (1) { 823 index = AAC_GET_OUTB_QUEUE(sc); 824 if (index == 0xffffffff) 825 index = AAC_GET_OUTB_QUEUE(sc); 826 if (index == 0xffffffff) 827 break; 828 if (index & 2) { 829 if (index == 0xfffffffe) { 830 /* XXX This means that the controller wants 831 * more work. Ignore it for now. 832 */ 833 continue; 834 } 835 /* AIF */ 836 fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF, 837 M_NOWAIT | M_ZERO); 838 if (fib == NULL) { 839 /* If we're really this short on memory, 840 * hopefully breaking out of the handler will 841 * allow something to get freed. This 842 * actually sucks a whole lot. 843 */ 844 break; 845 } 846 index &= ~2; 847 for (i = 0; i < sizeof(struct aac_fib)/4; ++i) 848 ((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4); 849 aac_handle_aif(sc, fib); 850 free(fib, M_AACBUF); 851 852 /* 853 * AIF memory is owned by the adapter, so let it 854 * know that we are done with it. 
855 */ 856 AAC_SET_OUTB_QUEUE(sc, index); 857 AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY); 858 } else { 859 fast = index & 1; 860 cm = sc->aac_commands + (index >> 2); 861 fib = cm->cm_fib; 862 if (fast) { 863 fib->Header.XferState |= AAC_FIBSTATE_DONEADAP; 864 *((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL; 865 } 866 aac_remove_busy(cm); 867 aac_unmap_command(cm); 868 cm->cm_flags |= AAC_CMD_COMPLETED; 869 870 /* is there a completion handler? */ 871 if (cm->cm_complete != NULL) { 872 cm->cm_complete(cm); 873 } else { 874 /* assume that someone is sleeping on this 875 * command 876 */ 877 wakeup(cm); 878 } 879 sc->flags &= ~AAC_QUEUE_FRZN; 880 } 881 } 882 /* see if we can start some more I/O */ 883 if ((sc->flags & AAC_QUEUE_FRZN) == 0) 884 aac_startio(sc); 885 886 mtx_unlock(&sc->aac_io_lock); 887 } 888 889 /* 890 * Interrupt filter for !NEW_COMM interface. 891 */ 892 int 893 aac_filter(void *arg) 894 { 895 struct aac_softc *sc; 896 u_int16_t reason; 897 898 sc = (struct aac_softc *)arg; 899 900 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 901 /* 902 * Read the status register directly. This is faster than taking the 903 * driver lock and reading the queues directly. It also saves having 904 * to turn parts of the driver lock into a spin mutex, which would be 905 * ugly. 906 */ 907 reason = AAC_GET_ISTATUS(sc); 908 AAC_CLEAR_ISTATUS(sc, reason); 909 910 /* handle completion processing */ 911 if (reason & AAC_DB_RESPONSE_READY) 912 taskqueue_enqueue_fast(taskqueue_fast, &sc->aac_task_complete); 913 914 /* controller wants to talk to us */ 915 if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) { 916 /* 917 * XXX Make sure that we don't get fooled by strange messages 918 * that start with a NULL. 919 */ 920 if ((reason & AAC_DB_PRINTF) && 921 (sc->aac_common->ac_printf[0] == 0)) 922 sc->aac_common->ac_printf[0] = 32; 923 924 /* 925 * This might miss doing the actual wakeup. However, the 926 * msleep that this is waking up has a timeout, so it will 927 * wake up eventually. AIFs and printfs are low enough 928 * priority that they can handle hanging out for a few seconds 929 * if needed. 930 */ 931 wakeup(sc->aifthread); 932 } 933 return (FILTER_HANDLED); 934 } 935 936 /* 937 * Command Processing 938 */ 939 940 /* 941 * Start as much queued I/O as possible on the controller 942 */ 943 void 944 aac_startio(struct aac_softc *sc) 945 { 946 struct aac_command *cm; 947 int error; 948 949 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 950 951 for (;;) { 952 /* 953 * This flag might be set if the card is out of resources. 954 * Checking it here prevents an infinite loop of deferrals. 955 */ 956 if (sc->flags & AAC_QUEUE_FRZN) 957 break; 958 959 /* 960 * Try to get a command that's been put off for lack of 961 * resources 962 */ 963 cm = aac_dequeue_ready(sc); 964 965 /* 966 * Try to build a command off the bio queue (ignore error 967 * return) 968 */ 969 if (cm == NULL) 970 aac_bio_command(sc, &cm); 971 972 /* nothing to do? */ 973 if (cm == NULL) 974 break; 975 976 /* don't map more than once */ 977 if (cm->cm_flags & AAC_CMD_MAPPED) 978 panic("aac: command %p already mapped", cm); 979 980 /* 981 * Set up the command to go to the controller. If there are no 982 * data buffers associated with the command then it can bypass 983 * busdma. 
984 */ 985 if (cm->cm_datalen != 0) { 986 error = bus_dmamap_load(sc->aac_buffer_dmat, 987 cm->cm_datamap, cm->cm_data, 988 cm->cm_datalen, 989 aac_map_command_sg, cm, 0); 990 if (error == EINPROGRESS) { 991 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n"); 992 sc->flags |= AAC_QUEUE_FRZN; 993 error = 0; 994 } else if (error != 0) 995 panic("aac_startio: unexpected error %d from " 996 "busdma", error); 997 } else 998 aac_map_command_sg(cm, NULL, 0, 0); 999 } 1000 } 1001 1002 /* 1003 * Handle notification of one or more FIBs coming from the controller. 1004 */ 1005 static void 1006 aac_command_thread(struct aac_softc *sc) 1007 { 1008 struct aac_fib *fib; 1009 u_int32_t fib_size; 1010 int size, retval; 1011 1012 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1013 1014 mtx_lock(&sc->aac_io_lock); 1015 sc->aifflags = AAC_AIFFLAGS_RUNNING; 1016 1017 while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) { 1018 1019 retval = 0; 1020 if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0) 1021 retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO, 1022 "aifthd", AAC_PERIODIC_INTERVAL * hz); 1023 1024 /* 1025 * First see if any FIBs need to be allocated. This needs 1026 * to be called without the driver lock because contigmalloc 1027 * will grab Giant, and would result in an LOR. 1028 */ 1029 if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) { 1030 mtx_unlock(&sc->aac_io_lock); 1031 aac_alloc_commands(sc); 1032 mtx_lock(&sc->aac_io_lock); 1033 sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS; 1034 aac_startio(sc); 1035 } 1036 1037 /* 1038 * While we're here, check to see if any commands are stuck. 1039 * This is pretty low-priority, so it's ok if it doesn't 1040 * always fire. 1041 */ 1042 if (retval == EWOULDBLOCK) 1043 aac_timeout(sc); 1044 1045 /* Check the hardware printf message buffer */ 1046 if (sc->aac_common->ac_printf[0] != 0) 1047 aac_print_printf(sc); 1048 1049 /* Also check to see if the adapter has a command for us. */ 1050 if (sc->flags & AAC_FLAGS_NEW_COMM) 1051 continue; 1052 for (;;) { 1053 if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE, 1054 &fib_size, &fib)) 1055 break; 1056 1057 AAC_PRINT_FIB(sc, fib); 1058 1059 switch (fib->Header.Command) { 1060 case AifRequest: 1061 aac_handle_aif(sc, fib); 1062 break; 1063 default: 1064 device_printf(sc->aac_dev, "unknown command " 1065 "from controller\n"); 1066 break; 1067 } 1068 1069 if ((fib->Header.XferState == 0) || 1070 (fib->Header.StructType != AAC_FIBTYPE_TFIB)) { 1071 break; 1072 } 1073 1074 /* Return the AIF to the controller. */ 1075 if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) { 1076 fib->Header.XferState |= AAC_FIBSTATE_DONEHOST; 1077 *(AAC_FSAStatus*)fib->data = ST_OK; 1078 1079 /* XXX Compute the Size field? */ 1080 size = fib->Header.Size; 1081 if (size > sizeof(struct aac_fib)) { 1082 size = sizeof(struct aac_fib); 1083 fib->Header.Size = size; 1084 } 1085 /* 1086 * Since we did not generate this command, it 1087 * cannot go through the normal 1088 * enqueue->startio chain. 1089 */ 1090 aac_enqueue_response(sc, 1091 AAC_ADAP_NORM_RESP_QUEUE, 1092 fib); 1093 } 1094 } 1095 } 1096 sc->aifflags &= ~AAC_AIFFLAGS_RUNNING; 1097 mtx_unlock(&sc->aac_io_lock); 1098 wakeup(sc->aac_dev); 1099 1100 kproc_exit(0); 1101 } 1102 1103 /* 1104 * Process completed commands. 
1105 */ 1106 static void 1107 aac_complete(void *context, int pending) 1108 { 1109 struct aac_softc *sc; 1110 struct aac_command *cm; 1111 struct aac_fib *fib; 1112 u_int32_t fib_size; 1113 1114 sc = (struct aac_softc *)context; 1115 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1116 1117 mtx_lock(&sc->aac_io_lock); 1118 1119 /* pull completed commands off the queue */ 1120 for (;;) { 1121 /* look for completed FIBs on our queue */ 1122 if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size, 1123 &fib)) 1124 break; /* nothing to do */ 1125 1126 /* get the command, unmap and hand off for processing */ 1127 cm = sc->aac_commands + fib->Header.SenderData; 1128 if (cm == NULL) { 1129 AAC_PRINT_FIB(sc, fib); 1130 break; 1131 } 1132 aac_remove_busy(cm); 1133 1134 aac_unmap_command(cm); 1135 cm->cm_flags |= AAC_CMD_COMPLETED; 1136 1137 /* is there a completion handler? */ 1138 if (cm->cm_complete != NULL) { 1139 cm->cm_complete(cm); 1140 } else { 1141 /* assume that someone is sleeping on this command */ 1142 wakeup(cm); 1143 } 1144 } 1145 1146 /* see if we can start some more I/O */ 1147 sc->flags &= ~AAC_QUEUE_FRZN; 1148 aac_startio(sc); 1149 1150 mtx_unlock(&sc->aac_io_lock); 1151 } 1152 1153 /* 1154 * Handle a bio submitted from a disk device. 1155 */ 1156 void 1157 aac_submit_bio(struct bio *bp) 1158 { 1159 struct aac_disk *ad; 1160 struct aac_softc *sc; 1161 1162 ad = (struct aac_disk *)bp->bio_disk->d_drv1; 1163 sc = ad->ad_controller; 1164 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1165 1166 /* queue the BIO and try to get some work done */ 1167 aac_enqueue_bio(sc, bp); 1168 aac_startio(sc); 1169 } 1170 1171 /* 1172 * Get a bio and build a command to go with it. 1173 */ 1174 static int 1175 aac_bio_command(struct aac_softc *sc, struct aac_command **cmp) 1176 { 1177 struct aac_command *cm; 1178 struct aac_fib *fib; 1179 struct aac_disk *ad; 1180 struct bio *bp; 1181 1182 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1183 1184 /* get the resources we will need */ 1185 cm = NULL; 1186 bp = NULL; 1187 if (aac_alloc_command(sc, &cm)) /* get a command */ 1188 goto fail; 1189 if ((bp = aac_dequeue_bio(sc)) == NULL) 1190 goto fail; 1191 1192 /* fill out the command */ 1193 cm->cm_data = (void *)bp->bio_data; 1194 cm->cm_datalen = bp->bio_bcount; 1195 cm->cm_complete = aac_bio_complete; 1196 cm->cm_private = bp; 1197 cm->cm_timestamp = time_uptime; 1198 1199 /* build the FIB */ 1200 fib = cm->cm_fib; 1201 fib->Header.Size = sizeof(struct aac_fib_header); 1202 fib->Header.XferState = 1203 AAC_FIBSTATE_HOSTOWNED | 1204 AAC_FIBSTATE_INITIALISED | 1205 AAC_FIBSTATE_EMPTY | 1206 AAC_FIBSTATE_FROMHOST | 1207 AAC_FIBSTATE_REXPECTED | 1208 AAC_FIBSTATE_NORM | 1209 AAC_FIBSTATE_ASYNC | 1210 AAC_FIBSTATE_FAST_RESPONSE; 1211 1212 /* build the read/write request */ 1213 ad = (struct aac_disk *)bp->bio_disk->d_drv1; 1214 1215 if (sc->flags & AAC_FLAGS_RAW_IO) { 1216 struct aac_raw_io *raw; 1217 raw = (struct aac_raw_io *)&fib->data[0]; 1218 fib->Header.Command = RawIo; 1219 raw->BlockNumber = (u_int64_t)bp->bio_pblkno; 1220 raw->ByteCount = bp->bio_bcount; 1221 raw->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1222 raw->BpTotal = 0; 1223 raw->BpComplete = 0; 1224 fib->Header.Size += sizeof(struct aac_raw_io); 1225 cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw; 1226 if (bp->bio_cmd == BIO_READ) { 1227 raw->Flags = 1; 1228 cm->cm_flags |= AAC_CMD_DATAIN; 1229 } else { 1230 raw->Flags = 0; 1231 cm->cm_flags |= AAC_CMD_DATAOUT; 1232 } 1233 } else if ((sc->flags & 
AAC_FLAGS_SG_64BIT) == 0) { 1234 fib->Header.Command = ContainerCommand; 1235 if (bp->bio_cmd == BIO_READ) { 1236 struct aac_blockread *br; 1237 br = (struct aac_blockread *)&fib->data[0]; 1238 br->Command = VM_CtBlockRead; 1239 br->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1240 br->BlockNumber = bp->bio_pblkno; 1241 br->ByteCount = bp->bio_bcount; 1242 fib->Header.Size += sizeof(struct aac_blockread); 1243 cm->cm_sgtable = &br->SgMap; 1244 cm->cm_flags |= AAC_CMD_DATAIN; 1245 } else { 1246 struct aac_blockwrite *bw; 1247 bw = (struct aac_blockwrite *)&fib->data[0]; 1248 bw->Command = VM_CtBlockWrite; 1249 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1250 bw->BlockNumber = bp->bio_pblkno; 1251 bw->ByteCount = bp->bio_bcount; 1252 bw->Stable = CUNSTABLE; 1253 fib->Header.Size += sizeof(struct aac_blockwrite); 1254 cm->cm_flags |= AAC_CMD_DATAOUT; 1255 cm->cm_sgtable = &bw->SgMap; 1256 } 1257 } else { 1258 fib->Header.Command = ContainerCommand64; 1259 if (bp->bio_cmd == BIO_READ) { 1260 struct aac_blockread64 *br; 1261 br = (struct aac_blockread64 *)&fib->data[0]; 1262 br->Command = VM_CtHostRead64; 1263 br->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1264 br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE; 1265 br->BlockNumber = bp->bio_pblkno; 1266 br->Pad = 0; 1267 br->Flags = 0; 1268 fib->Header.Size += sizeof(struct aac_blockread64); 1269 cm->cm_flags |= AAC_CMD_DATAIN; 1270 cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64; 1271 } else { 1272 struct aac_blockwrite64 *bw; 1273 bw = (struct aac_blockwrite64 *)&fib->data[0]; 1274 bw->Command = VM_CtHostWrite64; 1275 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1276 bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE; 1277 bw->BlockNumber = bp->bio_pblkno; 1278 bw->Pad = 0; 1279 bw->Flags = 0; 1280 fib->Header.Size += sizeof(struct aac_blockwrite64); 1281 cm->cm_flags |= AAC_CMD_DATAOUT; 1282 cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64; 1283 } 1284 } 1285 1286 *cmp = cm; 1287 return(0); 1288 1289 fail: 1290 if (bp != NULL) 1291 aac_enqueue_bio(sc, bp); 1292 if (cm != NULL) 1293 aac_release_command(cm); 1294 return(ENOMEM); 1295 } 1296 1297 /* 1298 * Handle a bio-instigated command that has been completed. 1299 */ 1300 static void 1301 aac_bio_complete(struct aac_command *cm) 1302 { 1303 struct aac_blockread_response *brr; 1304 struct aac_blockwrite_response *bwr; 1305 struct bio *bp; 1306 AAC_FSAStatus status; 1307 1308 /* fetch relevant status and then release the command */ 1309 bp = (struct bio *)cm->cm_private; 1310 if (bp->bio_cmd == BIO_READ) { 1311 brr = (struct aac_blockread_response *)&cm->cm_fib->data[0]; 1312 status = brr->Status; 1313 } else { 1314 bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0]; 1315 status = bwr->Status; 1316 } 1317 aac_release_command(cm); 1318 1319 /* fix up the bio based on status */ 1320 if (status == ST_OK) { 1321 bp->bio_resid = 0; 1322 } else { 1323 bp->bio_error = EIO; 1324 bp->bio_flags |= BIO_ERROR; 1325 /* pass an error string out to the disk layer */ 1326 bp->bio_driver1 = aac_describe_code(aac_command_status_table, 1327 status); 1328 } 1329 aac_biodone(bp); 1330 } 1331 1332 /* 1333 * Submit a command to the controller, return when it completes. 1334 * XXX This is very dangerous! If the card has gone out to lunch, we could 1335 * be stuck here forever. At the same time, signals are not caught 1336 * because there is a risk that a signal could wakeup the sleep before 1337 * the card has a chance to complete the command. 
Since there is no way 1338 * to cancel a command that is in progress, we can't protect against the 1339 * card completing a command late and spamming the command and data 1340 * memory. So, we are held hostage until the command completes. 1341 */ 1342 static int 1343 aac_wait_command(struct aac_command *cm) 1344 { 1345 struct aac_softc *sc; 1346 int error; 1347 1348 sc = cm->cm_sc; 1349 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1350 1351 /* Put the command on the ready queue and get things going */ 1352 aac_enqueue_ready(cm); 1353 aac_startio(sc); 1354 error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0); 1355 return(error); 1356 } 1357 1358 /* 1359 *Command Buffer Management 1360 */ 1361 1362 /* 1363 * Allocate a command. 1364 */ 1365 int 1366 aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp) 1367 { 1368 struct aac_command *cm; 1369 1370 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1371 1372 if ((cm = aac_dequeue_free(sc)) == NULL) { 1373 if (sc->total_fibs < sc->aac_max_fibs) { 1374 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS; 1375 wakeup(sc->aifthread); 1376 } 1377 return (EBUSY); 1378 } 1379 1380 *cmp = cm; 1381 return(0); 1382 } 1383 1384 /* 1385 * Release a command back to the freelist. 1386 */ 1387 void 1388 aac_release_command(struct aac_command *cm) 1389 { 1390 struct aac_event *event; 1391 struct aac_softc *sc; 1392 1393 sc = cm->cm_sc; 1394 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1395 1396 /* (re)initialize the command/FIB */ 1397 cm->cm_sgtable = NULL; 1398 cm->cm_flags = 0; 1399 cm->cm_complete = NULL; 1400 cm->cm_private = NULL; 1401 cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE; 1402 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY; 1403 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB; 1404 cm->cm_fib->Header.Flags = 0; 1405 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size; 1406 1407 /* 1408 * These are duplicated in aac_start to cover the case where an 1409 * intermediate stage may have destroyed them. They're left 1410 * initialized here for debugging purposes only. 1411 */ 1412 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; 1413 cm->cm_fib->Header.SenderData = 0; 1414 1415 aac_enqueue_free(cm); 1416 1417 /* 1418 * Dequeue all events so that there's no risk of events getting 1419 * stranded. 1420 */ 1421 while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) { 1422 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links); 1423 event->ev_callback(sc, event, event->ev_arg); 1424 } 1425 } 1426 1427 /* 1428 * Map helper for command/FIB allocation. 1429 */ 1430 static void 1431 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1432 { 1433 uint64_t *fibphys; 1434 1435 fibphys = (uint64_t *)arg; 1436 1437 *fibphys = segs[0].ds_addr; 1438 } 1439 1440 /* 1441 * Allocate and initialize commands/FIBs for this adapter. 
1442 */ 1443 static int 1444 aac_alloc_commands(struct aac_softc *sc) 1445 { 1446 struct aac_command *cm; 1447 struct aac_fibmap *fm; 1448 uint64_t fibphys; 1449 int i, error; 1450 1451 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1452 1453 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs) 1454 return (ENOMEM); 1455 1456 fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO); 1457 if (fm == NULL) 1458 return (ENOMEM); 1459 1460 /* allocate the FIBs in DMAable memory and load them */ 1461 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs, 1462 BUS_DMA_NOWAIT, &fm->aac_fibmap)) { 1463 device_printf(sc->aac_dev, 1464 "Not enough contiguous memory available.\n"); 1465 free(fm, M_AACBUF); 1466 return (ENOMEM); 1467 } 1468 1469 /* Ignore errors since this doesn't bounce */ 1470 (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs, 1471 sc->aac_max_fibs_alloc * sc->aac_max_fib_size, 1472 aac_map_command_helper, &fibphys, 0); 1473 1474 /* initialize constant fields in the command structure */ 1475 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size); 1476 for (i = 0; i < sc->aac_max_fibs_alloc; i++) { 1477 cm = sc->aac_commands + sc->total_fibs; 1478 fm->aac_commands = cm; 1479 cm->cm_sc = sc; 1480 cm->cm_fib = (struct aac_fib *) 1481 ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size); 1482 cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size; 1483 cm->cm_index = sc->total_fibs; 1484 1485 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0, 1486 &cm->cm_datamap)) != 0) 1487 break; 1488 mtx_lock(&sc->aac_io_lock); 1489 aac_release_command(cm); 1490 sc->total_fibs++; 1491 mtx_unlock(&sc->aac_io_lock); 1492 } 1493 1494 if (i > 0) { 1495 mtx_lock(&sc->aac_io_lock); 1496 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link); 1497 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs); 1498 mtx_unlock(&sc->aac_io_lock); 1499 return (0); 1500 } 1501 1502 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); 1503 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); 1504 free(fm, M_AACBUF); 1505 return (ENOMEM); 1506 } 1507 1508 /* 1509 * Free FIBs owned by this adapter. 1510 */ 1511 static void 1512 aac_free_commands(struct aac_softc *sc) 1513 { 1514 struct aac_fibmap *fm; 1515 struct aac_command *cm; 1516 int i; 1517 1518 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1519 1520 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) { 1521 1522 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link); 1523 /* 1524 * We check against total_fibs to handle partially 1525 * allocated blocks. 1526 */ 1527 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) { 1528 cm = fm->aac_commands + i; 1529 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap); 1530 } 1531 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); 1532 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); 1533 free(fm, M_AACBUF); 1534 } 1535 } 1536 1537 /* 1538 * Command-mapping helper function - populate this command's s/g table. 
1539 */ 1540 static void 1541 aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1542 { 1543 struct aac_softc *sc; 1544 struct aac_command *cm; 1545 struct aac_fib *fib; 1546 int i; 1547 1548 cm = (struct aac_command *)arg; 1549 sc = cm->cm_sc; 1550 fib = cm->cm_fib; 1551 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1552 1553 /* copy into the FIB */ 1554 if (cm->cm_sgtable != NULL) { 1555 if (fib->Header.Command == RawIo) { 1556 struct aac_sg_tableraw *sg; 1557 sg = (struct aac_sg_tableraw *)cm->cm_sgtable; 1558 sg->SgCount = nseg; 1559 for (i = 0; i < nseg; i++) { 1560 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr; 1561 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len; 1562 sg->SgEntryRaw[i].Next = 0; 1563 sg->SgEntryRaw[i].Prev = 0; 1564 sg->SgEntryRaw[i].Flags = 0; 1565 } 1566 /* update the FIB size for the s/g count */ 1567 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw); 1568 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 1569 struct aac_sg_table *sg; 1570 sg = cm->cm_sgtable; 1571 sg->SgCount = nseg; 1572 for (i = 0; i < nseg; i++) { 1573 sg->SgEntry[i].SgAddress = segs[i].ds_addr; 1574 sg->SgEntry[i].SgByteCount = segs[i].ds_len; 1575 } 1576 /* update the FIB size for the s/g count */ 1577 fib->Header.Size += nseg*sizeof(struct aac_sg_entry); 1578 } else { 1579 struct aac_sg_table64 *sg; 1580 sg = (struct aac_sg_table64 *)cm->cm_sgtable; 1581 sg->SgCount = nseg; 1582 for (i = 0; i < nseg; i++) { 1583 sg->SgEntry64[i].SgAddress = segs[i].ds_addr; 1584 sg->SgEntry64[i].SgByteCount = segs[i].ds_len; 1585 } 1586 /* update the FIB size for the s/g count */ 1587 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64); 1588 } 1589 } 1590 1591 /* Fix up the address values in the FIB. Use the command array index 1592 * instead of a pointer since these fields are only 32 bits. Shift 1593 * the SenderFibAddress over to make room for the fast response bit 1594 * and for the AIF bit 1595 */ 1596 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2); 1597 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; 1598 1599 /* save a pointer to the command for speedy reverse-lookup */ 1600 cm->cm_fib->Header.SenderData = cm->cm_index; 1601 1602 if (cm->cm_flags & AAC_CMD_DATAIN) 1603 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1604 BUS_DMASYNC_PREREAD); 1605 if (cm->cm_flags & AAC_CMD_DATAOUT) 1606 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1607 BUS_DMASYNC_PREWRITE); 1608 cm->cm_flags |= AAC_CMD_MAPPED; 1609 1610 if (sc->flags & AAC_FLAGS_NEW_COMM) { 1611 int count = 10000000L; 1612 while (AAC_SEND_COMMAND(sc, cm) != 0) { 1613 if (--count == 0) { 1614 aac_unmap_command(cm); 1615 sc->flags |= AAC_QUEUE_FRZN; 1616 aac_requeue_ready(cm); 1617 } 1618 DELAY(5); /* wait 5 usec. */ 1619 } 1620 } else { 1621 /* Put the FIB on the outbound queue */ 1622 if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) { 1623 aac_unmap_command(cm); 1624 sc->flags |= AAC_QUEUE_FRZN; 1625 aac_requeue_ready(cm); 1626 } 1627 } 1628 1629 return; 1630 } 1631 1632 /* 1633 * Unmap a command from controller-visible space. 
1634 */ 1635 static void 1636 aac_unmap_command(struct aac_command *cm) 1637 { 1638 struct aac_softc *sc; 1639 1640 sc = cm->cm_sc; 1641 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1642 1643 if (!(cm->cm_flags & AAC_CMD_MAPPED)) 1644 return; 1645 1646 if (cm->cm_datalen != 0) { 1647 if (cm->cm_flags & AAC_CMD_DATAIN) 1648 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1649 BUS_DMASYNC_POSTREAD); 1650 if (cm->cm_flags & AAC_CMD_DATAOUT) 1651 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1652 BUS_DMASYNC_POSTWRITE); 1653 1654 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); 1655 } 1656 cm->cm_flags &= ~AAC_CMD_MAPPED; 1657 } 1658 1659 /* 1660 * Hardware Interface 1661 */ 1662 1663 /* 1664 * Initialize the adapter. 1665 */ 1666 static void 1667 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1668 { 1669 struct aac_softc *sc; 1670 1671 sc = (struct aac_softc *)arg; 1672 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1673 1674 sc->aac_common_busaddr = segs[0].ds_addr; 1675 } 1676 1677 static int 1678 aac_check_firmware(struct aac_softc *sc) 1679 { 1680 u_int32_t code, major, minor, options = 0, atu_size = 0; 1681 int status; 1682 time_t then; 1683 1684 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1685 /* 1686 * Wait for the adapter to come ready. 1687 */ 1688 then = time_uptime; 1689 do { 1690 code = AAC_GET_FWSTATUS(sc); 1691 if (code & AAC_SELF_TEST_FAILED) { 1692 device_printf(sc->aac_dev, "FATAL: selftest failed\n"); 1693 return(ENXIO); 1694 } 1695 if (code & AAC_KERNEL_PANIC) { 1696 device_printf(sc->aac_dev, 1697 "FATAL: controller kernel panic"); 1698 return(ENXIO); 1699 } 1700 if (time_uptime > (then + AAC_BOOT_TIMEOUT)) { 1701 device_printf(sc->aac_dev, 1702 "FATAL: controller not coming ready, " 1703 "status %x\n", code); 1704 return(ENXIO); 1705 } 1706 } while (!(code & AAC_UP_AND_RUNNING)); 1707 1708 /* 1709 * Retrieve the firmware version numbers. Dell PERC2/QC cards with 1710 * firmware version 1.x are not compatible with this driver. 1711 */ 1712 if (sc->flags & AAC_FLAGS_PERC2QC) { 1713 if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0, 1714 NULL)) { 1715 device_printf(sc->aac_dev, 1716 "Error reading firmware version\n"); 1717 return (EIO); 1718 } 1719 1720 /* These numbers are stored as ASCII! */ 1721 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30; 1722 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30; 1723 if (major == 1) { 1724 device_printf(sc->aac_dev, 1725 "Firmware version %d.%d is not supported.\n", 1726 major, minor); 1727 return (EINVAL); 1728 } 1729 } 1730 1731 /* 1732 * Retrieve the capabilities/supported options word so we know what 1733 * work-arounds to enable. Some firmware revs don't support this 1734 * command. 
1735 */ 1736 if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) { 1737 if (status != AAC_SRB_STS_INVALID_REQUEST) { 1738 device_printf(sc->aac_dev, 1739 "RequestAdapterInfo failed\n"); 1740 return (EIO); 1741 } 1742 } else { 1743 options = AAC_GET_MAILBOX(sc, 1); 1744 atu_size = AAC_GET_MAILBOX(sc, 2); 1745 sc->supported_options = options; 1746 1747 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 1748 (sc->flags & AAC_FLAGS_NO4GB) == 0) 1749 sc->flags |= AAC_FLAGS_4GB_WINDOW; 1750 if (options & AAC_SUPPORTED_NONDASD) 1751 sc->flags |= AAC_FLAGS_ENABLE_CAM; 1752 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0 1753 && (sizeof(bus_addr_t) > 4)) { 1754 device_printf(sc->aac_dev, 1755 "Enabling 64-bit address support\n"); 1756 sc->flags |= AAC_FLAGS_SG_64BIT; 1757 } 1758 if ((options & AAC_SUPPORTED_NEW_COMM) 1759 && sc->aac_if.aif_send_command) 1760 sc->flags |= AAC_FLAGS_NEW_COMM; 1761 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) 1762 sc->flags |= AAC_FLAGS_ARRAY_64BIT; 1763 } 1764 1765 /* Check for broken hardware that does a lower number of commands */ 1766 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512); 1767 1768 /* Remap mem. resource, if required */ 1769 if ((sc->flags & AAC_FLAGS_NEW_COMM) && 1770 atu_size > rman_get_size(sc->aac_regs_res1)) { 1771 bus_release_resource( 1772 sc->aac_dev, SYS_RES_MEMORY, 1773 sc->aac_regs_rid1, sc->aac_regs_res1); 1774 sc->aac_regs_res1 = bus_alloc_resource( 1775 sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid1, 1776 0ul, ~0ul, atu_size, RF_ACTIVE); 1777 if (sc->aac_regs_res1 == NULL) { 1778 sc->aac_regs_res1 = bus_alloc_resource_any( 1779 sc->aac_dev, SYS_RES_MEMORY, 1780 &sc->aac_regs_rid1, RF_ACTIVE); 1781 if (sc->aac_regs_res1 == NULL) { 1782 device_printf(sc->aac_dev, 1783 "couldn't allocate register window\n"); 1784 return (ENXIO); 1785 } 1786 sc->flags &= ~AAC_FLAGS_NEW_COMM; 1787 } 1788 sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1); 1789 sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1); 1790 1791 if (sc->aac_hwif == AAC_HWIF_NARK) { 1792 sc->aac_regs_res0 = sc->aac_regs_res1; 1793 sc->aac_regs_rid0 = sc->aac_regs_rid1; 1794 sc->aac_btag0 = sc->aac_btag1; 1795 sc->aac_bhandle0 = sc->aac_bhandle1; 1796 } 1797 } 1798 1799 /* Read preferred settings */ 1800 sc->aac_max_fib_size = sizeof(struct aac_fib); 1801 sc->aac_max_sectors = 128; /* 64KB */ 1802 if (sc->flags & AAC_FLAGS_SG_64BIT) 1803 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE 1804 - sizeof(struct aac_blockwrite64)) 1805 / sizeof(struct aac_sg_entry64); 1806 else 1807 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE 1808 - sizeof(struct aac_blockwrite)) 1809 / sizeof(struct aac_sg_entry); 1810 1811 if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) { 1812 options = AAC_GET_MAILBOX(sc, 1); 1813 sc->aac_max_fib_size = (options & 0xFFFF); 1814 sc->aac_max_sectors = (options >> 16) << 1; 1815 options = AAC_GET_MAILBOX(sc, 2); 1816 sc->aac_sg_tablesize = (options >> 16); 1817 options = AAC_GET_MAILBOX(sc, 3); 1818 sc->aac_max_fibs = (options & 0xFFFF); 1819 } 1820 if (sc->aac_max_fib_size > PAGE_SIZE) 1821 sc->aac_max_fib_size = PAGE_SIZE; 1822 sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size; 1823 1824 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { 1825 sc->flags |= AAC_FLAGS_RAW_IO; 1826 device_printf(sc->aac_dev, "Enable Raw I/O\n"); 1827 } 1828 if ((sc->flags & AAC_FLAGS_RAW_IO) && 1829 (sc->flags & AAC_FLAGS_ARRAY_64BIT)) { 1830 sc->flags |= AAC_FLAGS_LBA_64BIT; 1831 device_printf(sc->aac_dev, "Enable 64-bit array\n"); 1832 } 
1833 1834 return (0); 1835 } 1836 1837 static int 1838 aac_init(struct aac_softc *sc) 1839 { 1840 struct aac_adapter_init *ip; 1841 u_int32_t qoffset; 1842 int error; 1843 1844 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1845 1846 /* 1847 * Fill in the init structure. This tells the adapter about the 1848 * physical location of various important shared data structures. 1849 */ 1850 ip = &sc->aac_common->ac_init; 1851 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION; 1852 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { 1853 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4; 1854 sc->flags |= AAC_FLAGS_RAW_IO; 1855 } 1856 ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION; 1857 1858 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr + 1859 offsetof(struct aac_common, ac_fibs); 1860 ip->AdapterFibsVirtualAddress = 0; 1861 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib); 1862 ip->AdapterFibAlign = sizeof(struct aac_fib); 1863 1864 ip->PrintfBufferAddress = sc->aac_common_busaddr + 1865 offsetof(struct aac_common, ac_printf); 1866 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE; 1867 1868 /* 1869 * The adapter assumes that pages are 4K in size, except on some 1870 * broken firmware versions that do the page->byte conversion twice, 1871 * therefore 'assuming' that this value is in 16MB units (2^24). 1872 * Round up since the granularity is so high. 1873 */ 1874 ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE; 1875 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) { 1876 ip->HostPhysMemPages = 1877 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE; 1878 } 1879 ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */ 1880 1881 ip->InitFlags = 0; 1882 if (sc->flags & AAC_FLAGS_NEW_COMM) { 1883 ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED; 1884 device_printf(sc->aac_dev, "New comm. interface enabled\n"); 1885 } 1886 1887 ip->MaxIoCommands = sc->aac_max_fibs; 1888 ip->MaxIoSize = sc->aac_max_sectors << 9; 1889 ip->MaxFibSize = sc->aac_max_fib_size; 1890 1891 /* 1892 * Initialize FIB queues. Note that it appears that the layout of the 1893 * indexes and the segmentation of the entries may be mandated by the 1894 * adapter, which is only told about the base of the queue index fields. 1895 * 1896 * The initial values of the indices are assumed to inform the adapter 1897 * of the sizes of the respective queues, and theoretically it could 1898 * work out the entire layout of the queue structures from this. We 1899 * take the easy route and just lay this area out like everyone else 1900 * does. 1901 * 1902 * The Linux driver uses a much more complex scheme whereby several 1903 * header records are kept for each queue. We use a couple of generic 1904 * list manipulation functions which 'know' the size of each list by 1905 * virtue of a table. 
1906 */ 1907 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN; 1908 qoffset &= ~(AAC_QUEUE_ALIGN - 1); 1909 sc->aac_queues = 1910 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset); 1911 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset; 1912 1913 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1914 AAC_HOST_NORM_CMD_ENTRIES; 1915 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1916 AAC_HOST_NORM_CMD_ENTRIES; 1917 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1918 AAC_HOST_HIGH_CMD_ENTRIES; 1919 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1920 AAC_HOST_HIGH_CMD_ENTRIES; 1921 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1922 AAC_ADAP_NORM_CMD_ENTRIES; 1923 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1924 AAC_ADAP_NORM_CMD_ENTRIES; 1925 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1926 AAC_ADAP_HIGH_CMD_ENTRIES; 1927 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1928 AAC_ADAP_HIGH_CMD_ENTRIES; 1929 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1930 AAC_HOST_NORM_RESP_ENTRIES; 1931 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1932 AAC_HOST_NORM_RESP_ENTRIES; 1933 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1934 AAC_HOST_HIGH_RESP_ENTRIES; 1935 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1936 AAC_HOST_HIGH_RESP_ENTRIES; 1937 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1938 AAC_ADAP_NORM_RESP_ENTRIES; 1939 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1940 AAC_ADAP_NORM_RESP_ENTRIES; 1941 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1942 AAC_ADAP_HIGH_RESP_ENTRIES; 1943 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1944 AAC_ADAP_HIGH_RESP_ENTRIES; 1945 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = 1946 &sc->aac_queues->qt_HostNormCmdQueue[0]; 1947 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = 1948 &sc->aac_queues->qt_HostHighCmdQueue[0]; 1949 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = 1950 &sc->aac_queues->qt_AdapNormCmdQueue[0]; 1951 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = 1952 &sc->aac_queues->qt_AdapHighCmdQueue[0]; 1953 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = 1954 &sc->aac_queues->qt_HostNormRespQueue[0]; 1955 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = 1956 &sc->aac_queues->qt_HostHighRespQueue[0]; 1957 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = 1958 &sc->aac_queues->qt_AdapNormRespQueue[0]; 1959 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = 1960 &sc->aac_queues->qt_AdapHighRespQueue[0]; 1961 1962 /* 1963 * Do controller-type-specific initialisation 1964 */ 1965 switch (sc->aac_hwif) { 1966 case AAC_HWIF_I960RX: 1967 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0); 1968 break; 1969 case AAC_HWIF_RKT: 1970 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0); 1971 break; 1972 default: 1973 break; 1974 } 1975 1976 /* 1977 * Give the init structure to the controller. 
1978 */ 1979 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, 1980 sc->aac_common_busaddr + 1981 offsetof(struct aac_common, ac_init), 0, 0, 0, 1982 NULL)) { 1983 device_printf(sc->aac_dev, 1984 "error establishing init structure\n"); 1985 error = EIO; 1986 goto out; 1987 } 1988 1989 error = 0; 1990 out: 1991 return(error); 1992 } 1993 1994 static int 1995 aac_setup_intr(struct aac_softc *sc) 1996 { 1997 sc->aac_irq_rid = 0; 1998 if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, 1999 &sc->aac_irq_rid, 2000 RF_SHAREABLE | 2001 RF_ACTIVE)) == NULL) { 2002 device_printf(sc->aac_dev, "can't allocate interrupt\n"); 2003 return (EINVAL); 2004 } 2005 if (sc->flags & AAC_FLAGS_NEW_COMM) { 2006 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 2007 INTR_MPSAFE|INTR_TYPE_BIO, NULL, 2008 aac_new_intr, sc, &sc->aac_intr)) { 2009 device_printf(sc->aac_dev, "can't set up interrupt\n"); 2010 return (EINVAL); 2011 } 2012 } else { 2013 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 2014 INTR_TYPE_BIO, aac_filter, NULL, 2015 sc, &sc->aac_intr)) { 2016 device_printf(sc->aac_dev, 2017 "can't set up interrupt filter\n"); 2018 return (EINVAL); 2019 } 2020 } 2021 return (0); 2022 } 2023 2024 /* 2025 * Send a synchronous command to the controller and wait for a result. 2026 * Indicate if the controller completed the command with an error status. 2027 */ 2028 static int 2029 aac_sync_command(struct aac_softc *sc, u_int32_t command, 2030 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, 2031 u_int32_t *sp) 2032 { 2033 time_t then; 2034 u_int32_t status; 2035 2036 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2037 2038 /* populate the mailbox */ 2039 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); 2040 2041 /* ensure the sync command doorbell flag is cleared */ 2042 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2043 2044 /* then set it to signal the adapter */ 2045 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); 2046 2047 /* spin waiting for the command to complete */ 2048 then = time_uptime; 2049 do { 2050 if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) { 2051 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); 2052 return(EIO); 2053 } 2054 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); 2055 2056 /* clear the completion flag */ 2057 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2058 2059 /* get the command status */ 2060 status = AAC_GET_MAILBOX(sc, 0); 2061 if (sp != NULL) 2062 *sp = status; 2063 2064 if (status != AAC_SRB_STS_SUCCESS) 2065 return (-1); 2066 return(0); 2067 } 2068 2069 int 2070 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, 2071 struct aac_fib *fib, u_int16_t datasize) 2072 { 2073 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2074 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2075 2076 if (datasize > AAC_FIB_DATASIZE) 2077 return(EINVAL); 2078 2079 /* 2080 * Set up the sync FIB 2081 */ 2082 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | 2083 AAC_FIBSTATE_INITIALISED | 2084 AAC_FIBSTATE_EMPTY; 2085 fib->Header.XferState |= xferstate; 2086 fib->Header.Command = command; 2087 fib->Header.StructType = AAC_FIBTYPE_TFIB; 2088 fib->Header.Size = sizeof(struct aac_fib_header) + datasize; 2089 fib->Header.SenderSize = sizeof(struct aac_fib); 2090 fib->Header.SenderFibAddress = 0; /* Not needed */ 2091 fib->Header.ReceiverFibAddress = sc->aac_common_busaddr + 2092 offsetof(struct aac_common, 2093 ac_sync_fib); 2094 2095 /* 2096 * Give the FIB to the controller, wait for a response. 
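 *
 * (Note that ReceiverFibAddress was set above to the bus address of the
 * single ac_sync_fib buffer embedded in aac_common, so synchronous FIBs
 * are effectively serialized through one shared buffer; that is why the
 * caller must hold aac_io_lock, as asserted at the top of this function.)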
2097 */ 2098 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, 2099 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) { 2100 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); 2101 return(EIO); 2102 } 2103 2104 return (0); 2105 } 2106 2107 /* 2108 * Adapter-space FIB queue manipulation 2109 * 2110 * Note that the queue implementation here is a little funky; neither the PI or 2111 * CI will ever be zero. This behaviour is a controller feature. 2112 */ 2113 static struct { 2114 int size; 2115 int notify; 2116 } aac_qinfo[] = { 2117 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 2118 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 2119 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 2120 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 2121 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 2122 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 2123 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 2124 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 2125 }; 2126 2127 /* 2128 * Atomically insert an entry into the nominated queue, returns 0 on success or 2129 * EBUSY if the queue is full. 2130 * 2131 * Note: it would be more efficient to defer notifying the controller in 2132 * the case where we may be inserting several entries in rapid succession, 2133 * but implementing this usefully may be difficult (it would involve a 2134 * separate queue/notify interface). 2135 */ 2136 static int 2137 aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm) 2138 { 2139 u_int32_t pi, ci; 2140 int error; 2141 u_int32_t fib_size; 2142 u_int32_t fib_addr; 2143 2144 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2145 2146 fib_size = cm->cm_fib->Header.Size; 2147 fib_addr = cm->cm_fib->Header.ReceiverFibAddress; 2148 2149 /* get the producer/consumer indices */ 2150 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2151 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2152 2153 /* wrap the queue? */ 2154 if (pi >= aac_qinfo[queue].size) 2155 pi = 0; 2156 2157 /* check for queue full */ 2158 if ((pi + 1) == ci) { 2159 error = EBUSY; 2160 goto out; 2161 } 2162 2163 /* 2164 * To avoid a race with its completion interrupt, place this command on 2165 * the busy queue prior to advertising it to the controller. 2166 */ 2167 aac_enqueue_busy(cm); 2168 2169 /* populate queue entry */ 2170 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2171 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2172 2173 /* update producer index */ 2174 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2175 2176 /* notify the adapter if we know how */ 2177 if (aac_qinfo[queue].notify != 0) 2178 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2179 2180 error = 0; 2181 2182 out: 2183 return(error); 2184 } 2185 2186 /* 2187 * Atomically remove one entry from the nominated queue, returns 0 on 2188 * success or ENOENT if the queue is empty. 
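 *
 * (Illustrative aside: with the sizes from aac_qinfo[] above, the ring
 * tests used by the enqueue/dequeue routines reduce to roughly
 *
 *	empty:	ci == pi
 *	full:	(pi + 1) == ci
 *
 * where an index is wrapped back to 0 once it reaches aac_qinfo[queue].size
 * and is always stored back as "old value + 1", which is why neither index
 * is ever read back as zero.)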
2189 */ 2190 static int 2191 aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, 2192 struct aac_fib **fib_addr) 2193 { 2194 u_int32_t pi, ci; 2195 u_int32_t fib_index; 2196 int error; 2197 int notify; 2198 2199 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2200 2201 /* get the producer/consumer indices */ 2202 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2203 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2204 2205 /* check for queue empty */ 2206 if (ci == pi) { 2207 error = ENOENT; 2208 goto out; 2209 } 2210 2211 /* wrap the pi so the following test works */ 2212 if (pi >= aac_qinfo[queue].size) 2213 pi = 0; 2214 2215 notify = 0; 2216 if (ci == pi + 1) 2217 notify++; 2218 2219 /* wrap the queue? */ 2220 if (ci >= aac_qinfo[queue].size) 2221 ci = 0; 2222 2223 /* fetch the entry */ 2224 *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size; 2225 2226 switch (queue) { 2227 case AAC_HOST_NORM_CMD_QUEUE: 2228 case AAC_HOST_HIGH_CMD_QUEUE: 2229 /* 2230 * The aq_fib_addr is only 32 bits wide so it can't be counted 2231 * on to hold an address. For AIF's, the adapter assumes 2232 * that it's giving us an address into the array of AIF fibs. 2233 * Therefore, we have to convert it to an index. 2234 */ 2235 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr / 2236 sizeof(struct aac_fib); 2237 *fib_addr = &sc->aac_common->ac_fibs[fib_index]; 2238 break; 2239 2240 case AAC_HOST_NORM_RESP_QUEUE: 2241 case AAC_HOST_HIGH_RESP_QUEUE: 2242 { 2243 struct aac_command *cm; 2244 2245 /* 2246 * As above, an index is used instead of an actual address. 2247 * Gotta shift the index to account for the fast response 2248 * bit. No other correction is needed since this value was 2249 * originally provided by the driver via the SenderFibAddress 2250 * field. 2251 */ 2252 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr; 2253 cm = sc->aac_commands + (fib_index >> 2); 2254 *fib_addr = cm->cm_fib; 2255 2256 /* 2257 * Is this a fast response? If it is, update the fib fields in 2258 * local memory since the whole fib isn't DMA'd back up. 2259 */ 2260 if (fib_index & 0x01) { 2261 (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP; 2262 *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL; 2263 } 2264 break; 2265 } 2266 default: 2267 panic("Invalid queue in aac_dequeue_fib()"); 2268 break; 2269 } 2270 2271 /* update consumer index */ 2272 sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1; 2273 2274 /* if we have made the queue un-full, notify the adapter */ 2275 if (notify && (aac_qinfo[queue].notify != 0)) 2276 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2277 error = 0; 2278 2279 out: 2280 return(error); 2281 } 2282 2283 /* 2284 * Put our response to an Adapter Initialed Fib on the response queue 2285 */ 2286 static int 2287 aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib) 2288 { 2289 u_int32_t pi, ci; 2290 int error; 2291 u_int32_t fib_size; 2292 u_int32_t fib_addr; 2293 2294 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2295 2296 /* Tell the adapter where the FIB is */ 2297 fib_size = fib->Header.Size; 2298 fib_addr = fib->Header.SenderFibAddress; 2299 fib->Header.ReceiverFibAddress = fib_addr; 2300 2301 /* get the producer/consumer indices */ 2302 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2303 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2304 2305 /* wrap the queue? 
*/ 2306 if (pi >= aac_qinfo[queue].size) 2307 pi = 0; 2308 2309 /* check for queue full */ 2310 if ((pi + 1) == ci) { 2311 error = EBUSY; 2312 goto out; 2313 } 2314 2315 /* populate queue entry */ 2316 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2317 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2318 2319 /* update producer index */ 2320 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2321 2322 /* notify the adapter if we know how */ 2323 if (aac_qinfo[queue].notify != 0) 2324 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2325 2326 error = 0; 2327 2328 out: 2329 return(error); 2330 } 2331 2332 /* 2333 * Check for commands that have been outstanding for a suspiciously long time, 2334 * and complain about them. 2335 */ 2336 static void 2337 aac_timeout(struct aac_softc *sc) 2338 { 2339 struct aac_command *cm; 2340 time_t deadline; 2341 int timedout, code; 2342 2343 /* 2344 * Traverse the busy command list, bitch about late commands once 2345 * only. 2346 */ 2347 timedout = 0; 2348 deadline = time_uptime - AAC_CMD_TIMEOUT; 2349 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { 2350 if ((cm->cm_timestamp < deadline) 2351 /* && !(cm->cm_flags & AAC_CMD_TIMEDOUT) */) { 2352 cm->cm_flags |= AAC_CMD_TIMEDOUT; 2353 device_printf(sc->aac_dev, 2354 "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n", 2355 cm, cm->cm_fib->Header.Command, 2356 (int)(time_uptime-cm->cm_timestamp)); 2357 AAC_PRINT_FIB(sc, cm->cm_fib); 2358 timedout++; 2359 } 2360 } 2361 2362 if (timedout) { 2363 code = AAC_GET_FWSTATUS(sc); 2364 if (code != AAC_UP_AND_RUNNING) { 2365 device_printf(sc->aac_dev, "WARNING! Controller is no " 2366 "longer running! code= 0x%x\n", code); 2367 } 2368 } 2369 return; 2370 } 2371 2372 /* 2373 * Interface Function Vectors 2374 */ 2375 2376 /* 2377 * Read the current firmware status word. 2378 */ 2379 static int 2380 aac_sa_get_fwstatus(struct aac_softc *sc) 2381 { 2382 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2383 2384 return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS)); 2385 } 2386 2387 static int 2388 aac_rx_get_fwstatus(struct aac_softc *sc) 2389 { 2390 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2391 2392 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? 2393 AAC_RX_OMR0 : AAC_RX_FWSTATUS)); 2394 } 2395 2396 static int 2397 aac_rkt_get_fwstatus(struct aac_softc *sc) 2398 { 2399 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2400 2401 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? 
2402 AAC_RKT_OMR0 : AAC_RKT_FWSTATUS)); 2403 } 2404 2405 /* 2406 * Notify the controller of a change in a given queue 2407 */ 2408 2409 static void 2410 aac_sa_qnotify(struct aac_softc *sc, int qbit) 2411 { 2412 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2413 2414 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); 2415 } 2416 2417 static void 2418 aac_rx_qnotify(struct aac_softc *sc, int qbit) 2419 { 2420 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2421 2422 AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit); 2423 } 2424 2425 static void 2426 aac_rkt_qnotify(struct aac_softc *sc, int qbit) 2427 { 2428 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2429 2430 AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit); 2431 } 2432 2433 /* 2434 * Get the interrupt reason bits 2435 */ 2436 static int 2437 aac_sa_get_istatus(struct aac_softc *sc) 2438 { 2439 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2440 2441 return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0)); 2442 } 2443 2444 static int 2445 aac_rx_get_istatus(struct aac_softc *sc) 2446 { 2447 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2448 2449 return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR)); 2450 } 2451 2452 static int 2453 aac_rkt_get_istatus(struct aac_softc *sc) 2454 { 2455 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2456 2457 return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR)); 2458 } 2459 2460 /* 2461 * Clear some interrupt reason bits 2462 */ 2463 static void 2464 aac_sa_clear_istatus(struct aac_softc *sc, int mask) 2465 { 2466 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2467 2468 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask); 2469 } 2470 2471 static void 2472 aac_rx_clear_istatus(struct aac_softc *sc, int mask) 2473 { 2474 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2475 2476 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask); 2477 } 2478 2479 static void 2480 aac_rkt_clear_istatus(struct aac_softc *sc, int mask) 2481 { 2482 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2483 2484 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask); 2485 } 2486 2487 /* 2488 * Populate the mailbox and set the command word 2489 */ 2490 static void 2491 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, 2492 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2493 { 2494 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2495 2496 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command); 2497 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0); 2498 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1); 2499 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2); 2500 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3); 2501 } 2502 2503 static void 2504 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, 2505 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2506 { 2507 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2508 2509 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command); 2510 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0); 2511 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1); 2512 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2); 2513 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3); 2514 } 2515 2516 static void 2517 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, 2518 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2519 { 2520 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2521 2522 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command); 2523 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0); 2524 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1); 2525 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2); 2526 AAC_MEM1_SETREG4(sc, 
AAC_RKT_MAILBOX + 16, arg3); 2527 } 2528 2529 /* 2530 * Fetch the immediate command status word 2531 */ 2532 static int 2533 aac_sa_get_mailbox(struct aac_softc *sc, int mb) 2534 { 2535 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2536 2537 return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4))); 2538 } 2539 2540 static int 2541 aac_rx_get_mailbox(struct aac_softc *sc, int mb) 2542 { 2543 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2544 2545 return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4))); 2546 } 2547 2548 static int 2549 aac_rkt_get_mailbox(struct aac_softc *sc, int mb) 2550 { 2551 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2552 2553 return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4))); 2554 } 2555 2556 /* 2557 * Set/clear interrupt masks 2558 */ 2559 static void 2560 aac_sa_set_interrupts(struct aac_softc *sc, int enable) 2561 { 2562 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2563 2564 if (enable) { 2565 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS); 2566 } else { 2567 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0); 2568 } 2569 } 2570 2571 static void 2572 aac_rx_set_interrupts(struct aac_softc *sc, int enable) 2573 { 2574 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2575 2576 if (enable) { 2577 if (sc->flags & AAC_FLAGS_NEW_COMM) 2578 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM); 2579 else 2580 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS); 2581 } else { 2582 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0); 2583 } 2584 } 2585 2586 static void 2587 aac_rkt_set_interrupts(struct aac_softc *sc, int enable) 2588 { 2589 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2590 2591 if (enable) { 2592 if (sc->flags & AAC_FLAGS_NEW_COMM) 2593 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM); 2594 else 2595 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS); 2596 } else { 2597 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0); 2598 } 2599 } 2600 2601 /* 2602 * New comm. 
interface: Send command functions 2603 */ 2604 static int 2605 aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm) 2606 { 2607 u_int32_t index, device; 2608 2609 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); 2610 2611 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); 2612 if (index == 0xffffffffL) 2613 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); 2614 if (index == 0xffffffffL) 2615 return index; 2616 aac_enqueue_busy(cm); 2617 device = index; 2618 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2619 device += 4; 2620 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2621 device += 4; 2622 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); 2623 AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index); 2624 return 0; 2625 } 2626 2627 static int 2628 aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm) 2629 { 2630 u_int32_t index, device; 2631 2632 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); 2633 2634 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); 2635 if (index == 0xffffffffL) 2636 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); 2637 if (index == 0xffffffffL) 2638 return index; 2639 aac_enqueue_busy(cm); 2640 device = index; 2641 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2642 device += 4; 2643 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2644 device += 4; 2645 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); 2646 AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index); 2647 return 0; 2648 } 2649 2650 /* 2651 * New comm. interface: get, set outbound queue index 2652 */ 2653 static int 2654 aac_rx_get_outb_queue(struct aac_softc *sc) 2655 { 2656 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2657 2658 return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE)); 2659 } 2660 2661 static int 2662 aac_rkt_get_outb_queue(struct aac_softc *sc) 2663 { 2664 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2665 2666 return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE)); 2667 } 2668 2669 static void 2670 aac_rx_set_outb_queue(struct aac_softc *sc, int index) 2671 { 2672 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2673 2674 AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index); 2675 } 2676 2677 static void 2678 aac_rkt_set_outb_queue(struct aac_softc *sc, int index) 2679 { 2680 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2681 2682 AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index); 2683 } 2684 2685 /* 2686 * Debugging and Diagnostics 2687 */ 2688 2689 /* 2690 * Print some information about the controller. 
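 *
 * (The function below also doubles as a worked example of the synchronous
 * FIB calling convention; a minimal sketch of the pattern, assuming the
 * caller does not already hold aac_io_lock:
 *
 *	mtx_lock(&sc->aac_io_lock);
 *	aac_alloc_sync_fib(sc, &fib);
 *	... fill in fib->data ...
 *	error = aac_sync_fib(sc, RequestAdapterInfo, 0, fib, datasize);
 *	... on success, parse the response out of fib->data ...
 *	aac_release_sync_fib(sc);
 *	mtx_unlock(&sc->aac_io_lock);
 *
 * where "datasize" is the number of request bytes placed in fib->data.)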
2691 */ 2692 static void 2693 aac_describe_controller(struct aac_softc *sc) 2694 { 2695 struct aac_fib *fib; 2696 struct aac_adapter_info *info; 2697 char *adapter_type = "Adaptec RAID controller"; 2698 2699 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2700 2701 mtx_lock(&sc->aac_io_lock); 2702 aac_alloc_sync_fib(sc, &fib); 2703 2704 fib->data[0] = 0; 2705 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { 2706 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); 2707 aac_release_sync_fib(sc); 2708 mtx_unlock(&sc->aac_io_lock); 2709 return; 2710 } 2711 2712 /* save the kernel revision structure for later use */ 2713 info = (struct aac_adapter_info *)&fib->data[0]; 2714 sc->aac_revision = info->KernelRevision; 2715 2716 if (bootverbose) { 2717 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " 2718 "(%dMB cache, %dMB execution), %s\n", 2719 aac_describe_code(aac_cpu_variant, info->CpuVariant), 2720 info->ClockSpeed, info->TotalMem / (1024 * 1024), 2721 info->BufferMem / (1024 * 1024), 2722 info->ExecutionMem / (1024 * 1024), 2723 aac_describe_code(aac_battery_platform, 2724 info->batteryPlatform)); 2725 2726 device_printf(sc->aac_dev, 2727 "Kernel %d.%d-%d, Build %d, S/N %6X\n", 2728 info->KernelRevision.external.comp.major, 2729 info->KernelRevision.external.comp.minor, 2730 info->KernelRevision.external.comp.dash, 2731 info->KernelRevision.buildNumber, 2732 (u_int32_t)(info->SerialNumber & 0xffffff)); 2733 2734 device_printf(sc->aac_dev, "Supported Options=%b\n", 2735 sc->supported_options, 2736 "\20" 2737 "\1SNAPSHOT" 2738 "\2CLUSTERS" 2739 "\3WCACHE" 2740 "\4DATA64" 2741 "\5HOSTTIME" 2742 "\6RAID50" 2743 "\7WINDOW4GB" 2744 "\10SCSIUPGD" 2745 "\11SOFTERR" 2746 "\12NORECOND" 2747 "\13SGMAP64" 2748 "\14ALARM" 2749 "\15NONDASD" 2750 "\16SCSIMGT" 2751 "\17RAIDSCSI" 2752 "\21ADPTINFO" 2753 "\22NEWCOMM" 2754 "\23ARRAY64BIT" 2755 "\24HEATSENSOR"); 2756 } 2757 2758 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { 2759 fib->data[0] = 0; 2760 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) 2761 device_printf(sc->aac_dev, 2762 "RequestSupplementAdapterInfo failed\n"); 2763 else 2764 adapter_type = ((struct aac_supplement_adapter_info *) 2765 &fib->data[0])->AdapterTypeText; 2766 } 2767 device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n", 2768 adapter_type, 2769 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, 2770 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); 2771 2772 aac_release_sync_fib(sc); 2773 mtx_unlock(&sc->aac_io_lock); 2774 } 2775 2776 /* 2777 * Look up a text description of a numeric error code and return a pointer to 2778 * same. 
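 *
 * (This relies on each lookup table ending with a NULL-string sentinel
 * followed by one catch-all entry, something like the hypothetical
 *
 *	{ SOME_CODE,	"some description" },
 *	...
 *	{ 0,		NULL },
 *	{ 0,		"unknown code" },
 *
 * so that table[i + 1].string after an unsuccessful scan is the fallback
 * text; the real tables live in aac_tables.h.)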
2779 */ 2780 static char * 2781 aac_describe_code(struct aac_code_lookup *table, u_int32_t code) 2782 { 2783 int i; 2784 2785 for (i = 0; table[i].string != NULL; i++) 2786 if (table[i].code == code) 2787 return(table[i].string); 2788 return(table[i + 1].string); 2789 } 2790 2791 /* 2792 * Management Interface 2793 */ 2794 2795 static int 2796 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2797 { 2798 struct aac_softc *sc; 2799 2800 sc = dev->si_drv1; 2801 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2802 sc->aac_open_cnt++; 2803 sc->aac_state |= AAC_STATE_OPEN; 2804 2805 return 0; 2806 } 2807 2808 static int 2809 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2810 { 2811 struct aac_softc *sc; 2812 2813 sc = dev->si_drv1; 2814 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2815 sc->aac_open_cnt--; 2816 /* Mark this unit as no longer open */ 2817 if (sc->aac_open_cnt == 0) 2818 sc->aac_state &= ~AAC_STATE_OPEN; 2819 2820 return 0; 2821 } 2822 2823 static int 2824 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2825 { 2826 union aac_statrequest *as; 2827 struct aac_softc *sc; 2828 int error = 0; 2829 2830 as = (union aac_statrequest *)arg; 2831 sc = dev->si_drv1; 2832 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2833 2834 switch (cmd) { 2835 case AACIO_STATS: 2836 switch (as->as_item) { 2837 case AACQ_FREE: 2838 case AACQ_BIO: 2839 case AACQ_READY: 2840 case AACQ_BUSY: 2841 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, 2842 sizeof(struct aac_qstat)); 2843 break; 2844 default: 2845 error = ENOENT; 2846 break; 2847 } 2848 break; 2849 2850 case FSACTL_SENDFIB: 2851 case FSACTL_SEND_LARGE_FIB: 2852 arg = *(caddr_t*)arg; 2853 case FSACTL_LNX_SENDFIB: 2854 case FSACTL_LNX_SEND_LARGE_FIB: 2855 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); 2856 error = aac_ioctl_sendfib(sc, arg); 2857 break; 2858 case FSACTL_SEND_RAW_SRB: 2859 arg = *(caddr_t*)arg; 2860 case FSACTL_LNX_SEND_RAW_SRB: 2861 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); 2862 error = aac_ioctl_send_raw_srb(sc, arg); 2863 break; 2864 case FSACTL_AIF_THREAD: 2865 case FSACTL_LNX_AIF_THREAD: 2866 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); 2867 error = EINVAL; 2868 break; 2869 case FSACTL_OPEN_GET_ADAPTER_FIB: 2870 arg = *(caddr_t*)arg; 2871 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: 2872 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); 2873 error = aac_open_aif(sc, arg); 2874 break; 2875 case FSACTL_GET_NEXT_ADAPTER_FIB: 2876 arg = *(caddr_t*)arg; 2877 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: 2878 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); 2879 error = aac_getnext_aif(sc, arg); 2880 break; 2881 case FSACTL_CLOSE_GET_ADAPTER_FIB: 2882 arg = *(caddr_t*)arg; 2883 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: 2884 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); 2885 error = aac_close_aif(sc, arg); 2886 break; 2887 case FSACTL_MINIPORT_REV_CHECK: 2888 arg = *(caddr_t*)arg; 2889 case FSACTL_LNX_MINIPORT_REV_CHECK: 2890 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); 2891 error = aac_rev_check(sc, arg); 2892 break; 2893 case FSACTL_QUERY_DISK: 2894 arg = *(caddr_t*)arg; 2895 case FSACTL_LNX_QUERY_DISK: 2896 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); 2897 error = aac_query_disk(sc, arg); 2898 break; 2899 case FSACTL_DELETE_DISK: 2900 case FSACTL_LNX_DELETE_DISK: 2901 /* 
2902 * We don't trust the underland to tell us when to delete a 2903 * container, rather we rely on an AIF coming from the 2904 * controller 2905 */ 2906 error = 0; 2907 break; 2908 case FSACTL_GET_PCI_INFO: 2909 arg = *(caddr_t*)arg; 2910 case FSACTL_LNX_GET_PCI_INFO: 2911 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO"); 2912 error = aac_get_pci_info(sc, arg); 2913 break; 2914 case FSACTL_GET_FEATURES: 2915 arg = *(caddr_t*)arg; 2916 case FSACTL_LNX_GET_FEATURES: 2917 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES"); 2918 error = aac_supported_features(sc, arg); 2919 break; 2920 default: 2921 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); 2922 error = EINVAL; 2923 break; 2924 } 2925 return(error); 2926 } 2927 2928 static int 2929 aac_poll(struct cdev *dev, int poll_events, struct thread *td) 2930 { 2931 struct aac_softc *sc; 2932 struct aac_fib_context *ctx; 2933 int revents; 2934 2935 sc = dev->si_drv1; 2936 revents = 0; 2937 2938 mtx_lock(&sc->aac_aifq_lock); 2939 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) { 2940 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 2941 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { 2942 revents |= poll_events & (POLLIN | POLLRDNORM); 2943 break; 2944 } 2945 } 2946 } 2947 mtx_unlock(&sc->aac_aifq_lock); 2948 2949 if (revents == 0) { 2950 if (poll_events & (POLLIN | POLLRDNORM)) 2951 selrecord(td, &sc->rcv_select); 2952 } 2953 2954 return (revents); 2955 } 2956 2957 static void 2958 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) 2959 { 2960 2961 switch (event->ev_type) { 2962 case AAC_EVENT_CMFREE: 2963 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2964 if (aac_alloc_command(sc, (struct aac_command **)arg)) { 2965 aac_add_event(sc, event); 2966 return; 2967 } 2968 free(event, M_AACBUF); 2969 wakeup(arg); 2970 break; 2971 default: 2972 break; 2973 } 2974 } 2975 2976 /* 2977 * Send a FIB supplied from userspace 2978 */ 2979 static int 2980 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) 2981 { 2982 struct aac_command *cm; 2983 int size, error; 2984 2985 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2986 2987 cm = NULL; 2988 2989 /* 2990 * Get a command 2991 */ 2992 mtx_lock(&sc->aac_io_lock); 2993 if (aac_alloc_command(sc, &cm)) { 2994 struct aac_event *event; 2995 2996 event = malloc(sizeof(struct aac_event), M_AACBUF, 2997 M_NOWAIT | M_ZERO); 2998 if (event == NULL) { 2999 error = EBUSY; 3000 mtx_unlock(&sc->aac_io_lock); 3001 goto out; 3002 } 3003 event->ev_type = AAC_EVENT_CMFREE; 3004 event->ev_callback = aac_ioctl_event; 3005 event->ev_arg = &cm; 3006 aac_add_event(sc, event); 3007 msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0); 3008 } 3009 mtx_unlock(&sc->aac_io_lock); 3010 3011 /* 3012 * Fetch the FIB header, then re-copy to get data as well. 3013 */ 3014 if ((error = copyin(ufib, cm->cm_fib, 3015 sizeof(struct aac_fib_header))) != 0) 3016 goto out; 3017 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); 3018 if (size > sc->aac_max_fib_size) { 3019 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", 3020 size, sc->aac_max_fib_size); 3021 size = sc->aac_max_fib_size; 3022 } 3023 if ((error = copyin(ufib, cm->cm_fib, size)) != 0) 3024 goto out; 3025 cm->cm_fib->Header.Size = size; 3026 cm->cm_timestamp = time_uptime; 3027 3028 /* 3029 * Pass the FIB to the controller, wait for it to complete. 
3030 	 */
3031 	mtx_lock(&sc->aac_io_lock);
3032 	error = aac_wait_command(cm);
3033 	mtx_unlock(&sc->aac_io_lock);
3034 	if (error != 0) {
3035 		device_printf(sc->aac_dev,
3036 		    "aac_wait_command return %d\n", error);
3037 		goto out;
3038 	}
3039 
3040 	/*
3041 	 * Copy the FIB and data back out to the caller.
3042 	 */
3043 	size = cm->cm_fib->Header.Size;
3044 	if (size > sc->aac_max_fib_size) {
3045 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
3046 		    size, sc->aac_max_fib_size);
3047 		size = sc->aac_max_fib_size;
3048 	}
3049 	error = copyout(cm->cm_fib, ufib, size);
3050 
3051 out:
3052 	if (cm != NULL) {
3053 		mtx_lock(&sc->aac_io_lock);
3054 		aac_release_command(cm);
3055 		mtx_unlock(&sc->aac_io_lock);
3056 	}
3057 	return(error);
3058 }
3059 
3060 /*
3061  * Send a passthrough FIB supplied from userspace
3062  */
3063 static int
3064 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
3065 {
3066 	struct aac_command *cm;
3067 	struct aac_event *event;
3068 	struct aac_fib *fib;
3069 	struct aac_srb *srbcmd, *user_srb;
3070 	struct aac_sg_entry *sge;
3071 	struct aac_sg_entry64 *sge64;
3072 	void *srb_sg_address, *ureply;
3073 	uint32_t fibsize, srb_sg_bytecount;
3074 	int error, transfer_data;
3075 
3076 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3077 
3078 	cm = NULL;
3079 	transfer_data = 0;
3080 	fibsize = 0;
3081 	user_srb = (struct aac_srb *)arg;
3082 
3083 	mtx_lock(&sc->aac_io_lock);
3084 	if (aac_alloc_command(sc, &cm)) {
3085 		event = malloc(sizeof(struct aac_event), M_AACBUF,
3086 		    M_NOWAIT | M_ZERO);
3087 		if (event == NULL) {
3088 			error = EBUSY;
3089 			mtx_unlock(&sc->aac_io_lock);
3090 			goto out;
3091 		}
3092 		event->ev_type = AAC_EVENT_CMFREE;
3093 		event->ev_callback = aac_ioctl_event;
3094 		event->ev_arg = &cm;
3095 		aac_add_event(sc, event);
3096 		msleep(&cm, &sc->aac_io_lock, 0, "aacraw", 0);	/* wait channel must match ev_arg (&cm) */
3097 	}
3098 	mtx_unlock(&sc->aac_io_lock);
3099 
3100 	cm->cm_data = NULL;
3101 	fib = cm->cm_fib;
3102 	srbcmd = (struct aac_srb *)fib->data;
3103 	error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t));
3104 	if (error != 0)
3105 		goto out;
3106 	if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) {
3107 		error = EINVAL;
3108 		goto out;
3109 	}
3110 	error = copyin(user_srb, srbcmd, fibsize);
3111 	if (error != 0)
3112 		goto out;
3113 	srbcmd->function = 0;
3114 	srbcmd->retry_limit = 0;
3115 	if (srbcmd->sg_map.SgCount > 1) {
3116 		error = EINVAL;
3117 		goto out;
3118 	}
3119 
3120 	/* Retrieve correct SG entries.
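	 *
	 * (The copied-in size is what discriminates the two layouts: with the
	 * single SG entry permitted above, a 32-bit s/g list implies
	 * fibsize == sizeof(struct aac_srb) + sizeof(struct aac_sg_entry),
	 * while the 64-bit form, accepted only on amd64 below, uses
	 * struct aac_sg_entry64 instead; anything else is rejected with
	 * EINVAL.)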
*/ 3121 if (fibsize == (sizeof(struct aac_srb) + 3122 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { 3123 sge = srbcmd->sg_map.SgEntry; 3124 sge64 = NULL; 3125 srb_sg_bytecount = sge->SgByteCount; 3126 srb_sg_address = (void *)(uintptr_t)sge->SgAddress; 3127 } 3128 #ifdef __amd64__ 3129 else if (fibsize == (sizeof(struct aac_srb) + 3130 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { 3131 sge = NULL; 3132 sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; 3133 srb_sg_bytecount = sge64->SgByteCount; 3134 srb_sg_address = (void *)sge64->SgAddress; 3135 if (sge64->SgAddress > 0xffffffffull && 3136 (sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 3137 error = EINVAL; 3138 goto out; 3139 } 3140 } 3141 #endif 3142 else { 3143 error = EINVAL; 3144 goto out; 3145 } 3146 ureply = (char *)arg + fibsize; 3147 srbcmd->data_len = srb_sg_bytecount; 3148 if (srbcmd->sg_map.SgCount == 1) 3149 transfer_data = 1; 3150 3151 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; 3152 if (transfer_data) { 3153 cm->cm_datalen = srb_sg_bytecount; 3154 cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT); 3155 if (cm->cm_data == NULL) { 3156 error = ENOMEM; 3157 goto out; 3158 } 3159 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) 3160 cm->cm_flags |= AAC_CMD_DATAIN; 3161 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { 3162 cm->cm_flags |= AAC_CMD_DATAOUT; 3163 error = copyin(srb_sg_address, cm->cm_data, 3164 cm->cm_datalen); 3165 if (error != 0) 3166 goto out; 3167 } 3168 } 3169 3170 fib->Header.Size = sizeof(struct aac_fib_header) + 3171 sizeof(struct aac_srb); 3172 fib->Header.XferState = 3173 AAC_FIBSTATE_HOSTOWNED | 3174 AAC_FIBSTATE_INITIALISED | 3175 AAC_FIBSTATE_EMPTY | 3176 AAC_FIBSTATE_FROMHOST | 3177 AAC_FIBSTATE_REXPECTED | 3178 AAC_FIBSTATE_NORM | 3179 AAC_FIBSTATE_ASYNC | 3180 AAC_FIBSTATE_FAST_RESPONSE; 3181 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ? 3182 ScsiPortCommandU64 : ScsiPortCommand; 3183 3184 mtx_lock(&sc->aac_io_lock); 3185 aac_wait_command(cm); 3186 mtx_unlock(&sc->aac_io_lock); 3187 3188 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) { 3189 error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen); 3190 if (error != 0) 3191 goto out; 3192 } 3193 error = copyout(fib->data, ureply, sizeof(struct aac_srb_response)); 3194 out: 3195 if (cm != NULL) { 3196 if (cm->cm_data != NULL) 3197 free(cm->cm_data, M_AACBUF); 3198 mtx_lock(&sc->aac_io_lock); 3199 aac_release_command(cm); 3200 mtx_unlock(&sc->aac_io_lock); 3201 } 3202 return(error); 3203 } 3204 3205 /* 3206 * Handle an AIF sent to us by the controller; queue it for later reference. 3207 * If the queue fills up, then drop the older entries. 3208 */ 3209 static void 3210 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) 3211 { 3212 struct aac_aif_command *aif; 3213 struct aac_container *co, *co_next; 3214 struct aac_fib_context *ctx; 3215 struct aac_mntinforesp *mir; 3216 int next, current, found; 3217 int count = 0, added = 0, i = 0; 3218 3219 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3220 3221 aif = (struct aac_aif_command*)&fib->data[0]; 3222 aac_print_aif(sc, aif); 3223 3224 /* Is it an event that we should care about? */ 3225 switch (aif->command) { 3226 case AifCmdEventNotify: 3227 switch (aif->data.EN.type) { 3228 case AifEnAddContainer: 3229 case AifEnDeleteContainer: 3230 /* 3231 * A container was added or deleted, but the message 3232 * doesn't tell us anything else! Re-enumerate the 3233 * containers and sort things out. 
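 *
 * (Outline of the reconciliation below: walk the firmware's container
 * list one entry at a time, setting co_found on every container we
 * already know about and calling aac_add_container() for new ones;
 * afterwards any container still left with co_found == 0 was not
 * reported, so its disk child is detached and freed, and co_found is
 * cleared again on the survivors ready for the next pass.)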
3234 */ 3235 aac_alloc_sync_fib(sc, &fib); 3236 do { 3237 /* 3238 * Ask the controller for its containers one at 3239 * a time. 3240 * XXX What if the controller's list changes 3241 * midway through this enumaration? 3242 * XXX This should be done async. 3243 */ 3244 if ((mir = aac_get_container_info(sc, fib, i)) == NULL) 3245 continue; 3246 if (i == 0) 3247 count = mir->MntRespCount; 3248 /* 3249 * Check the container against our list. 3250 * co->co_found was already set to 0 in a 3251 * previous run. 3252 */ 3253 if ((mir->Status == ST_OK) && 3254 (mir->MntTable[0].VolType != CT_NONE)) { 3255 found = 0; 3256 TAILQ_FOREACH(co, 3257 &sc->aac_container_tqh, 3258 co_link) { 3259 if (co->co_mntobj.ObjectId == 3260 mir->MntTable[0].ObjectId) { 3261 co->co_found = 1; 3262 found = 1; 3263 break; 3264 } 3265 } 3266 /* 3267 * If the container matched, continue 3268 * in the list. 3269 */ 3270 if (found) { 3271 i++; 3272 continue; 3273 } 3274 3275 /* 3276 * This is a new container. Do all the 3277 * appropriate things to set it up. 3278 */ 3279 aac_add_container(sc, mir, 1); 3280 added = 1; 3281 } 3282 i++; 3283 } while ((i < count) && (i < AAC_MAX_CONTAINERS)); 3284 aac_release_sync_fib(sc); 3285 3286 /* 3287 * Go through our list of containers and see which ones 3288 * were not marked 'found'. Since the controller didn't 3289 * list them they must have been deleted. Do the 3290 * appropriate steps to destroy the device. Also reset 3291 * the co->co_found field. 3292 */ 3293 co = TAILQ_FIRST(&sc->aac_container_tqh); 3294 while (co != NULL) { 3295 if (co->co_found == 0) { 3296 mtx_unlock(&sc->aac_io_lock); 3297 mtx_lock(&Giant); 3298 device_delete_child(sc->aac_dev, 3299 co->co_disk); 3300 mtx_unlock(&Giant); 3301 mtx_lock(&sc->aac_io_lock); 3302 co_next = TAILQ_NEXT(co, co_link); 3303 mtx_lock(&sc->aac_container_lock); 3304 TAILQ_REMOVE(&sc->aac_container_tqh, co, 3305 co_link); 3306 mtx_unlock(&sc->aac_container_lock); 3307 free(co, M_AACBUF); 3308 co = co_next; 3309 } else { 3310 co->co_found = 0; 3311 co = TAILQ_NEXT(co, co_link); 3312 } 3313 } 3314 3315 /* Attach the newly created containers */ 3316 if (added) { 3317 mtx_unlock(&sc->aac_io_lock); 3318 mtx_lock(&Giant); 3319 bus_generic_attach(sc->aac_dev); 3320 mtx_unlock(&Giant); 3321 mtx_lock(&sc->aac_io_lock); 3322 } 3323 3324 break; 3325 3326 default: 3327 break; 3328 } 3329 3330 default: 3331 break; 3332 } 3333 3334 /* Copy the AIF data to the AIF queue for ioctl retrieval */ 3335 mtx_lock(&sc->aac_aifq_lock); 3336 current = sc->aifq_idx; 3337 next = (current + 1) % AAC_AIFQ_LENGTH; 3338 if (next == 0) 3339 sc->aifq_filled = 1; 3340 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); 3341 /* modify AIF contexts */ 3342 if (sc->aifq_filled) { 3343 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3344 if (next == ctx->ctx_idx) 3345 ctx->ctx_wrap = 1; 3346 else if (current == ctx->ctx_idx && ctx->ctx_wrap) 3347 ctx->ctx_idx = next; 3348 } 3349 } 3350 sc->aifq_idx = next; 3351 /* On the off chance that someone is sleeping for an aif... */ 3352 if (sc->aac_state & AAC_STATE_AIF_SLEEPER) 3353 wakeup(sc->aac_aifq); 3354 /* Wakeup any poll()ers */ 3355 selwakeuppri(&sc->rcv_select, PRIBIO); 3356 mtx_unlock(&sc->aac_aifq_lock); 3357 3358 return; 3359 } 3360 3361 /* 3362 * Return the Revision of the driver to userspace and check to see if the 3363 * userspace app is possibly compatible. This is extremely bogus since 3364 * our driver doesn't follow Adaptec's versioning system. Cheat by just 3365 * returning what the card reported. 
3366 */ 3367 static int 3368 aac_rev_check(struct aac_softc *sc, caddr_t udata) 3369 { 3370 struct aac_rev_check rev_check; 3371 struct aac_rev_check_resp rev_check_resp; 3372 int error = 0; 3373 3374 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3375 3376 /* 3377 * Copyin the revision struct from userspace 3378 */ 3379 if ((error = copyin(udata, (caddr_t)&rev_check, 3380 sizeof(struct aac_rev_check))) != 0) { 3381 return error; 3382 } 3383 3384 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", 3385 rev_check.callingRevision.buildNumber); 3386 3387 /* 3388 * Doctor up the response struct. 3389 */ 3390 rev_check_resp.possiblyCompatible = 1; 3391 rev_check_resp.adapterSWRevision.external.comp.major = 3392 AAC_DRIVER_MAJOR_VERSION; 3393 rev_check_resp.adapterSWRevision.external.comp.minor = 3394 AAC_DRIVER_MINOR_VERSION; 3395 rev_check_resp.adapterSWRevision.external.comp.type = 3396 AAC_DRIVER_TYPE; 3397 rev_check_resp.adapterSWRevision.external.comp.dash = 3398 AAC_DRIVER_BUGFIX_LEVEL; 3399 rev_check_resp.adapterSWRevision.buildNumber = 3400 AAC_DRIVER_BUILD; 3401 3402 return(copyout((caddr_t)&rev_check_resp, udata, 3403 sizeof(struct aac_rev_check_resp))); 3404 } 3405 3406 /* 3407 * Pass the fib context to the caller 3408 */ 3409 static int 3410 aac_open_aif(struct aac_softc *sc, caddr_t arg) 3411 { 3412 struct aac_fib_context *fibctx, *ctx; 3413 int error = 0; 3414 3415 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3416 3417 fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO); 3418 if (fibctx == NULL) 3419 return (ENOMEM); 3420 3421 mtx_lock(&sc->aac_aifq_lock); 3422 /* all elements are already 0, add to queue */ 3423 if (sc->fibctx == NULL) 3424 sc->fibctx = fibctx; 3425 else { 3426 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) 3427 ; 3428 ctx->next = fibctx; 3429 fibctx->prev = ctx; 3430 } 3431 3432 /* evaluate unique value */ 3433 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); 3434 ctx = sc->fibctx; 3435 while (ctx != fibctx) { 3436 if (ctx->unique == fibctx->unique) { 3437 fibctx->unique++; 3438 ctx = sc->fibctx; 3439 } else { 3440 ctx = ctx->next; 3441 } 3442 } 3443 mtx_unlock(&sc->aac_aifq_lock); 3444 3445 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); 3446 if (error) 3447 aac_close_aif(sc, (caddr_t)ctx); 3448 return error; 3449 } 3450 3451 /* 3452 * Close the caller's fib context 3453 */ 3454 static int 3455 aac_close_aif(struct aac_softc *sc, caddr_t arg) 3456 { 3457 struct aac_fib_context *ctx; 3458 3459 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3460 3461 mtx_lock(&sc->aac_aifq_lock); 3462 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3463 if (ctx->unique == *(uint32_t *)&arg) { 3464 if (ctx == sc->fibctx) 3465 sc->fibctx = NULL; 3466 else { 3467 ctx->prev->next = ctx->next; 3468 if (ctx->next) 3469 ctx->next->prev = ctx->prev; 3470 } 3471 break; 3472 } 3473 } 3474 mtx_unlock(&sc->aac_aifq_lock); 3475 if (ctx) 3476 free(ctx, M_AACBUF); 3477 3478 return 0; 3479 } 3480 3481 /* 3482 * Pass the caller the next AIF in their queue 3483 */ 3484 static int 3485 aac_getnext_aif(struct aac_softc *sc, caddr_t arg) 3486 { 3487 struct get_adapter_fib_ioctl agf; 3488 struct aac_fib_context *ctx; 3489 int error; 3490 3491 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3492 3493 if ((error = copyin(arg, &agf, sizeof(agf))) == 0) { 3494 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3495 if (agf.AdapterFibContext == ctx->unique) 3496 break; 3497 } 3498 if (!ctx) 3499 return (EFAULT); 3500 3501 error = 
aac_return_aif(sc, ctx, agf.AifFib); 3502 if (error == EAGAIN && agf.Wait) { 3503 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); 3504 sc->aac_state |= AAC_STATE_AIF_SLEEPER; 3505 while (error == EAGAIN) { 3506 error = tsleep(sc->aac_aifq, PRIBIO | 3507 PCATCH, "aacaif", 0); 3508 if (error == 0) 3509 error = aac_return_aif(sc, ctx, agf.AifFib); 3510 } 3511 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; 3512 } 3513 } 3514 return(error); 3515 } 3516 3517 /* 3518 * Hand the next AIF off the top of the queue out to userspace. 3519 */ 3520 static int 3521 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) 3522 { 3523 int current, error; 3524 3525 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3526 3527 mtx_lock(&sc->aac_aifq_lock); 3528 current = ctx->ctx_idx; 3529 if (current == sc->aifq_idx && !ctx->ctx_wrap) { 3530 /* empty */ 3531 mtx_unlock(&sc->aac_aifq_lock); 3532 return (EAGAIN); 3533 } 3534 error = 3535 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); 3536 if (error) 3537 device_printf(sc->aac_dev, 3538 "aac_return_aif: copyout returned %d\n", error); 3539 else { 3540 ctx->ctx_wrap = 0; 3541 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 3542 } 3543 mtx_unlock(&sc->aac_aifq_lock); 3544 return(error); 3545 } 3546 3547 static int 3548 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) 3549 { 3550 struct aac_pci_info { 3551 u_int32_t bus; 3552 u_int32_t slot; 3553 } pciinf; 3554 int error; 3555 3556 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3557 3558 pciinf.bus = pci_get_bus(sc->aac_dev); 3559 pciinf.slot = pci_get_slot(sc->aac_dev); 3560 3561 error = copyout((caddr_t)&pciinf, uptr, 3562 sizeof(struct aac_pci_info)); 3563 3564 return (error); 3565 } 3566 3567 static int 3568 aac_supported_features(struct aac_softc *sc, caddr_t uptr) 3569 { 3570 struct aac_features f; 3571 int error; 3572 3573 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3574 3575 if ((error = copyin(uptr, &f, sizeof (f))) != 0) 3576 return (error); 3577 3578 /* 3579 * When the management driver receives FSACTL_GET_FEATURES ioctl with 3580 * ALL zero in the featuresState, the driver will return the current 3581 * state of all the supported features, the data field will not be 3582 * valid. 3583 * When the management driver receives FSACTL_GET_FEATURES ioctl with 3584 * a specific bit set in the featuresState, the driver will return the 3585 * current state of this specific feature and whatever data that are 3586 * associated with the feature in the data field or perform whatever 3587 * action needed indicates in the data field. 3588 */ 3589 if (f.feat.fValue == 0) { 3590 f.feat.fBits.largeLBA = 3591 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; 3592 /* TODO: In the future, add other features state here as well */ 3593 } else { 3594 if (f.feat.fBits.largeLBA) 3595 f.feat.fBits.largeLBA = 3596 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; 3597 /* TODO: Add other features state and data in the future */ 3598 } 3599 3600 error = copyout(&f, uptr, sizeof (f)); 3601 return (error); 3602 } 3603 3604 /* 3605 * Give the userland some information about the container. The AAC arch 3606 * expects the driver to be a SCSI passthrough type driver, so it expects 3607 * the containers to have b:t:l numbers. Fake it. 
3608 */ 3609 static int 3610 aac_query_disk(struct aac_softc *sc, caddr_t uptr) 3611 { 3612 struct aac_query_disk query_disk; 3613 struct aac_container *co; 3614 struct aac_disk *disk; 3615 int error, id; 3616 3617 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3618 3619 disk = NULL; 3620 3621 error = copyin(uptr, (caddr_t)&query_disk, 3622 sizeof(struct aac_query_disk)); 3623 if (error) 3624 return (error); 3625 3626 id = query_disk.ContainerNumber; 3627 if (id == -1) 3628 return (EINVAL); 3629 3630 mtx_lock(&sc->aac_container_lock); 3631 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { 3632 if (co->co_mntobj.ObjectId == id) 3633 break; 3634 } 3635 3636 if (co == NULL) { 3637 query_disk.Valid = 0; 3638 query_disk.Locked = 0; 3639 query_disk.Deleted = 1; /* XXX is this right? */ 3640 } else { 3641 disk = device_get_softc(co->co_disk); 3642 query_disk.Valid = 1; 3643 query_disk.Locked = 3644 (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0; 3645 query_disk.Deleted = 0; 3646 query_disk.Bus = device_get_unit(sc->aac_dev); 3647 query_disk.Target = disk->unit; 3648 query_disk.Lun = 0; 3649 query_disk.UnMapped = 0; 3650 sprintf(&query_disk.diskDeviceName[0], "%s%d", 3651 disk->ad_disk->d_name, disk->ad_disk->d_unit); 3652 } 3653 mtx_unlock(&sc->aac_container_lock); 3654 3655 error = copyout((caddr_t)&query_disk, uptr, 3656 sizeof(struct aac_query_disk)); 3657 3658 return (error); 3659 } 3660 3661 static void 3662 aac_get_bus_info(struct aac_softc *sc) 3663 { 3664 struct aac_fib *fib; 3665 struct aac_ctcfg *c_cmd; 3666 struct aac_ctcfg_resp *c_resp; 3667 struct aac_vmioctl *vmi; 3668 struct aac_vmi_businf_resp *vmi_resp; 3669 struct aac_getbusinf businfo; 3670 struct aac_sim *caminf; 3671 device_t child; 3672 int i, found, error; 3673 3674 mtx_lock(&sc->aac_io_lock); 3675 aac_alloc_sync_fib(sc, &fib); 3676 c_cmd = (struct aac_ctcfg *)&fib->data[0]; 3677 bzero(c_cmd, sizeof(struct aac_ctcfg)); 3678 3679 c_cmd->Command = VM_ContainerConfig; 3680 c_cmd->cmd = CT_GET_SCSI_METHOD; 3681 c_cmd->param = 0; 3682 3683 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3684 sizeof(struct aac_ctcfg)); 3685 if (error) { 3686 device_printf(sc->aac_dev, "Error %d sending " 3687 "VM_ContainerConfig command\n", error); 3688 aac_release_sync_fib(sc); 3689 mtx_unlock(&sc->aac_io_lock); 3690 return; 3691 } 3692 3693 c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; 3694 if (c_resp->Status != ST_OK) { 3695 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", 3696 c_resp->Status); 3697 aac_release_sync_fib(sc); 3698 mtx_unlock(&sc->aac_io_lock); 3699 return; 3700 } 3701 3702 sc->scsi_method_id = c_resp->param; 3703 3704 vmi = (struct aac_vmioctl *)&fib->data[0]; 3705 bzero(vmi, sizeof(struct aac_vmioctl)); 3706 3707 vmi->Command = VM_Ioctl; 3708 vmi->ObjType = FT_DRIVE; 3709 vmi->MethId = sc->scsi_method_id; 3710 vmi->ObjId = 0; 3711 vmi->IoctlCmd = GetBusInfo; 3712 3713 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3714 sizeof(struct aac_vmi_businf_resp)); 3715 if (error) { 3716 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", 3717 error); 3718 aac_release_sync_fib(sc); 3719 mtx_unlock(&sc->aac_io_lock); 3720 return; 3721 } 3722 3723 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; 3724 if (vmi_resp->Status != ST_OK) { 3725 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", 3726 vmi_resp->Status); 3727 aac_release_sync_fib(sc); 3728 mtx_unlock(&sc->aac_io_lock); 3729 return; 3730 } 3731 3732 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); 3733 
	aac_release_sync_fib(sc);
3734 	mtx_unlock(&sc->aac_io_lock);
3735 
3736 	found = 0;
3737 	for (i = 0; i < businfo.BusCount; i++) {
3738 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3739 			continue;
3740 
3741 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3742 		    M_AACBUF, M_NOWAIT | M_ZERO);
3743 		if (caminf == NULL) {
3744 			device_printf(sc->aac_dev,
3745 			    "No memory to add passthrough bus %d\n", i);
3746 			break;
3747 		}
3748 
3749 		child = device_add_child(sc->aac_dev, "aacp", -1);
3750 		if (child == NULL) {
3751 			device_printf(sc->aac_dev,
3752 			    "device_add_child failed for passthrough bus %d\n",
3753 			    i);
3754 			free(caminf, M_AACBUF);
3755 			break;
3756 		}
3757 
3758 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3759 		caminf->BusNumber = i;
3760 		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3761 		caminf->aac_sc = sc;
3762 		caminf->sim_dev = child;
3763 
3764 		device_set_ivars(child, caminf);
3765 		device_set_desc(child, "SCSI Passthrough Bus");
3766 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3767 
3768 		found = 1;
3769 	}
3770 
3771 	if (found)
3772 		bus_generic_attach(sc->aac_dev);
3773 
3774 	return;
3775 }
3776