/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2001 Adaptec, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
 */
#define AAC_DRIVERNAME	"aac"

#include "opt_aac.h"

/* #include <stddef.h> */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/poll.h>
#include <sys/ioccom.h>

#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/aac/aacreg.h>
#include <sys/aac_ioctl.h>
#include <dev/aac/aacvar.h>
#include <dev/aac/aac_tables.h>

static void	aac_startup(void *arg);
static void	aac_add_container(struct aac_softc *sc,
		    struct aac_mntinforesp *mir, int f);
static void	aac_get_bus_info(struct aac_softc *sc);
static void	aac_daemon(void *arg);

/* Command Processing */
static void	aac_timeout(struct aac_softc *sc);
static void	aac_complete(void *context, int pending);
static int	aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
static void	aac_bio_complete(struct aac_command *cm);
static int	aac_wait_command(struct aac_command *cm);
static void	aac_command_thread(struct aac_softc *sc);

/* Command Buffer Management */
static void	aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static int	aac_alloc_commands(struct aac_softc *sc);
static void	aac_free_commands(struct aac_softc *sc);
static void	aac_unmap_command(struct aac_command *cm);

/* Hardware Interface */
static int	aac_alloc(struct aac_softc *sc);
static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
		    int error);
static int	aac_check_firmware(struct aac_softc *sc);
static int	aac_init(struct aac_softc *sc);
static int	aac_sync_command(struct aac_softc *sc, u_int32_t command,
		    u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
		    u_int32_t arg3, u_int32_t *sp);
static int	aac_setup_intr(struct aac_softc *sc);
static int	aac_enqueue_fib(struct aac_softc *sc, int queue,
		    struct aac_command *cm);
static int	aac_dequeue_fib(struct aac_softc *sc, int queue,
		    u_int32_t *fib_size, struct aac_fib **fib_addr);
static int	aac_enqueue_response(struct aac_softc *sc, int queue,
		    struct aac_fib *fib);

/* StrongARM interface */
static int	aac_sa_get_fwstatus(struct aac_softc *sc);
static void	aac_sa_qnotify(struct aac_softc *sc, int qbit);
static int	aac_sa_get_istatus(struct aac_softc *sc);
static void	aac_sa_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
		    u_int32_t arg0, u_int32_t arg1,
		    u_int32_t arg2, u_int32_t arg3);
static int	aac_sa_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_sa_set_interrupts(struct aac_softc *sc, int enable);

const struct aac_interface aac_sa_interface = {
	aac_sa_get_fwstatus,
	aac_sa_qnotify,
	aac_sa_get_istatus,
	aac_sa_clear_istatus,
	aac_sa_set_mailbox,
	aac_sa_get_mailbox,
	aac_sa_set_interrupts,
	NULL, NULL, NULL
};

/* i960Rx interface */
static int	aac_rx_get_fwstatus(struct aac_softc *sc);
static void	aac_rx_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rx_get_istatus(struct aac_softc *sc);
static void	aac_rx_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
		    u_int32_t arg0, u_int32_t arg1,
		    u_int32_t arg2, u_int32_t arg3);
static int	aac_rx_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rx_set_interrupts(struct aac_softc *sc, int enable);
static int	aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_rx_get_outb_queue(struct aac_softc *sc);
static void	aac_rx_set_outb_queue(struct aac_softc *sc, int index);

const struct aac_interface aac_rx_interface = {
	aac_rx_get_fwstatus,
	aac_rx_qnotify,
	aac_rx_get_istatus,
	aac_rx_clear_istatus,
	aac_rx_set_mailbox,
	aac_rx_get_mailbox,
	aac_rx_set_interrupts,
	aac_rx_send_command,
	aac_rx_get_outb_queue,
	aac_rx_set_outb_queue
};

/* Rocket/MIPS interface */
static int	aac_rkt_get_fwstatus(struct aac_softc *sc);
static void	aac_rkt_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rkt_get_istatus(struct aac_softc *sc);
static void	aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
		    u_int32_t arg0, u_int32_t arg1,
		    u_int32_t arg2, u_int32_t arg3);
static int	aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
static int	aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_rkt_get_outb_queue(struct aac_softc *sc);
static void	aac_rkt_set_outb_queue(struct aac_softc *sc, int index);

const struct aac_interface aac_rkt_interface = {
	aac_rkt_get_fwstatus,
	aac_rkt_qnotify,
	aac_rkt_get_istatus,
	aac_rkt_clear_istatus,
	aac_rkt_set_mailbox,
	aac_rkt_get_mailbox,
	aac_rkt_set_interrupts,
	aac_rkt_send_command,
	aac_rkt_get_outb_queue,
	aac_rkt_set_outb_queue
};

/* Debugging and Diagnostics */
static void	aac_describe_controller(struct aac_softc *sc);
static const char *aac_describe_code(const struct aac_code_lookup *table,
		    u_int32_t code);

/* Management Interface */
static d_open_t		aac_open;
static d_ioctl_t	aac_ioctl;
static d_poll_t		aac_poll;
static void		aac_cdevpriv_dtor(void *arg);
static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
static void	aac_handle_aif(struct aac_softc *sc,
		    struct aac_fib *fib);
static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_return_aif(struct aac_softc *sc,
		    struct aac_fib_context *ctx, caddr_t uptr);
static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
static void	aac_ioctl_event(struct aac_softc *sc,
		    struct aac_event *event, void *arg);
static struct aac_mntinforesp *
	aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);

static struct cdevsw aac_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	aac_open,
	.d_ioctl =	aac_ioctl,
	.d_poll =	aac_poll,
	.d_name =	"aac",
};

static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");

/* sysctl node */
SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");

/*
 * Device Interface
 */

/*
 * Initialize the controller and softc
 */
int
aac_attach(struct aac_softc *sc)
{
	int error, unit;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialize per-controller queues.
	 */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);
	aac_initq_bio(sc);

	/*
	 * Initialize command-completion task.
	 */
	TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);

	/* mark controller as suspended until we get ourselves organised */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Check that the firmware on the card is supported.
	 */
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/*
	 * Initialize locks
	 */
	mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

	/* Initialize the clock daemon callout. */
	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);

	/*
	 * Initialize the adapter.
	 */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if ((error = aac_init(sc)) != 0)
		return(error);

	/*
	 * Allocate and connect our interrupt.
	 */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/*
	 * Print a little information about the controller.
	 */
	aac_describe_controller(sc);

	/*
	 * Add sysctls.
	 */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->aac_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->aac_dev)),
	    OID_AUTO, "firmware_build", CTLFLAG_RD,
	    &sc->aac_revision.buildNumber, 0,
	    "firmware build number");

	/*
	 * Register to probe our containers later.
	 */
	sc->aac_ich.ich_func = aac_startup;
	sc->aac_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->aac_ich) != 0) {
		device_printf(sc->aac_dev,
			      "can't establish configuration hook\n");
		return(ENXIO);
	}

	/*
	 * Make the control device.
	 */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR,
				 0640, "aac%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread */
	if (kproc_create((void(*)(void *))aac_command_thread, sc,
		   &sc->aifthread, 0, 0, "aac%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
			      "shutdown event registration failed\n");

	/* Register with CAM for the non-DASD devices */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
		TAILQ_INIT(&sc->aac_sim_tqh);
		aac_get_bus_info(sc);
	}

	mtx_lock(&sc->aac_io_lock);
	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}

static void
aac_daemon(void *arg)
{
	struct timeval tv;
	struct aac_softc *sc;
	struct aac_fib *fib;

	sc = arg;
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (callout_pending(&sc->aac_daemontime) ||
	    callout_active(&sc->aac_daemontime) == 0)
		return;
	getmicrotime(&tv);
	aac_alloc_sync_fib(sc, &fib);
	*(uint32_t *)fib->data = tv.tv_sec;
	aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t));
	aac_release_sync_fib(sc);
	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
}

void
aac_add_event(struct aac_softc *sc, struct aac_event *event)
{

	switch (event->ev_type & AAC_EVENT_MASK) {
	case AAC_EVENT_CMFREE:
		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
		break;
	default:
		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
		    event->ev_type);
		break;
	}
}

/*
 * Request information of container #cid
 */
static struct aac_mntinforesp *
aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
{
	struct aac_mntinfo *mi;

	mi = (struct aac_mntinfo *)&fib->data[0];
	/* use 64-bit LBA if enabled */
	mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
	    VM_NameServe64 : VM_NameServe;
	mi->MntType = FT_FILESYS;
	mi->MntCount = cid;

	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
			 sizeof(struct aac_mntinfo))) {
		device_printf(sc->aac_dev, "Error probing container %d\n", cid);
		return (NULL);
	}

	return ((struct aac_mntinforesp *)&fib->data[0]);
}

/*
 * Probe for containers, create disks.
 */
static void
aac_startup(void *arg)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_mntinforesp *mir;
	int count = 0, i = 0;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);

	/* loop over possible containers */
	do {
		if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
			continue;
		if (i == 0)
			count = mir->MntRespCount;
		aac_add_container(sc, mir, 0);
		i++;
	} while ((i < count) && (i < AAC_MAX_CONTAINERS));

	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	/* mark the controller up */
	sc->aac_state &= ~AAC_STATE_SUSPEND;

	/* poke the bus to actually attach the child devices */
	if (bus_generic_attach(sc->aac_dev))
		device_printf(sc->aac_dev, "bus_generic_attach failed\n");

	/* disconnect ourselves from the intrhook chain */
	config_intrhook_disestablish(&sc->aac_ich);

	/* enable interrupts now */
	AAC_UNMASK_INTERRUPTS(sc);
}

/*
 * Create a device to represent a new container
 */
static void
aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
{
	struct aac_container *co;
	device_t child;

	/*
	 * Check container volume type for validity.  Note that many of
	 * the possible types may never show up.
	 */
	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
		co = (struct aac_container *)malloc(sizeof *co, M_AACBUF,
		       M_NOWAIT | M_ZERO);
		if (co == NULL)
			panic("Out of memory?!");
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x  name '%.16s'  size %u  type %d",
		      mir->MntTable[0].ObjectId,
		      mir->MntTable[0].FileSystemName,
		      mir->MntTable[0].Capacity, mir->MntTable[0].VolType);

		if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
			device_printf(sc->aac_dev, "device_add_child failed\n");
		else
			device_set_ivars(child, co);
		device_set_desc(child, aac_describe_code(aac_container_types,
				mir->MntTable[0].VolType));
		co->co_disk = child;
		co->co_found = f;
		bcopy(&mir->MntTable[0], &co->co_mntobj,
		      sizeof(struct aac_mntobj));
		mtx_lock(&sc->aac_container_lock);
		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
		mtx_unlock(&sc->aac_container_lock);
	}
}

/*
 * Allocate resources associated with (sc)
 */
static int
aac_alloc(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Create DMA tag for mapping buffers into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
			       1, 0, 			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
			       BUS_SPACE_MAXADDR :
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       sc->aac_max_sectors << 9, /* maxsize */
			       sc->aac_sg_tablesize,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       busdma_lock_mutex,	/* lockfunc */
			       &sc->aac_io_lock,	/* lockfuncarg */
			       &sc->aac_buffer_dmat)) {
		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for mapping FIBs into controller-addressable space..
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0, 			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsize */
			       1,			/* nsegments */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_fib_dmat)) {
		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for the common structure and allocate it.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       8192 + sizeof(struct aac_common), /* maxsize */
			       1,			/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_common_dmat)) {
		device_printf(sc->aac_dev,
			      "can't allocate common structure DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
		device_printf(sc->aac_dev, "can't allocate common structure\n");
		return (ENOMEM);
	}

	/*
	 * Work around a bug in the 2120 and 2200 that cannot DMA commands
	 * below address 8192 in physical memory.
	 * XXX If the padding is not needed, can it be put to use instead
	 * of ignored?
	 */
	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
			sc->aac_common, 8192 + sizeof(*sc->aac_common),
			aac_common_map, sc, 0);

	if (sc->aac_common_busaddr < 8192) {
		sc->aac_common = (struct aac_common *)
		    ((uint8_t *)sc->aac_common + 8192);
		sc->aac_common_busaddr += 8192;
	}
	bzero(sc->aac_common, sizeof(*sc->aac_common));

	/* Allocate some FIBs and associated command structs */
	TAILQ_INIT(&sc->aac_fibmap_tqh);
	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
				  M_AACBUF, M_WAITOK|M_ZERO);
	while (sc->total_fibs < sc->aac_max_fibs) {
		if (aac_alloc_commands(sc) != 0)
			break;
	}
	if (sc->total_fibs == 0)
		return (ENOMEM);

	return (0);
}

/*
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.
 */
void
aac_free(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	free(sc->aac_commands, M_AACBUF);

	/* destroy the common area */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
				sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/* disconnect the interrupt handler */
	if (sc->aac_intr)
		bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
	if (sc->aac_irq != NULL) {
		bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
		    rman_get_rid(sc->aac_irq), sc->aac_irq);
		pci_release_msi(sc->aac_dev);
	}

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->aac_regs_res0), sc->aac_regs_res0);
	if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->aac_regs_res1), sc->aac_regs_res1);
}

/*
 * Disconnect from the controller completely, in preparation for unload.
 */
int
aac_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim	*sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	callout_drain(&sc->aac_daemontime);

	mtx_lock(&sc->aac_io_lock);
	while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0);
	}
	mtx_unlock(&sc->aac_io_lock);
	KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
	    ("%s: invalid detach state", __func__));

	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
		error = device_delete_child(dev, co->co_disk);
		if (error)
			return (error);
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		free(co, M_AACBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
		error = device_delete_child(dev, sim->sim_dev);
		if (error)
			return (error);
		free(sim, M_AACBUF);
	}

	if ((error = aac_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aac_free(sc);

	mtx_destroy(&sc->aac_aifq_lock);
	mtx_destroy(&sc->aac_io_lock);
	mtx_destroy(&sc->aac_container_lock);

	return(0);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
 *
 * Note that we can assume that the bioq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
int
aac_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O completed already
	 */
	device_printf(sc->aac_dev, "shutting down controller...");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	cc = (struct aac_close_command *)&fib->data[0];

	bzero(cc, sizeof(struct aac_close_command));
	cc->Command = VM_CloseAll;
	cc->ContainerId = 0xffffffff;
	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_close_command)))
		printf("FAILED.\n");
	else
		printf("done\n");
#if 0
	else {
		fib->data[0] = 0;
		/*
		 * XXX Issuing this command to the controller makes it shut down
		 * but also keeps it from coming back up without a reset of the
		 * PCI bus.  This is not desirable if you are just unloading the
		 * driver module with the intent to reload it later.
		 */
		if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
		    fib, 1)) {
			printf("FAILED.\n");
		} else {
			printf("done.\n");
		}
	}
#endif

	AAC_MASK_INTERRUPTS(sc);
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}

/*
 * Bring the controller to a quiescent state, ready for system suspend.
 */
int
aac_suspend(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state |= AAC_STATE_SUSPEND;

	AAC_MASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Bring the controller back to a state ready for operation.
 */
int
aac_resume(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state &= ~AAC_STATE_SUSPEND;
	AAC_UNMASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Interrupt handler for NEW_COMM interface.
 */
void
aac_new_intr(void *arg)
{
	struct aac_softc *sc;
	u_int32_t index, fast;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_lock(&sc->aac_io_lock);
	while (1) {
		index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			break;
		if (index & 2) {
			if (index == 0xfffffffe) {
				/* XXX This means that the controller wants
				 * more work.  Ignore it for now.
				 */
				continue;
			}
			/* AIF */
			fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF,
				   M_NOWAIT | M_ZERO);
			if (fib == NULL) {
				/* If we're really this short on memory,
				 * hopefully breaking out of the handler will
				 * allow something to get freed.  This
				 * actually sucks a whole lot.
				 */
				break;
			}
			index &= ~2;
			for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
				((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4);
			aac_handle_aif(sc, fib);
			free(fib, M_AACBUF);

			/*
			 * AIF memory is owned by the adapter, so let it
			 * know that we are done with it.
			 */
			AAC_SET_OUTB_QUEUE(sc, index);
			AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
		} else {
			fast = index & 1;
			cm = sc->aac_commands + (index >> 2);
			fib = cm->cm_fib;
			if (fast) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
				*((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
			}
			aac_remove_busy(cm);
 			aac_unmap_command(cm);
			cm->cm_flags |= AAC_CMD_COMPLETED;

			/* is there a completion handler? */
			if (cm->cm_complete != NULL) {
				cm->cm_complete(cm);
			} else {
				/* assume that someone is sleeping on this
				 * command
				 */
				wakeup(cm);
			}
			sc->flags &= ~AAC_QUEUE_FRZN;
		}
	}
	/* see if we can start some more I/O */
	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
		aac_startio(sc);

	mtx_unlock(&sc->aac_io_lock);
}

/*
 * Interrupt filter for !NEW_COMM interface.
 */
int
aac_filter(void *arg)
{
	struct aac_softc *sc;
	u_int16_t reason;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Read the status register directly.  This is faster than taking the
	 * driver lock and reading the queues directly.  It also saves having
	 * to turn parts of the driver lock into a spin mutex, which would be
	 * ugly.
	 */
	reason = AAC_GET_ISTATUS(sc);
	AAC_CLEAR_ISTATUS(sc, reason);

	/* handle completion processing */
	if (reason & AAC_DB_RESPONSE_READY)
		taskqueue_enqueue(taskqueue_fast, &sc->aac_task_complete);

	/* controller wants to talk to us */
	if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
		/*
		 * XXX Make sure that we don't get fooled by strange messages
		 * that start with a NULL.
		 */
		if ((reason & AAC_DB_PRINTF) &&
			(sc->aac_common->ac_printf[0] == 0))
			sc->aac_common->ac_printf[0] = 32;

		/*
		 * This might miss doing the actual wakeup.  However, the
		 * msleep that this is waking up has a timeout, so it will
		 * wake up eventually.  AIFs and printfs are low enough
		 * priority that they can handle hanging out for a few seconds
		 * if needed.
		 */
		wakeup(sc->aifthread);
	}
	return (FILTER_HANDLED);
}

/*
 * Command Processing
 */

/*
 * Start as much queued I/O as possible on the controller
 */
void
aac_startio(struct aac_softc *sc)
{
	struct aac_command *cm;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	for (;;) {
		/*
		 * This flag might be set if the card is out of resources.
		 * Checking it here prevents an infinite loop of deferrals.
		 */
		if (sc->flags & AAC_QUEUE_FRZN)
			break;

		/*
		 * Try to get a command that's been put off for lack of
		 * resources
		 */
		cm = aac_dequeue_ready(sc);

		/*
		 * Try to build a command off the bio queue (ignore error
		 * return)
		 */
		if (cm == NULL)
			aac_bio_command(sc, &cm);

		/* nothing to do? */
		if (cm == NULL)
			break;

		/* don't map more than once */
		if (cm->cm_flags & AAC_CMD_MAPPED)
			panic("aac: command %p already mapped", cm);

		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		if (cm->cm_datalen != 0) {
			if (cm->cm_flags & AAC_REQ_BIO)
				error = bus_dmamap_load_bio(
				    sc->aac_buffer_dmat, cm->cm_datamap,
				    (struct bio *)cm->cm_private,
				    aac_map_command_sg, cm, 0);
			else
				error = bus_dmamap_load(sc->aac_buffer_dmat,
				    cm->cm_datamap, cm->cm_data,
				    cm->cm_datalen, aac_map_command_sg, cm, 0);
			if (error == EINPROGRESS) {
				fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
				sc->flags |= AAC_QUEUE_FRZN;
			} else if (error != 0)
				panic("aac_startio: unexpected error %d from "
				    "busdma", error);
		} else
			aac_map_command_sg(cm, NULL, 0, 0);
	}
}

/*
 * Handle notification of one or more FIBs coming from the controller.
 */
static void
aac_command_thread(struct aac_softc *sc)
{
	struct aac_fib *fib;
	u_int32_t fib_size;
	int size, retval;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	sc->aifflags = AAC_AIFFLAGS_RUNNING;

	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {

		retval = 0;
		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
					"aifthd", AAC_PERIODIC_INTERVAL * hz);

		/*
		 * First see if any FIBs need to be allocated.  This needs
		 * to be called without the driver lock because contigmalloc
		 * can sleep.
		 */
		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
			mtx_unlock(&sc->aac_io_lock);
			aac_alloc_commands(sc);
			mtx_lock(&sc->aac_io_lock);
			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
			aac_startio(sc);
		}

		/*
		 * While we're here, check to see if any commands are stuck.
		 * This is pretty low-priority, so it's ok if it doesn't
		 * always fire.
		 */
		if (retval == EWOULDBLOCK)
			aac_timeout(sc);

		/* Check the hardware printf message buffer */
		if (sc->aac_common->ac_printf[0] != 0)
			aac_print_printf(sc);

		/* Also check to see if the adapter has a command for us. */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			continue;
		for (;;) {
			if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
					   &fib_size, &fib))
				break;

			AAC_PRINT_FIB(sc, fib);

			switch (fib->Header.Command) {
			case AifRequest:
				aac_handle_aif(sc, fib);
				break;
			default:
				device_printf(sc->aac_dev, "unknown command "
					      "from controller\n");
				break;
			}

			if ((fib->Header.XferState == 0) ||
			    (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
				break;
			}

			/* Return the AIF to the controller. */
			if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
				*(AAC_FSAStatus*)fib->data = ST_OK;

				/* XXX Compute the Size field? */
				size = fib->Header.Size;
				if (size > sizeof(struct aac_fib)) {
					size = sizeof(struct aac_fib);
					fib->Header.Size = size;
				}
				/*
				 * Since we did not generate this command, it
				 * cannot go through the normal
				 * enqueue->startio chain.
				 */
				aac_enqueue_response(sc,
						 AAC_ADAP_NORM_RESP_QUEUE,
						 fib);
			}
		}
	}
	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
	mtx_unlock(&sc->aac_io_lock);
	wakeup(sc->aac_dev);

	kproc_exit(0);
}

/*
 * Process completed commands.
 */
static void
aac_complete(void *context, int pending)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	u_int32_t fib_size;

	sc = (struct aac_softc *)context;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);

	/* pull completed commands off the queue */
	for (;;) {
		/* look for completed FIBs on our queue */
		if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
							&fib))
			break;	/* nothing to do */

		/* get the command, unmap and hand off for processing */
		cm = sc->aac_commands + fib->Header.SenderData;
		if (cm == NULL) {
			AAC_PRINT_FIB(sc, fib);
			break;
		}
		if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0)
			device_printf(sc->aac_dev,
			    "COMMAND %p COMPLETED AFTER %d SECONDS\n",
			    cm, (int)(time_uptime-cm->cm_timestamp));

		aac_remove_busy(cm);

 		aac_unmap_command(cm);
		cm->cm_flags |= AAC_CMD_COMPLETED;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this command */
			wakeup(cm);
		}
	}

	/* see if we can start some more I/O */
	sc->flags &= ~AAC_QUEUE_FRZN;
	aac_startio(sc);

	mtx_unlock(&sc->aac_io_lock);
}

/*
 * Handle a bio submitted from a disk device.
 */
void
aac_submit_bio(struct bio *bp)
{
	struct aac_disk *ad;
	struct aac_softc *sc;

	ad = (struct aac_disk *)bp->bio_disk->d_drv1;
	sc = ad->ad_controller;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* queue the BIO and try to get some work done */
	aac_enqueue_bio(sc, bp);
	aac_startio(sc);
}

/*
 * Get a bio and build a command to go with it.
 */
static int
aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_disk *ad;
	struct bio *bp;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the resources we will need */
	cm = NULL;
	bp = NULL;
	if (aac_alloc_command(sc, &cm))	/* get a command */
		goto fail;
	if ((bp = aac_dequeue_bio(sc)) == NULL)
		goto fail;

	/* fill out the command */
	cm->cm_datalen = bp->bio_bcount;
	cm->cm_complete = aac_bio_complete;
	cm->cm_flags = AAC_REQ_BIO;
	cm->cm_private = bp;
	cm->cm_timestamp = time_uptime;

	/* build the FIB */
	fib = cm->cm_fib;
	fib->Header.Size = sizeof(struct aac_fib_header);
	fib->Header.XferState =
		AAC_FIBSTATE_HOSTOWNED   |
		AAC_FIBSTATE_INITIALISED |
		AAC_FIBSTATE_EMPTY	 |
		AAC_FIBSTATE_FROMHOST	 |
		AAC_FIBSTATE_REXPECTED   |
		AAC_FIBSTATE_NORM	 |
		AAC_FIBSTATE_ASYNC	 |
		AAC_FIBSTATE_FAST_RESPONSE;

	/* build the read/write request */
	ad = (struct aac_disk *)bp->bio_disk->d_drv1;

	if (sc->flags & AAC_FLAGS_RAW_IO) {
		struct aac_raw_io *raw;
		raw = (struct aac_raw_io *)&fib->data[0];
		fib->Header.Command = RawIo;
		raw->BlockNumber = (u_int64_t)bp->bio_pblkno;
		raw->ByteCount = bp->bio_bcount;
		raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
		raw->BpTotal = 0;
		raw->BpComplete = 0;
		fib->Header.Size += sizeof(struct aac_raw_io);
		cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
		if (bp->bio_cmd == BIO_READ) {
			raw->Flags = 1;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			raw->Flags = 0;
			cm->cm_flags |= AAC_CMD_DATAOUT;
		}
	} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
		fib->Header.Command = ContainerCommand;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread *br;
			br = (struct aac_blockread *)&fib->data[0];
			br->Command = VM_CtBlockRead;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->BlockNumber = bp->bio_pblkno;
			br->ByteCount = bp->bio_bcount;
			fib->Header.Size += sizeof(struct aac_blockread);
			cm->cm_sgtable = &br->SgMap;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			struct aac_blockwrite *bw;
			bw = (struct aac_blockwrite *)&fib->data[0];
			bw->Command = VM_CtBlockWrite;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->BlockNumber = bp->bio_pblkno;
			bw->ByteCount = bp->bio_bcount;
			bw->Stable = CUNSTABLE;
			fib->Header.Size += sizeof(struct aac_blockwrite);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = &bw->SgMap;
		}
	} else {
		fib->Header.Command = ContainerCommand64;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread64 *br;
			br = (struct aac_blockread64 *)&fib->data[0];
			br->Command = VM_CtHostRead64;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			br->BlockNumber = bp->bio_pblkno;
			br->Pad = 0;
			br->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockread64);
			cm->cm_flags |= AAC_CMD_DATAIN;
			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
		} else {
			struct aac_blockwrite64 *bw;
			bw = (struct aac_blockwrite64 *)&fib->data[0];
			bw->Command = VM_CtHostWrite64;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			bw->BlockNumber = bp->bio_pblkno;
			bw->Pad = 0;
			bw->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockwrite64);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
		}
	}

	*cmp = cm;
	return(0);

fail:
	if (bp != NULL)
		aac_enqueue_bio(sc, bp);
	if (cm != NULL)
		aac_release_command(cm);
	return(ENOMEM);
}

/*
 * Handle a bio-instigated command that has been completed.
 */
static void
aac_bio_complete(struct aac_command *cm)
{
	struct aac_blockread_response *brr;
	struct aac_blockwrite_response *bwr;
	struct bio *bp;
	AAC_FSAStatus status;

	/* fetch relevant status and then release the command */
	bp = (struct bio *)cm->cm_private;
	if (bp->bio_cmd == BIO_READ) {
		brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
		status = brr->Status;
	} else {
		bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
		status = bwr->Status;
	}
	aac_release_command(cm);

	/* fix up the bio based on status */
	if (status == ST_OK) {
		bp->bio_resid = 0;
	} else {
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
	}
	aac_biodone(bp);
}

/*
 * Submit a command to the controller, return when it completes.
 * XXX This is very dangerous!  If the card has gone out to lunch, we could
 *     be stuck here forever.  At the same time, signals are not caught
 *     because there is a risk that a signal could wakeup the sleep before
 *     the card has a chance to complete the command.  Since there is no way
 *     to cancel a command that is in progress, we can't protect against the
 *     card completing a command late and spamming the command and data
 *     memory.  So, we are held hostage until the command completes.
 */
static int
aac_wait_command(struct aac_command *cm)
{
	struct aac_softc *sc;
	int error;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Put the command on the ready queue and get things going */
	aac_enqueue_ready(cm);
	aac_startio(sc);
	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0);
	return(error);
}

/*
 *Command Buffer Management
 */

/*
 * Allocate a command.
 */
int
aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((cm = aac_dequeue_free(sc)) == NULL) {
		if (sc->total_fibs < sc->aac_max_fibs) {
			mtx_lock(&sc->aac_io_lock);
			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
			mtx_unlock(&sc->aac_io_lock);
			wakeup(sc->aifthread);
		}
		return (EBUSY);
	}

	*cmp = cm;
	return(0);
}

/*
 * Release a command back to the freelist.
 */
void
aac_release_command(struct aac_command *cm)
{
	struct aac_event *event;
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* (re)initialize the command/FIB */
	cm->cm_datalen = 0;
	cm->cm_sgtable = NULL;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
	cm->cm_fib->Header.Flags = 0;
	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;

	/*
	 * These are duplicated in aac_start to cover the case where an
	 * intermediate stage may have destroyed them.  They're left
	 * initialized here for debugging purposes only.
	 */
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
	cm->cm_fib->Header.SenderData = 0;

	aac_enqueue_free(cm);

	if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
		event->ev_callback(sc, event, event->ev_arg);
	}
}

/*
 * Map helper for command/FIB allocation.
 */
static void
aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint64_t	*fibphys;

	fibphys = (uint64_t *)arg;

	*fibphys = segs[0].ds_addr;
}

/*
 * Allocate and initialize commands/FIBs for this adapter.
 */
static int
aac_alloc_commands(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fibmap *fm;
	uint64_t fibphys;
	int i, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
		return (ENOMEM);

	fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO);
	if (fm == NULL)
		return (ENOMEM);

	/* allocate the FIBs in DMAable memory and load them */
	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
		device_printf(sc->aac_dev,
			      "Not enough contiguous memory available.\n");
		free(fm, M_AACBUF);
		return (ENOMEM);
	}

	/* Ignore errors since this doesn't bounce */
	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
			      sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
			      aac_map_command_helper, &fibphys, 0);

	/* initialize constant fields in the command structure */
	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
		cm = sc->aac_commands + sc->total_fibs;
		fm->aac_commands = cm;
		cm->cm_sc = sc;
		cm->cm_fib = (struct aac_fib *)
			((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
		cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
		cm->cm_index = sc->total_fibs;

		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
					       &cm->cm_datamap)) != 0)
			break;
		mtx_lock(&sc->aac_io_lock);
		aac_release_command(cm);
		sc->total_fibs++;
		mtx_unlock(&sc->aac_io_lock);
	}

	if (i > 0) {
		mtx_lock(&sc->aac_io_lock);
		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
		mtx_unlock(&sc->aac_io_lock);
		return (0);
	}

	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
	free(fm, M_AACBUF);
	return (ENOMEM);
}

/*
 * Free FIBs owned by this adapter.
 */
static void
aac_free_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_command *cm;
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {

		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
		/*
		 * We check against total_fibs to handle partially
		 * allocated blocks.
		 */
		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
			cm = fm->aac_commands + i;
			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
		}
		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
		free(fm, M_AACBUF);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	cm = (struct aac_command *)arg;
	sc = cm->cm_sc;
	fib = cm->cm_fib;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* copy into the FIB */
	if (cm->cm_sgtable != NULL) {
		if (fib->Header.Command == RawIo) {
			struct aac_sg_tableraw *sg;
			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
				sg->SgEntryRaw[i].Next = 0;
				sg->SgEntryRaw[i].Prev = 0;
				sg->SgEntryRaw[i].Flags = 0;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
			struct aac_sg_table *sg;
			sg = cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
		} else {
			struct aac_sg_table64 *sg;
			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
		}
	}

	/* Fix up the address values in the FIB.  Use the command array index
	 * instead of a pointer since these fields are only 32 bits.  Shift
	 * the SenderFibAddress over to make room for the fast response bit
	 * and for the AIF bit
	 */
	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;

	/* save a pointer to the command for speedy reverse-lookup */
	cm->cm_fib->Header.SenderData = cm->cm_index;

	if (cm->cm_flags & AAC_CMD_DATAIN)
		bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
				BUS_DMASYNC_PREREAD);
	if (cm->cm_flags & AAC_CMD_DATAOUT)
		bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
				BUS_DMASYNC_PREWRITE);
	cm->cm_flags |= AAC_CMD_MAPPED;

	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		int count = 10000000L;
		while (AAC_SEND_COMMAND(sc, cm) != 0) {
			if (--count == 0) {
				aac_unmap_command(cm);
				sc->flags |= AAC_QUEUE_FRZN;
				aac_requeue_ready(cm);
			}
			DELAY(5);			/* wait 5 usec. */
		}
	} else {
		/* Put the FIB on the outbound queue */
		if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
			aac_unmap_command(cm);
			sc->flags |= AAC_QUEUE_FRZN;
			aac_requeue_ready(cm);
		}
	}
}

/*
 * Unmap a command from controller-visible space.
 */
static void
aac_unmap_command(struct aac_command *cm)
{
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (!(cm->cm_flags & AAC_CMD_MAPPED))
		return;

	if (cm->cm_datalen != 0) {
		if (cm->cm_flags & AAC_CMD_DATAIN)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (cm->cm_flags & AAC_CMD_DATAOUT)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
	}
	cm->cm_flags &= ~AAC_CMD_MAPPED;
}

/*
 * Hardware Interface
 */

/*
 * Initialize the adapter.
 */
static void
aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_common_busaddr = segs[0].ds_addr;
}

static int
aac_check_firmware(struct aac_softc *sc)
{
	u_int32_t code, major, minor, options = 0, atu_size = 0;
	int rid, status;
	time_t then;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Wait for the adapter to come ready.
	 */
	then = time_uptime;
	do {
		code = AAC_GET_FWSTATUS(sc);
		if (code & AAC_SELF_TEST_FAILED) {
			device_printf(sc->aac_dev, "FATAL: selftest failed\n");
			return(ENXIO);
		}
		if (code & AAC_KERNEL_PANIC) {
			device_printf(sc->aac_dev,
				      "FATAL: controller kernel panic");
			return(ENXIO);
		}
		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
			device_printf(sc->aac_dev,
				      "FATAL: controller not coming ready, "
					   "status %x\n", code);
			return(ENXIO);
		}
	} while (!(code & AAC_UP_AND_RUNNING));

	/*
	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
	 * firmware version 1.x are not compatible with this driver.
	 */
	if (sc->flags & AAC_FLAGS_PERC2QC) {
		if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
				     NULL)) {
			device_printf(sc->aac_dev,
				      "Error reading firmware version\n");
			return (EIO);
		}

		/* These numbers are stored as ASCII! */
		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
		if (major == 1) {
			device_printf(sc->aac_dev,
			    "Firmware version %d.%d is not supported.\n",
			    major, minor);
			return (EINVAL);
		}
	}

	/*
	 * Retrieve the capabilities/supported options word so we know what
	 * work-arounds to enable.  Some firmware revs don't support this
	 * command.
	 */
	if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
		if (status != AAC_SRB_STS_INVALID_REQUEST) {
			device_printf(sc->aac_dev,
			     "RequestAdapterInfo failed\n");
			return (EIO);
		}
	} else {
		options = AAC_GET_MAILBOX(sc, 1);
		atu_size = AAC_GET_MAILBOX(sc, 2);
		sc->supported_options = options;

		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
			sc->flags |= AAC_FLAGS_4GB_WINDOW;
		if (options & AAC_SUPPORTED_NONDASD)
			sc->flags |= AAC_FLAGS_ENABLE_CAM;
		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
		     && (sizeof(bus_addr_t) > 4)) {
			device_printf(sc->aac_dev,
			    "Enabling 64-bit address support\n");
			sc->flags |= AAC_FLAGS_SG_64BIT;
		}
		if ((options & AAC_SUPPORTED_NEW_COMM)
		 && sc->aac_if->aif_send_command)
			sc->flags |= AAC_FLAGS_NEW_COMM;
		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
	}

	/* Check for broken hardware that does a lower number of commands */
	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);

	/* Remap mem. resource, if required */
	if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
	    atu_size > rman_get_size(sc->aac_regs_res1)) {
		rid = rman_get_rid(sc->aac_regs_res1);
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rid,
		    sc->aac_regs_res1);
		sc->aac_regs_res1 = bus_alloc_resource_anywhere(sc->aac_dev,
		    SYS_RES_MEMORY, &rid, atu_size, RF_ACTIVE);
		if (sc->aac_regs_res1 == NULL) {
			sc->aac_regs_res1 = bus_alloc_resource_any(
			    sc->aac_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
			if (sc->aac_regs_res1 == NULL) {
				device_printf(sc->aac_dev,
				    "couldn't allocate register window\n");
				return (ENXIO);
			}
			sc->flags &= ~AAC_FLAGS_NEW_COMM;
		}
		sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
		sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);

		if (sc->aac_hwif == AAC_HWIF_NARK) {
			sc->aac_regs_res0 = sc->aac_regs_res1;
			sc->aac_btag0 = sc->aac_btag1;
			sc->aac_bhandle0 = sc->aac_bhandle1;
		}
	}

	/* Read preferred settings */
	sc->aac_max_fib_size = sizeof(struct aac_fib);
	sc->aac_max_sectors = 128;				/* 64KB */
	if (sc->flags & AAC_FLAGS_SG_64BIT)
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
		 - sizeof(struct aac_blockwrite64))
		 / sizeof(struct aac_sg_entry64);
	else
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
		 - sizeof(struct aac_blockwrite))
		 / sizeof(struct aac_sg_entry);

	if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
		options = AAC_GET_MAILBOX(sc, 1);
		sc->aac_max_fib_size = (options & 0xFFFF);
		sc->aac_max_sectors = (options >> 16) << 1;
		options = AAC_GET_MAILBOX(sc, 2);
		sc->aac_sg_tablesize = (options >> 16);
		options = AAC_GET_MAILBOX(sc, 3);
		sc->aac_max_fibs = (options & 0xFFFF);
	}
	if (sc->aac_max_fib_size > PAGE_SIZE)
		sc->aac_max_fib_size = PAGE_SIZE;
	sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;

	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		sc->flags |= AAC_FLAGS_RAW_IO;
		device_printf(sc->aac_dev, "Enable Raw I/O\n");
	}
	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
		sc->flags |= AAC_FLAGS_LBA_64BIT;
		device_printf(sc->aac_dev, "Enable 64-bit array\n");
	}

	return (0);
}

static int
aac_init(struct aac_softc *sc)
{
	struct aac_adapter_init	*ip;
	u_int32_t qoffset;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Fill in the init structure.  This tells the adapter about the
	 * physical location of various important shared data structures.
	 */
	ip = &sc->aac_common->ac_init;
	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
		sc->flags |= AAC_FLAGS_RAW_IO;
	}
	ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;

	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
					 offsetof(struct aac_common, ac_fibs);
	ip->AdapterFibsVirtualAddress = 0;
	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
	ip->AdapterFibAlign = sizeof(struct aac_fib);

	ip->PrintfBufferAddress = sc->aac_common_busaddr +
				  offsetof(struct aac_common, ac_printf);
	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;

	/*
	 * The adapter assumes that pages are 4K in size, except on some
 	 * broken firmware versions that do the page->byte conversion twice,
	 * therefore 'assuming' that this value is in 16MB units (2^24).
	 * Round up since the granularity is so high.
	 */
	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
		ip->HostPhysMemPages =
		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
	}
	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */

	ip->InitFlags = 0;
	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED;
		device_printf(sc->aac_dev, "New comm. interface enabled\n");
	}

	ip->MaxIoCommands = sc->aac_max_fibs;
	ip->MaxIoSize = sc->aac_max_sectors << 9;
	ip->MaxFibSize = sc->aac_max_fib_size;

	/*
	 * Initialize FIB queues.  Note that it appears that the layout of the
	 * indexes and the segmentation of the entries may be mandated by the
	 * adapter, which is only told about the base of the queue index fields.
	 *
	 * The initial values of the indices are assumed to inform the adapter
	 * of the sizes of the respective queues, and theoretically it could
	 * work out the entire layout of the queue structures from this.  We
	 * take the easy route and just lay this area out like everyone else
	 * does.
	 *
	 * The Linux driver uses a much more complex scheme whereby several
	 * header records are kept for each queue.  We use a couple of generic
	 * list manipulation functions which 'know' the size of each list by
	 * virtue of a table.
1915 */ 1916 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN; 1917 qoffset &= ~(AAC_QUEUE_ALIGN - 1); 1918 sc->aac_queues = 1919 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset); 1920 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset; 1921 1922 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1923 AAC_HOST_NORM_CMD_ENTRIES; 1924 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1925 AAC_HOST_NORM_CMD_ENTRIES; 1926 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1927 AAC_HOST_HIGH_CMD_ENTRIES; 1928 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1929 AAC_HOST_HIGH_CMD_ENTRIES; 1930 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1931 AAC_ADAP_NORM_CMD_ENTRIES; 1932 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1933 AAC_ADAP_NORM_CMD_ENTRIES; 1934 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1935 AAC_ADAP_HIGH_CMD_ENTRIES; 1936 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1937 AAC_ADAP_HIGH_CMD_ENTRIES; 1938 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1939 AAC_HOST_NORM_RESP_ENTRIES; 1940 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1941 AAC_HOST_NORM_RESP_ENTRIES; 1942 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1943 AAC_HOST_HIGH_RESP_ENTRIES; 1944 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1945 AAC_HOST_HIGH_RESP_ENTRIES; 1946 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1947 AAC_ADAP_NORM_RESP_ENTRIES; 1948 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1949 AAC_ADAP_NORM_RESP_ENTRIES; 1950 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1951 AAC_ADAP_HIGH_RESP_ENTRIES; 1952 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1953 AAC_ADAP_HIGH_RESP_ENTRIES; 1954 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = 1955 &sc->aac_queues->qt_HostNormCmdQueue[0]; 1956 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = 1957 &sc->aac_queues->qt_HostHighCmdQueue[0]; 1958 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = 1959 &sc->aac_queues->qt_AdapNormCmdQueue[0]; 1960 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = 1961 &sc->aac_queues->qt_AdapHighCmdQueue[0]; 1962 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = 1963 &sc->aac_queues->qt_HostNormRespQueue[0]; 1964 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = 1965 &sc->aac_queues->qt_HostHighRespQueue[0]; 1966 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = 1967 &sc->aac_queues->qt_AdapNormRespQueue[0]; 1968 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = 1969 &sc->aac_queues->qt_AdapHighRespQueue[0]; 1970 1971 /* 1972 * Do controller-type-specific initialisation 1973 */ 1974 switch (sc->aac_hwif) { 1975 case AAC_HWIF_I960RX: 1976 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0); 1977 break; 1978 case AAC_HWIF_RKT: 1979 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0); 1980 break; 1981 default: 1982 break; 1983 } 1984 1985 /* 1986 * Give the init structure to the controller. 
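 *
 * The bus address of ac_init is handed over as mailbox argument 0 of
 * the AAC_MONKER_INITSTRUCT synchronous command; aac_sync_command()
 * below spins on the AAC_DB_SYNC_COMMAND doorbell until the firmware
 * acknowledges it.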
1987 */ 1988 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, 1989 sc->aac_common_busaddr + 1990 offsetof(struct aac_common, ac_init), 0, 0, 0, 1991 NULL)) { 1992 device_printf(sc->aac_dev, 1993 "error establishing init structure\n"); 1994 error = EIO; 1995 goto out; 1996 } 1997 1998 error = 0; 1999 out: 2000 return(error); 2001 } 2002 2003 static int 2004 aac_setup_intr(struct aac_softc *sc) 2005 { 2006 2007 if (sc->flags & AAC_FLAGS_NEW_COMM) { 2008 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 2009 INTR_MPSAFE|INTR_TYPE_BIO, NULL, 2010 aac_new_intr, sc, &sc->aac_intr)) { 2011 device_printf(sc->aac_dev, "can't set up interrupt\n"); 2012 return (EINVAL); 2013 } 2014 } else { 2015 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 2016 INTR_TYPE_BIO, aac_filter, NULL, 2017 sc, &sc->aac_intr)) { 2018 device_printf(sc->aac_dev, 2019 "can't set up interrupt filter\n"); 2020 return (EINVAL); 2021 } 2022 } 2023 return (0); 2024 } 2025 2026 /* 2027 * Send a synchronous command to the controller and wait for a result. 2028 * Indicate if the controller completed the command with an error status. 2029 */ 2030 static int 2031 aac_sync_command(struct aac_softc *sc, u_int32_t command, 2032 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, 2033 u_int32_t *sp) 2034 { 2035 time_t then; 2036 u_int32_t status; 2037 2038 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2039 2040 /* populate the mailbox */ 2041 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); 2042 2043 /* ensure the sync command doorbell flag is cleared */ 2044 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2045 2046 /* then set it to signal the adapter */ 2047 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); 2048 2049 /* spin waiting for the command to complete */ 2050 then = time_uptime; 2051 do { 2052 if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) { 2053 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); 2054 return(EIO); 2055 } 2056 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); 2057 2058 /* clear the completion flag */ 2059 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2060 2061 /* get the command status */ 2062 status = AAC_GET_MAILBOX(sc, 0); 2063 if (sp != NULL) 2064 *sp = status; 2065 2066 if (status != AAC_SRB_STS_SUCCESS) 2067 return (-1); 2068 return(0); 2069 } 2070 2071 int 2072 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, 2073 struct aac_fib *fib, u_int16_t datasize) 2074 { 2075 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2076 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2077 2078 if (datasize > AAC_FIB_DATASIZE) 2079 return(EINVAL); 2080 2081 /* 2082 * Set up the sync FIB 2083 */ 2084 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | 2085 AAC_FIBSTATE_INITIALISED | 2086 AAC_FIBSTATE_EMPTY; 2087 fib->Header.XferState |= xferstate; 2088 fib->Header.Command = command; 2089 fib->Header.StructType = AAC_FIBTYPE_TFIB; 2090 fib->Header.Size = sizeof(struct aac_fib_header) + datasize; 2091 fib->Header.SenderSize = sizeof(struct aac_fib); 2092 fib->Header.SenderFibAddress = 0; /* Not needed */ 2093 fib->Header.ReceiverFibAddress = sc->aac_common_busaddr + 2094 offsetof(struct aac_common, 2095 ac_sync_fib); 2096 2097 /* 2098 * Give the FIB to the controller, wait for a response. 
2099 */ 2100 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, 2101 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) { 2102 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); 2103 return(EIO); 2104 } 2105 2106 return (0); 2107 } 2108 2109 /* 2110 * Adapter-space FIB queue manipulation 2111 * 2112 * Note that the queue implementation here is a little funky; neither the PI or 2113 * CI will ever be zero. This behaviour is a controller feature. 2114 */ 2115 static const struct { 2116 int size; 2117 int notify; 2118 } aac_qinfo[] = { 2119 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 2120 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 2121 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 2122 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 2123 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 2124 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 2125 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 2126 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 2127 }; 2128 2129 /* 2130 * Atomically insert an entry into the nominated queue, returns 0 on success or 2131 * EBUSY if the queue is full. 2132 * 2133 * Note: it would be more efficient to defer notifying the controller in 2134 * the case where we may be inserting several entries in rapid succession, 2135 * but implementing this usefully may be difficult (it would involve a 2136 * separate queue/notify interface). 2137 */ 2138 static int 2139 aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm) 2140 { 2141 u_int32_t pi, ci; 2142 int error; 2143 u_int32_t fib_size; 2144 u_int32_t fib_addr; 2145 2146 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2147 2148 fib_size = cm->cm_fib->Header.Size; 2149 fib_addr = cm->cm_fib->Header.ReceiverFibAddress; 2150 2151 /* get the producer/consumer indices */ 2152 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2153 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2154 2155 /* wrap the queue? */ 2156 if (pi >= aac_qinfo[queue].size) 2157 pi = 0; 2158 2159 /* check for queue full */ 2160 if ((pi + 1) == ci) { 2161 error = EBUSY; 2162 goto out; 2163 } 2164 2165 /* 2166 * To avoid a race with its completion interrupt, place this command on 2167 * the busy queue prior to advertising it to the controller. 2168 */ 2169 aac_enqueue_busy(cm); 2170 2171 /* populate queue entry */ 2172 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2173 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2174 2175 /* update producer index */ 2176 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2177 2178 /* notify the adapter if we know how */ 2179 if (aac_qinfo[queue].notify != 0) 2180 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2181 2182 error = 0; 2183 2184 out: 2185 return(error); 2186 } 2187 2188 /* 2189 * Atomically remove one entry from the nominated queue, returns 0 on 2190 * success or ENOENT if the queue is empty. 
2191 */ 2192 static int 2193 aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, 2194 struct aac_fib **fib_addr) 2195 { 2196 u_int32_t pi, ci; 2197 u_int32_t fib_index; 2198 int error; 2199 int notify; 2200 2201 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2202 2203 /* get the producer/consumer indices */ 2204 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2205 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2206 2207 /* check for queue empty */ 2208 if (ci == pi) { 2209 error = ENOENT; 2210 goto out; 2211 } 2212 2213 /* wrap the pi so the following test works */ 2214 if (pi >= aac_qinfo[queue].size) 2215 pi = 0; 2216 2217 notify = 0; 2218 if (ci == pi + 1) 2219 notify++; 2220 2221 /* wrap the queue? */ 2222 if (ci >= aac_qinfo[queue].size) 2223 ci = 0; 2224 2225 /* fetch the entry */ 2226 *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size; 2227 2228 switch (queue) { 2229 case AAC_HOST_NORM_CMD_QUEUE: 2230 case AAC_HOST_HIGH_CMD_QUEUE: 2231 /* 2232 * The aq_fib_addr is only 32 bits wide so it can't be counted 2233 * on to hold an address. For AIF's, the adapter assumes 2234 * that it's giving us an address into the array of AIF fibs. 2235 * Therefore, we have to convert it to an index. 2236 */ 2237 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr / 2238 sizeof(struct aac_fib); 2239 *fib_addr = &sc->aac_common->ac_fibs[fib_index]; 2240 break; 2241 2242 case AAC_HOST_NORM_RESP_QUEUE: 2243 case AAC_HOST_HIGH_RESP_QUEUE: 2244 { 2245 struct aac_command *cm; 2246 2247 /* 2248 * As above, an index is used instead of an actual address. 2249 * Gotta shift the index to account for the fast response 2250 * bit. No other correction is needed since this value was 2251 * originally provided by the driver via the SenderFibAddress 2252 * field. 2253 */ 2254 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr; 2255 cm = sc->aac_commands + (fib_index >> 2); 2256 *fib_addr = cm->cm_fib; 2257 2258 /* 2259 * Is this a fast response? If it is, update the fib fields in 2260 * local memory since the whole fib isn't DMA'd back up. 2261 */ 2262 if (fib_index & 0x01) { 2263 (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP; 2264 *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL; 2265 } 2266 break; 2267 } 2268 default: 2269 panic("Invalid queue in aac_dequeue_fib()"); 2270 break; 2271 } 2272 2273 /* update consumer index */ 2274 sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1; 2275 2276 /* if we have made the queue un-full, notify the adapter */ 2277 if (notify && (aac_qinfo[queue].notify != 0)) 2278 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2279 error = 0; 2280 2281 out: 2282 return(error); 2283 } 2284 2285 /* 2286 * Put our response to an Adapter Initialed Fib on the response queue 2287 */ 2288 static int 2289 aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib) 2290 { 2291 u_int32_t pi, ci; 2292 int error; 2293 u_int32_t fib_size; 2294 u_int32_t fib_addr; 2295 2296 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2297 2298 /* Tell the adapter where the FIB is */ 2299 fib_size = fib->Header.Size; 2300 fib_addr = fib->Header.SenderFibAddress; 2301 fib->Header.ReceiverFibAddress = fib_addr; 2302 2303 /* get the producer/consumer indices */ 2304 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2305 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2306 2307 /* wrap the queue? 
*/ 2308 if (pi >= aac_qinfo[queue].size) 2309 pi = 0; 2310 2311 /* check for queue full */ 2312 if ((pi + 1) == ci) { 2313 error = EBUSY; 2314 goto out; 2315 } 2316 2317 /* populate queue entry */ 2318 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2319 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2320 2321 /* update producer index */ 2322 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2323 2324 /* notify the adapter if we know how */ 2325 if (aac_qinfo[queue].notify != 0) 2326 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2327 2328 error = 0; 2329 2330 out: 2331 return(error); 2332 } 2333 2334 /* 2335 * Check for commands that have been outstanding for a suspiciously long time, 2336 * and complain about them. 2337 */ 2338 static void 2339 aac_timeout(struct aac_softc *sc) 2340 { 2341 struct aac_command *cm; 2342 time_t deadline; 2343 int timedout, code; 2344 2345 /* 2346 * Traverse the busy command list, bitch about late commands once 2347 * only. 2348 */ 2349 timedout = 0; 2350 deadline = time_uptime - AAC_CMD_TIMEOUT; 2351 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { 2352 if ((cm->cm_timestamp < deadline) 2353 && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) { 2354 cm->cm_flags |= AAC_CMD_TIMEDOUT; 2355 device_printf(sc->aac_dev, 2356 "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n", 2357 cm, cm->cm_fib->Header.Command, 2358 (int)(time_uptime-cm->cm_timestamp)); 2359 AAC_PRINT_FIB(sc, cm->cm_fib); 2360 timedout++; 2361 } 2362 } 2363 2364 if (timedout) { 2365 code = AAC_GET_FWSTATUS(sc); 2366 if (code != AAC_UP_AND_RUNNING) { 2367 device_printf(sc->aac_dev, "WARNING! Controller is no " 2368 "longer running! code= 0x%x\n", code); 2369 } 2370 } 2371 } 2372 2373 /* 2374 * Interface Function Vectors 2375 */ 2376 2377 /* 2378 * Read the current firmware status word. 2379 */ 2380 static int 2381 aac_sa_get_fwstatus(struct aac_softc *sc) 2382 { 2383 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2384 2385 return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS)); 2386 } 2387 2388 static int 2389 aac_rx_get_fwstatus(struct aac_softc *sc) 2390 { 2391 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2392 2393 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? 2394 AAC_RX_OMR0 : AAC_RX_FWSTATUS)); 2395 } 2396 2397 static int 2398 aac_rkt_get_fwstatus(struct aac_softc *sc) 2399 { 2400 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2401 2402 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? 
2403 AAC_RKT_OMR0 : AAC_RKT_FWSTATUS)); 2404 } 2405 2406 /* 2407 * Notify the controller of a change in a given queue 2408 */ 2409 2410 static void 2411 aac_sa_qnotify(struct aac_softc *sc, int qbit) 2412 { 2413 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2414 2415 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); 2416 } 2417 2418 static void 2419 aac_rx_qnotify(struct aac_softc *sc, int qbit) 2420 { 2421 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2422 2423 AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit); 2424 } 2425 2426 static void 2427 aac_rkt_qnotify(struct aac_softc *sc, int qbit) 2428 { 2429 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2430 2431 AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit); 2432 } 2433 2434 /* 2435 * Get the interrupt reason bits 2436 */ 2437 static int 2438 aac_sa_get_istatus(struct aac_softc *sc) 2439 { 2440 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2441 2442 return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0)); 2443 } 2444 2445 static int 2446 aac_rx_get_istatus(struct aac_softc *sc) 2447 { 2448 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2449 2450 return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR)); 2451 } 2452 2453 static int 2454 aac_rkt_get_istatus(struct aac_softc *sc) 2455 { 2456 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2457 2458 return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR)); 2459 } 2460 2461 /* 2462 * Clear some interrupt reason bits 2463 */ 2464 static void 2465 aac_sa_clear_istatus(struct aac_softc *sc, int mask) 2466 { 2467 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2468 2469 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask); 2470 } 2471 2472 static void 2473 aac_rx_clear_istatus(struct aac_softc *sc, int mask) 2474 { 2475 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2476 2477 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask); 2478 } 2479 2480 static void 2481 aac_rkt_clear_istatus(struct aac_softc *sc, int mask) 2482 { 2483 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2484 2485 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask); 2486 } 2487 2488 /* 2489 * Populate the mailbox and set the command word 2490 */ 2491 static void 2492 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, 2493 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2494 { 2495 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2496 2497 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command); 2498 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0); 2499 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1); 2500 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2); 2501 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3); 2502 } 2503 2504 static void 2505 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, 2506 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2507 { 2508 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2509 2510 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command); 2511 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0); 2512 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1); 2513 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2); 2514 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3); 2515 } 2516 2517 static void 2518 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, 2519 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2520 { 2521 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2522 2523 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command); 2524 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0); 2525 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1); 2526 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2); 2527 AAC_MEM1_SETREG4(sc, 
AAC_RKT_MAILBOX + 16, arg3); 2528 } 2529 2530 /* 2531 * Fetch the immediate command status word 2532 */ 2533 static int 2534 aac_sa_get_mailbox(struct aac_softc *sc, int mb) 2535 { 2536 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2537 2538 return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4))); 2539 } 2540 2541 static int 2542 aac_rx_get_mailbox(struct aac_softc *sc, int mb) 2543 { 2544 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2545 2546 return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4))); 2547 } 2548 2549 static int 2550 aac_rkt_get_mailbox(struct aac_softc *sc, int mb) 2551 { 2552 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2553 2554 return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4))); 2555 } 2556 2557 /* 2558 * Set/clear interrupt masks 2559 */ 2560 static void 2561 aac_sa_set_interrupts(struct aac_softc *sc, int enable) 2562 { 2563 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2564 2565 if (enable) { 2566 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS); 2567 } else { 2568 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0); 2569 } 2570 } 2571 2572 static void 2573 aac_rx_set_interrupts(struct aac_softc *sc, int enable) 2574 { 2575 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2576 2577 if (enable) { 2578 if (sc->flags & AAC_FLAGS_NEW_COMM) 2579 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM); 2580 else 2581 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS); 2582 } else { 2583 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0); 2584 } 2585 } 2586 2587 static void 2588 aac_rkt_set_interrupts(struct aac_softc *sc, int enable) 2589 { 2590 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2591 2592 if (enable) { 2593 if (sc->flags & AAC_FLAGS_NEW_COMM) 2594 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM); 2595 else 2596 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS); 2597 } else { 2598 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0); 2599 } 2600 } 2601 2602 /* 2603 * New comm. 
interface: Send command functions 2604 */ 2605 static int 2606 aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm) 2607 { 2608 u_int32_t index, device; 2609 2610 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); 2611 2612 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); 2613 if (index == 0xffffffffL) 2614 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); 2615 if (index == 0xffffffffL) 2616 return index; 2617 aac_enqueue_busy(cm); 2618 device = index; 2619 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2620 device += 4; 2621 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2622 device += 4; 2623 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); 2624 AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index); 2625 return 0; 2626 } 2627 2628 static int 2629 aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm) 2630 { 2631 u_int32_t index, device; 2632 2633 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); 2634 2635 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); 2636 if (index == 0xffffffffL) 2637 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); 2638 if (index == 0xffffffffL) 2639 return index; 2640 aac_enqueue_busy(cm); 2641 device = index; 2642 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2643 device += 4; 2644 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2645 device += 4; 2646 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); 2647 AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index); 2648 return 0; 2649 } 2650 2651 /* 2652 * New comm. interface: get, set outbound queue index 2653 */ 2654 static int 2655 aac_rx_get_outb_queue(struct aac_softc *sc) 2656 { 2657 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2658 2659 return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE)); 2660 } 2661 2662 static int 2663 aac_rkt_get_outb_queue(struct aac_softc *sc) 2664 { 2665 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2666 2667 return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE)); 2668 } 2669 2670 static void 2671 aac_rx_set_outb_queue(struct aac_softc *sc, int index) 2672 { 2673 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2674 2675 AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index); 2676 } 2677 2678 static void 2679 aac_rkt_set_outb_queue(struct aac_softc *sc, int index) 2680 { 2681 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2682 2683 AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index); 2684 } 2685 2686 /* 2687 * Debugging and Diagnostics 2688 */ 2689 2690 /* 2691 * Print some information about the controller. 
2692 */ 2693 static void 2694 aac_describe_controller(struct aac_softc *sc) 2695 { 2696 struct aac_fib *fib; 2697 struct aac_adapter_info *info; 2698 char *adapter_type = "Adaptec RAID controller"; 2699 2700 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2701 2702 mtx_lock(&sc->aac_io_lock); 2703 aac_alloc_sync_fib(sc, &fib); 2704 2705 fib->data[0] = 0; 2706 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { 2707 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); 2708 aac_release_sync_fib(sc); 2709 mtx_unlock(&sc->aac_io_lock); 2710 return; 2711 } 2712 2713 /* save the kernel revision structure for later use */ 2714 info = (struct aac_adapter_info *)&fib->data[0]; 2715 sc->aac_revision = info->KernelRevision; 2716 2717 if (bootverbose) { 2718 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " 2719 "(%dMB cache, %dMB execution), %s\n", 2720 aac_describe_code(aac_cpu_variant, info->CpuVariant), 2721 info->ClockSpeed, info->TotalMem / (1024 * 1024), 2722 info->BufferMem / (1024 * 1024), 2723 info->ExecutionMem / (1024 * 1024), 2724 aac_describe_code(aac_battery_platform, 2725 info->batteryPlatform)); 2726 2727 device_printf(sc->aac_dev, 2728 "Kernel %d.%d-%d, Build %d, S/N %6X\n", 2729 info->KernelRevision.external.comp.major, 2730 info->KernelRevision.external.comp.minor, 2731 info->KernelRevision.external.comp.dash, 2732 info->KernelRevision.buildNumber, 2733 (u_int32_t)(info->SerialNumber & 0xffffff)); 2734 2735 device_printf(sc->aac_dev, "Supported Options=%b\n", 2736 sc->supported_options, 2737 "\20" 2738 "\1SNAPSHOT" 2739 "\2CLUSTERS" 2740 "\3WCACHE" 2741 "\4DATA64" 2742 "\5HOSTTIME" 2743 "\6RAID50" 2744 "\7WINDOW4GB" 2745 "\10SCSIUPGD" 2746 "\11SOFTERR" 2747 "\12NORECOND" 2748 "\13SGMAP64" 2749 "\14ALARM" 2750 "\15NONDASD" 2751 "\16SCSIMGT" 2752 "\17RAIDSCSI" 2753 "\21ADPTINFO" 2754 "\22NEWCOMM" 2755 "\23ARRAY64BIT" 2756 "\24HEATSENSOR"); 2757 } 2758 2759 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { 2760 fib->data[0] = 0; 2761 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) 2762 device_printf(sc->aac_dev, 2763 "RequestSupplementAdapterInfo failed\n"); 2764 else 2765 adapter_type = ((struct aac_supplement_adapter_info *) 2766 &fib->data[0])->AdapterTypeText; 2767 } 2768 device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n", 2769 adapter_type, 2770 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, 2771 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); 2772 2773 aac_release_sync_fib(sc); 2774 mtx_unlock(&sc->aac_io_lock); 2775 } 2776 2777 /* 2778 * Look up a text description of a numeric error code and return a pointer to 2779 * same. 
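 *
 * The lookup tables are expected to end with a NULL-string sentinel
 * followed immediately by a catch-all "unknown" entry; that trailing
 * entry is what the table[i + 1].string fallback below returns when
 * no code matches.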
2780 */ 2781 static const char * 2782 aac_describe_code(const struct aac_code_lookup *table, u_int32_t code) 2783 { 2784 int i; 2785 2786 for (i = 0; table[i].string != NULL; i++) 2787 if (table[i].code == code) 2788 return(table[i].string); 2789 return(table[i + 1].string); 2790 } 2791 2792 /* 2793 * Management Interface 2794 */ 2795 2796 static int 2797 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2798 { 2799 struct aac_softc *sc; 2800 2801 sc = dev->si_drv1; 2802 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2803 device_busy(sc->aac_dev); 2804 devfs_set_cdevpriv(sc, aac_cdevpriv_dtor); 2805 2806 return 0; 2807 } 2808 2809 static int 2810 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2811 { 2812 union aac_statrequest *as; 2813 struct aac_softc *sc; 2814 int error = 0; 2815 2816 as = (union aac_statrequest *)arg; 2817 sc = dev->si_drv1; 2818 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2819 2820 switch (cmd) { 2821 case AACIO_STATS: 2822 switch (as->as_item) { 2823 case AACQ_FREE: 2824 case AACQ_BIO: 2825 case AACQ_READY: 2826 case AACQ_BUSY: 2827 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, 2828 sizeof(struct aac_qstat)); 2829 break; 2830 default: 2831 error = ENOENT; 2832 break; 2833 } 2834 break; 2835 2836 case FSACTL_SENDFIB: 2837 case FSACTL_SEND_LARGE_FIB: 2838 arg = *(caddr_t*)arg; 2839 case FSACTL_LNX_SENDFIB: 2840 case FSACTL_LNX_SEND_LARGE_FIB: 2841 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); 2842 error = aac_ioctl_sendfib(sc, arg); 2843 break; 2844 case FSACTL_SEND_RAW_SRB: 2845 arg = *(caddr_t*)arg; 2846 case FSACTL_LNX_SEND_RAW_SRB: 2847 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); 2848 error = aac_ioctl_send_raw_srb(sc, arg); 2849 break; 2850 case FSACTL_AIF_THREAD: 2851 case FSACTL_LNX_AIF_THREAD: 2852 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); 2853 error = EINVAL; 2854 break; 2855 case FSACTL_OPEN_GET_ADAPTER_FIB: 2856 arg = *(caddr_t*)arg; 2857 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: 2858 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); 2859 error = aac_open_aif(sc, arg); 2860 break; 2861 case FSACTL_GET_NEXT_ADAPTER_FIB: 2862 arg = *(caddr_t*)arg; 2863 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: 2864 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); 2865 error = aac_getnext_aif(sc, arg); 2866 break; 2867 case FSACTL_CLOSE_GET_ADAPTER_FIB: 2868 arg = *(caddr_t*)arg; 2869 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: 2870 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); 2871 error = aac_close_aif(sc, arg); 2872 break; 2873 case FSACTL_MINIPORT_REV_CHECK: 2874 arg = *(caddr_t*)arg; 2875 case FSACTL_LNX_MINIPORT_REV_CHECK: 2876 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); 2877 error = aac_rev_check(sc, arg); 2878 break; 2879 case FSACTL_QUERY_DISK: 2880 arg = *(caddr_t*)arg; 2881 case FSACTL_LNX_QUERY_DISK: 2882 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); 2883 error = aac_query_disk(sc, arg); 2884 break; 2885 case FSACTL_DELETE_DISK: 2886 case FSACTL_LNX_DELETE_DISK: 2887 /* 2888 * We don't trust the underland to tell us when to delete a 2889 * container, rather we rely on an AIF coming from the 2890 * controller 2891 */ 2892 error = 0; 2893 break; 2894 case FSACTL_GET_PCI_INFO: 2895 arg = *(caddr_t*)arg; 2896 case FSACTL_LNX_GET_PCI_INFO: 2897 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO"); 2898 
error = aac_get_pci_info(sc, arg); 2899 break; 2900 case FSACTL_GET_FEATURES: 2901 arg = *(caddr_t*)arg; 2902 case FSACTL_LNX_GET_FEATURES: 2903 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES"); 2904 error = aac_supported_features(sc, arg); 2905 break; 2906 default: 2907 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); 2908 error = EINVAL; 2909 break; 2910 } 2911 return(error); 2912 } 2913 2914 static int 2915 aac_poll(struct cdev *dev, int poll_events, struct thread *td) 2916 { 2917 struct aac_softc *sc; 2918 struct aac_fib_context *ctx; 2919 int revents; 2920 2921 sc = dev->si_drv1; 2922 revents = 0; 2923 2924 mtx_lock(&sc->aac_aifq_lock); 2925 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) { 2926 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 2927 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { 2928 revents |= poll_events & (POLLIN | POLLRDNORM); 2929 break; 2930 } 2931 } 2932 } 2933 mtx_unlock(&sc->aac_aifq_lock); 2934 2935 if (revents == 0) { 2936 if (poll_events & (POLLIN | POLLRDNORM)) 2937 selrecord(td, &sc->rcv_select); 2938 } 2939 2940 return (revents); 2941 } 2942 2943 static void 2944 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) 2945 { 2946 2947 switch (event->ev_type) { 2948 case AAC_EVENT_CMFREE: 2949 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2950 if (aac_alloc_command(sc, (struct aac_command **)arg)) { 2951 aac_add_event(sc, event); 2952 return; 2953 } 2954 free(event, M_AACBUF); 2955 wakeup(arg); 2956 break; 2957 default: 2958 break; 2959 } 2960 } 2961 2962 /* 2963 * Send a FIB supplied from userspace 2964 */ 2965 static int 2966 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) 2967 { 2968 struct aac_command *cm; 2969 int size, error; 2970 2971 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2972 2973 cm = NULL; 2974 2975 /* 2976 * Get a command 2977 */ 2978 mtx_lock(&sc->aac_io_lock); 2979 if (aac_alloc_command(sc, &cm)) { 2980 struct aac_event *event; 2981 2982 event = malloc(sizeof(struct aac_event), M_AACBUF, 2983 M_NOWAIT | M_ZERO); 2984 if (event == NULL) { 2985 error = EBUSY; 2986 mtx_unlock(&sc->aac_io_lock); 2987 goto out; 2988 } 2989 event->ev_type = AAC_EVENT_CMFREE; 2990 event->ev_callback = aac_ioctl_event; 2991 event->ev_arg = &cm; 2992 aac_add_event(sc, event); 2993 msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0); 2994 } 2995 mtx_unlock(&sc->aac_io_lock); 2996 2997 /* 2998 * Fetch the FIB header, then re-copy to get data as well. 2999 */ 3000 if ((error = copyin(ufib, cm->cm_fib, 3001 sizeof(struct aac_fib_header))) != 0) 3002 goto out; 3003 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); 3004 if (size > sc->aac_max_fib_size) { 3005 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", 3006 size, sc->aac_max_fib_size); 3007 size = sc->aac_max_fib_size; 3008 } 3009 if ((error = copyin(ufib, cm->cm_fib, size)) != 0) 3010 goto out; 3011 cm->cm_fib->Header.Size = size; 3012 cm->cm_timestamp = time_uptime; 3013 3014 /* 3015 * Pass the FIB to the controller, wait for it to complete. 3016 */ 3017 mtx_lock(&sc->aac_io_lock); 3018 error = aac_wait_command(cm); 3019 mtx_unlock(&sc->aac_io_lock); 3020 if (error != 0) { 3021 device_printf(sc->aac_dev, 3022 "aac_wait_command return %d\n", error); 3023 goto out; 3024 } 3025 3026 /* 3027 * Copy the FIB and data back out to the caller. 
3028 */ 3029 size = cm->cm_fib->Header.Size; 3030 if (size > sc->aac_max_fib_size) { 3031 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", 3032 size, sc->aac_max_fib_size); 3033 size = sc->aac_max_fib_size; 3034 } 3035 error = copyout(cm->cm_fib, ufib, size); 3036 3037 out: 3038 if (cm != NULL) { 3039 mtx_lock(&sc->aac_io_lock); 3040 aac_release_command(cm); 3041 mtx_unlock(&sc->aac_io_lock); 3042 } 3043 return(error); 3044 } 3045 3046 /* 3047 * Send a passthrough FIB supplied from userspace 3048 */ 3049 static int 3050 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) 3051 { 3052 struct aac_command *cm; 3053 struct aac_event *event; 3054 struct aac_fib *fib; 3055 struct aac_srb *srbcmd, *user_srb; 3056 struct aac_sg_entry *sge; 3057 struct aac_sg_entry64 *sge64; 3058 void *srb_sg_address, *ureply; 3059 uint32_t fibsize, srb_sg_bytecount; 3060 int error, transfer_data; 3061 3062 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3063 3064 cm = NULL; 3065 transfer_data = 0; 3066 fibsize = 0; 3067 user_srb = (struct aac_srb *)arg; 3068 3069 mtx_lock(&sc->aac_io_lock); 3070 if (aac_alloc_command(sc, &cm)) { 3071 event = malloc(sizeof(struct aac_event), M_AACBUF, 3072 M_NOWAIT | M_ZERO); 3073 if (event == NULL) { 3074 error = EBUSY; 3075 mtx_unlock(&sc->aac_io_lock); 3076 goto out; 3077 } 3078 event->ev_type = AAC_EVENT_CMFREE; 3079 event->ev_callback = aac_ioctl_event; 3080 event->ev_arg = &cm; 3081 aac_add_event(sc, event); 3082 msleep(&cm, &sc->aac_io_lock, 0, "aacraw", 0); /* woken via wakeup(&cm) in aac_ioctl_event() */ 3083 } 3084 mtx_unlock(&sc->aac_io_lock); 3085 3086 cm->cm_data = NULL; 3087 fib = cm->cm_fib; 3088 srbcmd = (struct aac_srb *)fib->data; 3089 error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t)); 3090 if (error != 0) 3091 goto out; 3092 if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) { 3093 error = EINVAL; 3094 goto out; 3095 } 3096 error = copyin(user_srb, srbcmd, fibsize); 3097 if (error != 0) 3098 goto out; 3099 srbcmd->function = 0; 3100 srbcmd->retry_limit = 0; 3101 if (srbcmd->sg_map.SgCount > 1) { 3102 error = EINVAL; 3103 goto out; 3104 } 3105 3106 /* Retrieve correct SG entries.
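 *
 * The userland SRB carries at most one SG entry (SgCount was checked
 * above), and its total size tells us which format was used: a 32-bit
 * struct aac_sg_entry, or on amd64 a struct aac_sg_entry64 whose
 * address may only exceed 4GB when the controller advertises
 * AAC_FLAGS_SG_64BIT.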
*/ 3107 if (fibsize == (sizeof(struct aac_srb) + 3108 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { 3109 struct aac_sg_entry sg; 3110 3111 sge = srbcmd->sg_map.SgEntry; 3112 sge64 = NULL; 3113 3114 if ((error = copyin(sge, &sg, sizeof(sg))) != 0) 3115 goto out; 3116 3117 srb_sg_bytecount = sg.SgByteCount; 3118 srb_sg_address = (void *)(uintptr_t)sg.SgAddress; 3119 } 3120 #ifdef __amd64__ 3121 else if (fibsize == (sizeof(struct aac_srb) + 3122 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { 3123 struct aac_sg_entry64 sg; 3124 3125 sge = NULL; 3126 sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; 3127 3128 if ((error = copyin(sge64, &sg, sizeof(sg))) != 0) 3129 goto out; 3130 3131 srb_sg_bytecount = sg.SgByteCount; 3132 srb_sg_address = (void *)sg.SgAddress; 3133 if (sge64->SgAddress > 0xffffffffull && 3134 (sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 3135 error = EINVAL; 3136 goto out; 3137 } 3138 } 3139 #endif 3140 else { 3141 error = EINVAL; 3142 goto out; 3143 } 3144 ureply = (char *)arg + fibsize; 3145 srbcmd->data_len = srb_sg_bytecount; 3146 if (srbcmd->sg_map.SgCount == 1) 3147 transfer_data = 1; 3148 3149 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; 3150 if (transfer_data) { 3151 cm->cm_datalen = srb_sg_bytecount; 3152 cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT); 3153 if (cm->cm_data == NULL) { 3154 error = ENOMEM; 3155 goto out; 3156 } 3157 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) 3158 cm->cm_flags |= AAC_CMD_DATAIN; 3159 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { 3160 cm->cm_flags |= AAC_CMD_DATAOUT; 3161 error = copyin(srb_sg_address, cm->cm_data, 3162 cm->cm_datalen); 3163 if (error != 0) 3164 goto out; 3165 } 3166 } 3167 3168 fib->Header.Size = sizeof(struct aac_fib_header) + 3169 sizeof(struct aac_srb); 3170 fib->Header.XferState = 3171 AAC_FIBSTATE_HOSTOWNED | 3172 AAC_FIBSTATE_INITIALISED | 3173 AAC_FIBSTATE_EMPTY | 3174 AAC_FIBSTATE_FROMHOST | 3175 AAC_FIBSTATE_REXPECTED | 3176 AAC_FIBSTATE_NORM | 3177 AAC_FIBSTATE_ASYNC | 3178 AAC_FIBSTATE_FAST_RESPONSE; 3179 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ? 3180 ScsiPortCommandU64 : ScsiPortCommand; 3181 3182 mtx_lock(&sc->aac_io_lock); 3183 aac_wait_command(cm); 3184 mtx_unlock(&sc->aac_io_lock); 3185 3186 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) { 3187 error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen); 3188 if (error != 0) 3189 goto out; 3190 } 3191 error = copyout(fib->data, ureply, sizeof(struct aac_srb_response)); 3192 out: 3193 if (cm != NULL) { 3194 if (cm->cm_data != NULL) 3195 free(cm->cm_data, M_AACBUF); 3196 mtx_lock(&sc->aac_io_lock); 3197 aac_release_command(cm); 3198 mtx_unlock(&sc->aac_io_lock); 3199 } 3200 return(error); 3201 } 3202 3203 /* 3204 * cdevpriv interface private destructor. 3205 */ 3206 static void 3207 aac_cdevpriv_dtor(void *arg) 3208 { 3209 struct aac_softc *sc; 3210 3211 sc = arg; 3212 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3213 mtx_lock(&Giant); 3214 device_unbusy(sc->aac_dev); 3215 mtx_unlock(&Giant); 3216 } 3217 3218 /* 3219 * Handle an AIF sent to us by the controller; queue it for later reference. 3220 * If the queue fills up, then drop the older entries. 
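 *
 * AIFs are stored in a fixed ring of AAC_AIFQ_LENGTH FIBs
 * (sc->aac_aifq) with a single write index (sc->aifq_idx).  Each open
 * management context keeps its own read index; a context that has
 * been lapped by the writer gets its ctx_wrap flag set so that
 * aac_return_aif() hands out the oldest surviving entries first.  New
 * arrivals wake both tsleep()ing readers and poll()ers.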
3221 */ 3222 static void 3223 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) 3224 { 3225 struct aac_aif_command *aif; 3226 struct aac_container *co, *co_next; 3227 struct aac_fib_context *ctx; 3228 struct aac_mntinforesp *mir; 3229 int next, current, found; 3230 int count = 0, added = 0, i = 0; 3231 uint32_t channel; 3232 3233 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3234 3235 aif = (struct aac_aif_command*)&fib->data[0]; 3236 aac_print_aif(sc, aif); 3237 3238 /* Is it an event that we should care about? */ 3239 switch (aif->command) { 3240 case AifCmdEventNotify: 3241 switch (aif->data.EN.type) { 3242 case AifEnAddContainer: 3243 case AifEnDeleteContainer: 3244 /* 3245 * A container was added or deleted, but the message 3246 * doesn't tell us anything else! Re-enumerate the 3247 * containers and sort things out. 3248 */ 3249 aac_alloc_sync_fib(sc, &fib); 3250 do { 3251 /* 3252 * Ask the controller for its containers one at 3253 * a time. 3254 * XXX What if the controller's list changes 3255 * midway through this enumaration? 3256 * XXX This should be done async. 3257 */ 3258 if ((mir = aac_get_container_info(sc, fib, i)) == NULL) 3259 continue; 3260 if (i == 0) 3261 count = mir->MntRespCount; 3262 /* 3263 * Check the container against our list. 3264 * co->co_found was already set to 0 in a 3265 * previous run. 3266 */ 3267 if ((mir->Status == ST_OK) && 3268 (mir->MntTable[0].VolType != CT_NONE)) { 3269 found = 0; 3270 TAILQ_FOREACH(co, 3271 &sc->aac_container_tqh, 3272 co_link) { 3273 if (co->co_mntobj.ObjectId == 3274 mir->MntTable[0].ObjectId) { 3275 co->co_found = 1; 3276 found = 1; 3277 break; 3278 } 3279 } 3280 /* 3281 * If the container matched, continue 3282 * in the list. 3283 */ 3284 if (found) { 3285 i++; 3286 continue; 3287 } 3288 3289 /* 3290 * This is a new container. Do all the 3291 * appropriate things to set it up. 3292 */ 3293 aac_add_container(sc, mir, 1); 3294 added = 1; 3295 } 3296 i++; 3297 } while ((i < count) && (i < AAC_MAX_CONTAINERS)); 3298 aac_release_sync_fib(sc); 3299 3300 /* 3301 * Go through our list of containers and see which ones 3302 * were not marked 'found'. Since the controller didn't 3303 * list them they must have been deleted. Do the 3304 * appropriate steps to destroy the device. Also reset 3305 * the co->co_found field. 
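 *
 * The io lock is dropped and Giant taken around device_delete_child()
 * (and later bus_generic_attach()), presumably because the newbus
 * device tree is still Giant-locked here.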
3306 */ 3307 co = TAILQ_FIRST(&sc->aac_container_tqh); 3308 while (co != NULL) { 3309 if (co->co_found == 0) { 3310 mtx_unlock(&sc->aac_io_lock); 3311 mtx_lock(&Giant); 3312 device_delete_child(sc->aac_dev, 3313 co->co_disk); 3314 mtx_unlock(&Giant); 3315 mtx_lock(&sc->aac_io_lock); 3316 co_next = TAILQ_NEXT(co, co_link); 3317 mtx_lock(&sc->aac_container_lock); 3318 TAILQ_REMOVE(&sc->aac_container_tqh, co, 3319 co_link); 3320 mtx_unlock(&sc->aac_container_lock); 3321 free(co, M_AACBUF); 3322 co = co_next; 3323 } else { 3324 co->co_found = 0; 3325 co = TAILQ_NEXT(co, co_link); 3326 } 3327 } 3328 3329 /* Attach the newly created containers */ 3330 if (added) { 3331 mtx_unlock(&sc->aac_io_lock); 3332 mtx_lock(&Giant); 3333 bus_generic_attach(sc->aac_dev); 3334 mtx_unlock(&Giant); 3335 mtx_lock(&sc->aac_io_lock); 3336 } 3337 3338 break; 3339 3340 case AifEnEnclosureManagement: 3341 switch (aif->data.EN.data.EEE.eventType) { 3342 case AIF_EM_DRIVE_INSERTION: 3343 case AIF_EM_DRIVE_REMOVAL: 3344 channel = aif->data.EN.data.EEE.unitID; 3345 if (sc->cam_rescan_cb != NULL) 3346 sc->cam_rescan_cb(sc, 3347 (channel >> 24) & 0xF, 3348 (channel & 0xFFFF)); 3349 break; 3350 } 3351 break; 3352 3353 case AifEnAddJBOD: 3354 case AifEnDeleteJBOD: 3355 channel = aif->data.EN.data.ECE.container; 3356 if (sc->cam_rescan_cb != NULL) 3357 sc->cam_rescan_cb(sc, (channel >> 24) & 0xF, 3358 AAC_CAM_TARGET_WILDCARD); 3359 break; 3360 3361 default: 3362 break; 3363 } 3364 3365 default: 3366 break; 3367 } 3368 3369 /* Copy the AIF data to the AIF queue for ioctl retrieval */ 3370 mtx_lock(&sc->aac_aifq_lock); 3371 current = sc->aifq_idx; 3372 next = (current + 1) % AAC_AIFQ_LENGTH; 3373 if (next == 0) 3374 sc->aifq_filled = 1; 3375 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); 3376 /* modify AIF contexts */ 3377 if (sc->aifq_filled) { 3378 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3379 if (next == ctx->ctx_idx) 3380 ctx->ctx_wrap = 1; 3381 else if (current == ctx->ctx_idx && ctx->ctx_wrap) 3382 ctx->ctx_idx = next; 3383 } 3384 } 3385 sc->aifq_idx = next; 3386 /* On the off chance that someone is sleeping for an aif... */ 3387 if (sc->aac_state & AAC_STATE_AIF_SLEEPER) 3388 wakeup(sc->aac_aifq); 3389 /* Wakeup any poll()ers */ 3390 selwakeuppri(&sc->rcv_select, PRIBIO); 3391 mtx_unlock(&sc->aac_aifq_lock); 3392 } 3393 3394 /* 3395 * Return the Revision of the driver to userspace and check to see if the 3396 * userspace app is possibly compatible. This is extremely bogus since 3397 * our driver doesn't follow Adaptec's versioning system. Cheat by just 3398 * returning what the card reported. 3399 */ 3400 static int 3401 aac_rev_check(struct aac_softc *sc, caddr_t udata) 3402 { 3403 struct aac_rev_check rev_check; 3404 struct aac_rev_check_resp rev_check_resp; 3405 int error = 0; 3406 3407 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3408 3409 /* 3410 * Copyin the revision struct from userspace 3411 */ 3412 if ((error = copyin(udata, (caddr_t)&rev_check, 3413 sizeof(struct aac_rev_check))) != 0) { 3414 return error; 3415 } 3416 3417 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", 3418 rev_check.callingRevision.buildNumber); 3419 3420 /* 3421 * Doctor up the response struct. 
3422 */ 3423 rev_check_resp.possiblyCompatible = 1; 3424 rev_check_resp.adapterSWRevision.external.comp.major = 3425 AAC_DRIVER_MAJOR_VERSION; 3426 rev_check_resp.adapterSWRevision.external.comp.minor = 3427 AAC_DRIVER_MINOR_VERSION; 3428 rev_check_resp.adapterSWRevision.external.comp.type = 3429 AAC_DRIVER_TYPE; 3430 rev_check_resp.adapterSWRevision.external.comp.dash = 3431 AAC_DRIVER_BUGFIX_LEVEL; 3432 rev_check_resp.adapterSWRevision.buildNumber = 3433 AAC_DRIVER_BUILD; 3434 3435 return(copyout((caddr_t)&rev_check_resp, udata, 3436 sizeof(struct aac_rev_check_resp))); 3437 } 3438 3439 /* 3440 * Pass the fib context to the caller 3441 */ 3442 static int 3443 aac_open_aif(struct aac_softc *sc, caddr_t arg) 3444 { 3445 struct aac_fib_context *fibctx, *ctx; 3446 int error = 0; 3447 3448 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3449 3450 fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO); 3451 if (fibctx == NULL) 3452 return (ENOMEM); 3453 3454 mtx_lock(&sc->aac_aifq_lock); 3455 /* all elements are already 0, add to queue */ 3456 if (sc->fibctx == NULL) 3457 sc->fibctx = fibctx; 3458 else { 3459 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) 3460 ; 3461 ctx->next = fibctx; 3462 fibctx->prev = ctx; 3463 } 3464 3465 /* evaluate unique value */ 3466 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); 3467 ctx = sc->fibctx; 3468 while (ctx != fibctx) { 3469 if (ctx->unique == fibctx->unique) { 3470 fibctx->unique++; 3471 ctx = sc->fibctx; 3472 } else { 3473 ctx = ctx->next; 3474 } 3475 } 3476 mtx_unlock(&sc->aac_aifq_lock); 3477 3478 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); 3479 if (error) 3480 aac_close_aif(sc, (caddr_t)ctx); 3481 return error; 3482 } 3483 3484 /* 3485 * Close the caller's fib context 3486 */ 3487 static int 3488 aac_close_aif(struct aac_softc *sc, caddr_t arg) 3489 { 3490 struct aac_fib_context *ctx; 3491 3492 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3493 3494 mtx_lock(&sc->aac_aifq_lock); 3495 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3496 if (ctx->unique == *(uint32_t *)&arg) { 3497 if (ctx == sc->fibctx) 3498 sc->fibctx = NULL; 3499 else { 3500 ctx->prev->next = ctx->next; 3501 if (ctx->next) 3502 ctx->next->prev = ctx->prev; 3503 } 3504 break; 3505 } 3506 } 3507 mtx_unlock(&sc->aac_aifq_lock); 3508 if (ctx) 3509 free(ctx, M_AACBUF); 3510 3511 return 0; 3512 } 3513 3514 /* 3515 * Pass the caller the next AIF in their queue 3516 */ 3517 static int 3518 aac_getnext_aif(struct aac_softc *sc, caddr_t arg) 3519 { 3520 struct get_adapter_fib_ioctl agf; 3521 struct aac_fib_context *ctx; 3522 int error; 3523 3524 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3525 3526 #ifdef COMPAT_FREEBSD32 3527 if (SV_CURPROC_FLAG(SV_ILP32)) { 3528 struct get_adapter_fib_ioctl32 agf32; 3529 error = copyin(arg, &agf32, sizeof(agf32)); 3530 if (error == 0) { 3531 agf.AdapterFibContext = agf32.AdapterFibContext; 3532 agf.Wait = agf32.Wait; 3533 agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib; 3534 } 3535 } else 3536 #endif 3537 error = copyin(arg, &agf, sizeof(agf)); 3538 if (error == 0) { 3539 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3540 if (agf.AdapterFibContext == ctx->unique) 3541 break; 3542 } 3543 if (!ctx) 3544 return (EFAULT); 3545 3546 error = aac_return_aif(sc, ctx, agf.AifFib); 3547 if (error == EAGAIN && agf.Wait) { 3548 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); 3549 sc->aac_state |= AAC_STATE_AIF_SLEEPER; 3550 while (error == EAGAIN) { 3551 error = tsleep(sc->aac_aifq, 
PRIBIO | 3552 PCATCH, "aacaif", 0); 3553 if (error == 0) 3554 error = aac_return_aif(sc, ctx, agf.AifFib); 3555 } 3556 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; 3557 } 3558 } 3559 return(error); 3560 } 3561 3562 /* 3563 * Hand the next AIF off the top of the queue out to userspace. 3564 */ 3565 static int 3566 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) 3567 { 3568 int current, error; 3569 3570 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3571 3572 mtx_lock(&sc->aac_aifq_lock); 3573 current = ctx->ctx_idx; 3574 if (current == sc->aifq_idx && !ctx->ctx_wrap) { 3575 /* empty */ 3576 mtx_unlock(&sc->aac_aifq_lock); 3577 return (EAGAIN); 3578 } 3579 error = 3580 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); 3581 if (error) 3582 device_printf(sc->aac_dev, 3583 "aac_return_aif: copyout returned %d\n", error); 3584 else { 3585 ctx->ctx_wrap = 0; 3586 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 3587 } 3588 mtx_unlock(&sc->aac_aifq_lock); 3589 return(error); 3590 } 3591 3592 static int 3593 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) 3594 { 3595 struct aac_pci_info { 3596 u_int32_t bus; 3597 u_int32_t slot; 3598 } pciinf; 3599 int error; 3600 3601 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3602 3603 pciinf.bus = pci_get_bus(sc->aac_dev); 3604 pciinf.slot = pci_get_slot(sc->aac_dev); 3605 3606 error = copyout((caddr_t)&pciinf, uptr, 3607 sizeof(struct aac_pci_info)); 3608 3609 return (error); 3610 } 3611 3612 static int 3613 aac_supported_features(struct aac_softc *sc, caddr_t uptr) 3614 { 3615 struct aac_features f; 3616 int error; 3617 3618 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3619 3620 if ((error = copyin(uptr, &f, sizeof (f))) != 0) 3621 return (error); 3622 3623 /* 3624 * When the management driver receives FSACTL_GET_FEATURES ioctl with 3625 * ALL zero in the featuresState, the driver will return the current 3626 * state of all the supported features, the data field will not be 3627 * valid. 3628 * When the management driver receives FSACTL_GET_FEATURES ioctl with 3629 * a specific bit set in the featuresState, the driver will return the 3630 * current state of this specific feature and whatever data that are 3631 * associated with the feature in the data field or perform whatever 3632 * action needed indicates in the data field. 3633 */ 3634 if (f.feat.fValue == 0) { 3635 f.feat.fBits.largeLBA = 3636 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; 3637 /* TODO: In the future, add other features state here as well */ 3638 } else { 3639 if (f.feat.fBits.largeLBA) 3640 f.feat.fBits.largeLBA = 3641 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; 3642 /* TODO: Add other features state and data in the future */ 3643 } 3644 3645 error = copyout(&f, uptr, sizeof (f)); 3646 return (error); 3647 } 3648 3649 /* 3650 * Give the userland some information about the container. The AAC arch 3651 * expects the driver to be a SCSI passthrough type driver, so it expects 3652 * the containers to have b:t:l numbers. Fake it. 
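 *
 * The faked addressing reported below is: Bus = the controller's unit
 * number, Target = the aac_disk unit, Lun = 0, with the device name
 * built from the attached disk's d_name and unit.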
3653 */ 3654 static int 3655 aac_query_disk(struct aac_softc *sc, caddr_t uptr) 3656 { 3657 struct aac_query_disk query_disk; 3658 struct aac_container *co; 3659 struct aac_disk *disk; 3660 int error, id; 3661 3662 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3663 3664 disk = NULL; 3665 3666 error = copyin(uptr, (caddr_t)&query_disk, 3667 sizeof(struct aac_query_disk)); 3668 if (error) 3669 return (error); 3670 3671 id = query_disk.ContainerNumber; 3672 if (id == -1) 3673 return (EINVAL); 3674 3675 mtx_lock(&sc->aac_container_lock); 3676 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { 3677 if (co->co_mntobj.ObjectId == id) 3678 break; 3679 } 3680 3681 if (co == NULL) { 3682 query_disk.Valid = 0; 3683 query_disk.Locked = 0; 3684 query_disk.Deleted = 1; /* XXX is this right? */ 3685 } else { 3686 disk = device_get_softc(co->co_disk); 3687 query_disk.Valid = 1; 3688 query_disk.Locked = 3689 (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0; 3690 query_disk.Deleted = 0; 3691 query_disk.Bus = device_get_unit(sc->aac_dev); 3692 query_disk.Target = disk->unit; 3693 query_disk.Lun = 0; 3694 query_disk.UnMapped = 0; 3695 sprintf(&query_disk.diskDeviceName[0], "%s%d", 3696 disk->ad_disk->d_name, disk->ad_disk->d_unit); 3697 } 3698 mtx_unlock(&sc->aac_container_lock); 3699 3700 error = copyout((caddr_t)&query_disk, uptr, 3701 sizeof(struct aac_query_disk)); 3702 3703 return (error); 3704 } 3705 3706 static void 3707 aac_get_bus_info(struct aac_softc *sc) 3708 { 3709 struct aac_fib *fib; 3710 struct aac_ctcfg *c_cmd; 3711 struct aac_ctcfg_resp *c_resp; 3712 struct aac_vmioctl *vmi; 3713 struct aac_vmi_businf_resp *vmi_resp; 3714 struct aac_getbusinf businfo; 3715 struct aac_sim *caminf; 3716 device_t child; 3717 int i, found, error; 3718 3719 mtx_lock(&sc->aac_io_lock); 3720 aac_alloc_sync_fib(sc, &fib); 3721 c_cmd = (struct aac_ctcfg *)&fib->data[0]; 3722 bzero(c_cmd, sizeof(struct aac_ctcfg)); 3723 3724 c_cmd->Command = VM_ContainerConfig; 3725 c_cmd->cmd = CT_GET_SCSI_METHOD; 3726 c_cmd->param = 0; 3727 3728 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3729 sizeof(struct aac_ctcfg)); 3730 if (error) { 3731 device_printf(sc->aac_dev, "Error %d sending " 3732 "VM_ContainerConfig command\n", error); 3733 aac_release_sync_fib(sc); 3734 mtx_unlock(&sc->aac_io_lock); 3735 return; 3736 } 3737 3738 c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; 3739 if (c_resp->Status != ST_OK) { 3740 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", 3741 c_resp->Status); 3742 aac_release_sync_fib(sc); 3743 mtx_unlock(&sc->aac_io_lock); 3744 return; 3745 } 3746 3747 sc->scsi_method_id = c_resp->param; 3748 3749 vmi = (struct aac_vmioctl *)&fib->data[0]; 3750 bzero(vmi, sizeof(struct aac_vmioctl)); 3751 3752 vmi->Command = VM_Ioctl; 3753 vmi->ObjType = FT_DRIVE; 3754 vmi->MethId = sc->scsi_method_id; 3755 vmi->ObjId = 0; 3756 vmi->IoctlCmd = GetBusInfo; 3757 3758 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3759 sizeof(struct aac_vmi_businf_resp)); 3760 if (error) { 3761 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", 3762 error); 3763 aac_release_sync_fib(sc); 3764 mtx_unlock(&sc->aac_io_lock); 3765 return; 3766 } 3767 3768 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; 3769 if (vmi_resp->Status != ST_OK) { 3770 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", 3771 vmi_resp->Status); 3772 aac_release_sync_fib(sc); 3773 mtx_unlock(&sc->aac_io_lock); 3774 return; 3775 } 3776 3777 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); 3778 
aac_release_sync_fib(sc); 3779 mtx_unlock(&sc->aac_io_lock); 3780 3781 found = 0; 3782 for (i = 0; i < businfo.BusCount; i++) { 3783 if (businfo.BusValid[i] != AAC_BUS_VALID) 3784 continue; 3785 3786 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), 3787 M_AACBUF, M_NOWAIT | M_ZERO); 3788 if (caminf == NULL) { 3789 device_printf(sc->aac_dev, 3790 "No memory to add passthrough bus %d\n", i); 3791 break; 3792 } 3793 3794 child = device_add_child(sc->aac_dev, "aacp", -1); 3795 if (child == NULL) { 3796 device_printf(sc->aac_dev, 3797 "device_add_child failed for passthrough bus %d\n", 3798 i); 3799 free(caminf, M_AACBUF); 3800 break; 3801 } 3802 3803 caminf->TargetsPerBus = businfo.TargetsPerBus; 3804 caminf->BusNumber = i; 3805 caminf->InitiatorBusId = businfo.InitiatorBusId[i]; 3806 caminf->aac_sc = sc; 3807 caminf->sim_dev = child; 3808 3809 device_set_ivars(child, caminf); 3810 device_set_desc(child, "SCSI Passthrough Bus"); 3811 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); 3812 3813 found = 1; 3814 } 3815 3816 if (found) 3817 bus_generic_attach(sc->aac_dev); 3818 } 3819
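
/*
 * Illustrative sketch only, not part of the driver: a child "aacp"
 * passthrough bus attached via aac_get_bus_info() above would
 * typically recover its per-bus parameters from the ivars set there,
 * roughly as follows (example_aacp_attach is a hypothetical name):
 *
 *	static int
 *	example_aacp_attach(device_t dev)
 *	{
 *		struct aac_sim *sim = device_get_ivars(dev);
 *
 *		printf("bus %d: %d targets, initiator id %d\n",
 *		    sim->BusNumber, sim->TargetsPerBus,
 *		    sim->InitiatorBusId);
 *		return (0);
 *	}
 */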