/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2001 Adaptec, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
 */
#define AAC_DRIVERNAME		"aac"

#include "opt_aac.h"

/* #include <stddef.h> */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/poll.h>
#include <sys/ioccom.h>

#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/aac/aacreg.h>
#include <sys/aac_ioctl.h>
#include <dev/aac/aacvar.h>
#include <dev/aac/aac_tables.h>

static void	aac_startup(void *arg);
static void	aac_add_container(struct aac_softc *sc,
		    struct aac_mntinforesp *mir, int f);
static void	aac_get_bus_info(struct aac_softc *sc);
static void	aac_daemon(void *arg);

/* Command Processing */
static void	aac_timeout(struct aac_softc *sc);
static void	aac_complete(void *context, int pending);
static int	aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
static void	aac_bio_complete(struct aac_command *cm);
static int	aac_wait_command(struct aac_command *cm);
static void	aac_command_thread(struct aac_softc *sc);

/* Command Buffer Management */
static void	aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static int	aac_alloc_commands(struct aac_softc *sc);
static void	aac_free_commands(struct aac_softc *sc);
static void	aac_unmap_command(struct aac_command *cm);

/* Hardware Interface */
static int	aac_alloc(struct aac_softc *sc);
static void
aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
		    int error);
static int	aac_check_firmware(struct aac_softc *sc);
static int	aac_init(struct aac_softc *sc);
static int	aac_sync_command(struct aac_softc *sc, u_int32_t command,
		    u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
		    u_int32_t arg3, u_int32_t *sp);
static int	aac_setup_intr(struct aac_softc *sc);
static int	aac_enqueue_fib(struct aac_softc *sc, int queue,
		    struct aac_command *cm);
static int	aac_dequeue_fib(struct aac_softc *sc, int queue,
		    u_int32_t *fib_size, struct aac_fib **fib_addr);
static int	aac_enqueue_response(struct aac_softc *sc, int queue,
		    struct aac_fib *fib);

/* StrongARM interface */
static int	aac_sa_get_fwstatus(struct aac_softc *sc);
static void	aac_sa_qnotify(struct aac_softc *sc, int qbit);
static int	aac_sa_get_istatus(struct aac_softc *sc);
static void	aac_sa_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
		    u_int32_t arg0, u_int32_t arg1,
		    u_int32_t arg2, u_int32_t arg3);
static int	aac_sa_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_sa_set_interrupts(struct aac_softc *sc, int enable);

const struct aac_interface aac_sa_interface = {
	aac_sa_get_fwstatus,
	aac_sa_qnotify,
	aac_sa_get_istatus,
	aac_sa_clear_istatus,
	aac_sa_set_mailbox,
	aac_sa_get_mailbox,
	aac_sa_set_interrupts,
	NULL, NULL, NULL
};

/* i960Rx interface */
static int	aac_rx_get_fwstatus(struct aac_softc *sc);
static void	aac_rx_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rx_get_istatus(struct aac_softc *sc);
static void	aac_rx_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
		    u_int32_t arg0, u_int32_t arg1,
		    u_int32_t arg2, u_int32_t arg3);
static int	aac_rx_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rx_set_interrupts(struct aac_softc *sc, int enable);
static int	aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_rx_get_outb_queue(struct aac_softc *sc);
static void	aac_rx_set_outb_queue(struct aac_softc *sc, int index);

const struct aac_interface aac_rx_interface = {
	aac_rx_get_fwstatus,
	aac_rx_qnotify,
	aac_rx_get_istatus,
	aac_rx_clear_istatus,
	aac_rx_set_mailbox,
	aac_rx_get_mailbox,
	aac_rx_set_interrupts,
	aac_rx_send_command,
	aac_rx_get_outb_queue,
	aac_rx_set_outb_queue
};

/* Rocket/MIPS interface */
static int	aac_rkt_get_fwstatus(struct aac_softc *sc);
static void	aac_rkt_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rkt_get_istatus(struct aac_softc *sc);
static void	aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
		    u_int32_t arg0, u_int32_t arg1,
		    u_int32_t arg2, u_int32_t arg3);
static int	aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
static int	aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_rkt_get_outb_queue(struct aac_softc *sc);
static void	aac_rkt_set_outb_queue(struct aac_softc *sc, int index);

const struct aac_interface aac_rkt_interface = {
	aac_rkt_get_fwstatus,
	aac_rkt_qnotify,
	aac_rkt_get_istatus,
	aac_rkt_clear_istatus,
	aac_rkt_set_mailbox,
	aac_rkt_get_mailbox,
	aac_rkt_set_interrupts,
	aac_rkt_send_command,
	aac_rkt_get_outb_queue,
	aac_rkt_set_outb_queue
};

/* Debugging and Diagnostics */
static void	aac_describe_controller(struct aac_softc *sc);
static const char *aac_describe_code(const struct aac_code_lookup *table,
		    u_int32_t code);

/* Management Interface */
static d_open_t		aac_open;
static d_ioctl_t	aac_ioctl;
static d_poll_t		aac_poll;
static void	aac_cdevpriv_dtor(void *arg);
static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
static void	aac_handle_aif(struct aac_softc *sc,
		    struct aac_fib *fib);
static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_return_aif(struct aac_softc *sc,
		    struct aac_fib_context *ctx, caddr_t uptr);
static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
static void	aac_ioctl_event(struct aac_softc *sc,
		    struct aac_event *event, void *arg);
static struct aac_mntinforesp *
	aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);

static struct cdevsw aac_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	aac_open,
	.d_ioctl =	aac_ioctl,
	.d_poll =	aac_poll,
	.d_name =	"aac",
};

static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");

/* sysctl node */
SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "AAC driver parameters");

/*
 * Device Interface
 */

/*
 * Initialize the controller and softc
 */
int
aac_attach(struct aac_softc *sc)
{
	int error, unit;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialize per-controller queues.
	 */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);
	aac_initq_bio(sc);

	/*
	 * Initialize command-completion task.
	 */
	TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);

	/* mark controller as suspended until we get ourselves organised */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Check that the firmware on the card is supported.
	 */
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/*
	 * Initialize locks
	 */
	mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

	/* Initialize the clock daemon callout. */
	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);

	/*
	 * Initialize the adapter.
	 */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if ((error = aac_init(sc)) != 0)
		return(error);

	/*
	 * Allocate and connect our interrupt.
	 */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/*
	 * Print a little information about the controller.
	 */
	aac_describe_controller(sc);

	/*
	 * Add sysctls.
	 */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->aac_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->aac_dev)),
	    OID_AUTO, "firmware_build", CTLFLAG_RD,
	    &sc->aac_revision.buildNumber, 0,
	    "firmware build number");

	/*
	 * Register to probe our containers later.
	 */
	sc->aac_ich.ich_func = aac_startup;
	sc->aac_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->aac_ich) != 0) {
		device_printf(sc->aac_dev,
		    "can't establish configuration hook\n");
		return(ENXIO);
	}

	/*
	 * Make the control device.
	 */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "aac%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread */
	if (kproc_create((void(*)(void *))aac_command_thread, sc,
	    &sc->aifthread, 0, 0, "aac%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
		    "shutdown event registration failed\n");

	/* Register with CAM for the non-DASD devices */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
		TAILQ_INIT(&sc->aac_sim_tqh);
		aac_get_bus_info(sc);
	}

	mtx_lock(&sc->aac_io_lock);
	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}

static void
aac_daemon(void *arg)
{
	struct timeval tv;
	struct aac_softc *sc;
	struct aac_fib *fib;

	sc = arg;
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (callout_pending(&sc->aac_daemontime) ||
	    callout_active(&sc->aac_daemontime) == 0)
		return;
	getmicrotime(&tv);
	aac_alloc_sync_fib(sc, &fib);
	*(uint32_t *)fib->data = tv.tv_sec;
	aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t));
	aac_release_sync_fib(sc);
	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
}

void
aac_add_event(struct aac_softc *sc, struct aac_event *event)
{

	switch (event->ev_type & AAC_EVENT_MASK) {
	case AAC_EVENT_CMFREE:
		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
		break;
	default:
		device_printf(sc->aac_dev, "aac_add_event: unknown event %d\n",
		    event->ev_type);
		break;
	}
}

/*
 * Request information of container #cid
 */
static struct aac_mntinforesp *
aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
{
	struct aac_mntinfo *mi;

	mi = (struct aac_mntinfo *)&fib->data[0];
	/* use 64-bit LBA if enabled */
	mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
	    VM_NameServe64 : VM_NameServe;
	mi->MntType = FT_FILESYS;
	mi->MntCount = cid;

	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_mntinfo))) {
		device_printf(sc->aac_dev, "Error probing container %d\n", cid);
		return (NULL);
	}

	return ((struct aac_mntinforesp *)&fib->data[0]);
}

/*
 * Probe for containers, create disks.
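 * Called from the config_intrhook established in aac_attach(): containers
 * are probed with synchronous FIBs while the controller is still marked
 * suspended, and interrupts are only unmasked once probing is complete.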
 */
static void
aac_startup(void *arg)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_mntinforesp *mir;
	int count = 0, i = 0;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);

	/* loop over possible containers */
	do {
		if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
			continue;
		if (i == 0)
			count = mir->MntRespCount;
		aac_add_container(sc, mir, 0);
		i++;
	} while ((i < count) && (i < AAC_MAX_CONTAINERS));

	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	/* mark the controller up */
	sc->aac_state &= ~AAC_STATE_SUSPEND;

	/* poke the bus to actually attach the child devices */
	if (bus_generic_attach(sc->aac_dev))
		device_printf(sc->aac_dev, "bus_generic_attach failed\n");

	/* disconnect ourselves from the intrhook chain */
	config_intrhook_disestablish(&sc->aac_ich);

	/* enable interrupts now */
	AAC_UNMASK_INTERRUPTS(sc);
}

/*
 * Create a device to represent a new container
 */
static void
aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
{
	struct aac_container *co;
	device_t child;

	/*
	 * Check container volume type for validity.  Note that many of
	 * the possible types may never show up.
	 */
	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
		co = (struct aac_container *)malloc(sizeof *co, M_AACBUF,
		    M_NOWAIT | M_ZERO);
		if (co == NULL)
			panic("Out of memory?!");
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d",
		    mir->MntTable[0].ObjectId,
		    mir->MntTable[0].FileSystemName,
		    mir->MntTable[0].Capacity, mir->MntTable[0].VolType);

		if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
			device_printf(sc->aac_dev, "device_add_child failed\n");
		else
			device_set_ivars(child, co);
		device_set_desc(child, aac_describe_code(aac_container_types,
		    mir->MntTable[0].VolType));
		co->co_disk = child;
		co->co_found = f;
		bcopy(&mir->MntTable[0], &co->co_mntobj,
		    sizeof(struct aac_mntobj));
		mtx_lock(&sc->aac_container_lock);
		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
		mtx_unlock(&sc->aac_container_lock);
	}
}

/*
 * Allocate resources associated with (sc)
 */
static int
aac_alloc(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Create DMA tag for mapping buffers into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
			       BUS_SPACE_MAXADDR :
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sc->aac_max_sectors << 9, /* maxsize */
			       sc->aac_sg_tablesize,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       busdma_lock_mutex,	/* lockfunc */
			       &sc->aac_io_lock,	/* lockfuncarg */
			       &sc->aac_buffer_dmat)) {
		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for mapping FIBs into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsize */
			       1,			/* nsegments */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_fib_dmat)) {
		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for the common structure and allocate it.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       8192 + sizeof(struct aac_common), /* maxsize */
			       1,			/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_common_dmat)) {
		device_printf(sc->aac_dev,
		    "can't allocate common structure DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
	    BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
		device_printf(sc->aac_dev, "can't allocate common structure\n");
		return (ENOMEM);
	}

	/*
	 * Work around a bug in the 2120 and 2200 that cannot DMA commands
	 * below address 8192 in physical memory.
	 * XXX If the padding is not needed, can it be put to use instead
	 * of ignored?
	 */
	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
	    sc->aac_common, 8192 + sizeof(*sc->aac_common),
	    aac_common_map, sc, 0);

	if (sc->aac_common_busaddr < 8192) {
		sc->aac_common = (struct aac_common *)
		    ((uint8_t *)sc->aac_common + 8192);
		sc->aac_common_busaddr += 8192;
	}
	bzero(sc->aac_common, sizeof(*sc->aac_common));

	/* Allocate some FIBs and associated command structs */
	TAILQ_INIT(&sc->aac_fibmap_tqh);
	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
	    M_AACBUF, M_WAITOK|M_ZERO);
	while (sc->total_fibs < sc->aac_max_fibs) {
		if (aac_alloc_commands(sc) != 0)
			break;
	}
	if (sc->total_fibs == 0)
		return (ENOMEM);

	return (0);
}

/*
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.
 */
void
aac_free(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	free(sc->aac_commands, M_AACBUF);

	/* destroy the common area */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
		    sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/* disconnect the interrupt handler */
	if (sc->aac_intr)
		bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
	if (sc->aac_irq != NULL) {
		bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
		    rman_get_rid(sc->aac_irq), sc->aac_irq);
		pci_release_msi(sc->aac_dev);
	}

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->aac_regs_res0), sc->aac_regs_res0);
	if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->aac_regs_res1), sc->aac_regs_res1);
}

/*
 * Disconnect from the controller completely, in preparation for unload.
 */
int
aac_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim	*sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	callout_drain(&sc->aac_daemontime);

	mtx_lock(&sc->aac_io_lock);
	while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0);
	}
	mtx_unlock(&sc->aac_io_lock);
	KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
	    ("%s: invalid detach state", __func__));

	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
		error = device_delete_child(dev, co->co_disk);
		if (error)
			return (error);
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		free(co, M_AACBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
		error = device_delete_child(dev, sim->sim_dev);
		if (error)
			return (error);
		free(sim, M_AACBUF);
	}

	if ((error = aac_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aac_free(sc);

	mtx_destroy(&sc->aac_aifq_lock);
	mtx_destroy(&sc->aac_io_lock);
	mtx_destroy(&sc->aac_container_lock);

	return(0);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
 *
 * Note that we can assume that the bioq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
int
aac_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O completed already
	 */
	device_printf(sc->aac_dev, "shutting down controller...");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	cc = (struct aac_close_command *)&fib->data[0];

	bzero(cc, sizeof(struct aac_close_command));
	cc->Command = VM_CloseAll;
	cc->ContainerId = 0xffffffff;
	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_close_command)))
		printf("FAILED.\n");
	else
		printf("done\n");
#if 0
	else {
		fib->data[0] = 0;
		/*
		 * XXX Issuing this command to the controller makes it shut down
		 * but also keeps it from coming back up without a reset of the
		 * PCI bus.  This is not desirable if you are just unloading the
		 * driver module with the intent to reload it later.
		 */
		if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
		    fib, 1)) {
			printf("FAILED.\n");
		} else {
			printf("done.\n");
		}
	}
#endif

	AAC_MASK_INTERRUPTS(sc);
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}

/*
 * Bring the controller to a quiescent state, ready for system suspend.
 */
int
aac_suspend(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state |= AAC_STATE_SUSPEND;

	AAC_MASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Bring the controller back to a state ready for operation.
 */
int
aac_resume(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state &= ~AAC_STATE_SUSPEND;
	AAC_UNMASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Interrupt handler for NEW_COMM interface.
 */
void
aac_new_intr(void *arg)
{
	struct aac_softc *sc;
	u_int32_t index, fast;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_lock(&sc->aac_io_lock);
	while (1) {
		index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			break;
		if (index & 2) {
			if (index == 0xfffffffe) {
				/*
				 * XXX This means that the controller wants
				 * more work.  Ignore it for now.
				 */
				continue;
			}
			/* AIF */
			fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF,
			    M_NOWAIT | M_ZERO);
			if (fib == NULL) {
				/* If we're really this short on memory,
				 * hopefully breaking out of the handler will
				 * allow something to get freed.  This
				 * actually sucks a whole lot.
				 */
				break;
			}
			index &= ~2;
			for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
				((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4);
			aac_handle_aif(sc, fib);
			free(fib, M_AACBUF);

			/*
			 * AIF memory is owned by the adapter, so let it
			 * know that we are done with it.
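			 * (The low two bits of the outbound queue value are
			 * flags: bit 1 marked this entry as an AIF above,
			 * while bit 0 indicates a fast response for the
			 * ordinary command completions handled below.)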
			 */
			AAC_SET_OUTB_QUEUE(sc, index);
			AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
		} else {
			fast = index & 1;
			cm = sc->aac_commands + (index >> 2);
			fib = cm->cm_fib;
			if (fast) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
				*((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
			}
			aac_remove_busy(cm);
			aac_unmap_command(cm);
			cm->cm_flags |= AAC_CMD_COMPLETED;

			/* is there a completion handler? */
			if (cm->cm_complete != NULL) {
				cm->cm_complete(cm);
			} else {
				/* assume that someone is sleeping on this
				 * command
				 */
				wakeup(cm);
			}
			sc->flags &= ~AAC_QUEUE_FRZN;
		}
	}
	/* see if we can start some more I/O */
	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
		aac_startio(sc);

	mtx_unlock(&sc->aac_io_lock);
}

/*
 * Interrupt filter for !NEW_COMM interface.
 */
int
aac_filter(void *arg)
{
	struct aac_softc *sc;
	u_int16_t reason;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Read the status register directly.  This is faster than taking the
	 * driver lock and reading the queues directly.  It also saves having
	 * to turn parts of the driver lock into a spin mutex, which would be
	 * ugly.
	 */
	reason = AAC_GET_ISTATUS(sc);
	AAC_CLEAR_ISTATUS(sc, reason);

	/* handle completion processing */
	if (reason & AAC_DB_RESPONSE_READY)
		taskqueue_enqueue(taskqueue_fast, &sc->aac_task_complete);

	/* controller wants to talk to us */
	if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
		/*
		 * XXX Make sure that we don't get fooled by strange messages
		 * that start with a NULL.
		 */
		if ((reason & AAC_DB_PRINTF) &&
		    (sc->aac_common->ac_printf[0] == 0))
			sc->aac_common->ac_printf[0] = 32;

		/*
		 * This might miss doing the actual wakeup.  However, the
		 * msleep that this is waking up has a timeout, so it will
		 * wake up eventually.  AIFs and printfs are low enough
		 * priority that they can handle hanging out for a few seconds
		 * if needed.
		 */
		wakeup(sc->aifthread);
	}
	return (FILTER_HANDLED);
}

/*
 * Command Processing
 */

/*
 * Start as much queued I/O as possible on the controller
 */
void
aac_startio(struct aac_softc *sc)
{
	struct aac_command *cm;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	for (;;) {
		/*
		 * This flag might be set if the card is out of resources.
		 * Checking it here prevents an infinite loop of deferrals.
		 */
		if (sc->flags & AAC_QUEUE_FRZN)
			break;

		/*
		 * Try to get a command that's been put off for lack of
		 * resources
		 */
		cm = aac_dequeue_ready(sc);

		/*
		 * Try to build a command off the bio queue (ignore error
		 * return)
		 */
		if (cm == NULL)
			aac_bio_command(sc, &cm);

		/* nothing to do? */
		if (cm == NULL)
			break;

		/* don't map more than once */
		if (cm->cm_flags & AAC_CMD_MAPPED)
			panic("aac: command %p already mapped", cm);

		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
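		 * (bus_dmamap_load may defer the mapping and return
		 * EINPROGRESS; in that case the queue is frozen below and
		 * aac_map_command_sg() runs later from the busdma callback.)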
		 */
		if (cm->cm_datalen != 0) {
			if (cm->cm_flags & AAC_REQ_BIO)
				error = bus_dmamap_load_bio(
				    sc->aac_buffer_dmat, cm->cm_datamap,
				    (struct bio *)cm->cm_private,
				    aac_map_command_sg, cm, 0);
			else
				error = bus_dmamap_load(sc->aac_buffer_dmat,
				    cm->cm_datamap, cm->cm_data,
				    cm->cm_datalen, aac_map_command_sg, cm, 0);
			if (error == EINPROGRESS) {
				fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
				sc->flags |= AAC_QUEUE_FRZN;
			} else if (error != 0)
				panic("aac_startio: unexpected error %d from "
				    "busdma", error);
		} else
			aac_map_command_sg(cm, NULL, 0, 0);
	}
}

/*
 * Handle notification of one or more FIBs coming from the controller.
 */
static void
aac_command_thread(struct aac_softc *sc)
{
	struct aac_fib *fib;
	u_int32_t fib_size;
	int size, retval;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	sc->aifflags = AAC_AIFFLAGS_RUNNING;

	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
		retval = 0;
		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
			    "aifthd", AAC_PERIODIC_INTERVAL * hz);

		/*
		 * First see if any FIBs need to be allocated.  This needs
		 * to be called without the driver lock because contigmalloc
		 * can sleep.
		 */
		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
			mtx_unlock(&sc->aac_io_lock);
			aac_alloc_commands(sc);
			mtx_lock(&sc->aac_io_lock);
			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
			aac_startio(sc);
		}

		/*
		 * While we're here, check to see if any commands are stuck.
		 * This is pretty low-priority, so it's ok if it doesn't
		 * always fire.
		 */
		if (retval == EWOULDBLOCK)
			aac_timeout(sc);

		/* Check the hardware printf message buffer */
		if (sc->aac_common->ac_printf[0] != 0)
			aac_print_printf(sc);

		/* Also check to see if the adapter has a command for us. */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			continue;
		for (;;) {
			if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
			    &fib_size, &fib))
				break;

			AAC_PRINT_FIB(sc, fib);

			switch (fib->Header.Command) {
			case AifRequest:
				aac_handle_aif(sc, fib);
				break;
			default:
				device_printf(sc->aac_dev, "unknown command "
				    "from controller\n");
				break;
			}

			if ((fib->Header.XferState == 0) ||
			    (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
				break;
			}

			/* Return the AIF to the controller. */
			if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
				*(AAC_FSAStatus*)fib->data = ST_OK;

				/* XXX Compute the Size field? */
				size = fib->Header.Size;
				if (size > sizeof(struct aac_fib)) {
					size = sizeof(struct aac_fib);
					fib->Header.Size = size;
				}
				/*
				 * Since we did not generate this command, it
				 * cannot go through the normal
				 * enqueue->startio chain.
				 */
				aac_enqueue_response(sc,
				    AAC_ADAP_NORM_RESP_QUEUE,
				    fib);
			}
		}
	}
	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
	mtx_unlock(&sc->aac_io_lock);
	wakeup(sc->aac_dev);

	kproc_exit(0);
}

/*
 * Process completed commands.
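 * This runs from the fast taskqueue task that aac_filter() enqueues when the
 * adapter signals AAC_DB_RESPONSE_READY.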
 */
static void
aac_complete(void *context, int pending)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	u_int32_t fib_size;

	sc = (struct aac_softc *)context;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);

	/* pull completed commands off the queue */
	for (;;) {
		/* look for completed FIBs on our queue */
		if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
		    &fib))
			break;	/* nothing to do */

		/* get the command, unmap and hand off for processing */
		cm = sc->aac_commands + fib->Header.SenderData;
		if (cm == NULL) {
			AAC_PRINT_FIB(sc, fib);
			break;
		}
		if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0)
			device_printf(sc->aac_dev,
			    "COMMAND %p COMPLETED AFTER %d SECONDS\n",
			    cm, (int)(time_uptime-cm->cm_timestamp));

		aac_remove_busy(cm);

		aac_unmap_command(cm);
		cm->cm_flags |= AAC_CMD_COMPLETED;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this command */
			wakeup(cm);
		}
	}

	/* see if we can start some more I/O */
	sc->flags &= ~AAC_QUEUE_FRZN;
	aac_startio(sc);

	mtx_unlock(&sc->aac_io_lock);
}

/*
 * Handle a bio submitted from a disk device.
 */
void
aac_submit_bio(struct bio *bp)
{
	struct aac_disk *ad;
	struct aac_softc *sc;

	ad = (struct aac_disk *)bp->bio_disk->d_drv1;
	sc = ad->ad_controller;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* queue the BIO and try to get some work done */
	aac_enqueue_bio(sc, bp);
	aac_startio(sc);
}

/*
 * Get a bio and build a command to go with it.
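 * Returns ENOMEM (re-queueing the bio) if either a free command or a queued
 * bio is unavailable; the scatter/gather table chosen here is filled in later
 * by aac_map_command_sg().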
 */
static int
aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_disk *ad;
	struct bio *bp;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the resources we will need */
	cm = NULL;
	bp = NULL;
	if (aac_alloc_command(sc, &cm))	/* get a command */
		goto fail;
	if ((bp = aac_dequeue_bio(sc)) == NULL)
		goto fail;

	/* fill out the command */
	cm->cm_datalen = bp->bio_bcount;
	cm->cm_complete = aac_bio_complete;
	cm->cm_flags = AAC_REQ_BIO;
	cm->cm_private = bp;
	cm->cm_timestamp = time_uptime;

	/* build the FIB */
	fib = cm->cm_fib;
	fib->Header.Size = sizeof(struct aac_fib_header);
	fib->Header.XferState =
	    AAC_FIBSTATE_HOSTOWNED   |
	    AAC_FIBSTATE_INITIALISED |
	    AAC_FIBSTATE_EMPTY	     |
	    AAC_FIBSTATE_FROMHOST    |
	    AAC_FIBSTATE_REXPECTED   |
	    AAC_FIBSTATE_NORM	     |
	    AAC_FIBSTATE_ASYNC	     |
	    AAC_FIBSTATE_FAST_RESPONSE;

	/* build the read/write request */
	ad = (struct aac_disk *)bp->bio_disk->d_drv1;

	if (sc->flags & AAC_FLAGS_RAW_IO) {
		struct aac_raw_io *raw;
		raw = (struct aac_raw_io *)&fib->data[0];
		fib->Header.Command = RawIo;
		raw->BlockNumber = (u_int64_t)bp->bio_pblkno;
		raw->ByteCount = bp->bio_bcount;
		raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
		raw->BpTotal = 0;
		raw->BpComplete = 0;
		fib->Header.Size += sizeof(struct aac_raw_io);
		cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
		if (bp->bio_cmd == BIO_READ) {
			raw->Flags = 1;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			raw->Flags = 0;
			cm->cm_flags |= AAC_CMD_DATAOUT;
		}
	} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
		fib->Header.Command = ContainerCommand;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread *br;
			br = (struct aac_blockread *)&fib->data[0];
			br->Command = VM_CtBlockRead;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->BlockNumber = bp->bio_pblkno;
			br->ByteCount = bp->bio_bcount;
			fib->Header.Size += sizeof(struct aac_blockread);
			cm->cm_sgtable = &br->SgMap;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			struct aac_blockwrite *bw;
			bw = (struct aac_blockwrite *)&fib->data[0];
			bw->Command = VM_CtBlockWrite;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->BlockNumber = bp->bio_pblkno;
			bw->ByteCount = bp->bio_bcount;
			bw->Stable = CUNSTABLE;
			fib->Header.Size += sizeof(struct aac_blockwrite);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = &bw->SgMap;
		}
	} else {
		fib->Header.Command = ContainerCommand64;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread64 *br;
			br = (struct aac_blockread64 *)&fib->data[0];
			br->Command = VM_CtHostRead64;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			br->BlockNumber = bp->bio_pblkno;
			br->Pad = 0;
			br->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockread64);
			cm->cm_flags |= AAC_CMD_DATAIN;
			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
		} else {
			struct aac_blockwrite64 *bw;
			bw = (struct aac_blockwrite64 *)&fib->data[0];
			bw->Command = VM_CtHostWrite64;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->SectorCount = bp->bio_bcount /
			    AAC_BLOCK_SIZE;
			bw->BlockNumber = bp->bio_pblkno;
			bw->Pad = 0;
			bw->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockwrite64);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
		}
	}

	*cmp = cm;
	return(0);

fail:
	if (bp != NULL)
		aac_enqueue_bio(sc, bp);
	if (cm != NULL)
		aac_release_command(cm);
	return(ENOMEM);
}

/*
 * Handle a bio-instigated command that has been completed.
 */
static void
aac_bio_complete(struct aac_command *cm)
{
	struct aac_blockread_response *brr;
	struct aac_blockwrite_response *bwr;
	struct bio *bp;
	AAC_FSAStatus status;

	/* fetch relevant status and then release the command */
	bp = (struct bio *)cm->cm_private;
	if (bp->bio_cmd == BIO_READ) {
		brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
		status = brr->Status;
	} else {
		bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
		status = bwr->Status;
	}
	aac_release_command(cm);

	/* fix up the bio based on status */
	if (status == ST_OK) {
		bp->bio_resid = 0;
	} else {
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
	}
	aac_biodone(bp);
}

/*
 * Submit a command to the controller, return when it completes.
 * XXX This is very dangerous!  If the card has gone out to lunch, we could
 *     be stuck here forever.  At the same time, signals are not caught
 *     because there is a risk that a signal could wakeup the sleep before
 *     the card has a chance to complete the command.  Since there is no way
 *     to cancel a command that is in progress, we can't protect against the
 *     card completing a command late and spamming the command and data
 *     memory.  So, we are held hostage until the command completes.
 */
static int
aac_wait_command(struct aac_command *cm)
{
	struct aac_softc *sc;
	int error;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Put the command on the ready queue and get things going */
	aac_enqueue_ready(cm);
	aac_startio(sc);
	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0);
	return(error);
}

/*
 * Command Buffer Management
 */

/*
 * Allocate a command.
 */
int
aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((cm = aac_dequeue_free(sc)) == NULL) {
		if (sc->total_fibs < sc->aac_max_fibs) {
			mtx_lock(&sc->aac_io_lock);
			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
			mtx_unlock(&sc->aac_io_lock);
			wakeup(sc->aifthread);
		}
		return (EBUSY);
	}

	*cmp = cm;
	return(0);
}

/*
 * Release a command back to the freelist.
 */
void
aac_release_command(struct aac_command *cm)
{
	struct aac_event *event;
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* (re)initialize the command/FIB */
	cm->cm_datalen = 0;
	cm->cm_sgtable = NULL;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
	cm->cm_fib->Header.Flags = 0;
	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;

	/*
	 * These are duplicated in aac_start to cover the case where an
	 * intermediate stage may have destroyed them.  They're left
	 * initialized here for debugging purposes only.
	 */
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
	cm->cm_fib->Header.SenderData = 0;

	aac_enqueue_free(cm);

	if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
		event->ev_callback(sc, event, event->ev_arg);
	}
}

/*
 * Map helper for command/FIB allocation.
 */
static void
aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint64_t *fibphys;

	fibphys = (uint64_t *)arg;

	*fibphys = segs[0].ds_addr;
}

/*
 * Allocate and initialize commands/FIBs for this adapter.
 */
static int
aac_alloc_commands(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fibmap *fm;
	uint64_t fibphys;
	int i, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
		return (ENOMEM);

	fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO);
	if (fm == NULL)
		return (ENOMEM);

	/* allocate the FIBs in DMAable memory and load them */
	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
	    BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
		device_printf(sc->aac_dev,
		    "Not enough contiguous memory available.\n");
		free(fm, M_AACBUF);
		return (ENOMEM);
	}

	/* Ignore errors since this doesn't bounce */
	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
	    sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
	    aac_map_command_helper, &fibphys, 0);

	/* initialize constant fields in the command structure */
	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
		cm = sc->aac_commands + sc->total_fibs;
		fm->aac_commands = cm;
		cm->cm_sc = sc;
		cm->cm_fib = (struct aac_fib *)
		    ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
		cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
		cm->cm_index = sc->total_fibs;

		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
		    &cm->cm_datamap)) != 0)
			break;
		mtx_lock(&sc->aac_io_lock);
		aac_release_command(cm);
		sc->total_fibs++;
		mtx_unlock(&sc->aac_io_lock);
	}

	if (i > 0) {
		mtx_lock(&sc->aac_io_lock);
		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
		mtx_unlock(&sc->aac_io_lock);
		return (0);
	}

	bus_dmamap_unload(sc->aac_fib_dmat,
	    fm->aac_fibmap);
	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
	free(fm, M_AACBUF);
	return (ENOMEM);
}

/*
 * Free FIBs owned by this adapter.
 */
static void
aac_free_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_command *cm;
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
		/*
		 * We check against total_fibs to handle partially
		 * allocated blocks.
		 */
		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
			cm = fm->aac_commands + i;
			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
		}
		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
		free(fm, M_AACBUF);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	cm = (struct aac_command *)arg;
	sc = cm->cm_sc;
	fib = cm->cm_fib;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* copy into the FIB */
	if (cm->cm_sgtable != NULL) {
		if (fib->Header.Command == RawIo) {
			struct aac_sg_tableraw *sg;
			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
				sg->SgEntryRaw[i].Next = 0;
				sg->SgEntryRaw[i].Prev = 0;
				sg->SgEntryRaw[i].Flags = 0;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
			struct aac_sg_table *sg;
			sg = cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
		} else {
			struct aac_sg_table64 *sg;
			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
		}
	}

	/* Fix up the address values in the FIB.  Use the command array index
	 * instead of a pointer since these fields are only 32 bits.
	 * Shift the SenderFibAddress over to make room for the fast response
	 * bit and for the AIF bit.
	 */
	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;

	/* save a pointer to the command for speedy reverse-lookup */
	cm->cm_fib->Header.SenderData = cm->cm_index;

	if (cm->cm_flags & AAC_CMD_DATAIN)
		bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
		    BUS_DMASYNC_PREREAD);
	if (cm->cm_flags & AAC_CMD_DATAOUT)
		bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
		    BUS_DMASYNC_PREWRITE);
	cm->cm_flags |= AAC_CMD_MAPPED;

	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		int count = 10000000L;
		while (AAC_SEND_COMMAND(sc, cm) != 0) {
			if (--count == 0) {
				aac_unmap_command(cm);
				sc->flags |= AAC_QUEUE_FRZN;
				aac_requeue_ready(cm);
			}
			DELAY(5);		/* wait 5 usec. */
		}
	} else {
		/* Put the FIB on the outbound queue */
		if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
			aac_unmap_command(cm);
			sc->flags |= AAC_QUEUE_FRZN;
			aac_requeue_ready(cm);
		}
	}
}

/*
 * Unmap a command from controller-visible space.
 */
static void
aac_unmap_command(struct aac_command *cm)
{
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (!(cm->cm_flags & AAC_CMD_MAPPED))
		return;

	if (cm->cm_datalen != 0) {
		if (cm->cm_flags & AAC_CMD_DATAIN)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
			    BUS_DMASYNC_POSTREAD);
		if (cm->cm_flags & AAC_CMD_DATAOUT)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
			    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
	}
	cm->cm_flags &= ~AAC_CMD_MAPPED;
}

/*
 * Hardware Interface
 */

/*
 * Initialize the adapter.
 */
static void
aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_common_busaddr = segs[0].ds_addr;
}

static int
aac_check_firmware(struct aac_softc *sc)
{
	u_int32_t code, major, minor, options = 0, atu_size = 0;
	int rid, status;
	time_t then;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Wait for the adapter to come ready.
	 */
	then = time_uptime;
	do {
		code = AAC_GET_FWSTATUS(sc);
		if (code & AAC_SELF_TEST_FAILED) {
			device_printf(sc->aac_dev, "FATAL: selftest failed\n");
			return(ENXIO);
		}
		if (code & AAC_KERNEL_PANIC) {
			device_printf(sc->aac_dev,
			    "FATAL: controller kernel panic");
			return(ENXIO);
		}
		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
			device_printf(sc->aac_dev,
			    "FATAL: controller not coming ready, "
			    "status %x\n", code);
			return(ENXIO);
		}
	} while (!(code & AAC_UP_AND_RUNNING));

	/*
	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
	 * firmware version 1.x are not compatible with this driver.
	 */
	if (sc->flags & AAC_FLAGS_PERC2QC) {
		if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
		    NULL)) {
			device_printf(sc->aac_dev,
			    "Error reading firmware version\n");
			return (EIO);
		}

		/* These numbers are stored as ASCII! */
		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
		if (major == 1) {
			device_printf(sc->aac_dev,
			    "Firmware version %d.%d is not supported.\n",
			    major, minor);
			return (EINVAL);
		}
	}

	/*
	 * Retrieve the capabilities/supported options word so we know what
	 * work-arounds to enable.  Some firmware revs don't support this
	 * command.
	 */
	if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
		if (status != AAC_SRB_STS_INVALID_REQUEST) {
			device_printf(sc->aac_dev,
			    "RequestAdapterInfo failed\n");
			return (EIO);
		}
	} else {
		options = AAC_GET_MAILBOX(sc, 1);
		atu_size = AAC_GET_MAILBOX(sc, 2);
		sc->supported_options = options;

		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
			sc->flags |= AAC_FLAGS_4GB_WINDOW;
		if (options & AAC_SUPPORTED_NONDASD)
			sc->flags |= AAC_FLAGS_ENABLE_CAM;
		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
		    && (sizeof(bus_addr_t) > 4)) {
			device_printf(sc->aac_dev,
			    "Enabling 64-bit address support\n");
			sc->flags |= AAC_FLAGS_SG_64BIT;
		}
		if ((options & AAC_SUPPORTED_NEW_COMM)
		    && sc->aac_if->aif_send_command)
			sc->flags |= AAC_FLAGS_NEW_COMM;
		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
	}

	/* Check for broken hardware that does a lower number of commands */
	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);

	/* Remap mem. resource, if required */
	if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
	    atu_size > rman_get_size(sc->aac_regs_res1)) {
		rid = rman_get_rid(sc->aac_regs_res1);
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rid,
		    sc->aac_regs_res1);
		sc->aac_regs_res1 = bus_alloc_resource_anywhere(sc->aac_dev,
		    SYS_RES_MEMORY, &rid, atu_size, RF_ACTIVE);
		if (sc->aac_regs_res1 == NULL) {
			sc->aac_regs_res1 = bus_alloc_resource_any(
			    sc->aac_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
			if (sc->aac_regs_res1 == NULL) {
				device_printf(sc->aac_dev,
				    "couldn't allocate register window\n");
				return (ENXIO);
			}
			sc->flags &= ~AAC_FLAGS_NEW_COMM;
		}
		sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
		sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);

		if (sc->aac_hwif == AAC_HWIF_NARK) {
			sc->aac_regs_res0 = sc->aac_regs_res1;
			sc->aac_btag0 = sc->aac_btag1;
			sc->aac_bhandle0 = sc->aac_bhandle1;
		}
	}

	/* Read preferred settings */
	sc->aac_max_fib_size = sizeof(struct aac_fib);
	sc->aac_max_sectors = 128;			/* 64KB */
	if (sc->flags & AAC_FLAGS_SG_64BIT)
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
		    - sizeof(struct aac_blockwrite64))
		    / sizeof(struct aac_sg_entry64);
	else
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
		    - sizeof(struct aac_blockwrite))
		    / sizeof(struct aac_sg_entry);

	if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
		options = AAC_GET_MAILBOX(sc, 1);
		sc->aac_max_fib_size = (options & 0xFFFF);
		sc->aac_max_sectors = (options >> 16) << 1;
		options = AAC_GET_MAILBOX(sc, 2);
		sc->aac_sg_tablesize = (options >> 16);
		options = AAC_GET_MAILBOX(sc, 3);
		sc->aac_max_fibs = (options & 0xFFFF);
	}
	if (sc->aac_max_fib_size > PAGE_SIZE)
		sc->aac_max_fib_size = PAGE_SIZE;
	sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;

	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		sc->flags |= AAC_FLAGS_RAW_IO;
		device_printf(sc->aac_dev, "Enable Raw I/O\n");
	}
	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
		sc->flags |= AAC_FLAGS_LBA_64BIT;
		device_printf(sc->aac_dev, "Enable 64-bit array\n");
	}

	return (0);
}

static int
aac_init(struct aac_softc *sc)
{
	struct aac_adapter_init	*ip;
	u_int32_t qoffset;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Fill in the init structure.  This tells the adapter about the
	 * physical location of various important shared data structures.
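	 * All addresses handed to the adapter below are bus addresses within
	 * the common area, derived from aac_common_busaddr as recorded by
	 * aac_common_map().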
	 */
	ip = &sc->aac_common->ac_init;
	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
		sc->flags |= AAC_FLAGS_RAW_IO;
	}
	ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;

	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
	    offsetof(struct aac_common, ac_fibs);
	ip->AdapterFibsVirtualAddress = 0;
	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
	ip->AdapterFibAlign = sizeof(struct aac_fib);

	ip->PrintfBufferAddress = sc->aac_common_busaddr +
	    offsetof(struct aac_common, ac_printf);
	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;

	/*
	 * The adapter assumes that pages are 4K in size, except on some
	 * broken firmware versions that do the page->byte conversion twice,
	 * therefore 'assuming' that this value is in 16MB units (2^24).
	 * Round up since the granularity is so high.
	 */
	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
		ip->HostPhysMemPages =
		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
	}
	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */

	ip->InitFlags = 0;
	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED;
		device_printf(sc->aac_dev, "New comm. interface enabled\n");
	}

	ip->MaxIoCommands = sc->aac_max_fibs;
	ip->MaxIoSize = sc->aac_max_sectors << 9;
	ip->MaxFibSize = sc->aac_max_fib_size;

	/*
	 * Initialize FIB queues.  Note that it appears that the layout of the
	 * indexes and the segmentation of the entries may be mandated by the
	 * adapter, which is only told about the base of the queue index fields.
	 *
	 * The initial values of the indices are assumed to inform the adapter
	 * of the sizes of the respective queues, and theoretically it could
	 * work out the entire layout of the queue structures from this.  We
	 * take the easy route and just lay this area out like everyone else
	 * does.
	 *
	 * The Linux driver uses a much more complex scheme whereby several
	 * header records are kept for each queue.  We use a couple of generic
	 * list manipulation functions which 'know' the size of each list by
	 * virtue of a table.
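	 *
	 * (Each producer/consumer index pair below is initialised to the
	 * queue's entry count, which per the note above also tells the
	 * adapter how large each queue is.)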
1912 */ 1913 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN; 1914 qoffset &= ~(AAC_QUEUE_ALIGN - 1); 1915 sc->aac_queues = 1916 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset); 1917 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset; 1918 1919 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1920 AAC_HOST_NORM_CMD_ENTRIES; 1921 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1922 AAC_HOST_NORM_CMD_ENTRIES; 1923 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1924 AAC_HOST_HIGH_CMD_ENTRIES; 1925 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1926 AAC_HOST_HIGH_CMD_ENTRIES; 1927 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1928 AAC_ADAP_NORM_CMD_ENTRIES; 1929 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1930 AAC_ADAP_NORM_CMD_ENTRIES; 1931 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1932 AAC_ADAP_HIGH_CMD_ENTRIES; 1933 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1934 AAC_ADAP_HIGH_CMD_ENTRIES; 1935 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1936 AAC_HOST_NORM_RESP_ENTRIES; 1937 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1938 AAC_HOST_NORM_RESP_ENTRIES; 1939 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1940 AAC_HOST_HIGH_RESP_ENTRIES; 1941 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1942 AAC_HOST_HIGH_RESP_ENTRIES; 1943 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1944 AAC_ADAP_NORM_RESP_ENTRIES; 1945 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1946 AAC_ADAP_NORM_RESP_ENTRIES; 1947 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1948 AAC_ADAP_HIGH_RESP_ENTRIES; 1949 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1950 AAC_ADAP_HIGH_RESP_ENTRIES; 1951 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = 1952 &sc->aac_queues->qt_HostNormCmdQueue[0]; 1953 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = 1954 &sc->aac_queues->qt_HostHighCmdQueue[0]; 1955 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = 1956 &sc->aac_queues->qt_AdapNormCmdQueue[0]; 1957 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = 1958 &sc->aac_queues->qt_AdapHighCmdQueue[0]; 1959 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = 1960 &sc->aac_queues->qt_HostNormRespQueue[0]; 1961 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = 1962 &sc->aac_queues->qt_HostHighRespQueue[0]; 1963 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = 1964 &sc->aac_queues->qt_AdapNormRespQueue[0]; 1965 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = 1966 &sc->aac_queues->qt_AdapHighRespQueue[0]; 1967 1968 /* 1969 * Do controller-type-specific initialisation 1970 */ 1971 switch (sc->aac_hwif) { 1972 case AAC_HWIF_I960RX: 1973 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0); 1974 break; 1975 case AAC_HWIF_RKT: 1976 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0); 1977 break; 1978 default: 1979 break; 1980 } 1981 1982 /* 1983 * Give the init structure to the controller. 
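 * This is a synchronous AAC_MONKER_INITSTRUCT command carrying the bus
 * address of ac_init in the mailbox.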
1984 */ 1985 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, 1986 sc->aac_common_busaddr + 1987 offsetof(struct aac_common, ac_init), 0, 0, 0, 1988 NULL)) { 1989 device_printf(sc->aac_dev, 1990 "error establishing init structure\n"); 1991 error = EIO; 1992 goto out; 1993 } 1994 1995 error = 0; 1996 out: 1997 return(error); 1998 } 1999 2000 static int 2001 aac_setup_intr(struct aac_softc *sc) 2002 { 2003 2004 if (sc->flags & AAC_FLAGS_NEW_COMM) { 2005 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 2006 INTR_MPSAFE|INTR_TYPE_BIO, NULL, 2007 aac_new_intr, sc, &sc->aac_intr)) { 2008 device_printf(sc->aac_dev, "can't set up interrupt\n"); 2009 return (EINVAL); 2010 } 2011 } else { 2012 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 2013 INTR_TYPE_BIO, aac_filter, NULL, 2014 sc, &sc->aac_intr)) { 2015 device_printf(sc->aac_dev, 2016 "can't set up interrupt filter\n"); 2017 return (EINVAL); 2018 } 2019 } 2020 return (0); 2021 } 2022 2023 /* 2024 * Send a synchronous command to the controller and wait for a result. 2025 * Indicate if the controller completed the command with an error status. 2026 */ 2027 static int 2028 aac_sync_command(struct aac_softc *sc, u_int32_t command, 2029 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, 2030 u_int32_t *sp) 2031 { 2032 time_t then; 2033 u_int32_t status; 2034 2035 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2036 2037 /* populate the mailbox */ 2038 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); 2039 2040 /* ensure the sync command doorbell flag is cleared */ 2041 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2042 2043 /* then set it to signal the adapter */ 2044 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); 2045 2046 /* spin waiting for the command to complete */ 2047 then = time_uptime; 2048 do { 2049 if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) { 2050 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); 2051 return(EIO); 2052 } 2053 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); 2054 2055 /* clear the completion flag */ 2056 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2057 2058 /* get the command status */ 2059 status = AAC_GET_MAILBOX(sc, 0); 2060 if (sp != NULL) 2061 *sp = status; 2062 2063 if (status != AAC_SRB_STS_SUCCESS) 2064 return (-1); 2065 return(0); 2066 } 2067 2068 int 2069 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, 2070 struct aac_fib *fib, u_int16_t datasize) 2071 { 2072 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2073 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2074 2075 if (datasize > AAC_FIB_DATASIZE) 2076 return(EINVAL); 2077 2078 /* 2079 * Set up the sync FIB 2080 */ 2081 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | 2082 AAC_FIBSTATE_INITIALISED | 2083 AAC_FIBSTATE_EMPTY; 2084 fib->Header.XferState |= xferstate; 2085 fib->Header.Command = command; 2086 fib->Header.StructType = AAC_FIBTYPE_TFIB; 2087 fib->Header.Size = sizeof(struct aac_fib_header) + datasize; 2088 fib->Header.SenderSize = sizeof(struct aac_fib); 2089 fib->Header.SenderFibAddress = 0; /* Not needed */ 2090 fib->Header.ReceiverFibAddress = sc->aac_common_busaddr + 2091 offsetof(struct aac_common, 2092 ac_sync_fib); 2093 2094 /* 2095 * Give the FIB to the controller, wait for a response. 
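 * The sync FIB lives in the shared aac_common area, so only its bus address
 * (already stored in ReceiverFibAddress above) needs to be passed.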
2096 */ 2097 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, 2098 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) { 2099 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); 2100 return(EIO); 2101 } 2102 2103 return (0); 2104 } 2105 2106 /* 2107 * Adapter-space FIB queue manipulation 2108 * 2109 * Note that the queue implementation here is a little funky; neither the PI or 2110 * CI will ever be zero. This behaviour is a controller feature. 2111 */ 2112 static const struct { 2113 int size; 2114 int notify; 2115 } aac_qinfo[] = { 2116 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 2117 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 2118 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 2119 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 2120 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 2121 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 2122 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 2123 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 2124 }; 2125 2126 /* 2127 * Atomically insert an entry into the nominated queue, returns 0 on success or 2128 * EBUSY if the queue is full. 2129 * 2130 * Note: it would be more efficient to defer notifying the controller in 2131 * the case where we may be inserting several entries in rapid succession, 2132 * but implementing this usefully may be difficult (it would involve a 2133 * separate queue/notify interface). 2134 */ 2135 static int 2136 aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm) 2137 { 2138 u_int32_t pi, ci; 2139 int error; 2140 u_int32_t fib_size; 2141 u_int32_t fib_addr; 2142 2143 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2144 2145 fib_size = cm->cm_fib->Header.Size; 2146 fib_addr = cm->cm_fib->Header.ReceiverFibAddress; 2147 2148 /* get the producer/consumer indices */ 2149 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2150 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2151 2152 /* wrap the queue? */ 2153 if (pi >= aac_qinfo[queue].size) 2154 pi = 0; 2155 2156 /* check for queue full */ 2157 if ((pi + 1) == ci) { 2158 error = EBUSY; 2159 goto out; 2160 } 2161 2162 /* 2163 * To avoid a race with its completion interrupt, place this command on 2164 * the busy queue prior to advertising it to the controller. 2165 */ 2166 aac_enqueue_busy(cm); 2167 2168 /* populate queue entry */ 2169 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2170 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2171 2172 /* update producer index */ 2173 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2174 2175 /* notify the adapter if we know how */ 2176 if (aac_qinfo[queue].notify != 0) 2177 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2178 2179 error = 0; 2180 2181 out: 2182 return(error); 2183 } 2184 2185 /* 2186 * Atomically remove one entry from the nominated queue, returns 0 on 2187 * success or ENOENT if the queue is empty. 
2188 */ 2189 static int 2190 aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, 2191 struct aac_fib **fib_addr) 2192 { 2193 u_int32_t pi, ci; 2194 u_int32_t fib_index; 2195 int error; 2196 int notify; 2197 2198 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2199 2200 /* get the producer/consumer indices */ 2201 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2202 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2203 2204 /* check for queue empty */ 2205 if (ci == pi) { 2206 error = ENOENT; 2207 goto out; 2208 } 2209 2210 /* wrap the pi so the following test works */ 2211 if (pi >= aac_qinfo[queue].size) 2212 pi = 0; 2213 2214 notify = 0; 2215 if (ci == pi + 1) 2216 notify++; 2217 2218 /* wrap the queue? */ 2219 if (ci >= aac_qinfo[queue].size) 2220 ci = 0; 2221 2222 /* fetch the entry */ 2223 *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size; 2224 2225 switch (queue) { 2226 case AAC_HOST_NORM_CMD_QUEUE: 2227 case AAC_HOST_HIGH_CMD_QUEUE: 2228 /* 2229 * The aq_fib_addr is only 32 bits wide so it can't be counted 2230 * on to hold an address. For AIFs, the adapter assumes 2231 * that it's giving us an address into the array of AIF fibs. 2232 * Therefore, we have to convert it to an index. 2233 */ 2234 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr / 2235 sizeof(struct aac_fib); 2236 *fib_addr = &sc->aac_common->ac_fibs[fib_index]; 2237 break; 2238 2239 case AAC_HOST_NORM_RESP_QUEUE: 2240 case AAC_HOST_HIGH_RESP_QUEUE: 2241 { 2242 struct aac_command *cm; 2243 2244 /* 2245 * As above, an index is used instead of an actual address. 2246 * Gotta shift the index to account for the fast response 2247 * bit. No other correction is needed since this value was 2248 * originally provided by the driver via the SenderFibAddress 2249 * field. 2250 */ 2251 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr; 2252 cm = sc->aac_commands + (fib_index >> 2); 2253 *fib_addr = cm->cm_fib; 2254 2255 /* 2256 * Is this a fast response? If it is, update the fib fields in 2257 * local memory since the whole fib isn't DMA'd back up. 2258 */ 2259 if (fib_index & 0x01) { 2260 (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP; 2261 *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL; 2262 } 2263 break; 2264 } 2265 default: 2266 panic("Invalid queue in aac_dequeue_fib()"); 2267 break; 2268 } 2269 2270 /* update consumer index */ 2271 sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1; 2272 2273 /* if we have made the queue un-full, notify the adapter */ 2274 if (notify && (aac_qinfo[queue].notify != 0)) 2275 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2276 error = 0; 2277 2278 out: 2279 return(error); 2280 } 2281 2282 /* 2283 * Put our response to an Adapter Initiated FIB (AIF) on the response queue 2284 */ 2285 static int 2286 aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib) 2287 { 2288 u_int32_t pi, ci; 2289 int error; 2290 u_int32_t fib_size; 2291 u_int32_t fib_addr; 2292 2293 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2294 2295 /* Tell the adapter where the FIB is */ 2296 fib_size = fib->Header.Size; 2297 fib_addr = fib->Header.SenderFibAddress; 2298 fib->Header.ReceiverFibAddress = fib_addr; 2299 2300 /* get the producer/consumer indices */ 2301 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2302 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2303 2304 /* wrap the queue?
*/ 2305 if (pi >= aac_qinfo[queue].size) 2306 pi = 0; 2307 2308 /* check for queue full */ 2309 if ((pi + 1) == ci) { 2310 error = EBUSY; 2311 goto out; 2312 } 2313 2314 /* populate queue entry */ 2315 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2316 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2317 2318 /* update producer index */ 2319 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2320 2321 /* notify the adapter if we know how */ 2322 if (aac_qinfo[queue].notify != 0) 2323 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2324 2325 error = 0; 2326 2327 out: 2328 return(error); 2329 } 2330 2331 /* 2332 * Check for commands that have been outstanding for a suspiciously long time, 2333 * and complain about them. 2334 */ 2335 static void 2336 aac_timeout(struct aac_softc *sc) 2337 { 2338 struct aac_command *cm; 2339 time_t deadline; 2340 int timedout, code; 2341 2342 /* 2343 * Traverse the busy command list, bitch about late commands once 2344 * only. 2345 */ 2346 timedout = 0; 2347 deadline = time_uptime - AAC_CMD_TIMEOUT; 2348 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { 2349 if ((cm->cm_timestamp < deadline) 2350 && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) { 2351 cm->cm_flags |= AAC_CMD_TIMEDOUT; 2352 device_printf(sc->aac_dev, 2353 "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n", 2354 cm, cm->cm_fib->Header.Command, 2355 (int)(time_uptime-cm->cm_timestamp)); 2356 AAC_PRINT_FIB(sc, cm->cm_fib); 2357 timedout++; 2358 } 2359 } 2360 2361 if (timedout) { 2362 code = AAC_GET_FWSTATUS(sc); 2363 if (code != AAC_UP_AND_RUNNING) { 2364 device_printf(sc->aac_dev, "WARNING! Controller is no " 2365 "longer running! code= 0x%x\n", code); 2366 } 2367 } 2368 } 2369 2370 /* 2371 * Interface Function Vectors 2372 */ 2373 2374 /* 2375 * Read the current firmware status word. 2376 */ 2377 static int 2378 aac_sa_get_fwstatus(struct aac_softc *sc) 2379 { 2380 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2381 2382 return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS)); 2383 } 2384 2385 static int 2386 aac_rx_get_fwstatus(struct aac_softc *sc) 2387 { 2388 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2389 2390 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? 2391 AAC_RX_OMR0 : AAC_RX_FWSTATUS)); 2392 } 2393 2394 static int 2395 aac_rkt_get_fwstatus(struct aac_softc *sc) 2396 { 2397 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2398 2399 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? 
2400 AAC_RKT_OMR0 : AAC_RKT_FWSTATUS)); 2401 } 2402 2403 /* 2404 * Notify the controller of a change in a given queue 2405 */ 2406 2407 static void 2408 aac_sa_qnotify(struct aac_softc *sc, int qbit) 2409 { 2410 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2411 2412 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); 2413 } 2414 2415 static void 2416 aac_rx_qnotify(struct aac_softc *sc, int qbit) 2417 { 2418 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2419 2420 AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit); 2421 } 2422 2423 static void 2424 aac_rkt_qnotify(struct aac_softc *sc, int qbit) 2425 { 2426 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2427 2428 AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit); 2429 } 2430 2431 /* 2432 * Get the interrupt reason bits 2433 */ 2434 static int 2435 aac_sa_get_istatus(struct aac_softc *sc) 2436 { 2437 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2438 2439 return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0)); 2440 } 2441 2442 static int 2443 aac_rx_get_istatus(struct aac_softc *sc) 2444 { 2445 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2446 2447 return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR)); 2448 } 2449 2450 static int 2451 aac_rkt_get_istatus(struct aac_softc *sc) 2452 { 2453 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2454 2455 return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR)); 2456 } 2457 2458 /* 2459 * Clear some interrupt reason bits 2460 */ 2461 static void 2462 aac_sa_clear_istatus(struct aac_softc *sc, int mask) 2463 { 2464 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2465 2466 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask); 2467 } 2468 2469 static void 2470 aac_rx_clear_istatus(struct aac_softc *sc, int mask) 2471 { 2472 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2473 2474 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask); 2475 } 2476 2477 static void 2478 aac_rkt_clear_istatus(struct aac_softc *sc, int mask) 2479 { 2480 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2481 2482 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask); 2483 } 2484 2485 /* 2486 * Populate the mailbox and set the command word 2487 */ 2488 static void 2489 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, 2490 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2491 { 2492 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2493 2494 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command); 2495 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0); 2496 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1); 2497 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2); 2498 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3); 2499 } 2500 2501 static void 2502 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, 2503 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2504 { 2505 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2506 2507 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command); 2508 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0); 2509 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1); 2510 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2); 2511 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3); 2512 } 2513 2514 static void 2515 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, 2516 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2517 { 2518 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2519 2520 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command); 2521 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0); 2522 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1); 2523 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2); 2524 AAC_MEM1_SETREG4(sc, 
AAC_RKT_MAILBOX + 16, arg3); 2525 } 2526 2527 /* 2528 * Fetch the immediate command status word 2529 */ 2530 static int 2531 aac_sa_get_mailbox(struct aac_softc *sc, int mb) 2532 { 2533 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2534 2535 return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4))); 2536 } 2537 2538 static int 2539 aac_rx_get_mailbox(struct aac_softc *sc, int mb) 2540 { 2541 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2542 2543 return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4))); 2544 } 2545 2546 static int 2547 aac_rkt_get_mailbox(struct aac_softc *sc, int mb) 2548 { 2549 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2550 2551 return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4))); 2552 } 2553 2554 /* 2555 * Set/clear interrupt masks 2556 */ 2557 static void 2558 aac_sa_set_interrupts(struct aac_softc *sc, int enable) 2559 { 2560 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2561 2562 if (enable) { 2563 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS); 2564 } else { 2565 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0); 2566 } 2567 } 2568 2569 static void 2570 aac_rx_set_interrupts(struct aac_softc *sc, int enable) 2571 { 2572 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2573 2574 if (enable) { 2575 if (sc->flags & AAC_FLAGS_NEW_COMM) 2576 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM); 2577 else 2578 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS); 2579 } else { 2580 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0); 2581 } 2582 } 2583 2584 static void 2585 aac_rkt_set_interrupts(struct aac_softc *sc, int enable) 2586 { 2587 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2588 2589 if (enable) { 2590 if (sc->flags & AAC_FLAGS_NEW_COMM) 2591 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM); 2592 else 2593 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS); 2594 } else { 2595 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0); 2596 } 2597 } 2598 2599 /* 2600 * New comm. 
interface: Send command functions 2601 */ 2602 static int 2603 aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm) 2604 { 2605 u_int32_t index, device; 2606 2607 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); 2608 2609 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); 2610 if (index == 0xffffffffL) 2611 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); 2612 if (index == 0xffffffffL) 2613 return index; 2614 aac_enqueue_busy(cm); 2615 device = index; 2616 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2617 device += 4; 2618 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2619 device += 4; 2620 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); 2621 AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index); 2622 return 0; 2623 } 2624 2625 static int 2626 aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm) 2627 { 2628 u_int32_t index, device; 2629 2630 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); 2631 2632 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); 2633 if (index == 0xffffffffL) 2634 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); 2635 if (index == 0xffffffffL) 2636 return index; 2637 aac_enqueue_busy(cm); 2638 device = index; 2639 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2640 device += 4; 2641 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2642 device += 4; 2643 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); 2644 AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index); 2645 return 0; 2646 } 2647 2648 /* 2649 * New comm. interface: get, set outbound queue index 2650 */ 2651 static int 2652 aac_rx_get_outb_queue(struct aac_softc *sc) 2653 { 2654 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2655 2656 return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE)); 2657 } 2658 2659 static int 2660 aac_rkt_get_outb_queue(struct aac_softc *sc) 2661 { 2662 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2663 2664 return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE)); 2665 } 2666 2667 static void 2668 aac_rx_set_outb_queue(struct aac_softc *sc, int index) 2669 { 2670 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2671 2672 AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index); 2673 } 2674 2675 static void 2676 aac_rkt_set_outb_queue(struct aac_softc *sc, int index) 2677 { 2678 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2679 2680 AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index); 2681 } 2682 2683 /* 2684 * Debugging and Diagnostics 2685 */ 2686 2687 /* 2688 * Print some information about the controller. 
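 * The information comes from the RequestAdapterInfo and, when supported,
 * RequestSupplementAdapterInfo synchronous FIBs.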
2689 */ 2690 static void 2691 aac_describe_controller(struct aac_softc *sc) 2692 { 2693 struct aac_fib *fib; 2694 struct aac_adapter_info *info; 2695 char *adapter_type = "Adaptec RAID controller"; 2696 2697 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2698 2699 mtx_lock(&sc->aac_io_lock); 2700 aac_alloc_sync_fib(sc, &fib); 2701 2702 fib->data[0] = 0; 2703 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { 2704 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); 2705 aac_release_sync_fib(sc); 2706 mtx_unlock(&sc->aac_io_lock); 2707 return; 2708 } 2709 2710 /* save the kernel revision structure for later use */ 2711 info = (struct aac_adapter_info *)&fib->data[0]; 2712 sc->aac_revision = info->KernelRevision; 2713 2714 if (bootverbose) { 2715 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " 2716 "(%dMB cache, %dMB execution), %s\n", 2717 aac_describe_code(aac_cpu_variant, info->CpuVariant), 2718 info->ClockSpeed, info->TotalMem / (1024 * 1024), 2719 info->BufferMem / (1024 * 1024), 2720 info->ExecutionMem / (1024 * 1024), 2721 aac_describe_code(aac_battery_platform, 2722 info->batteryPlatform)); 2723 2724 device_printf(sc->aac_dev, 2725 "Kernel %d.%d-%d, Build %d, S/N %6X\n", 2726 info->KernelRevision.external.comp.major, 2727 info->KernelRevision.external.comp.minor, 2728 info->KernelRevision.external.comp.dash, 2729 info->KernelRevision.buildNumber, 2730 (u_int32_t)(info->SerialNumber & 0xffffff)); 2731 2732 device_printf(sc->aac_dev, "Supported Options=%b\n", 2733 sc->supported_options, 2734 "\20" 2735 "\1SNAPSHOT" 2736 "\2CLUSTERS" 2737 "\3WCACHE" 2738 "\4DATA64" 2739 "\5HOSTTIME" 2740 "\6RAID50" 2741 "\7WINDOW4GB" 2742 "\10SCSIUPGD" 2743 "\11SOFTERR" 2744 "\12NORECOND" 2745 "\13SGMAP64" 2746 "\14ALARM" 2747 "\15NONDASD" 2748 "\16SCSIMGT" 2749 "\17RAIDSCSI" 2750 "\21ADPTINFO" 2751 "\22NEWCOMM" 2752 "\23ARRAY64BIT" 2753 "\24HEATSENSOR"); 2754 } 2755 2756 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { 2757 fib->data[0] = 0; 2758 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) 2759 device_printf(sc->aac_dev, 2760 "RequestSupplementAdapterInfo failed\n"); 2761 else 2762 adapter_type = ((struct aac_supplement_adapter_info *) 2763 &fib->data[0])->AdapterTypeText; 2764 } 2765 device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n", 2766 adapter_type, 2767 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, 2768 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); 2769 2770 aac_release_sync_fib(sc); 2771 mtx_unlock(&sc->aac_io_lock); 2772 } 2773 2774 /* 2775 * Look up a text description of a numeric error code and return a pointer to 2776 * same. 
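 * The entry following the NULL terminator is returned for codes that are
 * not present in the table.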
2777 */ 2778 static const char * 2779 aac_describe_code(const struct aac_code_lookup *table, u_int32_t code) 2780 { 2781 int i; 2782 2783 for (i = 0; table[i].string != NULL; i++) 2784 if (table[i].code == code) 2785 return(table[i].string); 2786 return(table[i + 1].string); 2787 } 2788 2789 /* 2790 * Management Interface 2791 */ 2792 2793 static int 2794 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2795 { 2796 struct aac_softc *sc; 2797 2798 sc = dev->si_drv1; 2799 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2800 device_busy(sc->aac_dev); 2801 devfs_set_cdevpriv(sc, aac_cdevpriv_dtor); 2802 2803 return 0; 2804 } 2805 2806 static int 2807 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2808 { 2809 union aac_statrequest *as; 2810 struct aac_softc *sc; 2811 int error = 0; 2812 2813 as = (union aac_statrequest *)arg; 2814 sc = dev->si_drv1; 2815 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2816 2817 switch (cmd) { 2818 case AACIO_STATS: 2819 switch (as->as_item) { 2820 case AACQ_FREE: 2821 case AACQ_BIO: 2822 case AACQ_READY: 2823 case AACQ_BUSY: 2824 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, 2825 sizeof(struct aac_qstat)); 2826 break; 2827 default: 2828 error = ENOENT; 2829 break; 2830 } 2831 break; 2832 2833 case FSACTL_SENDFIB: 2834 case FSACTL_SEND_LARGE_FIB: 2835 arg = *(caddr_t*)arg; 2836 case FSACTL_LNX_SENDFIB: 2837 case FSACTL_LNX_SEND_LARGE_FIB: 2838 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); 2839 error = aac_ioctl_sendfib(sc, arg); 2840 break; 2841 case FSACTL_SEND_RAW_SRB: 2842 arg = *(caddr_t*)arg; 2843 case FSACTL_LNX_SEND_RAW_SRB: 2844 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); 2845 error = aac_ioctl_send_raw_srb(sc, arg); 2846 break; 2847 case FSACTL_AIF_THREAD: 2848 case FSACTL_LNX_AIF_THREAD: 2849 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); 2850 error = EINVAL; 2851 break; 2852 case FSACTL_OPEN_GET_ADAPTER_FIB: 2853 arg = *(caddr_t*)arg; 2854 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: 2855 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); 2856 error = aac_open_aif(sc, arg); 2857 break; 2858 case FSACTL_GET_NEXT_ADAPTER_FIB: 2859 arg = *(caddr_t*)arg; 2860 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: 2861 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); 2862 error = aac_getnext_aif(sc, arg); 2863 break; 2864 case FSACTL_CLOSE_GET_ADAPTER_FIB: 2865 arg = *(caddr_t*)arg; 2866 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: 2867 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); 2868 error = aac_close_aif(sc, arg); 2869 break; 2870 case FSACTL_MINIPORT_REV_CHECK: 2871 arg = *(caddr_t*)arg; 2872 case FSACTL_LNX_MINIPORT_REV_CHECK: 2873 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); 2874 error = aac_rev_check(sc, arg); 2875 break; 2876 case FSACTL_QUERY_DISK: 2877 arg = *(caddr_t*)arg; 2878 case FSACTL_LNX_QUERY_DISK: 2879 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); 2880 error = aac_query_disk(sc, arg); 2881 break; 2882 case FSACTL_DELETE_DISK: 2883 case FSACTL_LNX_DELETE_DISK: 2884 /* 2885 * We don't trust the underland to tell us when to delete a 2886 * container, rather we rely on an AIF coming from the 2887 * controller 2888 */ 2889 error = 0; 2890 break; 2891 case FSACTL_GET_PCI_INFO: 2892 arg = *(caddr_t*)arg; 2893 case FSACTL_LNX_GET_PCI_INFO: 2894 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO"); 2895 
error = aac_get_pci_info(sc, arg); 2896 break; 2897 case FSACTL_GET_FEATURES: 2898 arg = *(caddr_t*)arg; 2899 case FSACTL_LNX_GET_FEATURES: 2900 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES"); 2901 error = aac_supported_features(sc, arg); 2902 break; 2903 default: 2904 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); 2905 error = EINVAL; 2906 break; 2907 } 2908 return(error); 2909 } 2910 2911 static int 2912 aac_poll(struct cdev *dev, int poll_events, struct thread *td) 2913 { 2914 struct aac_softc *sc; 2915 struct aac_fib_context *ctx; 2916 int revents; 2917 2918 sc = dev->si_drv1; 2919 revents = 0; 2920 2921 mtx_lock(&sc->aac_aifq_lock); 2922 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) { 2923 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 2924 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { 2925 revents |= poll_events & (POLLIN | POLLRDNORM); 2926 break; 2927 } 2928 } 2929 } 2930 mtx_unlock(&sc->aac_aifq_lock); 2931 2932 if (revents == 0) { 2933 if (poll_events & (POLLIN | POLLRDNORM)) 2934 selrecord(td, &sc->rcv_select); 2935 } 2936 2937 return (revents); 2938 } 2939 2940 static void 2941 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) 2942 { 2943 2944 switch (event->ev_type) { 2945 case AAC_EVENT_CMFREE: 2946 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2947 if (aac_alloc_command(sc, (struct aac_command **)arg)) { 2948 aac_add_event(sc, event); 2949 return; 2950 } 2951 free(event, M_AACBUF); 2952 wakeup(arg); 2953 break; 2954 default: 2955 break; 2956 } 2957 } 2958 2959 /* 2960 * Send a FIB supplied from userspace 2961 */ 2962 static int 2963 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) 2964 { 2965 struct aac_command *cm; 2966 int size, error; 2967 2968 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2969 2970 cm = NULL; 2971 2972 /* 2973 * Get a command 2974 */ 2975 mtx_lock(&sc->aac_io_lock); 2976 if (aac_alloc_command(sc, &cm)) { 2977 struct aac_event *event; 2978 2979 event = malloc(sizeof(struct aac_event), M_AACBUF, 2980 M_NOWAIT | M_ZERO); 2981 if (event == NULL) { 2982 error = EBUSY; 2983 mtx_unlock(&sc->aac_io_lock); 2984 goto out; 2985 } 2986 event->ev_type = AAC_EVENT_CMFREE; 2987 event->ev_callback = aac_ioctl_event; 2988 event->ev_arg = &cm; 2989 aac_add_event(sc, event); 2990 msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0); 2991 } 2992 mtx_unlock(&sc->aac_io_lock); 2993 2994 /* 2995 * Fetch the FIB header, then re-copy to get data as well. 2996 */ 2997 if ((error = copyin(ufib, cm->cm_fib, 2998 sizeof(struct aac_fib_header))) != 0) 2999 goto out; 3000 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); 3001 if (size > sc->aac_max_fib_size) { 3002 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", 3003 size, sc->aac_max_fib_size); 3004 size = sc->aac_max_fib_size; 3005 } 3006 if ((error = copyin(ufib, cm->cm_fib, size)) != 0) 3007 goto out; 3008 cm->cm_fib->Header.Size = size; 3009 cm->cm_timestamp = time_uptime; 3010 3011 /* 3012 * Pass the FIB to the controller, wait for it to complete. 3013 */ 3014 mtx_lock(&sc->aac_io_lock); 3015 error = aac_wait_command(cm); 3016 mtx_unlock(&sc->aac_io_lock); 3017 if (error != 0) { 3018 device_printf(sc->aac_dev, 3019 "aac_wait_command return %d\n", error); 3020 goto out; 3021 } 3022 3023 /* 3024 * Copy the FIB and data back out to the caller. 
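 * The size is clamped again in case the adapter handed back an oversized
 * FIB.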
3025 */ 3026 size = cm->cm_fib->Header.Size; 3027 if (size > sc->aac_max_fib_size) { 3028 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", 3029 size, sc->aac_max_fib_size); 3030 size = sc->aac_max_fib_size; 3031 } 3032 error = copyout(cm->cm_fib, ufib, size); 3033 3034 out: 3035 if (cm != NULL) { 3036 mtx_lock(&sc->aac_io_lock); 3037 aac_release_command(cm); 3038 mtx_unlock(&sc->aac_io_lock); 3039 } 3040 return(error); 3041 } 3042 3043 /* 3044 * Send a passthrough FIB supplied from userspace 3045 */ 3046 static int 3047 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) 3048 { 3049 struct aac_command *cm; 3050 struct aac_event *event; 3051 struct aac_fib *fib; 3052 struct aac_srb *srbcmd, *user_srb; 3053 struct aac_sg_entry *sge; 3054 void *srb_sg_address, *ureply; 3055 uint32_t fibsize, srb_sg_bytecount; 3056 int error, transfer_data; 3057 3058 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3059 3060 cm = NULL; 3061 transfer_data = 0; 3062 fibsize = 0; 3063 user_srb = (struct aac_srb *)arg; 3064 3065 mtx_lock(&sc->aac_io_lock); 3066 if (aac_alloc_command(sc, &cm)) { 3067 event = malloc(sizeof(struct aac_event), M_AACBUF, 3068 M_NOWAIT | M_ZERO); 3069 if (event == NULL) { 3070 error = EBUSY; 3071 mtx_unlock(&sc->aac_io_lock); 3072 goto out; 3073 } 3074 event->ev_type = AAC_EVENT_CMFREE; 3075 event->ev_callback = aac_ioctl_event; 3076 event->ev_arg = &cm; 3077 aac_add_event(sc, event); 3078 msleep(cm, &sc->aac_io_lock, 0, "aacraw", 0); 3079 } 3080 mtx_unlock(&sc->aac_io_lock); 3081 3082 cm->cm_data = NULL; 3083 fib = cm->cm_fib; 3084 srbcmd = (struct aac_srb *)fib->data; 3085 error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t)); 3086 if (error != 0) 3087 goto out; 3088 if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) { 3089 error = EINVAL; 3090 goto out; 3091 } 3092 error = copyin(user_srb, srbcmd, fibsize); 3093 if (error != 0) 3094 goto out; 3095 srbcmd->function = 0; 3096 srbcmd->retry_limit = 0; 3097 if (srbcmd->sg_map.SgCount > 1) { 3098 error = EINVAL; 3099 goto out; 3100 } 3101 3102 /* Retrieve correct SG entries. 
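 * The caller may pass either 32-bit or (on amd64) 64-bit SG entries, but in
 * either case only a single entry is accepted (checked above).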
*/ 3103 if (fibsize == (sizeof(struct aac_srb) + 3104 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { 3105 struct aac_sg_entry sg; 3106 3107 sge = srbcmd->sg_map.SgEntry; 3108 3109 if ((error = copyin(sge, &sg, sizeof(sg))) != 0) 3110 goto out; 3111 3112 srb_sg_bytecount = sg.SgByteCount; 3113 srb_sg_address = (void *)(uintptr_t)sg.SgAddress; 3114 } 3115 #ifdef __amd64__ 3116 else if (fibsize == (sizeof(struct aac_srb) + 3117 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { 3118 struct aac_sg_entry64 *sge64; 3119 struct aac_sg_entry64 sg; 3120 3121 sge = NULL; 3122 sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; 3123 3124 if ((error = copyin(sge64, &sg, sizeof(sg))) != 0) 3125 goto out; 3126 3127 srb_sg_bytecount = sg.SgByteCount; 3128 srb_sg_address = (void *)sg.SgAddress; 3129 if (sge64->SgAddress > 0xffffffffull && 3130 (sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 3131 error = EINVAL; 3132 goto out; 3133 } 3134 } 3135 #endif 3136 else { 3137 error = EINVAL; 3138 goto out; 3139 } 3140 ureply = (char *)arg + fibsize; 3141 srbcmd->data_len = srb_sg_bytecount; 3142 if (srbcmd->sg_map.SgCount == 1) 3143 transfer_data = 1; 3144 3145 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; 3146 if (transfer_data) { 3147 cm->cm_datalen = srb_sg_bytecount; 3148 cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT); 3149 if (cm->cm_data == NULL) { 3150 error = ENOMEM; 3151 goto out; 3152 } 3153 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) 3154 cm->cm_flags |= AAC_CMD_DATAIN; 3155 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { 3156 cm->cm_flags |= AAC_CMD_DATAOUT; 3157 error = copyin(srb_sg_address, cm->cm_data, 3158 cm->cm_datalen); 3159 if (error != 0) 3160 goto out; 3161 } 3162 } 3163 3164 fib->Header.Size = sizeof(struct aac_fib_header) + 3165 sizeof(struct aac_srb); 3166 fib->Header.XferState = 3167 AAC_FIBSTATE_HOSTOWNED | 3168 AAC_FIBSTATE_INITIALISED | 3169 AAC_FIBSTATE_EMPTY | 3170 AAC_FIBSTATE_FROMHOST | 3171 AAC_FIBSTATE_REXPECTED | 3172 AAC_FIBSTATE_NORM | 3173 AAC_FIBSTATE_ASYNC | 3174 AAC_FIBSTATE_FAST_RESPONSE; 3175 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ? 3176 ScsiPortCommandU64 : ScsiPortCommand; 3177 3178 mtx_lock(&sc->aac_io_lock); 3179 aac_wait_command(cm); 3180 mtx_unlock(&sc->aac_io_lock); 3181 3182 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) { 3183 error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen); 3184 if (error != 0) 3185 goto out; 3186 } 3187 error = copyout(fib->data, ureply, sizeof(struct aac_srb_response)); 3188 out: 3189 if (cm != NULL) { 3190 if (cm->cm_data != NULL) 3191 free(cm->cm_data, M_AACBUF); 3192 mtx_lock(&sc->aac_io_lock); 3193 aac_release_command(cm); 3194 mtx_unlock(&sc->aac_io_lock); 3195 } 3196 return(error); 3197 } 3198 3199 /* 3200 * cdevpriv interface private destructor. 3201 */ 3202 static void 3203 aac_cdevpriv_dtor(void *arg) 3204 { 3205 struct aac_softc *sc; 3206 3207 sc = arg; 3208 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3209 device_unbusy(sc->aac_dev); 3210 } 3211 3212 /* 3213 * Handle an AIF sent to us by the controller; queue it for later reference. 3214 * If the queue fills up, then drop the older entries. 
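 * Container add/delete and enclosure events are also acted on here
 * (container re-enumeration, CAM rescans) before the AIF is queued.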
3215 */ 3216 static void 3217 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) 3218 { 3219 struct aac_aif_command *aif; 3220 struct aac_container *co, *co_next; 3221 struct aac_fib_context *ctx; 3222 struct aac_mntinforesp *mir; 3223 int next, current, found; 3224 int count = 0, added = 0, i = 0; 3225 uint32_t channel; 3226 3227 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3228 3229 aif = (struct aac_aif_command*)&fib->data[0]; 3230 aac_print_aif(sc, aif); 3231 3232 /* Is it an event that we should care about? */ 3233 switch (aif->command) { 3234 case AifCmdEventNotify: 3235 switch (aif->data.EN.type) { 3236 case AifEnAddContainer: 3237 case AifEnDeleteContainer: 3238 /* 3239 * A container was added or deleted, but the message 3240 * doesn't tell us anything else! Re-enumerate the 3241 * containers and sort things out. 3242 */ 3243 aac_alloc_sync_fib(sc, &fib); 3244 do { 3245 /* 3246 * Ask the controller for its containers one at 3247 * a time. 3248 * XXX What if the controller's list changes 3249 * midway through this enumeration? 3250 * XXX This should be done async. 3251 */ 3252 if ((mir = aac_get_container_info(sc, fib, i)) == NULL) 3253 continue; 3254 if (i == 0) 3255 count = mir->MntRespCount; 3256 /* 3257 * Check the container against our list. 3258 * co->co_found was already set to 0 in a 3259 * previous run. 3260 */ 3261 if ((mir->Status == ST_OK) && 3262 (mir->MntTable[0].VolType != CT_NONE)) { 3263 found = 0; 3264 TAILQ_FOREACH(co, 3265 &sc->aac_container_tqh, 3266 co_link) { 3267 if (co->co_mntobj.ObjectId == 3268 mir->MntTable[0].ObjectId) { 3269 co->co_found = 1; 3270 found = 1; 3271 break; 3272 } 3273 } 3274 /* 3275 * If the container matched, continue 3276 * in the list. 3277 */ 3278 if (found) { 3279 i++; 3280 continue; 3281 } 3282 3283 /* 3284 * This is a new container. Do all the 3285 * appropriate things to set it up. 3286 */ 3287 aac_add_container(sc, mir, 1); 3288 added = 1; 3289 } 3290 i++; 3291 } while ((i < count) && (i < AAC_MAX_CONTAINERS)); 3292 aac_release_sync_fib(sc); 3293 3294 /* 3295 * Go through our list of containers and see which ones 3296 * were not marked 'found'. Since the controller didn't 3297 * list them they must have been deleted. Do the 3298 * appropriate steps to destroy the device. Also reset 3299 * the co->co_found field.
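 * Note that the disk child has to be deleted with the bus topology lock
 * held and the per-controller I/O lock dropped, hence the locking dance.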
3300 */ 3301 co = TAILQ_FIRST(&sc->aac_container_tqh); 3302 while (co != NULL) { 3303 if (co->co_found == 0) { 3304 mtx_unlock(&sc->aac_io_lock); 3305 bus_topo_lock(); 3306 device_delete_child(sc->aac_dev, 3307 co->co_disk); 3308 bus_topo_unlock(); 3309 mtx_lock(&sc->aac_io_lock); 3310 co_next = TAILQ_NEXT(co, co_link); 3311 mtx_lock(&sc->aac_container_lock); 3312 TAILQ_REMOVE(&sc->aac_container_tqh, co, 3313 co_link); 3314 mtx_unlock(&sc->aac_container_lock); 3315 free(co, M_AACBUF); 3316 co = co_next; 3317 } else { 3318 co->co_found = 0; 3319 co = TAILQ_NEXT(co, co_link); 3320 } 3321 } 3322 3323 /* Attach the newly created containers */ 3324 if (added) { 3325 mtx_unlock(&sc->aac_io_lock); 3326 bus_topo_lock(); 3327 bus_generic_attach(sc->aac_dev); 3328 bus_topo_unlock(); 3329 mtx_lock(&sc->aac_io_lock); 3330 } 3331 3332 break; 3333 3334 case AifEnEnclosureManagement: 3335 switch (aif->data.EN.data.EEE.eventType) { 3336 case AIF_EM_DRIVE_INSERTION: 3337 case AIF_EM_DRIVE_REMOVAL: 3338 channel = aif->data.EN.data.EEE.unitID; 3339 if (sc->cam_rescan_cb != NULL) 3340 sc->cam_rescan_cb(sc, 3341 (channel >> 24) & 0xF, 3342 (channel & 0xFFFF)); 3343 break; 3344 } 3345 break; 3346 3347 case AifEnAddJBOD: 3348 case AifEnDeleteJBOD: 3349 channel = aif->data.EN.data.ECE.container; 3350 if (sc->cam_rescan_cb != NULL) 3351 sc->cam_rescan_cb(sc, (channel >> 24) & 0xF, 3352 AAC_CAM_TARGET_WILDCARD); 3353 break; 3354 3355 default: 3356 break; 3357 } 3358 3359 default: 3360 break; 3361 } 3362 3363 /* Copy the AIF data to the AIF queue for ioctl retrieval */ 3364 mtx_lock(&sc->aac_aifq_lock); 3365 current = sc->aifq_idx; 3366 next = (current + 1) % AAC_AIFQ_LENGTH; 3367 if (next == 0) 3368 sc->aifq_filled = 1; 3369 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); 3370 /* modify AIF contexts */ 3371 if (sc->aifq_filled) { 3372 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3373 if (next == ctx->ctx_idx) 3374 ctx->ctx_wrap = 1; 3375 else if (current == ctx->ctx_idx && ctx->ctx_wrap) 3376 ctx->ctx_idx = next; 3377 } 3378 } 3379 sc->aifq_idx = next; 3380 /* On the off chance that someone is sleeping for an aif... */ 3381 if (sc->aac_state & AAC_STATE_AIF_SLEEPER) 3382 wakeup(sc->aac_aifq); 3383 /* Wakeup any poll()ers */ 3384 selwakeuppri(&sc->rcv_select, PRIBIO); 3385 mtx_unlock(&sc->aac_aifq_lock); 3386 } 3387 3388 /* 3389 * Return the Revision of the driver to userspace and check to see if the 3390 * userspace app is possibly compatible. This is extremely bogus since 3391 * our driver doesn't follow Adaptec's versioning system. Cheat by just 3392 * returning what the card reported. 3393 */ 3394 static int 3395 aac_rev_check(struct aac_softc *sc, caddr_t udata) 3396 { 3397 struct aac_rev_check rev_check; 3398 struct aac_rev_check_resp rev_check_resp; 3399 int error = 0; 3400 3401 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3402 3403 /* 3404 * Copyin the revision struct from userspace 3405 */ 3406 if ((error = copyin(udata, (caddr_t)&rev_check, 3407 sizeof(struct aac_rev_check))) != 0) { 3408 return error; 3409 } 3410 3411 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", 3412 rev_check.callingRevision.buildNumber); 3413 3414 /* 3415 * Doctor up the response struct. 
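 * Compatibility is always claimed and the driver's own version constants
 * are reported.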
3416 */ 3417 rev_check_resp.possiblyCompatible = 1; 3418 rev_check_resp.adapterSWRevision.external.comp.major = 3419 AAC_DRIVER_MAJOR_VERSION; 3420 rev_check_resp.adapterSWRevision.external.comp.minor = 3421 AAC_DRIVER_MINOR_VERSION; 3422 rev_check_resp.adapterSWRevision.external.comp.type = 3423 AAC_DRIVER_TYPE; 3424 rev_check_resp.adapterSWRevision.external.comp.dash = 3425 AAC_DRIVER_BUGFIX_LEVEL; 3426 rev_check_resp.adapterSWRevision.buildNumber = 3427 AAC_DRIVER_BUILD; 3428 3429 return(copyout((caddr_t)&rev_check_resp, udata, 3430 sizeof(struct aac_rev_check_resp))); 3431 } 3432 3433 /* 3434 * Pass the fib context to the caller 3435 */ 3436 static int 3437 aac_open_aif(struct aac_softc *sc, caddr_t arg) 3438 { 3439 struct aac_fib_context *fibctx, *ctx; 3440 int error = 0; 3441 3442 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3443 3444 fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO); 3445 if (fibctx == NULL) 3446 return (ENOMEM); 3447 3448 mtx_lock(&sc->aac_aifq_lock); 3449 /* all elements are already 0, add to queue */ 3450 if (sc->fibctx == NULL) 3451 sc->fibctx = fibctx; 3452 else { 3453 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) 3454 ; 3455 ctx->next = fibctx; 3456 fibctx->prev = ctx; 3457 } 3458 3459 /* evaluate unique value */ 3460 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); 3461 ctx = sc->fibctx; 3462 while (ctx != fibctx) { 3463 if (ctx->unique == fibctx->unique) { 3464 fibctx->unique++; 3465 ctx = sc->fibctx; 3466 } else { 3467 ctx = ctx->next; 3468 } 3469 } 3470 mtx_unlock(&sc->aac_aifq_lock); 3471 3472 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); 3473 if (error) 3474 aac_close_aif(sc, (caddr_t)ctx); 3475 return error; 3476 } 3477 3478 /* 3479 * Close the caller's fib context 3480 */ 3481 static int 3482 aac_close_aif(struct aac_softc *sc, caddr_t arg) 3483 { 3484 struct aac_fib_context *ctx; 3485 3486 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3487 3488 mtx_lock(&sc->aac_aifq_lock); 3489 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3490 if (ctx->unique == *(uint32_t *)&arg) { 3491 if (ctx == sc->fibctx) 3492 sc->fibctx = NULL; 3493 else { 3494 ctx->prev->next = ctx->next; 3495 if (ctx->next) 3496 ctx->next->prev = ctx->prev; 3497 } 3498 break; 3499 } 3500 } 3501 mtx_unlock(&sc->aac_aifq_lock); 3502 if (ctx) 3503 free(ctx, M_AACBUF); 3504 3505 return 0; 3506 } 3507 3508 /* 3509 * Pass the caller the next AIF in their queue 3510 */ 3511 static int 3512 aac_getnext_aif(struct aac_softc *sc, caddr_t arg) 3513 { 3514 struct get_adapter_fib_ioctl agf; 3515 struct aac_fib_context *ctx; 3516 int error; 3517 3518 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3519 3520 #ifdef COMPAT_FREEBSD32 3521 if (SV_CURPROC_FLAG(SV_ILP32)) { 3522 struct get_adapter_fib_ioctl32 agf32; 3523 error = copyin(arg, &agf32, sizeof(agf32)); 3524 if (error == 0) { 3525 agf.AdapterFibContext = agf32.AdapterFibContext; 3526 agf.Wait = agf32.Wait; 3527 agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib; 3528 } 3529 } else 3530 #endif 3531 error = copyin(arg, &agf, sizeof(agf)); 3532 if (error == 0) { 3533 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3534 if (agf.AdapterFibContext == ctx->unique) 3535 break; 3536 } 3537 if (!ctx) 3538 return (EFAULT); 3539 3540 error = aac_return_aif(sc, ctx, agf.AifFib); 3541 if (error == EAGAIN && agf.Wait) { 3542 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); 3543 sc->aac_state |= AAC_STATE_AIF_SLEEPER; 3544 while (error == EAGAIN) { 3545 error = tsleep(sc->aac_aifq, 
PRIBIO | 3546 PCATCH, "aacaif", 0); 3547 if (error == 0) 3548 error = aac_return_aif(sc, ctx, agf.AifFib); 3549 } 3550 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; 3551 } 3552 } 3553 return(error); 3554 } 3555 3556 /* 3557 * Hand the next AIF off the top of the queue out to userspace. 3558 */ 3559 static int 3560 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) 3561 { 3562 int current, error; 3563 3564 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3565 3566 mtx_lock(&sc->aac_aifq_lock); 3567 current = ctx->ctx_idx; 3568 if (current == sc->aifq_idx && !ctx->ctx_wrap) { 3569 /* empty */ 3570 mtx_unlock(&sc->aac_aifq_lock); 3571 return (EAGAIN); 3572 } 3573 error = 3574 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); 3575 if (error) 3576 device_printf(sc->aac_dev, 3577 "aac_return_aif: copyout returned %d\n", error); 3578 else { 3579 ctx->ctx_wrap = 0; 3580 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 3581 } 3582 mtx_unlock(&sc->aac_aifq_lock); 3583 return(error); 3584 } 3585 3586 static int 3587 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) 3588 { 3589 struct aac_pci_info { 3590 u_int32_t bus; 3591 u_int32_t slot; 3592 } pciinf; 3593 int error; 3594 3595 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3596 3597 pciinf.bus = pci_get_bus(sc->aac_dev); 3598 pciinf.slot = pci_get_slot(sc->aac_dev); 3599 3600 error = copyout((caddr_t)&pciinf, uptr, 3601 sizeof(struct aac_pci_info)); 3602 3603 return (error); 3604 } 3605 3606 static int 3607 aac_supported_features(struct aac_softc *sc, caddr_t uptr) 3608 { 3609 struct aac_features f; 3610 int error; 3611 3612 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3613 3614 if ((error = copyin(uptr, &f, sizeof (f))) != 0) 3615 return (error); 3616 3617 /* 3618 * When the management driver issues the FSACTL_GET_FEATURES ioctl with 3619 * featuresState all zero, the driver returns the current state of all 3620 * the supported features; the data field is not valid in that case. 3621 * 3622 * When a specific bit is set in featuresState, the driver returns the 3623 * current state of that feature, together with any data associated 3624 * with it in the data field, or performs whatever action the data 3625 * field indicates. 3626 * 3627 */ 3628 if (f.feat.fValue == 0) { 3629 f.feat.fBits.largeLBA = 3630 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; 3631 /* TODO: In the future, add other features state here as well */ 3632 } else { 3633 if (f.feat.fBits.largeLBA) 3634 f.feat.fBits.largeLBA = 3635 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; 3636 /* TODO: Add other features state and data in the future */ 3637 } 3638 3639 error = copyout(&f, uptr, sizeof (f)); 3640 return (error); 3641 } 3642 3643 /* 3644 * Give the userland some information about the container. The AAC arch 3645 * expects the driver to be a SCSI passthrough type driver, so it expects 3646 * the containers to have b:t:l numbers. Fake it.
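 * The bus is the controller's unit number, the target is the aac_disk unit
 * number and the LUN is always zero.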
3647 */ 3648 static int 3649 aac_query_disk(struct aac_softc *sc, caddr_t uptr) 3650 { 3651 struct aac_query_disk query_disk; 3652 struct aac_container *co; 3653 struct aac_disk *disk; 3654 int error, id; 3655 3656 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3657 3658 disk = NULL; 3659 3660 error = copyin(uptr, (caddr_t)&query_disk, 3661 sizeof(struct aac_query_disk)); 3662 if (error) 3663 return (error); 3664 3665 id = query_disk.ContainerNumber; 3666 if (id == -1) 3667 return (EINVAL); 3668 3669 mtx_lock(&sc->aac_container_lock); 3670 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { 3671 if (co->co_mntobj.ObjectId == id) 3672 break; 3673 } 3674 3675 if (co == NULL) { 3676 query_disk.Valid = 0; 3677 query_disk.Locked = 0; 3678 query_disk.Deleted = 1; /* XXX is this right? */ 3679 } else { 3680 disk = device_get_softc(co->co_disk); 3681 query_disk.Valid = 1; 3682 query_disk.Locked = 3683 (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0; 3684 query_disk.Deleted = 0; 3685 query_disk.Bus = device_get_unit(sc->aac_dev); 3686 query_disk.Target = disk->unit; 3687 query_disk.Lun = 0; 3688 query_disk.UnMapped = 0; 3689 sprintf(&query_disk.diskDeviceName[0], "%s%d", 3690 disk->ad_disk->d_name, disk->ad_disk->d_unit); 3691 } 3692 mtx_unlock(&sc->aac_container_lock); 3693 3694 error = copyout((caddr_t)&query_disk, uptr, 3695 sizeof(struct aac_query_disk)); 3696 3697 return (error); 3698 } 3699 3700 static void 3701 aac_get_bus_info(struct aac_softc *sc) 3702 { 3703 struct aac_fib *fib; 3704 struct aac_ctcfg *c_cmd; 3705 struct aac_ctcfg_resp *c_resp; 3706 struct aac_vmioctl *vmi; 3707 struct aac_vmi_businf_resp *vmi_resp; 3708 struct aac_getbusinf businfo; 3709 struct aac_sim *caminf; 3710 device_t child; 3711 int i, found, error; 3712 3713 mtx_lock(&sc->aac_io_lock); 3714 aac_alloc_sync_fib(sc, &fib); 3715 c_cmd = (struct aac_ctcfg *)&fib->data[0]; 3716 bzero(c_cmd, sizeof(struct aac_ctcfg)); 3717 3718 c_cmd->Command = VM_ContainerConfig; 3719 c_cmd->cmd = CT_GET_SCSI_METHOD; 3720 c_cmd->param = 0; 3721 3722 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3723 sizeof(struct aac_ctcfg)); 3724 if (error) { 3725 device_printf(sc->aac_dev, "Error %d sending " 3726 "VM_ContainerConfig command\n", error); 3727 aac_release_sync_fib(sc); 3728 mtx_unlock(&sc->aac_io_lock); 3729 return; 3730 } 3731 3732 c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; 3733 if (c_resp->Status != ST_OK) { 3734 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", 3735 c_resp->Status); 3736 aac_release_sync_fib(sc); 3737 mtx_unlock(&sc->aac_io_lock); 3738 return; 3739 } 3740 3741 sc->scsi_method_id = c_resp->param; 3742 3743 vmi = (struct aac_vmioctl *)&fib->data[0]; 3744 bzero(vmi, sizeof(struct aac_vmioctl)); 3745 3746 vmi->Command = VM_Ioctl; 3747 vmi->ObjType = FT_DRIVE; 3748 vmi->MethId = sc->scsi_method_id; 3749 vmi->ObjId = 0; 3750 vmi->IoctlCmd = GetBusInfo; 3751 3752 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3753 sizeof(struct aac_vmi_businf_resp)); 3754 if (error) { 3755 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", 3756 error); 3757 aac_release_sync_fib(sc); 3758 mtx_unlock(&sc->aac_io_lock); 3759 return; 3760 } 3761 3762 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; 3763 if (vmi_resp->Status != ST_OK) { 3764 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", 3765 vmi_resp->Status); 3766 aac_release_sync_fib(sc); 3767 mtx_unlock(&sc->aac_io_lock); 3768 return; 3769 } 3770 3771 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); 3772 
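/*
 * Done with the sync FIB and the I/O lock; walk the buses the firmware
 * reported and add an 'aacp' passthrough child for each valid one.
 */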
aac_release_sync_fib(sc); 3773 mtx_unlock(&sc->aac_io_lock); 3774 3775 found = 0; 3776 for (i = 0; i < businfo.BusCount; i++) { 3777 if (businfo.BusValid[i] != AAC_BUS_VALID) 3778 continue; 3779 3780 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), 3781 M_AACBUF, M_NOWAIT | M_ZERO); 3782 if (caminf == NULL) { 3783 device_printf(sc->aac_dev, 3784 "No memory to add passthrough bus %d\n", i); 3785 break; 3786 } 3787 3788 child = device_add_child(sc->aac_dev, "aacp", -1); 3789 if (child == NULL) { 3790 device_printf(sc->aac_dev, 3791 "device_add_child failed for passthrough bus %d\n", 3792 i); 3793 free(caminf, M_AACBUF); 3794 break; 3795 } 3796 3797 caminf->TargetsPerBus = businfo.TargetsPerBus; 3798 caminf->BusNumber = i; 3799 caminf->InitiatorBusId = businfo.InitiatorBusId[i]; 3800 caminf->aac_sc = sc; 3801 caminf->sim_dev = child; 3802 3803 device_set_ivars(child, caminf); 3804 device_set_desc(child, "SCSI Passthrough Bus"); 3805 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); 3806 3807 found = 1; 3808 } 3809 3810 if (found) 3811 bus_generic_attach(sc->aac_dev); 3812 } 3813