/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2001-2010 Adaptec, Inc.
 * Copyright (c) 2010-2012 PMC-Sierra, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
 */
#define AAC_DRIVERNAME	"aacraid"

#include "opt_aacraid.h"
#include "opt_compat.h"

/* #include <stddef.h> */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/poll.h>
#include <sys/ioccom.h>

#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/aacraid/aacraid_reg.h>
#include <sys/aac_ioctl.h>
#include <dev/aacraid/aacraid_debug.h>
#include <dev/aacraid/aacraid_var.h>

#ifndef FILTER_HANDLED
#define FILTER_HANDLED	0x02
#endif

static void	aac_add_container(struct aac_softc *sc,
				  struct aac_mntinforesp *mir, int f,
				  u_int32_t uid);
static void	aac_get_bus_info(struct aac_softc *sc);
static void	aac_container_bus(struct aac_softc *sc);
static void	aac_daemon(void *arg);
static int	aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
				   int pages, int nseg, int nseg_new);

/* Command Processing */
static void	aac_timeout(struct aac_softc *sc);
static void	aac_command_thread(struct aac_softc *sc);
static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
			     u_int32_t xferstate, struct aac_fib *fib,
			     u_int16_t datasize);
/* Command Buffer Management */
static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
				       int nseg, int error);
static int	aac_alloc_commands(struct aac_softc *sc);
static void	aac_free_commands(struct aac_softc *sc);
static void	aac_unmap_command(struct aac_command *cm);

/* Hardware Interface */
static int	aac_alloc(struct aac_softc *sc);
static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
			       int error);
static int	aac_check_firmware(struct aac_softc *sc);
static void	aac_define_int_mode(struct aac_softc *sc);
static int	aac_init(struct aac_softc *sc);
static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
static int	aac_setup_intr(struct aac_softc *sc);
static int	aac_check_config(struct aac_softc *sc);

/* PMC SRC interface */
static int	aac_src_get_fwstatus(struct aac_softc *sc);
static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
static int	aac_src_get_istatus(struct aac_softc *sc);
static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
				    u_int32_t arg0, u_int32_t arg1,
				    u_int32_t arg2, u_int32_t arg3);
static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
static int	aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_src_get_outb_queue(struct aac_softc *sc);
static void	aac_src_set_outb_queue(struct aac_softc *sc, int index);

struct aac_interface aacraid_src_interface = {
	aac_src_get_fwstatus,
	aac_src_qnotify,
	aac_src_get_istatus,
	aac_src_clear_istatus,
	aac_src_set_mailbox,
	aac_src_get_mailbox,
	aac_src_access_devreg,
	aac_src_send_command,
	aac_src_get_outb_queue,
	aac_src_set_outb_queue
};

/* PMC SRCv interface */
static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
				     u_int32_t arg0, u_int32_t arg1,
				     u_int32_t arg2, u_int32_t arg3);
static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);

struct aac_interface aacraid_srcv_interface = {
	aac_src_get_fwstatus,
	aac_src_qnotify,
	aac_src_get_istatus,
	aac_src_clear_istatus,
	aac_srcv_set_mailbox,
	aac_srcv_get_mailbox,
	aac_src_access_devreg,
	aac_src_send_command,
	aac_src_get_outb_queue,
	aac_src_set_outb_queue
};

/* Debugging and Diagnostics */
static struct aac_code_lookup aac_cpu_variant[] = {
	{"i960JX",		CPUI960_JX},
	{"i960CX",		CPUI960_CX},
	{"i960HX",		CPUI960_HX},
	{"i960RX",		CPUI960_RX},
	{"i960 80303",		CPUI960_80303},
	{"StrongARM SA110",	CPUARM_SA110},
	{"PPC603e",		CPUPPC_603e},
	{"XScale 80321",	CPU_XSCALE_80321},
	{"MIPS 4KC",		CPU_MIPS_4KC},
	{"MIPS 5KC",		CPU_MIPS_5KC},
	{"Unknown StrongARM",	CPUARM_xxx},
	{"Unknown PowerPC",	CPUPPC_xxx},
	{NULL, 0},
	{"Unknown processor",	0}
};

static struct aac_code_lookup aac_battery_platform[] = {
	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
	{NULL, 0},
	{"unknown battery platform",		0}
};
static void	aac_describe_controller(struct aac_softc *sc);
static char	*aac_describe_code(struct aac_code_lookup *table,
				   u_int32_t code);

/* Management Interface */
static d_open_t		aac_open;
static d_ioctl_t	aac_ioctl;
static d_poll_t		aac_poll;
#if __FreeBSD_version >= 702000
static void	aac_cdevpriv_dtor(void *arg);
#else
static d_close_t	aac_close;
#endif
static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
static void	aac_request_aif(struct aac_softc *sc);
static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_return_aif(struct aac_softc *sc,
			       struct aac_fib_context *ctx, caddr_t uptr);
static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
static void	aac_ioctl_event(struct aac_softc *sc,
				struct aac_event *event, void *arg);
static int	aac_reset_adapter(struct aac_softc *sc);
static int	aac_get_container_info(struct aac_softc *sc,
				       struct aac_fib *fib, int cid,
				       struct aac_mntinforesp *mir,
				       u_int32_t *uid);
static u_int32_t
	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);

static struct cdevsw aacraid_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	aac_open,
#if __FreeBSD_version < 702000
	.d_close =	aac_close,
#endif
	.d_ioctl =	aac_ioctl,
	.d_poll =	aac_poll,
	.d_name =	"aacraid",
};

MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");

/* sysctl node */
SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");

/*
 * Device Interface
 */

/*
 * Initialize the controller and softc
 */
int
aacraid_attach(struct aac_softc *sc)
{
	int error, unit;
	struct aac_fib *fib;
	struct aac_mntinforesp mir;
	int count = 0, i = 0;
	u_int32_t uid;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->hint_flags = device_get_flags(sc->aac_dev);
	/*
	 * Initialize per-controller queues.
	 */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);

	/* mark controller as suspended until we get ourselves organised */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Check that the firmware on the card is supported.
	 */
	sc->msi_enabled = FALSE;
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/*
	 * Initialize locks
	 */
	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

#if __FreeBSD_version >= 800000
	/* Initialize the clock daemon callout. */
	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
#endif
	/*
	 * Initialize the adapter.
	 */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
		aac_define_int_mode(sc);
		if ((error = aac_init(sc)) != 0)
			return(error);
	}

	/*
	 * Allocate and connect our interrupt.
	 */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/*
	 * Print a little information about the controller.
	 */
	aac_describe_controller(sc);

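	/*
	 * Note: the character device created below shows up as
	 * /dev/aacraid<unit> and is the management node consumed by the
	 * ioctl/AIF interface registered in aacraid_cdevsw above.
	 */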
	/*
	 * Make the control device.
	 */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
				 0640, "aacraid%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread */
	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
			      "shutdown event registration failed\n");

	/* Find containers */
	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	/* loop over possible containers */
	do {
		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
			continue;
		if (i == 0)
			count = mir.MntRespCount;
		aac_add_container(sc, &mir, 0, uid);
		i++;
	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	/* Register with CAM for the containers */
	TAILQ_INIT(&sc->aac_sim_tqh);
	aac_container_bus(sc);
	/* Register with CAM for the non-DASD devices */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
		aac_get_bus_info(sc);

	/* poke the bus to actually attach the child devices */
	bus_generic_attach(sc->aac_dev);

	/* mark the controller up */
	sc->aac_state &= ~AAC_STATE_SUSPEND;

	/* enable interrupts now */
	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);

#if __FreeBSD_version >= 800000
	mtx_lock(&sc->aac_io_lock);
	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
	mtx_unlock(&sc->aac_io_lock);
#else
	{
		struct timeval tv;
		tv.tv_sec = 60;
		tv.tv_usec = 0;
		sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
	}
#endif

	return(0);
}

static void
aac_daemon(void *arg)
{
	struct aac_softc *sc;
	struct timeval tv;
	struct aac_command *cm;
	struct aac_fib *fib;

	sc = arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

#if __FreeBSD_version >= 800000
	mtx_assert(&sc->aac_io_lock, MA_OWNED);
	if (callout_pending(&sc->aac_daemontime) ||
	    callout_active(&sc->aac_daemontime) == 0)
		return;
#else
	mtx_lock(&sc->aac_io_lock);
#endif
	getmicrotime(&tv);

	if (!aacraid_alloc_command(sc, &cm)) {
		fib = cm->cm_fib;
		cm->cm_timestamp = time_uptime;
		cm->cm_datalen = 0;
		cm->cm_flags |= AAC_CMD_WAIT;

		fib->Header.Size =
		    sizeof(struct aac_fib_header) + sizeof(u_int32_t);
		fib->Header.XferState =
		    AAC_FIBSTATE_HOSTOWNED   |
		    AAC_FIBSTATE_INITIALISED |
		    AAC_FIBSTATE_EMPTY	     |
		    AAC_FIBSTATE_FROMHOST    |
		    AAC_FIBSTATE_REXPECTED   |
		    AAC_FIBSTATE_NORM	     |
		    AAC_FIBSTATE_ASYNC	     |
		    AAC_FIBSTATE_FAST_RESPONSE;
		fib->Header.Command = SendHostTime;
		*(uint32_t *)fib->data = tv.tv_sec;

		aacraid_map_command_sg(cm, NULL, 0, 0);
		aacraid_release_command(cm);
	}

#if __FreeBSD_version >= 800000
	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
#else
	mtx_unlock(&sc->aac_io_lock);
	tv.tv_sec = 30 * 60;
	tv.tv_usec = 0;
	sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
#endif
}

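/*
 * Queue an event for deferred delivery.  AAC_EVENT_CMFREE events are not
 * fired here; they are dispatched from aacraid_release_command() the next
 * time a command is returned to the free list.
 */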
void
aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
{

	switch (event->ev_type & AAC_EVENT_MASK) {
	case AAC_EVENT_CMFREE:
		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
		break;
	default:
		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
		    event->ev_type);
		break;
	}

	return;
}

/*
 * Request information of container #cid
 */
static int
aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
		       struct aac_mntinforesp *mir, u_int32_t *uid)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_mntinfo *mi;
	struct aac_cnt_config *ccfg;
	int rval;

	if (sync_fib == NULL) {
		if (aacraid_alloc_command(sc, &cm)) {
			device_printf(sc->aac_dev,
				"Warning, no free command available\n");
			return (-1);
		}
		fib = cm->cm_fib;
	} else {
		fib = sync_fib;
	}

	mi = (struct aac_mntinfo *)&fib->data[0];
	/* 4KB support?, 64-bit LBA? */
	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
		mi->Command = VM_NameServeAllBlk;
	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
		mi->Command = VM_NameServe64;
	else
		mi->Command = VM_NameServe;
	mi->MntType = FT_FILESYS;
	mi->MntCount = cid;

	if (sync_fib) {
		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
			 sizeof(struct aac_mntinfo))) {
			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
			return (-1);
		}
	} else {
		cm->cm_timestamp = time_uptime;
		cm->cm_datalen = 0;

		fib->Header.Size =
			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
		fib->Header.XferState =
			AAC_FIBSTATE_HOSTOWNED   |
			AAC_FIBSTATE_INITIALISED |
			AAC_FIBSTATE_EMPTY	 |
			AAC_FIBSTATE_FROMHOST    |
			AAC_FIBSTATE_REXPECTED   |
			AAC_FIBSTATE_NORM	 |
			AAC_FIBSTATE_ASYNC	 |
			AAC_FIBSTATE_FAST_RESPONSE;
		fib->Header.Command = ContainerCommand;
		if (aacraid_wait_command(cm) != 0) {
			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
			aacraid_release_command(cm);
			return (-1);
		}
	}
	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));

	/* UID */
	*uid = cid;
	if (mir->MntTable[0].VolType != CT_NONE &&
	    !(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
		}
		ccfg = (struct aac_cnt_config *)&fib->data[0];
		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
		ccfg->Command = VM_ContainerConfig;
		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
		ccfg->CTCommand.param[0] = cid;

		if (sync_fib) {
			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
				sizeof(struct aac_cnt_config));
			if (rval == 0 && ccfg->Command == ST_OK &&
			    ccfg->CTCommand.param[0] == CT_OK &&
			    mir->MntTable[0].VolType != CT_PASSTHRU)
				*uid = ccfg->CTCommand.param[1];
		} else {
			fib->Header.Size =
				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
			fib->Header.XferState =
				AAC_FIBSTATE_HOSTOWNED   |
				AAC_FIBSTATE_INITIALISED |
				AAC_FIBSTATE_EMPTY	 |
				AAC_FIBSTATE_FROMHOST    |
				AAC_FIBSTATE_REXPECTED   |
				AAC_FIBSTATE_NORM	 |
				AAC_FIBSTATE_ASYNC	 |
				AAC_FIBSTATE_FAST_RESPONSE;
			fib->Header.Command = ContainerCommand;
			rval = aacraid_wait_command(cm);
			if (rval == 0 && ccfg->Command == ST_OK &&
			    ccfg->CTCommand.param[0] == CT_OK &&
			    mir->MntTable[0].VolType != CT_PASSTHRU)
				*uid = ccfg->CTCommand.param[1];
			aacraid_release_command(cm);
		}
	}

	return (0);
}

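/*
 * The UID returned above is what aac_add_container() records in co_uid:
 * firmware that answers the CT_CID_TO_32BITS_UID container-config request
 * reports a persistent 32-bit unique ID for the volume; otherwise the plain
 * container id is used as a fallback.
 */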
/*
 * Create a device to represent a new container
 */
static void
aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
		  u_int32_t uid)
{
	struct aac_container *co;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Check container volume type for validity.  Note that many of
	 * the possible types may never show up.
	 */
	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
		       M_NOWAIT | M_ZERO);
		if (co == NULL) {
			panic("Out of memory?!");
		}

		co->co_found = f;
		bcopy(&mir->MntTable[0], &co->co_mntobj,
		      sizeof(struct aac_mntobj));
		co->co_uid = uid;
		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
	}
}

/*
 * Allocate resources associated with (sc)
 */
static int
aac_alloc(struct aac_softc *sc)
{
	bus_size_t maxsize;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Create DMA tag for mapping buffers into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
			       BUS_SPACE_MAXADDR :
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sc->aac_max_sectors << 9, /* maxsize */
			       sc->aac_sg_tablesize,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       busdma_lock_mutex,	/* lockfunc */
			       &sc->aac_io_lock,	/* lockfuncarg */
			       &sc->aac_buffer_dmat)) {
		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for mapping FIBs into controller-addressable space.
	 */
	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
			sizeof(struct aac_fib_xporthdr) + 31);
	else
		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       maxsize,			/* maxsize */
			       1,			/* nsegments */
			       maxsize,			/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_fib_dmat)) {
		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for the common structure and allocate it.
	 */
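	/*
	 * The allocation is sized as struct aac_common plus one extra
	 * u_int32_t per FIB; those extra words appear to back the host RRQ
	 * completion ring (ac_host_rrq) whose bus address is handed to the
	 * firmware in aac_init() below.
	 */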
	maxsize = sizeof(struct aac_common);
	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       maxsize,			/* maxsize */
			       1,			/* nsegments */
			       maxsize,			/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_common_dmat)) {
		device_printf(sc->aac_dev,
			      "can't allocate common structure DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
		device_printf(sc->aac_dev, "can't allocate common structure\n");
		return (ENOMEM);
	}

	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
			sc->aac_common, maxsize,
			aac_common_map, sc, 0);
	bzero(sc->aac_common, maxsize);

	/* Allocate some FIBs and associated command structs */
	TAILQ_INIT(&sc->aac_fibmap_tqh);
	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
	mtx_lock(&sc->aac_io_lock);
	while (sc->total_fibs < sc->aac_max_fibs) {
		if (aac_alloc_commands(sc) != 0)
			break;
	}
	mtx_unlock(&sc->aac_io_lock);
	if (sc->total_fibs == 0)
		return (ENOMEM);

	return (0);
}

/*
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.
 */
void
aacraid_free(struct aac_softc *sc)
{
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	free(sc->aac_commands, M_AACRAIDBUF);

	/* destroy the common area */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
				sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/* disconnect the interrupt handler */
	for (i = 0; i < AAC_MAX_MSIX; ++i) {
		if (sc->aac_intr[i])
			bus_teardown_intr(sc->aac_dev,
				sc->aac_irq[i], sc->aac_intr[i]);
		if (sc->aac_irq[i])
			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
				sc->aac_irq_rid[i], sc->aac_irq[i]);
		else
			break;
	}
	if (sc->msi_enabled)
		pci_release_msi(sc->aac_dev);

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     sc->aac_regs_rid0, sc->aac_regs_res0);
	if (sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     sc->aac_regs_rid1, sc->aac_regs_res1);
}

/*
 * Disconnect from the controller completely, in preparation for unload.
 */
int
aacraid_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim *sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

#if __FreeBSD_version >= 800000
	callout_drain(&sc->aac_daemontime);
#else
	untimeout(aac_daemon, (void *)sc, sc->timeout_id);
#endif
	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		free(co, M_AACRAIDBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
		error = device_delete_child(dev, sim->sim_dev);
		if (error)
			return (error);
		free(sim, M_AACRAIDBUF);
	}

	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
	}

	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
		panic("Cannot shutdown AIF thread");

	if ((error = aacraid_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aacraid_free(sc);

	mtx_destroy(&sc->aac_io_lock);

	return(0);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
 *
 * Note that we can assume that the bioq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
int
aacraid_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O completed already
	 */
	device_printf(sc->aac_dev, "shutting down controller...");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	cc = (struct aac_close_command *)&fib->data[0];

	bzero(cc, sizeof(struct aac_close_command));
	cc->Command = VM_CloseAll;
	cc->ContainerId = 0xfffffffe;
	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_close_command)))
		printf("FAILED.\n");
	else
		printf("done\n");

	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}

/*
 * Bring the controller to a quiescent state, ready for system suspend.
 */
int
aacraid_suspend(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state |= AAC_STATE_SUSPEND;

	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
	return(0);
}

/*
 * Bring the controller back to a state ready for operation.
 */
int
aacraid_resume(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state &= ~AAC_STATE_SUSPEND;
	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
	return(0);
}

/*
 * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
 */
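/*
 * Each host RRQ entry decoded below packs several fields into one 32-bit
 * word: bit 31 is the producer toggle bit, bit 30 flags a fast response,
 * bit 23 flags an AIF, and the low 16 bits hold the command index + 1
 * (0 means the slot is empty).
 */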
void
aacraid_new_intr_type1(void *arg)
{
	struct aac_msix_ctx *ctx;
	struct aac_softc *sc;
	int vector_no;
	struct aac_command *cm;
	struct aac_fib *fib;
	u_int32_t bellbits, bellbits_shifted, index, handle;
	int isFastResponse, isAif, noMoreAif, mode;

	ctx = (struct aac_msix_ctx *)arg;
	sc = ctx->sc;
	vector_no = ctx->vector_no;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_lock(&sc->aac_io_lock);

	if (sc->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		if (vector_no == 0) {
			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
			bellbits = AAC_DB_RESPONSE_SENT_NS;
			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
		} else {
			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
			if (bellbits_shifted & AAC_DB_AIF_PENDING)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
				mode |= AAC_INT_MODE_SYNC;
		}
		/* ODR readback, Prep #238630 */
		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
	}

	if (mode & AAC_INT_MODE_SYNC) {
		if (sc->aac_sync_cm) {
			cm = sc->aac_sync_cm;
			cm->cm_flags |= AAC_CMD_COMPLETED;
			/* is there a completion handler? */
			if (cm->cm_complete != NULL) {
				cm->cm_complete(cm);
			} else {
				/* assume that someone is sleeping on this command */
				wakeup(cm);
			}
			sc->flags &= ~AAC_QUEUE_FRZN;
			sc->aac_sync_cm = NULL;
		}
		mode = 0;
	}

	if (mode & AAC_INT_MODE_AIF) {
		if (mode & AAC_INT_MODE_INTX) {
			aac_request_aif(sc);
			mode = 0;
		}
	}

	if (mode) {
		/* handle async. status */
		index = sc->aac_host_rrq_idx[vector_no];
		for (;;) {
			isFastResponse = isAif = noMoreAif = 0;
			/* remove toggle bit (31) */
			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
			/* check fast response bit (30) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			/* check AIF bit (23) */
			else if (handle & 0x00800000)
				isAif = TRUE;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;

			cm = sc->aac_commands + (handle - 1);
			fib = cm->cm_fib;
			sc->aac_rrq_outstanding[vector_no]--;
			if (isAif) {
				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
				if (!noMoreAif)
					aac_handle_aif(sc, fib);
				aac_remove_busy(cm);
				aacraid_release_command(cm);
			} else {
				if (isFastResponse) {
					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
					*((u_int32_t *)(fib->data)) = ST_OK;
					cm->cm_flags |= AAC_CMD_FASTRESP;
				}
				aac_remove_busy(cm);
				aac_unmap_command(cm);
				cm->cm_flags |= AAC_CMD_COMPLETED;

				/* is there a completion handler? */
				if (cm->cm_complete != NULL) {
					cm->cm_complete(cm);
				} else {
					/* assume that someone is sleeping on this command */
					wakeup(cm);
				}
				sc->flags &= ~AAC_QUEUE_FRZN;
			}

			sc->aac_common->ac_host_rrq[index++] = 0;
			if (index == (vector_no + 1) * sc->aac_vector_cap)
				index = vector_no * sc->aac_vector_cap;
			sc->aac_host_rrq_idx[vector_no] = index;

			if ((isAif && !noMoreAif) || sc->aif_pending)
				aac_request_aif(sc);
		}
	}

	if (mode & AAC_INT_MODE_AIF) {
		aac_request_aif(sc);
		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	/* see if we can start some more I/O */
	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
		aacraid_startio(sc);
	mtx_unlock(&sc->aac_io_lock);
}

/*
 * Handle notification of one or more FIBs coming from the controller.
 */
static void
aac_command_thread(struct aac_softc *sc)
{
	int retval;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	sc->aifflags = AAC_AIFFLAGS_RUNNING;

	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {

		retval = 0;
		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);

		/*
		 * First see if any FIBs need to be allocated.  This needs
		 * to be called without the driver lock because contigmalloc
		 * will grab Giant, and would result in an LOR.
		 */
		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
			aac_alloc_commands(sc);
			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
			aacraid_startio(sc);
		}

		/*
		 * While we're here, check to see if any commands are stuck.
		 * This is pretty low-priority, so it's ok if it doesn't
		 * always fire.
		 */
		if (retval == EWOULDBLOCK)
			aac_timeout(sc);

		/* Check the hardware printf message buffer */
		if (sc->aac_common->ac_printf[0] != 0)
			aac_print_printf(sc);
	}
	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
	mtx_unlock(&sc->aac_io_lock);
	wakeup(sc->aac_dev);

	aac_kthread_exit(0);
}

/*
 * Submit a command to the controller, return when it completes.
 * XXX This is very dangerous!  If the card has gone out to lunch, we could
 *     be stuck here forever.  At the same time, signals are not caught
 *     because there is a risk that a signal could wakeup the sleep before
 *     the card has a chance to complete the command.  Since there is no way
 *     to cancel a command that is in progress, we can't protect against the
 *     card completing a command late and spamming the command and data
 *     memory.  So, we are held hostage until the command completes.
 */
int
aacraid_wait_command(struct aac_command *cm)
{
	struct aac_softc *sc;
	int error;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	/* Put the command on the ready queue and get things going */
	aac_enqueue_ready(cm);
	aacraid_startio(sc);
	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
	return(error);
}

/*
 * Command Buffer Management
 */

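/*
 * The FIB pool is grown on demand: when the free list is empty but
 * total_fibs is still below aac_max_fibs, aacraid_alloc_command() sets
 * AAC_AIFFLAGS_ALLOCFIBS and wakes the AIF thread, which then calls
 * aac_alloc_commands() from aac_command_thread().
 */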
/*
 * Allocate a command.
 */
int
aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((cm = aac_dequeue_free(sc)) == NULL) {
		if (sc->total_fibs < sc->aac_max_fibs) {
			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
			wakeup(sc->aifthread);
		}
		return (EBUSY);
	}

	*cmp = cm;
	return(0);
}

/*
 * Release a command back to the freelist.
 */
void
aacraid_release_command(struct aac_command *cm)
{
	struct aac_event *event;
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	/* (re)initialize the command/FIB */
	cm->cm_sgtable = NULL;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_ccb = NULL;
	cm->cm_passthr_dmat = 0;
	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
	cm->cm_fib->Header.Unused = 0;
	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;

	/*
	 * These are duplicated in aac_start to cover the case where an
	 * intermediate stage may have destroyed them.  They're left
	 * initialized here for debugging purposes only.
	 */
	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
	cm->cm_fib->Header.Handle = 0;

	aac_enqueue_free(cm);

	/*
	 * Dequeue all events so that there's no risk of events getting
	 * stranded.
	 */
	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
		event->ev_callback(sc, event, event->ev_arg);
	}
}

/*
 * Map helper for command/FIB allocation.
 */
static void
aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint64_t *fibphys;

	fibphys = (uint64_t *)arg;

	*fibphys = segs[0].ds_addr;
}

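/*
 * Each call below carves one bus_dmamem allocation into aac_max_fibs_alloc
 * FIBs.  The per-FIB stride of aac_max_fib_size + 31 (plus the transport
 * header for NEW_COMM_TYPE1) leaves room to round every FIB's bus address
 * up to a 32-byte boundary, which is what the alignment arithmetic on
 * cm_fibphys implements.
 */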
/*
 * Allocate and initialize commands/FIBs for this adapter.
 */
static int
aac_alloc_commands(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fibmap *fm;
	uint64_t fibphys;
	int i, error;
	u_int32_t maxsize;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
		return (ENOMEM);

	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
	if (fm == NULL)
		return (ENOMEM);

	mtx_unlock(&sc->aac_io_lock);
	/* allocate the FIBs in DMAable memory and load them */
	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
		device_printf(sc->aac_dev,
			      "Not enough contiguous memory available.\n");
		free(fm, M_AACRAIDBUF);
		mtx_lock(&sc->aac_io_lock);
		return (ENOMEM);
	}

	maxsize = sc->aac_max_fib_size + 31;
	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
		maxsize += sizeof(struct aac_fib_xporthdr);
	/* Ignore errors since this doesn't bounce */
	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
			      sc->aac_max_fibs_alloc * maxsize,
			      aac_map_command_helper, &fibphys, 0);
	mtx_lock(&sc->aac_io_lock);

	/* initialize constant fields in the command structure */
	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
		cm = sc->aac_commands + sc->total_fibs;
		fm->aac_commands = cm;
		cm->cm_sc = sc;
		cm->cm_fib = (struct aac_fib *)
			((u_int8_t *)fm->aac_fibs + i * maxsize);
		cm->cm_fibphys = fibphys + i * maxsize;
		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
			u_int64_t fibphys_aligned;
			fibphys_aligned =
				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
			cm->cm_fib = (struct aac_fib *)
				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
			cm->cm_fibphys = fibphys_aligned;
		} else {
			u_int64_t fibphys_aligned;
			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
			cm->cm_fib = (struct aac_fib *)
				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
			cm->cm_fibphys = fibphys_aligned;
		}
		cm->cm_index = sc->total_fibs;

		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
					       &cm->cm_datamap)) != 0)
			break;
		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
			aacraid_release_command(cm);
		sc->total_fibs++;
	}

	if (i > 0) {
		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
		return (0);
	}

	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
	free(fm, M_AACRAIDBUF);
	return (ENOMEM);
}

/*
 * Free FIBs owned by this adapter.
 */
static void
aac_free_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_command *cm;
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {

		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
		/*
		 * We check against total_fibs to handle partially
		 * allocated blocks.
		 */
		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
			cm = fm->aac_commands + i;
			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
		}
		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
		free(fm, M_AACRAIDBUF);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
void
aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	cm = (struct aac_command *)arg;
	sc = cm->cm_sc;
	fib = cm->cm_fib;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	/* copy into the FIB */
	if (cm->cm_sgtable != NULL) {
		if (fib->Header.Command == RawIo2) {
			struct aac_raw_io2 *raw;
			struct aac_sge_ieee1212 *sg;
			u_int32_t min_size = PAGE_SIZE, cur_size;
			int conformable = TRUE;

			raw = (struct aac_raw_io2 *)&fib->data[0];
			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
			raw->sgeCnt = nseg;

			for (i = 0; i < nseg; i++) {
				cur_size = segs[i].ds_len;
				sg[i].addrHigh = 0;
				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
				sg[i].length = cur_size;
				sg[i].flags = 0;
				if (i == 0) {
					raw->sgeFirstSize = cur_size;
				} else if (i == 1) {
					raw->sgeNominalSize = cur_size;
					min_size = cur_size;
				} else if ((i+1) < nseg &&
					cur_size != raw->sgeNominalSize) {
					conformable = FALSE;
					if (cur_size < min_size)
						min_size = cur_size;
				}
			}

			/* not conformable: evaluate required sg elements */
			if (!conformable) {
				int j, err_found, nseg_new = nseg;
				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
					err_found = FALSE;
					nseg_new = 2;
					for (j = 1; j < nseg - 1; ++j) {
						if (sg[j].length % (i*PAGE_SIZE)) {
							err_found = TRUE;
							break;
						}
						nseg_new += (sg[j].length / (i*PAGE_SIZE));
					}
					if (!err_found)
						break;
				}
				if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
					!(sc->hint_flags & 4))
					nseg = aac_convert_sgraw2(sc,
						raw, i, nseg, nseg_new);
			} else {
				raw->flags |= RIO2_SGL_CONFORMANT;
			}

			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg *
				sizeof(struct aac_sge_ieee1212);

		} else if (fib->Header.Command == RawIo) {
			struct aac_sg_tableraw *sg;
			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
				sg->SgEntryRaw[i].Next = 0;
				sg->SgEntryRaw[i].Prev = 0;
				sg->SgEntryRaw[i].Flags = 0;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
			struct aac_sg_table *sg;
			sg = cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
		} else {
			struct aac_sg_table64 *sg;
			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
		}
	}

	/* Fix up the address values in the FIB.  Use the command array index
	 * instead of a pointer since these fields are only 32 bits.  Shift
	 * the SenderFibAddress over to make room for the fast response bit
	 * and for the AIF bit
	 */
	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;

	/* save a pointer to the command for speedy reverse-lookup */
	cm->cm_fib->Header.Handle += cm->cm_index + 1;

	if (cm->cm_passthr_dmat == 0) {
		if (cm->cm_flags & AAC_CMD_DATAIN)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_PREREAD);
		if (cm->cm_flags & AAC_CMD_DATAOUT)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_PREWRITE);
	}

	cm->cm_flags |= AAC_CMD_MAPPED;

	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
		u_int32_t wait = 0;
		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL);
	} else if (cm->cm_flags & AAC_CMD_WAIT) {
		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL);
	} else {
		int count = 10000000L;
		while (AAC_SEND_COMMAND(sc, cm) != 0) {
			if (--count == 0) {
				aac_unmap_command(cm);
				sc->flags |= AAC_QUEUE_FRZN;
				aac_requeue_ready(cm);
			}
			DELAY(5);			/* wait 5 usec. */
		}
	}
}


static int
aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
		   int pages, int nseg, int nseg_new)
{
	struct aac_sge_ieee1212 *sge;
	int i, j, pos;
	u_int32_t addr_low;

	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
	if (sge == NULL)
		return nseg;

	for (i = 1, pos = 1; i < nseg - 1; ++i) {
		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
			sge[pos].addrLow = addr_low;
			sge[pos].addrHigh = raw->sge[i].addrHigh;
			if (addr_low < raw->sge[i].addrLow)
				sge[pos].addrHigh++;
			sge[pos].length = pages * PAGE_SIZE;
			sge[pos].flags = 0;
			pos++;
		}
	}
	sge[pos] = raw->sge[nseg-1];
	for (i = 1; i < nseg_new; ++i)
		raw->sge[i] = sge[i];

	free(sge, M_AACRAIDBUF);
	raw->sgeCnt = nseg_new;
	raw->flags |= RIO2_SGL_CONFORMANT;
	raw->sgeNominalSize = pages * PAGE_SIZE;
	return nseg_new;
}

/*
 * Unmap a command from controller-visible space.
 */
static void
aac_unmap_command(struct aac_command *cm)
{
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (!(cm->cm_flags & AAC_CMD_MAPPED))
		return;

	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
		if (cm->cm_flags & AAC_CMD_DATAIN)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (cm->cm_flags & AAC_CMD_DATAOUT)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
	}
	cm->cm_flags &= ~AAC_CMD_MAPPED;
}

/*
 * Hardware Interface
 */

/*
 * Initialize the adapter.
 */
static void
aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_common_busaddr = segs[0].ds_addr;
}

static int
aac_check_firmware(struct aac_softc *sc)
{
	u_int32_t code, major, minor, maxsize;
	u_int32_t options = 0, atu_size = 0, status, waitCount;
	time_t then;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* check if flash update is running */
	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
		then = time_uptime;
		do {
			code = AAC_GET_FWSTATUS(sc);
			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
				device_printf(sc->aac_dev,
					      "FATAL: controller not coming ready, "
					      "status %x\n", code);
				return(ENXIO);
			}
		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
		/*
		 * Delay 10 seconds. Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
		waitCount = 10 * 10000;
		while (waitCount) {
			DELAY(100);		/* delay 100 microseconds */
			waitCount--;
		}
	}

	/*
	 * Wait for the adapter to come ready.
	 */
	then = time_uptime;
	do {
		code = AAC_GET_FWSTATUS(sc);
		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
			device_printf(sc->aac_dev,
				      "FATAL: controller not coming ready, "
				      "status %x\n", code);
			return(ENXIO);
		}
	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);

	/*
	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
	 * firmware version 1.x are not compatible with this driver.
	 */
	if (sc->flags & AAC_FLAGS_PERC2QC) {
		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
					 NULL, NULL)) {
			device_printf(sc->aac_dev,
				      "Error reading firmware version\n");
			return (EIO);
		}

		/* These numbers are stored as ASCII! */
		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
		if (major == 1) {
			device_printf(sc->aac_dev,
			    "Firmware version %d.%d is not supported.\n",
			    major, minor);
			return (EINVAL);
		}
	}
	/*
	 * Retrieve the capabilities/supported options word so we know what
	 * work-arounds to enable.  Some firmware revs don't support this
	 * command.
	 */
	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
		if (status != AAC_SRB_STS_INVALID_REQUEST) {
			device_printf(sc->aac_dev,
				      "RequestAdapterInfo failed\n");
			return (EIO);
		}
	} else {
		options = AAC_GET_MAILBOX(sc, 1);
		atu_size = AAC_GET_MAILBOX(sc, 2);
		sc->supported_options = options;

		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
			sc->flags |= AAC_FLAGS_4GB_WINDOW;
		if (options & AAC_SUPPORTED_NONDASD)
			sc->flags |= AAC_FLAGS_ENABLE_CAM;
		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
		    && (sizeof(bus_addr_t) > 4)
		    && (sc->hint_flags & 0x1)) {
			device_printf(sc->aac_dev,
			    "Enabling 64-bit address support\n");
			sc->flags |= AAC_FLAGS_SG_64BIT;
		}
		if (sc->aac_if.aif_send_command) {
			if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
			    (options & AAC_SUPPORTED_NEW_COMM_TYPE4))
				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
		}
		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
	}

	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
		device_printf(sc->aac_dev, "Communication interface not supported!\n");
		return (ENXIO);
	}

	if (sc->hint_flags & 2) {
		device_printf(sc->aac_dev,
			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
		sc->flags |= AAC_FLAGS_SYNC_MODE;
	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
		device_printf(sc->aac_dev,
			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
		sc->flags |= AAC_FLAGS_SYNC_MODE;
	}

	/* Check for broken hardware that does a lower number of commands */
	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);

	/* Remap mem. resource, if required */
	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
		bus_release_resource(
			sc->aac_dev, SYS_RES_MEMORY,
			sc->aac_regs_rid0, sc->aac_regs_res0);
		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
			atu_size, RF_ACTIVE);
		if (sc->aac_regs_res0 == NULL) {
			sc->aac_regs_res0 = bus_alloc_resource_any(
				sc->aac_dev, SYS_RES_MEMORY,
				&sc->aac_regs_rid0, RF_ACTIVE);
			if (sc->aac_regs_res0 == NULL) {
				device_printf(sc->aac_dev,
					"couldn't allocate register window\n");
				return (ENXIO);
			}
		}
		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
	}

	/* Read preferred settings */
	sc->aac_max_fib_size = sizeof(struct aac_fib);
	sc->aac_max_sectors = 128;			/* 64KB */
	sc->aac_max_aif = 1;
	if (sc->flags & AAC_FLAGS_SG_64BIT)
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
					- sizeof(struct aac_blockwrite64))
					/ sizeof(struct aac_sg_entry64);
	else
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
					- sizeof(struct aac_blockwrite))
					/ sizeof(struct aac_sg_entry);

	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
		options = AAC_GET_MAILBOX(sc, 1);
		sc->aac_max_fib_size = (options & 0xFFFF);
		sc->aac_max_sectors = (options >> 16) << 1;
		options = AAC_GET_MAILBOX(sc, 2);
		sc->aac_sg_tablesize = (options >> 16);
		options = AAC_GET_MAILBOX(sc, 3);
		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
			sc->aac_max_fibs = (options & 0xFFFF);
		options = AAC_GET_MAILBOX(sc, 4);
		sc->aac_max_aif = (options & 0xFFFF);
		options = AAC_GET_MAILBOX(sc, 5);
		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
	}

	maxsize = sc->aac_max_fib_size + 31;
	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
		maxsize += sizeof(struct aac_fib_xporthdr);
	if (maxsize > PAGE_SIZE) {
		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
		maxsize = PAGE_SIZE;
	}
	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;

	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		sc->flags |= AAC_FLAGS_RAW_IO;
		device_printf(sc->aac_dev, "Enable Raw I/O\n");
	}
	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
		sc->flags |= AAC_FLAGS_LBA_64BIT;
		device_printf(sc->aac_dev, "Enable 64-bit array\n");
	}

#ifdef AACRAID_DEBUG
	aacraid_get_fw_debug_buffer(sc);
#endif
	return (0);
}

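/*
 * aac_init() fills in the aac_adapter_init block inside the common area and
 * passes its bus address to the firmware with the AAC_MONKER_INITSTRUCT
 * synchronous command, which establishes the shared data structures used by
 * the new comm. interface.
 */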
static int
aac_init(struct aac_softc *sc)
{
	struct aac_adapter_init	*ip;
	int i, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* reset rrq index */
	sc->aac_fibs_pushed_no = 0;
	for (i = 0; i < sc->aac_max_msix; i++)
		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;

	/*
	 * Fill in the init structure.  This tells the adapter about the
	 * physical location of various important shared data structures.
	 */
	ip = &sc->aac_common->ac_init;
	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
		sc->flags |= AAC_FLAGS_RAW_IO;
	}
	ip->NoOfMSIXVectors = sc->aac_max_msix;

	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
					 offsetof(struct aac_common, ac_fibs);
	ip->AdapterFibsVirtualAddress = 0;
	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
	ip->AdapterFibAlign = sizeof(struct aac_fib);

	ip->PrintfBufferAddress = sc->aac_common_busaddr +
				  offsetof(struct aac_common, ac_printf);
	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;

	/*
	 * The adapter assumes that pages are 4K in size, except on some
	 * broken firmware versions that do the page->byte conversion twice,
	 * therefore 'assuming' that this value is in 16MB units (2^24).
	 * Round up since the granularity is so high.
	 */
	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
		ip->HostPhysMemPages =
		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
	}
	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */

	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
	}
	ip->MaxNumAif = sc->aac_max_aif;
	ip->HostRRQ_AddrLow =
		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
	/* always 32-bit address */
	ip->HostRRQ_AddrHigh = 0;

	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
		device_printf(sc->aac_dev, "Power Management enabled\n");
	}

	ip->MaxIoCommands = sc->aac_max_fibs;
	ip->MaxIoSize = sc->aac_max_sectors << 9;
	ip->MaxFibSize = sc->aac_max_fib_size;

	/*
	 * Do controller-type-specific initialisation
	 */
	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);

	/*
	 * Give the init structure to the controller.
	 */
	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
				 sc->aac_common_busaddr +
				 offsetof(struct aac_common, ac_init), 0, 0, 0,
				 NULL, NULL)) {
		device_printf(sc->aac_dev,
			      "error establishing init structure\n");
		error = EIO;
		goto out;
	}

	/*
	 * Check configuration issues
	 */
	if ((error = aac_check_config(sc)) != 0)
		goto out;

	error = 0;
out:
	return(error);
}

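/*
 * Interrupt mode selection: try MSI-X first, fall back to MSI, then to
 * legacy INTx.  aac_vector_cap splits the FIB pool evenly across the
 * vectors that were actually granted, which is also how the per-vector
 * host RRQ index ranges are laid out in aac_init().
 */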
vectors from AAC_MONKER_GETCOMMPREF */ 1856 if (sc->aac_max_msix == 0) { 1857 sc->aac_max_msix = 1; 1858 sc->aac_vector_cap = sc->aac_max_fibs; 1859 return; 1860 } 1861 1862 /* OS capability */ 1863 msi_count = pci_msix_count(dev); 1864 if (msi_count > AAC_MAX_MSIX) 1865 msi_count = AAC_MAX_MSIX; 1866 if (msi_count > sc->aac_max_msix) 1867 msi_count = sc->aac_max_msix; 1868 if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) { 1869 device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; " 1870 "will try MSI\n", msi_count, error); 1871 pci_release_msi(dev); 1872 } else { 1873 sc->msi_enabled = TRUE; 1874 device_printf(dev, "using MSI-X interrupts (%u vectors)\n", 1875 msi_count); 1876 } 1877 1878 if (!sc->msi_enabled) { 1879 msi_count = 1; 1880 if ((error = pci_alloc_msi(dev, &msi_count)) != 0) { 1881 device_printf(dev, "alloc msi failed - err=%d; " 1882 "will use INTx\n", error); 1883 pci_release_msi(dev); 1884 } else { 1885 sc->msi_enabled = TRUE; 1886 device_printf(dev, "using MSI interrupts\n"); 1887 } 1888 } 1889 1890 if (sc->msi_enabled) { 1891 /* now read controller capability from PCI config. space */ 1892 cap = aac_find_pci_capability(sc, PCIY_MSIX); 1893 val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0); 1894 if (!(val & AAC_PCI_MSI_ENABLE)) { 1895 pci_release_msi(dev); 1896 sc->msi_enabled = FALSE; 1897 } 1898 } 1899 1900 if (!sc->msi_enabled) { 1901 device_printf(dev, "using legacy interrupts\n"); 1902 sc->aac_max_msix = 1; 1903 } else { 1904 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX); 1905 if (sc->aac_max_msix > msi_count) 1906 sc->aac_max_msix = msi_count; 1907 } 1908 sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix; 1909 1910 fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d", 1911 sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix); 1912 } 1913 1914 static int 1915 aac_find_pci_capability(struct aac_softc *sc, int cap) 1916 { 1917 device_t dev; 1918 uint32_t status; 1919 uint8_t ptr; 1920 1921 dev = sc->aac_dev; 1922 1923 status = pci_read_config(dev, PCIR_STATUS, 2); 1924 if (!(status & PCIM_STATUS_CAPPRESENT)) 1925 return (0); 1926 1927 status = pci_read_config(dev, PCIR_HDRTYPE, 1); 1928 switch (status & PCIM_HDRTYPE) { 1929 case 0: 1930 case 1: 1931 ptr = PCIR_CAP_PTR; 1932 break; 1933 case 2: 1934 ptr = PCIR_CAP_PTR_2; 1935 break; 1936 default: 1937 return (0); 1938 break; 1939 } 1940 ptr = pci_read_config(dev, ptr, 1); 1941 1942 while (ptr != 0) { 1943 int next, val; 1944 next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1); 1945 val = pci_read_config(dev, ptr + PCICAP_ID, 1); 1946 if (val == cap) 1947 return (ptr); 1948 ptr = next; 1949 } 1950 1951 return (0); 1952 } 1953 1954 static int 1955 aac_setup_intr(struct aac_softc *sc) 1956 { 1957 int i, msi_count, rid; 1958 struct resource *res; 1959 void *tag; 1960 1961 msi_count = sc->aac_max_msix; 1962 rid = (sc->msi_enabled ? 
1:0); 1963 1964 for (i = 0; i < msi_count; i++, rid++) { 1965 if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid, 1966 RF_SHAREABLE | RF_ACTIVE)) == NULL) { 1967 device_printf(sc->aac_dev,"can't allocate interrupt\n"); 1968 return (EINVAL); 1969 } 1970 sc->aac_irq_rid[i] = rid; 1971 sc->aac_irq[i] = res; 1972 if (aac_bus_setup_intr(sc->aac_dev, res, 1973 INTR_MPSAFE | INTR_TYPE_BIO, NULL, 1974 aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) { 1975 device_printf(sc->aac_dev, "can't set up interrupt\n"); 1976 return (EINVAL); 1977 } 1978 sc->aac_msix[i].vector_no = i; 1979 sc->aac_msix[i].sc = sc; 1980 sc->aac_intr[i] = tag; 1981 } 1982 1983 return (0); 1984 } 1985 1986 static int 1987 aac_check_config(struct aac_softc *sc) 1988 { 1989 struct aac_fib *fib; 1990 struct aac_cnt_config *ccfg; 1991 struct aac_cf_status_hdr *cf_shdr; 1992 int rval; 1993 1994 mtx_lock(&sc->aac_io_lock); 1995 aac_alloc_sync_fib(sc, &fib); 1996 1997 ccfg = (struct aac_cnt_config *)&fib->data[0]; 1998 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE); 1999 ccfg->Command = VM_ContainerConfig; 2000 ccfg->CTCommand.command = CT_GET_CONFIG_STATUS; 2001 ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr); 2002 2003 rval = aac_sync_fib(sc, ContainerCommand, 0, fib, 2004 sizeof (struct aac_cnt_config)); 2005 cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data; 2006 if (rval == 0 && ccfg->Command == ST_OK && 2007 ccfg->CTCommand.param[0] == CT_OK) { 2008 if (cf_shdr->action <= CFACT_PAUSE) { 2009 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE); 2010 ccfg->Command = VM_ContainerConfig; 2011 ccfg->CTCommand.command = CT_COMMIT_CONFIG; 2012 2013 rval = aac_sync_fib(sc, ContainerCommand, 0, fib, 2014 sizeof (struct aac_cnt_config)); 2015 if (rval == 0 && ccfg->Command == ST_OK && 2016 ccfg->CTCommand.param[0] == CT_OK) { 2017 /* successful completion */ 2018 rval = 0; 2019 } else { 2020 /* auto commit aborted due to error(s) */ 2021 rval = -2; 2022 } 2023 } else { 2024 /* auto commit aborted due to adapter indicating 2025 config. issues too dangerous to auto commit */ 2026 rval = -3; 2027 } 2028 } else { 2029 /* error */ 2030 rval = -1; 2031 } 2032 2033 aac_release_sync_fib(sc); 2034 mtx_unlock(&sc->aac_io_lock); 2035 return(rval); 2036 } 2037 2038 /* 2039 * Send a synchronous command to the controller and wait for a result. 2040 * Indicate if the controller completed the command with an error status. 
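 * Roughly: the command and its four arguments are written into the
 * mailbox registers, the sync command doorbell is rung via AAC_QNOTIFY(),
 * and the caller then normally spins for up to AAC_SYNC_TIMEOUT seconds
 * until the adapter raises AAC_DB_SYNC_COMMAND in its interrupt status;
 * mailbox 0 then holds the status word and mailbox 1 an optional return
 * value.  A minimal caller, as in the preferred-settings probe above,
 * looks like:
 *
 *	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF,
 *	    0, 0, 0, 0, NULL, NULL))
 *		options = AAC_GET_MAILBOX(sc, 1);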
2041 */ 2042 int 2043 aacraid_sync_command(struct aac_softc *sc, u_int32_t command, 2044 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, 2045 u_int32_t *sp, u_int32_t *r1) 2046 { 2047 time_t then; 2048 u_int32_t status; 2049 2050 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2051 2052 /* populate the mailbox */ 2053 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); 2054 2055 /* ensure the sync command doorbell flag is cleared */ 2056 if (!sc->msi_enabled) 2057 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2058 2059 /* then set it to signal the adapter */ 2060 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); 2061 2062 if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) { 2063 /* spin waiting for the command to complete */ 2064 then = time_uptime; 2065 do { 2066 if (time_uptime > (then + AAC_SYNC_TIMEOUT)) { 2067 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); 2068 return(EIO); 2069 } 2070 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); 2071 2072 /* clear the completion flag */ 2073 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2074 2075 /* get the command status */ 2076 status = AAC_GET_MAILBOX(sc, 0); 2077 if (sp != NULL) 2078 *sp = status; 2079 2080 /* return parameter */ 2081 if (r1 != NULL) 2082 *r1 = AAC_GET_MAILBOX(sc, 1); 2083 2084 if (status != AAC_SRB_STS_SUCCESS) 2085 return (-1); 2086 } 2087 return(0); 2088 } 2089 2090 static int 2091 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, 2092 struct aac_fib *fib, u_int16_t datasize) 2093 { 2094 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2095 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2096 2097 if (datasize > AAC_FIB_DATASIZE) 2098 return(EINVAL); 2099 2100 /* 2101 * Set up the sync FIB 2102 */ 2103 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | 2104 AAC_FIBSTATE_INITIALISED | 2105 AAC_FIBSTATE_EMPTY; 2106 fib->Header.XferState |= xferstate; 2107 fib->Header.Command = command; 2108 fib->Header.StructType = AAC_FIBTYPE_TFIB; 2109 fib->Header.Size = sizeof(struct aac_fib_header) + datasize; 2110 fib->Header.SenderSize = sizeof(struct aac_fib); 2111 fib->Header.SenderFibAddress = 0; /* Not needed */ 2112 fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr + 2113 offsetof(struct aac_common, ac_sync_fib); 2114 2115 /* 2116 * Give the FIB to the controller, wait for a response. 2117 */ 2118 if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, 2119 fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) { 2120 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); 2121 return(EIO); 2122 } 2123 2124 return (0); 2125 } 2126 2127 /* 2128 * Check for commands that have been outstanding for a suspiciously long time, 2129 * and complain about them. 2130 */ 2131 static void 2132 aac_timeout(struct aac_softc *sc) 2133 { 2134 struct aac_command *cm; 2135 time_t deadline; 2136 int timedout; 2137 2138 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2139 /* 2140 * Traverse the busy command list, bitch about late commands once 2141 * only. 2142 */ 2143 timedout = 0; 2144 deadline = time_uptime - AAC_CMD_TIMEOUT; 2145 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { 2146 if (cm->cm_timestamp < deadline) { 2147 device_printf(sc->aac_dev, 2148 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", 2149 cm, (int)(time_uptime-cm->cm_timestamp)); 2150 AAC_PRINT_FIB(sc, cm->cm_fib); 2151 timedout++; 2152 } 2153 } 2154 2155 if (timedout) 2156 aac_reset_adapter(sc); 2157 aacraid_print_queues(sc); 2158 } 2159 2160 /* 2161 * Interface Function Vectors 2162 */ 2163 2164 /* 2165 * Read the current firmware status word. 
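 * This is the word aac_check_adapter_health() interprets below:
 * AAC_UP_AND_RUNNING means the adapter kernel is healthy, while
 * AAC_KERNEL_PANIC indicates a BlinkLED state whose code is carried in
 * bits 16-23 of the value.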
2166 */ 2167 static int 2168 aac_src_get_fwstatus(struct aac_softc *sc) 2169 { 2170 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2171 2172 return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR)); 2173 } 2174 2175 /* 2176 * Notify the controller of a change in a given queue 2177 */ 2178 static void 2179 aac_src_qnotify(struct aac_softc *sc, int qbit) 2180 { 2181 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2182 2183 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT); 2184 } 2185 2186 /* 2187 * Get the interrupt reason bits 2188 */ 2189 static int 2190 aac_src_get_istatus(struct aac_softc *sc) 2191 { 2192 int val; 2193 2194 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2195 2196 if (sc->msi_enabled) { 2197 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI); 2198 if (val & AAC_MSI_SYNC_STATUS) 2199 val = AAC_DB_SYNC_COMMAND; 2200 else 2201 val = 0; 2202 } else { 2203 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT; 2204 } 2205 return(val); 2206 } 2207 2208 /* 2209 * Clear some interrupt reason bits 2210 */ 2211 static void 2212 aac_src_clear_istatus(struct aac_softc *sc, int mask) 2213 { 2214 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2215 2216 if (sc->msi_enabled) { 2217 if (mask == AAC_DB_SYNC_COMMAND) 2218 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT); 2219 } else { 2220 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT); 2221 } 2222 } 2223 2224 /* 2225 * Populate the mailbox and set the command word 2226 */ 2227 static void 2228 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, 2229 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2230 { 2231 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2232 2233 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command); 2234 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0); 2235 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1); 2236 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2); 2237 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3); 2238 } 2239 2240 static void 2241 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, 2242 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2243 { 2244 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2245 2246 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command); 2247 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0); 2248 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1); 2249 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2); 2250 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3); 2251 } 2252 2253 /* 2254 * Fetch the immediate command status word 2255 */ 2256 static int 2257 aac_src_get_mailbox(struct aac_softc *sc, int mb) 2258 { 2259 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2260 2261 return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4))); 2262 } 2263 2264 static int 2265 aac_srcv_get_mailbox(struct aac_softc *sc, int mb) 2266 { 2267 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2268 2269 return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4))); 2270 } 2271 2272 /* 2273 * Set/clear interrupt masks 2274 */ 2275 static void 2276 aac_src_access_devreg(struct aac_softc *sc, int mode) 2277 { 2278 u_int32_t val; 2279 2280 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2281 2282 switch (mode) { 2283 case AAC_ENABLE_INTERRUPT: 2284 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, 2285 (sc->msi_enabled ? 
AAC_INT_ENABLE_TYPE1_MSIX : 2286 AAC_INT_ENABLE_TYPE1_INTX)); 2287 break; 2288 2289 case AAC_DISABLE_INTERRUPT: 2290 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL); 2291 break; 2292 2293 case AAC_ENABLE_MSIX: 2294 /* set bit 6 */ 2295 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); 2296 val |= 0x40; 2297 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); 2298 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); 2299 /* unmask int. */ 2300 val = PMC_ALL_INTERRUPT_BITS; 2301 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val); 2302 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR); 2303 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, 2304 val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0))); 2305 break; 2306 2307 case AAC_DISABLE_MSIX: 2308 /* reset bit 6 */ 2309 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); 2310 val &= ~0x40; 2311 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); 2312 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); 2313 break; 2314 2315 case AAC_CLEAR_AIF_BIT: 2316 /* set bit 5 */ 2317 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); 2318 val |= 0x20; 2319 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); 2320 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); 2321 break; 2322 2323 case AAC_CLEAR_SYNC_BIT: 2324 /* set bit 4 */ 2325 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); 2326 val |= 0x10; 2327 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); 2328 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); 2329 break; 2330 2331 case AAC_ENABLE_INTX: 2332 /* set bit 7 */ 2333 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); 2334 val |= 0x80; 2335 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); 2336 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); 2337 /* unmask int. */ 2338 val = PMC_ALL_INTERRUPT_BITS; 2339 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val); 2340 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR); 2341 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, 2342 val & (~(PMC_GLOBAL_INT_BIT2))); 2343 break; 2344 2345 default: 2346 break; 2347 } 2348 } 2349 2350 /* 2351 * New comm. interface: Send command functions 2352 */ 2353 static int 2354 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm) 2355 { 2356 struct aac_fib_xporthdr *pFibX; 2357 u_int32_t fibsize, high_addr; 2358 u_int64_t address; 2359 2360 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. 
type1)"); 2361 2362 if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest && 2363 sc->aac_max_msix > 1) { 2364 u_int16_t vector_no, first_choice = 0xffff; 2365 2366 vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix; 2367 do { 2368 vector_no += 1; 2369 if (vector_no == sc->aac_max_msix) 2370 vector_no = 1; 2371 if (sc->aac_rrq_outstanding[vector_no] < 2372 sc->aac_vector_cap) 2373 break; 2374 if (0xffff == first_choice) 2375 first_choice = vector_no; 2376 else if (vector_no == first_choice) 2377 break; 2378 } while (1); 2379 if (vector_no == first_choice) 2380 vector_no = 0; 2381 sc->aac_rrq_outstanding[vector_no]++; 2382 if (sc->aac_fibs_pushed_no == 0xffffffff) 2383 sc->aac_fibs_pushed_no = 0; 2384 else 2385 sc->aac_fibs_pushed_no++; 2386 2387 cm->cm_fib->Header.Handle += (vector_no << 16); 2388 } 2389 2390 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) { 2391 /* Calculate the amount to the fibsize bits */ 2392 fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1; 2393 /* Fill new FIB header */ 2394 address = cm->cm_fibphys; 2395 high_addr = (u_int32_t)(address >> 32); 2396 if (high_addr == 0L) { 2397 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2; 2398 cm->cm_fib->Header.u.TimeStamp = 0L; 2399 } else { 2400 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64; 2401 cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr; 2402 } 2403 cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address; 2404 } else { 2405 /* Calculate the amount to the fibsize bits */ 2406 fibsize = (sizeof(struct aac_fib_xporthdr) + 2407 cm->cm_fib->Header.Size + 127) / 128 - 1; 2408 /* Fill XPORT header */ 2409 pFibX = (struct aac_fib_xporthdr *) 2410 ((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr)); 2411 pFibX->Handle = cm->cm_fib->Header.Handle; 2412 pFibX->HostAddress = cm->cm_fibphys; 2413 pFibX->Size = cm->cm_fib->Header.Size; 2414 address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr); 2415 high_addr = (u_int32_t)(address >> 32); 2416 } 2417 2418 if (fibsize > 31) 2419 fibsize = 31; 2420 aac_enqueue_busy(cm); 2421 if (high_addr) { 2422 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr); 2423 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize); 2424 } else { 2425 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize); 2426 } 2427 return 0; 2428 } 2429 2430 /* 2431 * New comm. interface: get, set outbound queue index 2432 */ 2433 static int 2434 aac_src_get_outb_queue(struct aac_softc *sc) 2435 { 2436 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2437 2438 return(-1); 2439 } 2440 2441 static void 2442 aac_src_set_outb_queue(struct aac_softc *sc, int index) 2443 { 2444 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2445 } 2446 2447 /* 2448 * Debugging and Diagnostics 2449 */ 2450 2451 /* 2452 * Print some information about the controller. 
2453 */ 2454 static void 2455 aac_describe_controller(struct aac_softc *sc) 2456 { 2457 struct aac_fib *fib; 2458 struct aac_adapter_info *info; 2459 char *adapter_type = "Adaptec RAID controller"; 2460 2461 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2462 2463 mtx_lock(&sc->aac_io_lock); 2464 aac_alloc_sync_fib(sc, &fib); 2465 2466 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { 2467 fib->data[0] = 0; 2468 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) 2469 device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n"); 2470 else { 2471 struct aac_supplement_adapter_info *supp_info; 2472 2473 supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]); 2474 adapter_type = (char *)supp_info->AdapterTypeText; 2475 sc->aac_feature_bits = supp_info->FeatureBits; 2476 sc->aac_support_opt2 = supp_info->SupportedOptions2; 2477 } 2478 } 2479 device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n", 2480 adapter_type, 2481 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, 2482 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); 2483 2484 fib->data[0] = 0; 2485 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { 2486 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); 2487 aac_release_sync_fib(sc); 2488 mtx_unlock(&sc->aac_io_lock); 2489 return; 2490 } 2491 2492 /* save the kernel revision structure for later use */ 2493 info = (struct aac_adapter_info *)&fib->data[0]; 2494 sc->aac_revision = info->KernelRevision; 2495 2496 if (bootverbose) { 2497 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " 2498 "(%dMB cache, %dMB execution), %s\n", 2499 aac_describe_code(aac_cpu_variant, info->CpuVariant), 2500 info->ClockSpeed, info->TotalMem / (1024 * 1024), 2501 info->BufferMem / (1024 * 1024), 2502 info->ExecutionMem / (1024 * 1024), 2503 aac_describe_code(aac_battery_platform, 2504 info->batteryPlatform)); 2505 2506 device_printf(sc->aac_dev, 2507 "Kernel %d.%d-%d, Build %d, S/N %6X\n", 2508 info->KernelRevision.external.comp.major, 2509 info->KernelRevision.external.comp.minor, 2510 info->KernelRevision.external.comp.dash, 2511 info->KernelRevision.buildNumber, 2512 (u_int32_t)(info->SerialNumber & 0xffffff)); 2513 2514 device_printf(sc->aac_dev, "Supported Options=%b\n", 2515 sc->supported_options, 2516 "\20" 2517 "\1SNAPSHOT" 2518 "\2CLUSTERS" 2519 "\3WCACHE" 2520 "\4DATA64" 2521 "\5HOSTTIME" 2522 "\6RAID50" 2523 "\7WINDOW4GB" 2524 "\10SCSIUPGD" 2525 "\11SOFTERR" 2526 "\12NORECOND" 2527 "\13SGMAP64" 2528 "\14ALARM" 2529 "\15NONDASD" 2530 "\16SCSIMGT" 2531 "\17RAIDSCSI" 2532 "\21ADPTINFO" 2533 "\22NEWCOMM" 2534 "\23ARRAY64BIT" 2535 "\24HEATSENSOR"); 2536 } 2537 2538 aac_release_sync_fib(sc); 2539 mtx_unlock(&sc->aac_io_lock); 2540 } 2541 2542 /* 2543 * Look up a text description of a numeric error code and return a pointer to 2544 * same. 
2545 */ 2546 static char * 2547 aac_describe_code(struct aac_code_lookup *table, u_int32_t code) 2548 { 2549 int i; 2550 2551 for (i = 0; table[i].string != NULL; i++) 2552 if (table[i].code == code) 2553 return(table[i].string); 2554 return(table[i + 1].string); 2555 } 2556 2557 /* 2558 * Management Interface 2559 */ 2560 2561 static int 2562 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2563 { 2564 struct aac_softc *sc; 2565 2566 sc = dev->si_drv1; 2567 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2568 #if __FreeBSD_version >= 702000 2569 device_busy(sc->aac_dev); 2570 devfs_set_cdevpriv(sc, aac_cdevpriv_dtor); 2571 #endif 2572 return 0; 2573 } 2574 2575 static int 2576 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2577 { 2578 union aac_statrequest *as; 2579 struct aac_softc *sc; 2580 int error = 0; 2581 2582 as = (union aac_statrequest *)arg; 2583 sc = dev->si_drv1; 2584 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2585 2586 switch (cmd) { 2587 case AACIO_STATS: 2588 switch (as->as_item) { 2589 case AACQ_FREE: 2590 case AACQ_READY: 2591 case AACQ_BUSY: 2592 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, 2593 sizeof(struct aac_qstat)); 2594 break; 2595 default: 2596 error = ENOENT; 2597 break; 2598 } 2599 break; 2600 2601 case FSACTL_SENDFIB: 2602 case FSACTL_SEND_LARGE_FIB: 2603 arg = *(caddr_t*)arg; 2604 case FSACTL_LNX_SENDFIB: 2605 case FSACTL_LNX_SEND_LARGE_FIB: 2606 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); 2607 error = aac_ioctl_sendfib(sc, arg); 2608 break; 2609 case FSACTL_SEND_RAW_SRB: 2610 arg = *(caddr_t*)arg; 2611 case FSACTL_LNX_SEND_RAW_SRB: 2612 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); 2613 error = aac_ioctl_send_raw_srb(sc, arg); 2614 break; 2615 case FSACTL_AIF_THREAD: 2616 case FSACTL_LNX_AIF_THREAD: 2617 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); 2618 error = EINVAL; 2619 break; 2620 case FSACTL_OPEN_GET_ADAPTER_FIB: 2621 arg = *(caddr_t*)arg; 2622 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: 2623 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); 2624 error = aac_open_aif(sc, arg); 2625 break; 2626 case FSACTL_GET_NEXT_ADAPTER_FIB: 2627 arg = *(caddr_t*)arg; 2628 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: 2629 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); 2630 error = aac_getnext_aif(sc, arg); 2631 break; 2632 case FSACTL_CLOSE_GET_ADAPTER_FIB: 2633 arg = *(caddr_t*)arg; 2634 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: 2635 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); 2636 error = aac_close_aif(sc, arg); 2637 break; 2638 case FSACTL_MINIPORT_REV_CHECK: 2639 arg = *(caddr_t*)arg; 2640 case FSACTL_LNX_MINIPORT_REV_CHECK: 2641 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); 2642 error = aac_rev_check(sc, arg); 2643 break; 2644 case FSACTL_QUERY_DISK: 2645 arg = *(caddr_t*)arg; 2646 case FSACTL_LNX_QUERY_DISK: 2647 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); 2648 error = aac_query_disk(sc, arg); 2649 break; 2650 case FSACTL_DELETE_DISK: 2651 case FSACTL_LNX_DELETE_DISK: 2652 /* 2653 * We don't trust the underland to tell us when to delete a 2654 * container, rather we rely on an AIF coming from the 2655 * controller 2656 */ 2657 error = 0; 2658 break; 2659 case FSACTL_GET_PCI_INFO: 2660 arg = *(caddr_t*)arg; 2661 case FSACTL_LNX_GET_PCI_INFO: 2662 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, 
"FSACTL_GET_PCI_INFO"); 2663 error = aac_get_pci_info(sc, arg); 2664 break; 2665 case FSACTL_GET_FEATURES: 2666 arg = *(caddr_t*)arg; 2667 case FSACTL_LNX_GET_FEATURES: 2668 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES"); 2669 error = aac_supported_features(sc, arg); 2670 break; 2671 default: 2672 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); 2673 error = EINVAL; 2674 break; 2675 } 2676 return(error); 2677 } 2678 2679 static int 2680 aac_poll(struct cdev *dev, int poll_events, struct thread *td) 2681 { 2682 struct aac_softc *sc; 2683 struct aac_fib_context *ctx; 2684 int revents; 2685 2686 sc = dev->si_drv1; 2687 revents = 0; 2688 2689 mtx_lock(&sc->aac_io_lock); 2690 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) { 2691 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 2692 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { 2693 revents |= poll_events & (POLLIN | POLLRDNORM); 2694 break; 2695 } 2696 } 2697 } 2698 mtx_unlock(&sc->aac_io_lock); 2699 2700 if (revents == 0) { 2701 if (poll_events & (POLLIN | POLLRDNORM)) 2702 selrecord(td, &sc->rcv_select); 2703 } 2704 2705 return (revents); 2706 } 2707 2708 static void 2709 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) 2710 { 2711 2712 switch (event->ev_type) { 2713 case AAC_EVENT_CMFREE: 2714 mtx_assert(&sc->aac_io_lock, MA_OWNED); 2715 if (aacraid_alloc_command(sc, (struct aac_command **)arg)) { 2716 aacraid_add_event(sc, event); 2717 return; 2718 } 2719 free(event, M_AACRAIDBUF); 2720 wakeup(arg); 2721 break; 2722 default: 2723 break; 2724 } 2725 } 2726 2727 /* 2728 * Send a FIB supplied from userspace 2729 */ 2730 static int 2731 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) 2732 { 2733 struct aac_command *cm; 2734 int size, error; 2735 2736 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2737 2738 cm = NULL; 2739 2740 /* 2741 * Get a command 2742 */ 2743 mtx_lock(&sc->aac_io_lock); 2744 if (aacraid_alloc_command(sc, &cm)) { 2745 struct aac_event *event; 2746 2747 event = malloc(sizeof(struct aac_event), M_AACRAIDBUF, 2748 M_NOWAIT | M_ZERO); 2749 if (event == NULL) { 2750 error = EBUSY; 2751 mtx_unlock(&sc->aac_io_lock); 2752 goto out; 2753 } 2754 event->ev_type = AAC_EVENT_CMFREE; 2755 event->ev_callback = aac_ioctl_event; 2756 event->ev_arg = &cm; 2757 aacraid_add_event(sc, event); 2758 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0); 2759 } 2760 mtx_unlock(&sc->aac_io_lock); 2761 2762 /* 2763 * Fetch the FIB header, then re-copy to get data as well. 2764 */ 2765 if ((error = copyin(ufib, cm->cm_fib, 2766 sizeof(struct aac_fib_header))) != 0) 2767 goto out; 2768 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); 2769 if (size > sc->aac_max_fib_size) { 2770 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", 2771 size, sc->aac_max_fib_size); 2772 size = sc->aac_max_fib_size; 2773 } 2774 if ((error = copyin(ufib, cm->cm_fib, size)) != 0) 2775 goto out; 2776 cm->cm_fib->Header.Size = size; 2777 cm->cm_timestamp = time_uptime; 2778 cm->cm_datalen = 0; 2779 2780 /* 2781 * Pass the FIB to the controller, wait for it to complete. 2782 */ 2783 mtx_lock(&sc->aac_io_lock); 2784 error = aacraid_wait_command(cm); 2785 mtx_unlock(&sc->aac_io_lock); 2786 if (error != 0) { 2787 device_printf(sc->aac_dev, 2788 "aacraid_wait_command return %d\n", error); 2789 goto out; 2790 } 2791 2792 /* 2793 * Copy the FIB and data back out to the caller. 
2794 */ 2795 size = cm->cm_fib->Header.Size; 2796 if (size > sc->aac_max_fib_size) { 2797 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", 2798 size, sc->aac_max_fib_size); 2799 size = sc->aac_max_fib_size; 2800 } 2801 error = copyout(cm->cm_fib, ufib, size); 2802 2803 out: 2804 if (cm != NULL) { 2805 mtx_lock(&sc->aac_io_lock); 2806 aacraid_release_command(cm); 2807 mtx_unlock(&sc->aac_io_lock); 2808 } 2809 return(error); 2810 } 2811 2812 /* 2813 * Send a passthrough FIB supplied from userspace 2814 */ 2815 static int 2816 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) 2817 { 2818 struct aac_command *cm; 2819 struct aac_fib *fib; 2820 struct aac_srb *srbcmd; 2821 struct aac_srb *user_srb = (struct aac_srb *)arg; 2822 void *user_reply; 2823 int error, transfer_data = 0; 2824 bus_dmamap_t orig_map = 0; 2825 u_int32_t fibsize = 0; 2826 u_int64_t srb_sg_address; 2827 u_int32_t srb_sg_bytecount; 2828 2829 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2830 2831 cm = NULL; 2832 2833 mtx_lock(&sc->aac_io_lock); 2834 if (aacraid_alloc_command(sc, &cm)) { 2835 struct aac_event *event; 2836 2837 event = malloc(sizeof(struct aac_event), M_AACRAIDBUF, 2838 M_NOWAIT | M_ZERO); 2839 if (event == NULL) { 2840 error = EBUSY; 2841 mtx_unlock(&sc->aac_io_lock); 2842 goto out; 2843 } 2844 event->ev_type = AAC_EVENT_CMFREE; 2845 event->ev_callback = aac_ioctl_event; 2846 event->ev_arg = &cm; 2847 aacraid_add_event(sc, event); 2848 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0); 2849 } 2850 mtx_unlock(&sc->aac_io_lock); 2851 2852 cm->cm_data = NULL; 2853 /* save original dma map */ 2854 orig_map = cm->cm_datamap; 2855 2856 fib = cm->cm_fib; 2857 srbcmd = (struct aac_srb *)fib->data; 2858 if ((error = copyin((void *)&user_srb->data_len, &fibsize, 2859 sizeof (u_int32_t)) != 0)) 2860 goto out; 2861 if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) { 2862 error = EINVAL; 2863 goto out; 2864 } 2865 if ((error = copyin((void *)user_srb, srbcmd, fibsize) != 0)) 2866 goto out; 2867 2868 srbcmd->function = 0; /* SRBF_ExecuteScsi */ 2869 srbcmd->retry_limit = 0; /* obsolete */ 2870 2871 /* only one sg element from userspace supported */ 2872 if (srbcmd->sg_map.SgCount > 1) { 2873 error = EINVAL; 2874 goto out; 2875 } 2876 /* check fibsize */ 2877 if (fibsize == (sizeof(struct aac_srb) + 2878 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { 2879 struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry; 2880 struct aac_sg_entry sg; 2881 2882 if ((error = copyin(sgp, &sg, sizeof(sg))) != 0) 2883 goto out; 2884 2885 srb_sg_bytecount = sg.SgByteCount; 2886 srb_sg_address = (u_int64_t)sg.SgAddress; 2887 } else if (fibsize == (sizeof(struct aac_srb) + 2888 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { 2889 #ifdef __LP64__ 2890 struct aac_sg_entry64 *sgp = 2891 (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; 2892 struct aac_sg_entry64 sg; 2893 2894 if ((error = copyin(sgp, &sg, sizeof(sg))) != 0) 2895 goto out; 2896 2897 srb_sg_bytecount = sg.SgByteCount; 2898 srb_sg_address = sg.SgAddress; 2899 if (srb_sg_address > 0xffffffffull && 2900 !(sc->flags & AAC_FLAGS_SG_64BIT)) 2901 #endif 2902 { 2903 error = EINVAL; 2904 goto out; 2905 } 2906 } else { 2907 error = EINVAL; 2908 goto out; 2909 } 2910 user_reply = (char *)arg + fibsize; 2911 srbcmd->data_len = srb_sg_bytecount; 2912 if (srbcmd->sg_map.SgCount == 1) 2913 transfer_data = 1; 2914 2915 if (transfer_data) { 2916 /* 2917 * Create DMA tag for the passthr. data buffer and allocate it. 
2918 */ 2919 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 2920 1, 0, /* algnmnt, boundary */ 2921 (sc->flags & AAC_FLAGS_SG_64BIT) ? 2922 BUS_SPACE_MAXADDR_32BIT : 2923 0x7fffffff, /* lowaddr */ 2924 BUS_SPACE_MAXADDR, /* highaddr */ 2925 NULL, NULL, /* filter, filterarg */ 2926 srb_sg_bytecount, /* size */ 2927 sc->aac_sg_tablesize, /* nsegments */ 2928 srb_sg_bytecount, /* maxsegsize */ 2929 0, /* flags */ 2930 NULL, NULL, /* No locking needed */ 2931 &cm->cm_passthr_dmat)) { 2932 error = ENOMEM; 2933 goto out; 2934 } 2935 if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data, 2936 BUS_DMA_NOWAIT, &cm->cm_datamap)) { 2937 error = ENOMEM; 2938 goto out; 2939 } 2940 /* fill some cm variables */ 2941 cm->cm_datalen = srb_sg_bytecount; 2942 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) 2943 cm->cm_flags |= AAC_CMD_DATAIN; 2944 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) 2945 cm->cm_flags |= AAC_CMD_DATAOUT; 2946 2947 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { 2948 if ((error = copyin((void *)(uintptr_t)srb_sg_address, 2949 cm->cm_data, cm->cm_datalen)) != 0) 2950 goto out; 2951 /* sync required for bus_dmamem_alloc() alloc. mem.? */ 2952 bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap, 2953 BUS_DMASYNC_PREWRITE); 2954 } 2955 } 2956 2957 /* build the FIB */ 2958 fib->Header.Size = sizeof(struct aac_fib_header) + 2959 sizeof(struct aac_srb); 2960 fib->Header.XferState = 2961 AAC_FIBSTATE_HOSTOWNED | 2962 AAC_FIBSTATE_INITIALISED | 2963 AAC_FIBSTATE_EMPTY | 2964 AAC_FIBSTATE_FROMHOST | 2965 AAC_FIBSTATE_REXPECTED | 2966 AAC_FIBSTATE_NORM | 2967 AAC_FIBSTATE_ASYNC; 2968 2969 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ? 2970 ScsiPortCommandU64 : ScsiPortCommand; 2971 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; 2972 2973 /* send command */ 2974 if (transfer_data) { 2975 bus_dmamap_load(cm->cm_passthr_dmat, 2976 cm->cm_datamap, cm->cm_data, 2977 cm->cm_datalen, 2978 aacraid_map_command_sg, cm, 0); 2979 } else { 2980 aacraid_map_command_sg(cm, NULL, 0, 0); 2981 } 2982 2983 /* wait for completion */ 2984 mtx_lock(&sc->aac_io_lock); 2985 while (!(cm->cm_flags & AAC_CMD_COMPLETED)) 2986 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0); 2987 mtx_unlock(&sc->aac_io_lock); 2988 2989 /* copy data */ 2990 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) { 2991 if ((error = copyout(cm->cm_data, 2992 (void *)(uintptr_t)srb_sg_address, 2993 cm->cm_datalen)) != 0) 2994 goto out; 2995 /* sync required for bus_dmamem_alloc() allocated mem.? */ 2996 bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap, 2997 BUS_DMASYNC_POSTREAD); 2998 } 2999 3000 /* status */ 3001 error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response)); 3002 3003 out: 3004 if (cm && cm->cm_data) { 3005 if (transfer_data) 3006 bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap); 3007 bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap); 3008 cm->cm_datamap = orig_map; 3009 } 3010 if (cm && cm->cm_passthr_dmat) 3011 bus_dma_tag_destroy(cm->cm_passthr_dmat); 3012 if (cm) { 3013 mtx_lock(&sc->aac_io_lock); 3014 aacraid_release_command(cm); 3015 mtx_unlock(&sc->aac_io_lock); 3016 } 3017 return(error); 3018 } 3019 3020 /* 3021 * Request an AIF from the controller (new comm. 
type1) 3022 */ 3023 static void 3024 aac_request_aif(struct aac_softc *sc) 3025 { 3026 struct aac_command *cm; 3027 struct aac_fib *fib; 3028 3029 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3030 3031 if (aacraid_alloc_command(sc, &cm)) { 3032 sc->aif_pending = 1; 3033 return; 3034 } 3035 sc->aif_pending = 0; 3036 3037 /* build the FIB */ 3038 fib = cm->cm_fib; 3039 fib->Header.Size = sizeof(struct aac_fib); 3040 fib->Header.XferState = 3041 AAC_FIBSTATE_HOSTOWNED | 3042 AAC_FIBSTATE_INITIALISED | 3043 AAC_FIBSTATE_EMPTY | 3044 AAC_FIBSTATE_FROMHOST | 3045 AAC_FIBSTATE_REXPECTED | 3046 AAC_FIBSTATE_NORM | 3047 AAC_FIBSTATE_ASYNC; 3048 /* set AIF marker */ 3049 fib->Header.Handle = 0x00800000; 3050 fib->Header.Command = AifRequest; 3051 ((struct aac_aif_command *)fib->data)->command = AifReqEvent; 3052 3053 aacraid_map_command_sg(cm, NULL, 0, 0); 3054 } 3055 3056 3057 #if __FreeBSD_version >= 702000 3058 /* 3059 * cdevpriv interface private destructor. 3060 */ 3061 static void 3062 aac_cdevpriv_dtor(void *arg) 3063 { 3064 struct aac_softc *sc; 3065 3066 sc = arg; 3067 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3068 mtx_lock(&Giant); 3069 device_unbusy(sc->aac_dev); 3070 mtx_unlock(&Giant); 3071 } 3072 #else 3073 static int 3074 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td) 3075 { 3076 struct aac_softc *sc; 3077 3078 sc = dev->si_drv1; 3079 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3080 return 0; 3081 } 3082 #endif 3083 3084 /* 3085 * Handle an AIF sent to us by the controller; queue it for later reference. 3086 * If the queue fills up, then drop the older entries. 3087 */ 3088 static void 3089 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) 3090 { 3091 struct aac_aif_command *aif; 3092 struct aac_container *co, *co_next; 3093 struct aac_fib_context *ctx; 3094 struct aac_fib *sync_fib; 3095 struct aac_mntinforesp mir; 3096 int next, current, found; 3097 int count = 0, changed = 0, i = 0; 3098 u_int32_t channel, uid; 3099 3100 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3101 3102 aif = (struct aac_aif_command*)&fib->data[0]; 3103 aacraid_print_aif(sc, aif); 3104 3105 /* Is it an event that we should care about? */ 3106 switch (aif->command) { 3107 case AifCmdEventNotify: 3108 switch (aif->data.EN.type) { 3109 case AifEnAddContainer: 3110 case AifEnDeleteContainer: 3111 /* 3112 * A container was added or deleted, but the message 3113 * doesn't tell us anything else! Re-enumerate the 3114 * containers and sort things out. 3115 */ 3116 aac_alloc_sync_fib(sc, &sync_fib); 3117 do { 3118 /* 3119 * Ask the controller for its containers one at 3120 * a time. 3121 * XXX What if the controller's list changes 3122 * midway through this enumaration? 3123 * XXX This should be done async. 3124 */ 3125 if (aac_get_container_info(sc, sync_fib, i, 3126 &mir, &uid) != 0) 3127 continue; 3128 if (i == 0) 3129 count = mir.MntRespCount; 3130 /* 3131 * Check the container against our list. 3132 * co->co_found was already set to 0 in a 3133 * previous run. 3134 */ 3135 if ((mir.Status == ST_OK) && 3136 (mir.MntTable[0].VolType != CT_NONE)) { 3137 found = 0; 3138 TAILQ_FOREACH(co, 3139 &sc->aac_container_tqh, 3140 co_link) { 3141 if (co->co_mntobj.ObjectId == 3142 mir.MntTable[0].ObjectId) { 3143 co->co_found = 1; 3144 found = 1; 3145 break; 3146 } 3147 } 3148 /* 3149 * If the container matched, continue 3150 * in the list. 3151 */ 3152 if (found) { 3153 i++; 3154 continue; 3155 } 3156 3157 /* 3158 * This is a new container. 
Do all the 3159 * appropriate things to set it up. 3160 */ 3161 aac_add_container(sc, &mir, 1, uid); 3162 changed = 1; 3163 } 3164 i++; 3165 } while ((i < count) && (i < AAC_MAX_CONTAINERS)); 3166 aac_release_sync_fib(sc); 3167 3168 /* 3169 * Go through our list of containers and see which ones 3170 * were not marked 'found'. Since the controller didn't 3171 * list them they must have been deleted. Do the 3172 * appropriate steps to destroy the device. Also reset 3173 * the co->co_found field. 3174 */ 3175 co = TAILQ_FIRST(&sc->aac_container_tqh); 3176 while (co != NULL) { 3177 if (co->co_found == 0) { 3178 co_next = TAILQ_NEXT(co, co_link); 3179 TAILQ_REMOVE(&sc->aac_container_tqh, co, 3180 co_link); 3181 free(co, M_AACRAIDBUF); 3182 changed = 1; 3183 co = co_next; 3184 } else { 3185 co->co_found = 0; 3186 co = TAILQ_NEXT(co, co_link); 3187 } 3188 } 3189 3190 /* Attach the newly created containers */ 3191 if (changed) { 3192 if (sc->cam_rescan_cb != NULL) 3193 sc->cam_rescan_cb(sc, 0, 3194 AAC_CAM_TARGET_WILDCARD); 3195 } 3196 3197 break; 3198 3199 case AifEnEnclosureManagement: 3200 switch (aif->data.EN.data.EEE.eventType) { 3201 case AIF_EM_DRIVE_INSERTION: 3202 case AIF_EM_DRIVE_REMOVAL: 3203 channel = aif->data.EN.data.EEE.unitID; 3204 if (sc->cam_rescan_cb != NULL) 3205 sc->cam_rescan_cb(sc, 3206 ((channel>>24) & 0xF) + 1, 3207 (channel & 0xFFFF)); 3208 break; 3209 } 3210 break; 3211 3212 case AifEnAddJBOD: 3213 case AifEnDeleteJBOD: 3214 case AifRawDeviceRemove: 3215 channel = aif->data.EN.data.ECE.container; 3216 if (sc->cam_rescan_cb != NULL) 3217 sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1, 3218 AAC_CAM_TARGET_WILDCARD); 3219 break; 3220 3221 default: 3222 break; 3223 } 3224 3225 default: 3226 break; 3227 } 3228 3229 /* Copy the AIF data to the AIF queue for ioctl retrieval */ 3230 current = sc->aifq_idx; 3231 next = (current + 1) % AAC_AIFQ_LENGTH; 3232 if (next == 0) 3233 sc->aifq_filled = 1; 3234 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); 3235 /* modify AIF contexts */ 3236 if (sc->aifq_filled) { 3237 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3238 if (next == ctx->ctx_idx) 3239 ctx->ctx_wrap = 1; 3240 else if (current == ctx->ctx_idx && ctx->ctx_wrap) 3241 ctx->ctx_idx = next; 3242 } 3243 } 3244 sc->aifq_idx = next; 3245 /* On the off chance that someone is sleeping for an aif... */ 3246 if (sc->aac_state & AAC_STATE_AIF_SLEEPER) 3247 wakeup(sc->aac_aifq); 3248 /* Wakeup any poll()ers */ 3249 selwakeuppri(&sc->rcv_select, PRIBIO); 3250 3251 return; 3252 } 3253 3254 /* 3255 * Return the Revision of the driver to userspace and check to see if the 3256 * userspace app is possibly compatible. This is extremely bogus since 3257 * our driver doesn't follow Adaptec's versioning system. Cheat by just 3258 * returning what the card reported. 3259 */ 3260 static int 3261 aac_rev_check(struct aac_softc *sc, caddr_t udata) 3262 { 3263 struct aac_rev_check rev_check; 3264 struct aac_rev_check_resp rev_check_resp; 3265 int error = 0; 3266 3267 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3268 3269 /* 3270 * Copyin the revision struct from userspace 3271 */ 3272 if ((error = copyin(udata, (caddr_t)&rev_check, 3273 sizeof(struct aac_rev_check))) != 0) { 3274 return error; 3275 } 3276 3277 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", 3278 rev_check.callingRevision.buildNumber); 3279 3280 /* 3281 * Doctor up the response struct. 
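 * We unconditionally claim compatibility and fill the revision fields
 * from the driver's own AAC_DRIVER_* version macros; see the note above
 * about not following Adaptec's versioning scheme.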
3282 */ 3283 rev_check_resp.possiblyCompatible = 1; 3284 rev_check_resp.adapterSWRevision.external.comp.major = 3285 AAC_DRIVER_MAJOR_VERSION; 3286 rev_check_resp.adapterSWRevision.external.comp.minor = 3287 AAC_DRIVER_MINOR_VERSION; 3288 rev_check_resp.adapterSWRevision.external.comp.type = 3289 AAC_DRIVER_TYPE; 3290 rev_check_resp.adapterSWRevision.external.comp.dash = 3291 AAC_DRIVER_BUGFIX_LEVEL; 3292 rev_check_resp.adapterSWRevision.buildNumber = 3293 AAC_DRIVER_BUILD; 3294 3295 return(copyout((caddr_t)&rev_check_resp, udata, 3296 sizeof(struct aac_rev_check_resp))); 3297 } 3298 3299 /* 3300 * Pass the fib context to the caller 3301 */ 3302 static int 3303 aac_open_aif(struct aac_softc *sc, caddr_t arg) 3304 { 3305 struct aac_fib_context *fibctx, *ctx; 3306 int error = 0; 3307 3308 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3309 3310 fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO); 3311 if (fibctx == NULL) 3312 return (ENOMEM); 3313 3314 mtx_lock(&sc->aac_io_lock); 3315 /* all elements are already 0, add to queue */ 3316 if (sc->fibctx == NULL) 3317 sc->fibctx = fibctx; 3318 else { 3319 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) 3320 ; 3321 ctx->next = fibctx; 3322 fibctx->prev = ctx; 3323 } 3324 3325 /* evaluate unique value */ 3326 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); 3327 ctx = sc->fibctx; 3328 while (ctx != fibctx) { 3329 if (ctx->unique == fibctx->unique) { 3330 fibctx->unique++; 3331 ctx = sc->fibctx; 3332 } else { 3333 ctx = ctx->next; 3334 } 3335 } 3336 3337 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); 3338 mtx_unlock(&sc->aac_io_lock); 3339 if (error) 3340 aac_close_aif(sc, (caddr_t)ctx); 3341 return error; 3342 } 3343 3344 /* 3345 * Close the caller's fib context 3346 */ 3347 static int 3348 aac_close_aif(struct aac_softc *sc, caddr_t arg) 3349 { 3350 struct aac_fib_context *ctx; 3351 3352 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3353 3354 mtx_lock(&sc->aac_io_lock); 3355 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3356 if (ctx->unique == *(uint32_t *)&arg) { 3357 if (ctx == sc->fibctx) 3358 sc->fibctx = NULL; 3359 else { 3360 ctx->prev->next = ctx->next; 3361 if (ctx->next) 3362 ctx->next->prev = ctx->prev; 3363 } 3364 break; 3365 } 3366 } 3367 if (ctx) 3368 free(ctx, M_AACRAIDBUF); 3369 3370 mtx_unlock(&sc->aac_io_lock); 3371 return 0; 3372 } 3373 3374 /* 3375 * Pass the caller the next AIF in their queue 3376 */ 3377 static int 3378 aac_getnext_aif(struct aac_softc *sc, caddr_t arg) 3379 { 3380 struct get_adapter_fib_ioctl agf; 3381 struct aac_fib_context *ctx; 3382 int error; 3383 3384 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3385 3386 mtx_lock(&sc->aac_io_lock); 3387 #ifdef COMPAT_FREEBSD32 3388 if (SV_CURPROC_FLAG(SV_ILP32)) { 3389 struct get_adapter_fib_ioctl32 agf32; 3390 error = copyin(arg, &agf32, sizeof(agf32)); 3391 if (error == 0) { 3392 agf.AdapterFibContext = agf32.AdapterFibContext; 3393 agf.Wait = agf32.Wait; 3394 agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib; 3395 } 3396 } else 3397 #endif 3398 error = copyin(arg, &agf, sizeof(agf)); 3399 if (error == 0) { 3400 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3401 if (agf.AdapterFibContext == ctx->unique) 3402 break; 3403 } 3404 if (!ctx) { 3405 mtx_unlock(&sc->aac_io_lock); 3406 return (EFAULT); 3407 } 3408 3409 error = aac_return_aif(sc, ctx, agf.AifFib); 3410 if (error == EAGAIN && agf.Wait) { 3411 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); 3412 sc->aac_state |= 
AAC_STATE_AIF_SLEEPER; 3413 while (error == EAGAIN) { 3414 mtx_unlock(&sc->aac_io_lock); 3415 error = tsleep(sc->aac_aifq, PRIBIO | 3416 PCATCH, "aacaif", 0); 3417 mtx_lock(&sc->aac_io_lock); 3418 if (error == 0) 3419 error = aac_return_aif(sc, ctx, agf.AifFib); 3420 } 3421 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; 3422 } 3423 } 3424 mtx_unlock(&sc->aac_io_lock); 3425 return(error); 3426 } 3427 3428 /* 3429 * Hand the next AIF off the top of the queue out to userspace. 3430 */ 3431 static int 3432 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) 3433 { 3434 int current, error; 3435 3436 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3437 3438 current = ctx->ctx_idx; 3439 if (current == sc->aifq_idx && !ctx->ctx_wrap) { 3440 /* empty */ 3441 return (EAGAIN); 3442 } 3443 error = 3444 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); 3445 if (error) 3446 device_printf(sc->aac_dev, 3447 "aac_return_aif: copyout returned %d\n", error); 3448 else { 3449 ctx->ctx_wrap = 0; 3450 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 3451 } 3452 return(error); 3453 } 3454 3455 static int 3456 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) 3457 { 3458 struct aac_pci_info { 3459 u_int32_t bus; 3460 u_int32_t slot; 3461 } pciinf; 3462 int error; 3463 3464 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3465 3466 pciinf.bus = pci_get_bus(sc->aac_dev); 3467 pciinf.slot = pci_get_slot(sc->aac_dev); 3468 3469 error = copyout((caddr_t)&pciinf, uptr, 3470 sizeof(struct aac_pci_info)); 3471 3472 return (error); 3473 } 3474 3475 static int 3476 aac_supported_features(struct aac_softc *sc, caddr_t uptr) 3477 { 3478 struct aac_features f; 3479 int error; 3480 3481 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3482 3483 if ((error = copyin(uptr, &f, sizeof (f))) != 0) 3484 return (error); 3485 3486 /* 3487 * When the management driver receives FSACTL_GET_FEATURES ioctl with 3488 * ALL zero in the featuresState, the driver will return the current 3489 * state of all the supported features, the data field will not be 3490 * valid. 3491 * When the management driver receives FSACTL_GET_FEATURES ioctl with 3492 * a specific bit set in the featuresState, the driver will return the 3493 * current state of this specific feature and whatever data that are 3494 * associated with the feature in the data field or perform whatever 3495 * action needed indicates in the data field. 3496 */ 3497 if (f.feat.fValue == 0) { 3498 f.feat.fBits.largeLBA = 3499 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; 3500 f.feat.fBits.JBODSupport = 1; 3501 /* TODO: In the future, add other features state here as well */ 3502 } else { 3503 if (f.feat.fBits.largeLBA) 3504 f.feat.fBits.largeLBA = 3505 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; 3506 /* TODO: Add other features state and data in the future */ 3507 } 3508 3509 error = copyout(&f, uptr, sizeof (f)); 3510 return (error); 3511 } 3512 3513 /* 3514 * Give the userland some information about the container. The AAC arch 3515 * expects the driver to be a SCSI passthrough type driver, so it expects 3516 * the containers to have b:t:l numbers. Fake it. 
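 * A container still present in our list is reported Valid and Locked,
 * with a bus number faked from the controller's unit number and a
 * target/LUN of zero; a container we no longer know about is simply
 * reported Deleted.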
3517 */ 3518 static int 3519 aac_query_disk(struct aac_softc *sc, caddr_t uptr) 3520 { 3521 struct aac_query_disk query_disk; 3522 struct aac_container *co; 3523 int error, id; 3524 3525 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3526 3527 mtx_lock(&sc->aac_io_lock); 3528 error = copyin(uptr, (caddr_t)&query_disk, 3529 sizeof(struct aac_query_disk)); 3530 if (error) { 3531 mtx_unlock(&sc->aac_io_lock); 3532 return (error); 3533 } 3534 3535 id = query_disk.ContainerNumber; 3536 if (id == -1) { 3537 mtx_unlock(&sc->aac_io_lock); 3538 return (EINVAL); 3539 } 3540 3541 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { 3542 if (co->co_mntobj.ObjectId == id) 3543 break; 3544 } 3545 3546 if (co == NULL) { 3547 query_disk.Valid = 0; 3548 query_disk.Locked = 0; 3549 query_disk.Deleted = 1; /* XXX is this right? */ 3550 } else { 3551 query_disk.Valid = 1; 3552 query_disk.Locked = 1; 3553 query_disk.Deleted = 0; 3554 query_disk.Bus = device_get_unit(sc->aac_dev); 3555 query_disk.Target = 0; 3556 query_disk.Lun = 0; 3557 query_disk.UnMapped = 0; 3558 } 3559 3560 error = copyout((caddr_t)&query_disk, uptr, 3561 sizeof(struct aac_query_disk)); 3562 3563 mtx_unlock(&sc->aac_io_lock); 3564 return (error); 3565 } 3566 3567 static void 3568 aac_container_bus(struct aac_softc *sc) 3569 { 3570 struct aac_sim *sim; 3571 device_t child; 3572 3573 sim =(struct aac_sim *)malloc(sizeof(struct aac_sim), 3574 M_AACRAIDBUF, M_NOWAIT | M_ZERO); 3575 if (sim == NULL) { 3576 device_printf(sc->aac_dev, 3577 "No memory to add container bus\n"); 3578 panic("Out of memory?!"); 3579 } 3580 child = device_add_child(sc->aac_dev, "aacraidp", -1); 3581 if (child == NULL) { 3582 device_printf(sc->aac_dev, 3583 "device_add_child failed for container bus\n"); 3584 free(sim, M_AACRAIDBUF); 3585 panic("Out of memory?!"); 3586 } 3587 3588 sim->TargetsPerBus = AAC_MAX_CONTAINERS; 3589 sim->BusNumber = 0; 3590 sim->BusType = CONTAINER_BUS; 3591 sim->InitiatorBusId = -1; 3592 sim->aac_sc = sc; 3593 sim->sim_dev = child; 3594 sim->aac_cam = NULL; 3595 3596 device_set_ivars(child, sim); 3597 device_set_desc(child, "Container Bus"); 3598 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link); 3599 /* 3600 device_set_desc(child, aac_describe_code(aac_container_types, 3601 mir->MntTable[0].VolType)); 3602 */ 3603 bus_generic_attach(sc->aac_dev); 3604 } 3605 3606 static void 3607 aac_get_bus_info(struct aac_softc *sc) 3608 { 3609 struct aac_fib *fib; 3610 struct aac_ctcfg *c_cmd; 3611 struct aac_ctcfg_resp *c_resp; 3612 struct aac_vmioctl *vmi; 3613 struct aac_vmi_businf_resp *vmi_resp; 3614 struct aac_getbusinf businfo; 3615 struct aac_sim *caminf; 3616 device_t child; 3617 int i, error; 3618 3619 mtx_lock(&sc->aac_io_lock); 3620 aac_alloc_sync_fib(sc, &fib); 3621 c_cmd = (struct aac_ctcfg *)&fib->data[0]; 3622 bzero(c_cmd, sizeof(struct aac_ctcfg)); 3623 3624 c_cmd->Command = VM_ContainerConfig; 3625 c_cmd->cmd = CT_GET_SCSI_METHOD; 3626 c_cmd->param = 0; 3627 3628 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3629 sizeof(struct aac_ctcfg)); 3630 if (error) { 3631 device_printf(sc->aac_dev, "Error %d sending " 3632 "VM_ContainerConfig command\n", error); 3633 aac_release_sync_fib(sc); 3634 mtx_unlock(&sc->aac_io_lock); 3635 return; 3636 } 3637 3638 c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; 3639 if (c_resp->Status != ST_OK) { 3640 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", 3641 c_resp->Status); 3642 aac_release_sync_fib(sc); 3643 mtx_unlock(&sc->aac_io_lock); 3644 return; 3645 } 3646 3647 
sc->scsi_method_id = c_resp->param; 3648 3649 vmi = (struct aac_vmioctl *)&fib->data[0]; 3650 bzero(vmi, sizeof(struct aac_vmioctl)); 3651 3652 vmi->Command = VM_Ioctl; 3653 vmi->ObjType = FT_DRIVE; 3654 vmi->MethId = sc->scsi_method_id; 3655 vmi->ObjId = 0; 3656 vmi->IoctlCmd = GetBusInfo; 3657 3658 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3659 sizeof(struct aac_vmi_businf_resp)); 3660 if (error) { 3661 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", 3662 error); 3663 aac_release_sync_fib(sc); 3664 mtx_unlock(&sc->aac_io_lock); 3665 return; 3666 } 3667 3668 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; 3669 if (vmi_resp->Status != ST_OK) { 3670 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", 3671 vmi_resp->Status); 3672 aac_release_sync_fib(sc); 3673 mtx_unlock(&sc->aac_io_lock); 3674 return; 3675 } 3676 3677 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); 3678 aac_release_sync_fib(sc); 3679 mtx_unlock(&sc->aac_io_lock); 3680 3681 for (i = 0; i < businfo.BusCount; i++) { 3682 if (businfo.BusValid[i] != AAC_BUS_VALID) 3683 continue; 3684 3685 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), 3686 M_AACRAIDBUF, M_NOWAIT | M_ZERO); 3687 if (caminf == NULL) { 3688 device_printf(sc->aac_dev, 3689 "No memory to add passthrough bus %d\n", i); 3690 break; 3691 } 3692 3693 child = device_add_child(sc->aac_dev, "aacraidp", -1); 3694 if (child == NULL) { 3695 device_printf(sc->aac_dev, 3696 "device_add_child failed for passthrough bus %d\n", 3697 i); 3698 free(caminf, M_AACRAIDBUF); 3699 break; 3700 } 3701 3702 caminf->TargetsPerBus = businfo.TargetsPerBus; 3703 caminf->BusNumber = i+1; 3704 caminf->BusType = PASSTHROUGH_BUS; 3705 caminf->InitiatorBusId = businfo.InitiatorBusId[i]; 3706 caminf->aac_sc = sc; 3707 caminf->sim_dev = child; 3708 caminf->aac_cam = NULL; 3709 3710 device_set_ivars(child, caminf); 3711 device_set_desc(child, "SCSI Passthrough Bus"); 3712 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); 3713 } 3714 } 3715 3716 /* 3717 * Check to see if the kernel is up and running. If we are in a 3718 * BlinkLED state, return the BlinkLED code. 3719 */ 3720 static u_int32_t 3721 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled) 3722 { 3723 u_int32_t ret; 3724 3725 ret = AAC_GET_FWSTATUS(sc); 3726 3727 if (ret & AAC_UP_AND_RUNNING) 3728 ret = 0; 3729 else if (ret & AAC_KERNEL_PANIC && bled) 3730 *bled = (ret >> 16) & 0xff; 3731 3732 return (ret); 3733 } 3734 3735 /* 3736 * Once do an IOP reset, basically have to re-initialize the card as 3737 * if coming up from a cold boot, and the driver is responsible for 3738 * any IO that was outstanding to the adapter at the time of the IOP 3739 * RESET. And prepare the driver for IOP RESET by making the init code 3740 * modular with the ability to call it from multiple places. 
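 * In outline: interrupts are disabled, every command on the busy and
 * ready queues is completed (or woken) with AAC_CMD_RESET set, the drives
 * are flushed via aacraid_shutdown() if the adapter is still healthy, and
 * one of the supported reset methods is then used (the MU core reset
 * register, the IOP_RESET_ALWAYS/IOP_RESET sync commands, or the doorbell
 * reset mask), with a 5 second wait where required.  If even the plain
 * IOP_RESET fails, a CT_PAUSE_IO container command is issued to undo the
 * shutdown and the reset is abandoned; otherwise aac_check_firmware() and
 * aac_init() are run again to bring the adapter back up.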
3741 */ 3742 static int 3743 aac_reset_adapter(struct aac_softc *sc) 3744 { 3745 struct aac_command *cm; 3746 struct aac_fib *fib; 3747 struct aac_pause_command *pc; 3748 u_int32_t status, reset_mask, waitCount, max_msix_orig; 3749 int msi_enabled_orig; 3750 3751 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3752 mtx_assert(&sc->aac_io_lock, MA_OWNED); 3753 3754 if (sc->aac_state & AAC_STATE_RESET) { 3755 device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n"); 3756 return (EINVAL); 3757 } 3758 sc->aac_state |= AAC_STATE_RESET; 3759 3760 /* disable interrupt */ 3761 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT); 3762 3763 /* 3764 * Abort all pending commands: 3765 * a) on the controller 3766 */ 3767 while ((cm = aac_dequeue_busy(sc)) != NULL) { 3768 cm->cm_flags |= AAC_CMD_RESET; 3769 3770 /* is there a completion handler? */ 3771 if (cm->cm_complete != NULL) { 3772 cm->cm_complete(cm); 3773 } else { 3774 /* assume that someone is sleeping on this 3775 * command 3776 */ 3777 wakeup(cm); 3778 } 3779 } 3780 3781 /* b) in the waiting queues */ 3782 while ((cm = aac_dequeue_ready(sc)) != NULL) { 3783 cm->cm_flags |= AAC_CMD_RESET; 3784 3785 /* is there a completion handler? */ 3786 if (cm->cm_complete != NULL) { 3787 cm->cm_complete(cm); 3788 } else { 3789 /* assume that someone is sleeping on this 3790 * command 3791 */ 3792 wakeup(cm); 3793 } 3794 } 3795 3796 /* flush drives */ 3797 if (aac_check_adapter_health(sc, NULL) == 0) { 3798 mtx_unlock(&sc->aac_io_lock); 3799 (void) aacraid_shutdown(sc->aac_dev); 3800 mtx_lock(&sc->aac_io_lock); 3801 } 3802 3803 /* execute IOP reset */ 3804 if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) { 3805 AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST); 3806 3807 /* We need to wait for 5 seconds before accessing the MU again 3808 * 10000 * 100us = 1000,000us = 1000ms = 1s 3809 */ 3810 waitCount = 5 * 10000; 3811 while (waitCount) { 3812 DELAY(100); /* delay 100 microseconds */ 3813 waitCount--; 3814 } 3815 } else if ((aacraid_sync_command(sc, 3816 AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) { 3817 /* call IOP_RESET for older firmware */ 3818 if ((aacraid_sync_command(sc, 3819 AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) { 3820 3821 if (status == AAC_SRB_STS_INVALID_REQUEST) 3822 device_printf(sc->aac_dev, "IOP_RESET not supported\n"); 3823 else 3824 /* probably timeout */ 3825 device_printf(sc->aac_dev, "IOP_RESET failed\n"); 3826 3827 /* unwind aac_shutdown() */ 3828 aac_alloc_sync_fib(sc, &fib); 3829 pc = (struct aac_pause_command *)&fib->data[0]; 3830 pc->Command = VM_ContainerConfig; 3831 pc->Type = CT_PAUSE_IO; 3832 pc->Timeout = 1; 3833 pc->Min = 1; 3834 pc->NoRescan = 1; 3835 3836 (void) aac_sync_fib(sc, ContainerCommand, 0, fib, 3837 sizeof (struct aac_pause_command)); 3838 aac_release_sync_fib(sc); 3839 3840 goto finish; 3841 } 3842 } else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) { 3843 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask); 3844 /* 3845 * We need to wait for 5 seconds before accessing the doorbell 3846 * again, 10000 * 100us = 1000,000us = 1000ms = 1s 3847 */ 3848 waitCount = 5 * 10000; 3849 while (waitCount) { 3850 DELAY(100); /* delay 100 microseconds */ 3851 waitCount--; 3852 } 3853 } 3854 3855 /* 3856 * Initialize the adapter. 
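 * aac_check_firmware() is re-run with MSI temporarily disabled and,
 * unless the adapter came back in sync-only mode, the saved MSI-X state
 * is restored and aac_init() is called again with the I/O lock dropped;
 * the finish path below re-enables interrupts and restarts queued I/O via
 * aacraid_startio().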
3857 */ 3858 max_msix_orig = sc->aac_max_msix; 3859 msi_enabled_orig = sc->msi_enabled; 3860 sc->msi_enabled = FALSE; 3861 if (aac_check_firmware(sc) != 0) 3862 goto finish; 3863 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) { 3864 sc->aac_max_msix = max_msix_orig; 3865 if (msi_enabled_orig) { 3866 sc->msi_enabled = msi_enabled_orig; 3867 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX); 3868 } 3869 mtx_unlock(&sc->aac_io_lock); 3870 aac_init(sc); 3871 mtx_lock(&sc->aac_io_lock); 3872 } 3873 3874 finish: 3875 sc->aac_state &= ~AAC_STATE_RESET; 3876 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT); 3877 aacraid_startio(sc); 3878 return (0); 3879 } 3880
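/*
 * Worked example of the inbound queue doorbell encoding used by
 * aac_src_send_command() above: fibsize is the FIB size rounded up to
 * 128-byte units minus one, clamped to 31, and added to the 32-bit FIB
 * bus address written to AAC_SRC_IQUE32 (or the IQUE64 register pair).
 * For a new comm. type2 FIB with Header.Size == 512,
 *
 *	fibsize = (512 + 127) / 128 - 1;	(== 3)
 *	AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + 3);
 *
 * so, for a suitably aligned FIB, the size code ends up in the
 * otherwise-zero low-order bits of the address the adapter reads.
 */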