/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2005-06 Adaptec, Inc.
 * Copyright (c) 2005-06 Adaptec Inc., Achim Leubner
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ksynch.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#include "aac_regs.h"
#include "aac.h"

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

/*
 * For minor nodes created by the SCSA framework, minor numbers are
 * formed by left-shifting instance by INST_MINOR_SHIFT and OR'ing in a
 * number less than 64.
 *
 * To support cfgadm, we need to conform to the SCSA framework by creating
 * devctl/scsi and driver specific minor nodes under the SCSA format,
 * and calling the scsi_hba_xxx() functions accordingly.
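 *
 * Illustrative note (not from the original source): with AAC_MINOR
 * defined as 32 below, INST2AAC(1) yields ((1 << INST_MINOR_SHIFT) | 32),
 * and the instance can be recovered by shifting the minor number right
 * by INST_MINOR_SHIFT.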
 */

#define AAC_MINOR		32
#define INST2AAC(x)		(((x) << INST_MINOR_SHIFT) | AAC_MINOR)
#define AAC_SCSA_MINOR(x)	((x) & TRAN_MINOR_MASK)
#define AAC_IS_SCSA_NODE(x)	((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)

#define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
#define AAC_DIP2TRAN(dip)	((scsi_hba_tran_t *)ddi_get_driver_private(dip))
#define AAC_DIP2SOFTS(dip)	(AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
#define PKT2AC(pkt)		((struct aac_cmd *)(pkt)->pkt_ha_private)
#define AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \
	if (!(cond)) { \
		int count = (timeout) * 10; \
		while (count) { \
			drv_usecwait(100); \
			if (cond) \
				break; \
			count--; \
		} \
		(timeout) = (count + 9) / 10; \
	} \
}

#define AAC_SENSE_DATA_DESCR_LEN \
	(sizeof (struct scsi_descr_sense_hdr) + \
	sizeof (struct scsi_information_sense_descr))
#define AAC_ARQ64_LENGTH \
	(sizeof (struct scsi_arq_status) + \
	AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)

/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
#define AAC_GETGXADDR(cmdlen, cdbp) \
	((cmdlen == 6) ? GETG0ADDR(cdbp) : \
	(cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
	((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))

#define AAC_CDB_INQUIRY_CMDDT	0x02
#define AAC_CDB_INQUIRY_EVPD	0x01
#define AAC_VPD_PAGE_CODE	1
#define AAC_VPD_PAGE_LENGTH	3
#define AAC_VPD_PAGE_DATA	4
#define AAC_VPD_ID_CODESET	0
#define AAC_VPD_ID_TYPE		1
#define AAC_VPD_ID_LENGTH	3
#define AAC_VPD_ID_DATA		4

/* Return the size of FIB with data part type data_type */
#define AAC_FIB_SIZEOF(data_type) \
	(sizeof (struct aac_fib_header) + sizeof (data_type))
/* Return the container size defined in mir */
#define AAC_MIR_SIZE(softs, acc, mir) \
	(((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
	((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))

/* The last entry of aac_cards[] is for unknown cards */
#define AAC_UNKNOWN_CARD \
	(sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
#define CARD_IS_UNKNOWN(i)	(i == AAC_UNKNOWN_CARD)
#define BUF_IS_READ(bp)		((bp)->b_flags & B_READ)
#define AAC_IS_Q_EMPTY(q)	((q)->q_head == NULL)
#define AAC_CMDQ(acp)		(!((acp)->flags & AAC_CMD_SYNC))

#define PCI_MEM_GET32(softs, off) \
	ddi_get32((softs)->pci_mem_handle, \
	    (void *)((softs)->pci_mem_base_vaddr + (off)))
#define PCI_MEM_PUT32(softs, off, val) \
	ddi_put32((softs)->pci_mem_handle, \
	    (void *)((softs)->pci_mem_base_vaddr + (off)), \
	    (uint32_t)(val))
#define PCI_MEM_GET16(softs, off) \
	ddi_get16((softs)->pci_mem_handle, \
	    (void *)((softs)->pci_mem_base_vaddr + (off)))
#define PCI_MEM_PUT16(softs, off, val) \
	ddi_put16((softs)->pci_mem_handle, \
	    (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
/* Write host data at valp to device mem[off] repeatedly count times */
#define PCI_MEM_REP_PUT8(softs, off, valp, count) \
	ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
	    count, DDI_DEV_AUTOINCR)
/* Read device data at mem[off] to host addr valp repeatedly count times */
#define PCI_MEM_REP_GET8(softs, off, valp, count) \
	ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
	    count, DDI_DEV_AUTOINCR)
#define AAC_GET_FIELD8(acc, d, s, field) \
	(d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
#define AAC_GET_FIELD32(acc, d, s, field) \
	(d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
#define AAC_GET_FIELD64(acc, d, s, field) \
	(d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
#define AAC_REP_GET_FIELD8(acc, d, s, field, r) \
	ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
	    (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
#define AAC_REP_GET_FIELD32(acc, d, s, field, r) \
	ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
	    (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)

#define AAC_ENABLE_INTR(softs) { \
	if (softs->flags & AAC_FLAGS_NEW_COMM) \
		PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
	else \
		PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
}

#define AAC_DISABLE_INTR(softs)		PCI_MEM_PUT32(softs, AAC_OIMR, ~0)
#define AAC_STATUS_CLR(softs, mask)	PCI_MEM_PUT32(softs, AAC_ODBR, mask)
#define AAC_STATUS_GET(softs)		PCI_MEM_GET32(softs, AAC_ODBR)
#define AAC_NOTIFY(softs, val)		PCI_MEM_PUT32(softs, AAC_IDBR, val)
#define AAC_OUTB_GET(softs)		PCI_MEM_GET32(softs, AAC_OQUE)
#define AAC_OUTB_SET(softs, val)	PCI_MEM_PUT32(softs, AAC_OQUE, val)
#define AAC_FWSTATUS_GET(softs) \
	((softs)->aac_if.aif_get_fwstatus(softs))
#define AAC_MAILBOX_GET(softs, mb) \
	((softs)->aac_if.aif_get_mailbox((softs), (mb)))
#define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
	((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
	    (arg0), (arg1), (arg2), (arg3)))

#define AAC_THROTTLE_DRAIN	-1

#define AAC_QUIESCE_TICK	1	/* 1 second */
#define AAC_QUIESCE_TIMEOUT	60	/* 60 seconds */
#define AAC_DEFAULT_TICK	10	/* 10 seconds */
#define AAC_SYNC_TICK		(30*60)	/* 30 minutes */

/* Poll time for aac_do_poll_io() */
#define AAC_POLL_TIME		60	/* 60 seconds */

/*
 * Hardware access functions
 */
static int aac_rx_get_fwstatus(struct aac_softstate *);
static int aac_rx_get_mailbox(struct aac_softstate *, int);
static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
    uint32_t, uint32_t, uint32_t);
static int aac_rkt_get_fwstatus(struct aac_softstate *);
static int aac_rkt_get_mailbox(struct aac_softstate *, int);
static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
    uint32_t, uint32_t, uint32_t);

/*
 * SCSA function prototypes
 */
static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
static int aac_reset(dev_info_t *, ddi_reset_cmd_t);

/*
 * Interrupt handler functions
 */
static int aac_query_intrs(struct aac_softstate *, int);
static int aac_add_intrs(struct aac_softstate *);
static void aac_remove_intrs(struct aac_softstate *);
static uint_t aac_intr_old(caddr_t);
static uint_t aac_intr_new(caddr_t);
static uint_t aac_softintr(caddr_t);

/*
 * Internal functions in attach
 */
static int aac_check_card_type(struct aac_softstate *);
static int aac_check_firmware(struct aac_softstate *);
static int aac_common_attach(struct aac_softstate *);
static void aac_common_detach(struct aac_softstate *);
static int aac_probe_containers(struct aac_softstate *);
static int aac_alloc_comm_space(struct aac_softstate *);
static int aac_setup_comm_space(struct aac_softstate *);
static void aac_free_comm_space(struct aac_softstate *);
static int aac_hba_setup(struct aac_softstate *);

/*
 * Sync FIB operation functions
 */
int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
    uint32_t, uint32_t, uint32_t, uint32_t *);
static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);

/*
 * Command queue operation functions
 */
static void aac_cmd_initq(struct aac_cmd_queue *);
static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);

/*
 * FIB queue operation functions
 */
static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
static int aac_fib_dequeue(struct aac_softstate *, int, int *);

/*
 * Slot operation functions
 */
static int aac_create_slots(struct aac_softstate *);
static void aac_destroy_slots(struct aac_softstate *);
static void aac_alloc_fibs(struct aac_softstate *);
static void aac_destroy_fibs(struct aac_softstate *);
static struct aac_slot *aac_get_slot(struct aac_softstate *);
static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
static void aac_free_fib(struct aac_slot *);

/*
 * Internal functions
 */
static void aac_cmd_fib_header(struct aac_softstate *, struct aac_slot *,
    uint16_t, uint16_t);
static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
static void aac_start_waiting_io(struct aac_softstate *);
static void aac_drain_comp_q(struct aac_softstate *);
int aac_do_io(struct aac_softstate *, struct aac_cmd *);
static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
static int aac_send_command(struct aac_softstate *, struct aac_slot *);
static void aac_cmd_timeout(struct aac_softstate *);
static int aac_dma_sync_ac(struct aac_cmd *);
static int aac_shutdown(struct aac_softstate *);
static int aac_reset_adapter(struct aac_softstate *);
static int aac_do_quiesce(struct aac_softstate *softs);
static int aac_do_unquiesce(struct aac_softstate *softs);
static void aac_unhold_bus(struct aac_softstate *, int);
static void aac_set_throttle(struct aac_softstate *, struct aac_container *,
    int, int);

/*
 * Adapter Initiated FIB handling function
 */
static int aac_handle_aif(struct aac_softstate *, struct aac_fib *);

/*
 * Timeout handling thread function
 */
static void aac_daemon(void *);

/*
 * IOCTL interface related functions
 */
static int aac_open(dev_t *, int, int, cred_t *);
static int aac_close(dev_t, int, int, cred_t *);
static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);

/*
 * FMA Prototypes
 */
static void aac_fm_init(struct aac_softstate *);
static void aac_fm_fini(struct aac_softstate *);
static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
int aac_check_acc_handle(ddi_acc_handle_t);
int aac_check_dma_handle(ddi_dma_handle_t);
void aac_fm_ereport(struct aac_softstate *, char *);

#ifdef DEBUG
/*
 * UART debug output support
 */

#define AAC_PRINT_BUFFER_SIZE	512
#define AAC_PRINT_TIMEOUT	250	/* 1/4 sec. = 250 msec. */

#define AAC_FW_DBG_STRLEN_OFFSET	0x00
#define AAC_FW_DBG_FLAGS_OFFSET		0x04
#define AAC_FW_DBG_BLED_OFFSET		0x08

static int aac_get_fw_debug_buffer(struct aac_softstate *);
static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);

static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
static char aac_fmt[] = " %s";
static char aac_fmt_header[] = " %s.%d: %s";
static kmutex_t aac_prt_mutex;

/*
 * Debug flags to be put into the softstate flags field
 * when initialized
 */
uint32_t aac_debug_flags =
/*    AACDB_FLAGS_KERNEL_PRINT | */
/*    AACDB_FLAGS_FW_PRINT | */
/*    AACDB_FLAGS_MISC | */
/*    AACDB_FLAGS_FUNC1 | */
/*    AACDB_FLAGS_FUNC2 | */
/*    AACDB_FLAGS_SCMD | */
/*    AACDB_FLAGS_AIF | */
/*    AACDB_FLAGS_FIB | */
/*    AACDB_FLAGS_IOCTL | */
0;

#endif /* DEBUG */

static struct cb_ops aac_cb_ops = {
	aac_open,	/* open */
	aac_close,	/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	aac_ioctl,	/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	nochpoll,	/* poll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* streamtab */
	D_64BIT | D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,		/* cb_rev */
	nodev,		/* async I/O read entry point */
	nodev		/* async I/O write entry point */
};

static struct dev_ops aac_dev_ops = {
	DEVO_REV,
	0,
	nodev,
	nulldev,
	nulldev,
	aac_attach,
	aac_detach,
	aac_reset,
	&aac_cb_ops,
	NULL,
	NULL
};

static struct modldrv aac_modldrv = {
	&mod_driverops,
	"AAC Driver " AAC_DRIVER_VERSION,
	&aac_dev_ops,
};

static struct modlinkage aac_modlinkage = {
	MODREV_1,
	&aac_modldrv,
	NULL
};

static struct aac_softstate *aac_softstatep;

/*
 * Supported card list
 * ordered in vendor id, subvendor id, subdevice id, and device id
 */
static struct aac_card_type aac_cards[] = {
	{0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Si"},
	{0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Si"},
	{0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG |
	    AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Dell", "PERC 320/DC"},
	{0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},

	{0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
	{0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
	{0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},

	{0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
	{0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},

	{0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Adaptec", "2200S"},
	{0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Adaptec", "2120S"},
	{0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Adaptec", "2200S"},
	{0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
	{0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
	{0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
	{0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
	{0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
	    0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
	{0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
	    0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
	{0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
	{0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
	{0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
	{0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
	{0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
	{0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
	{0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
	{0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
	{0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
	{0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
	{0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
	{0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
	{0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
	{0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
	{0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9024RO"},
	{0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9014RO"},
	{0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9047MA"},
	{0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9087MA"},
	{0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "9085LI"},
	{0x9005, 0x285,
	    0x9005, 0x2a5, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "5085BR"},
	{0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9067MA"},
	{0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
	{0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
	{0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
	{0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
	{0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
	{0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},

	{0, 0, 0, 0, AAC_HWIF_UNKNOWN,
	    0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
};

/*
 * Hardware access functions for i960 based cards
 */
static struct aac_interface aac_rx_interface = {
	aac_rx_get_fwstatus,
	aac_rx_get_mailbox,
	aac_rx_set_mailbox
};

/*
 * Hardware access functions for Rocket based cards
 */
static struct aac_interface aac_rkt_interface = {
	aac_rkt_get_fwstatus,
	aac_rkt_get_mailbox,
	aac_rkt_set_mailbox
};

ddi_device_acc_attr_t aac_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

static struct {
	int size;
	int notify;
} aac_qinfo[] = {
	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
};

/*
 * Default aac dma attributes
 */
static ddi_dma_attr_t aac_dma_attr = {
	DMA_ATTR_V0,
	0,		/* lowest usable address */
	0xffffffffull,	/* high DMA address range */
	0xffffffffull,	/* DMA counter register */
	AAC_DMA_ALIGN,	/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xffffffffull,	/* max DMA xfer size */
	0xffffffffull,	/* segment boundary */
	1,		/* s/g list length */
	AAC_BLK_SIZE,	/* granularity of device */
	DDI_DMA_FLAGERR	/* DMA transfer flags */
};

static int aac_tick = AAC_DEFAULT_TICK;	/* tick for the internal timer */
static uint32_t aac_timebase = 0;	/* internal timer in seconds */
static uint32_t aac_sync_time = 0;	/* next time to sync. with firmware */

/*
 * Warlock directives
 *
 * Different variables with the same types have to be protected by the
 * same mutex; otherwise, warlock will complain with "variables don't
 * seem to be protected consistently". For example,
 * aac_softstate::{q_wait, q_comp} are of type aac_cmd_queue, and protected
 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to
 * declare them as protected explicitly at aac_cmd_dequeue().
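 *
 * The _NOTE(SCHEME_PROTECTS_DATA()) directives below tell warlock which
 * structures are serialized by per-packet, per-command or per-FIB
 * ownership (or are stable data) rather than by a mutex.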
 */
_NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
    scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
    mode_format mode_geometry mode_header aac_cmd))
_NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
    aac_sge))
_NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
    aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
    aac_sg_table aac_srb))
_NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
_NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))

int
_init(void)
{
	int rval = 0;

#ifdef DEBUG
	mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
#endif
	DBCALLED(NULL, 1);

	if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
	    sizeof (struct aac_softstate), 0)) != 0)
		goto error;

	if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
		ddi_soft_state_fini((void *)&aac_softstatep);
		goto error;
	}

	if ((rval = mod_install(&aac_modlinkage)) != 0) {
		ddi_soft_state_fini((void *)&aac_softstatep);
		scsi_hba_fini(&aac_modlinkage);
		goto error;
	}
	return (rval);

error:
	AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
#ifdef DEBUG
	mutex_destroy(&aac_prt_mutex);
#endif
	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	DBCALLED(NULL, 1);
	return (mod_info(&aac_modlinkage, modinfop));
}

/*
 * An HBA driver cannot be unloaded unless you reboot,
 * so this function will be of no use.
 */
int
_fini(void)
{
	int rval;

	DBCALLED(NULL, 1);

	if ((rval = mod_remove(&aac_modlinkage)) != 0)
		goto error;

	scsi_hba_fini(&aac_modlinkage);
	ddi_soft_state_fini((void *)&aac_softstatep);
#ifdef DEBUG
	mutex_destroy(&aac_prt_mutex);
#endif
	return (0);

error:
	AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
	return (rval);
}

static int
aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance, i;
	struct aac_softstate *softs = NULL;
	int attach_state = 0;
	int intr_types;

	DBCALLED(NULL, 1);

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_FAILURE);
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);

	/* Get soft state */
	if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
		goto error;
	}
	softs = ddi_get_soft_state(aac_softstatep, instance);
	attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;

	softs->instance = instance;
	softs->devinfo_p = dip;
	softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
	softs->addr_dma_attr.dma_attr_granular = 1;
	softs->card = AAC_UNKNOWN_CARD;
#ifdef DEBUG
	softs->debug_flags = aac_debug_flags;
#endif

	/* Check the card type */
	if (aac_check_card_type(softs) == AACERR) {
		AACDB_PRINT(softs, CE_WARN, "Card not supported");
		goto error;
	}
	/* We have found the right card and everything is OK */
	attach_state |= AAC_ATTACH_CARD_DETECTED;

	/*
	 * Initialize FMA
	 */
	softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);

	aac_fm_init(softs);

	/* Map PCI mem space */
	if (ddi_regs_map_setup(dip, 1,
	    (caddr_t *)&softs->pci_mem_base_vaddr, 0,
	    softs->map_size_min, &aac_acc_attr,
	    &softs->pci_mem_handle) != DDI_SUCCESS)
		goto error;

	softs->map_size = softs->map_size_min;
	attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;

	AAC_DISABLE_INTR(softs);

	/* Get the type of device interrupts */
	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN,
		    "ddi_intr_get_supported_types() failed");
		goto error;
	}
	AACDB_PRINT(softs, CE_NOTE,
	    "ddi_intr_get_supported_types() ret: 0x%x", intr_types);

	/* Query interrupt, and alloc/init all needed struct */
	if (intr_types & DDI_INTR_TYPE_MSI) {
		if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
		    != DDI_SUCCESS) {
			AACDB_PRINT(softs, CE_WARN,
			    "MSI interrupt query failed");
			goto error;
		}
		softs->intr_type = DDI_INTR_TYPE_MSI;
	} else if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
		    != DDI_SUCCESS) {
			AACDB_PRINT(softs, CE_WARN,
			    "FIXED interrupt query failed");
			goto error;
		}
		softs->intr_type = DDI_INTR_TYPE_FIXED;
	} else {
		AACDB_PRINT(softs, CE_WARN,
		    "Device supports neither FIXED nor MSI interrupts");
		goto error;
	}

	/* Init mutexes */
	mutex_init(&softs->q_comp_mutex, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
	cv_init(&softs->event, NULL, CV_DRIVER, NULL);
	mutex_init(&softs->aifq_mutex, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
	cv_init(&softs->aifv, NULL, CV_DRIVER, NULL);
	cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(softs->intr_pri));
	attach_state |= AAC_ATTACH_KMUTEX_INITED;

	/*
	 * Everything has been set up till now,
	 * we will do some common attach.
	 */
	if (aac_common_attach(softs) == AACERR)
		goto error;
	attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;

	/* Init the cmd queues */
	for (i = 0; i < AAC_CMDQ_NUM; i++)
		aac_cmd_initq(&softs->q_wait[i]);
	aac_cmd_initq(&softs->q_busy);
	aac_cmd_initq(&softs->q_comp);

	if (aac_hba_setup(softs) != AACOK)
		goto error;
	attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;

	/* Connect interrupt handlers */
	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
	    NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN,
		    "Can not setup soft interrupt handler!");
		goto error;
	}
	attach_state |= AAC_ATTACH_SOFT_INTR_SETUP;

	if (aac_add_intrs(softs) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN,
		    "Interrupt registration failed, intr type: %s",
		    softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");
		goto error;
	}
	attach_state |= AAC_ATTACH_HARD_INTR_SETUP;

	/* Create devctl/scsi nodes for cfgadm */
	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
		goto error;
	}
	attach_state |= AAC_ATTACH_CREATE_DEVCTL;

	if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
	    DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
		goto error;
	}
	attach_state |= AAC_ATTACH_CREATE_SCSI;

	/* Create aac node for app. to issue ioctls */
	if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
		goto error;
	}

	aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
	softs->state = AAC_STATE_RUN;

	/* Create a thread for command timeout */
	softs->timeout_id = timeout(aac_daemon, (void *)softs,
	    (60 * drv_usectohz(1000000)));

	/* Common attach is OK, so we are attached! */
	AAC_ENABLE_INTR(softs);
	ddi_report_dev(dip);
	AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
	return (DDI_SUCCESS);

error:
	if (attach_state & AAC_ATTACH_CREATE_SCSI)
		ddi_remove_minor_node(dip, "scsi");
	if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
		ddi_remove_minor_node(dip, "devctl");
	if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
		aac_common_detach(softs);
	if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
		(void) scsi_hba_detach(dip);
		scsi_hba_tran_free(AAC_DIP2TRAN(dip));
	}
	if (attach_state & AAC_ATTACH_HARD_INTR_SETUP)
		aac_remove_intrs(softs);
	if (attach_state & AAC_ATTACH_SOFT_INTR_SETUP)
		ddi_remove_softintr(softs->softint_id);
	if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
		mutex_destroy(&softs->q_comp_mutex);
		cv_destroy(&softs->event);
		mutex_destroy(&softs->aifq_mutex);
		cv_destroy(&softs->aifv);
		cv_destroy(&softs->drain_cv);
		mutex_destroy(&softs->io_lock);
	}
	if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
		ddi_regs_map_free(&softs->pci_mem_handle);
	aac_fm_fini(softs);
	if (attach_state & AAC_ATTACH_CARD_DETECTED)
		softs->card = AACERR;
	if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
		ddi_soft_state_free(aac_softstatep, instance);
	return (DDI_FAILURE);
}

static int
aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);

	DBCALLED(softs, 1);

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_FAILURE);
	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&softs->io_lock);
	AAC_DISABLE_INTR(softs);
	softs->state = AAC_STATE_STOPPED;

	mutex_exit(&softs->io_lock);
	(void) untimeout(softs->timeout_id);
	mutex_enter(&softs->io_lock);
	softs->timeout_id = 0;

	ddi_remove_minor_node(dip, "aac");
	ddi_remove_minor_node(dip, "scsi");
	ddi_remove_minor_node(dip, "devctl");

	mutex_exit(&softs->io_lock);
	aac_remove_intrs(softs);
	ddi_remove_softintr(softs->softint_id);

	aac_common_detach(softs);

	(void) scsi_hba_detach(dip);
	scsi_hba_tran_free(tran);

	mutex_destroy(&softs->q_comp_mutex);
	cv_destroy(&softs->event);
	mutex_destroy(&softs->aifq_mutex);
	cv_destroy(&softs->aifv);
	cv_destroy(&softs->drain_cv);
	mutex_destroy(&softs->io_lock);

	ddi_regs_map_free(&softs->pci_mem_handle);
	aac_fm_fini(softs);
	softs->hwif = AAC_HWIF_UNKNOWN;
	softs->card = AAC_UNKNOWN_CARD;
	ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);

	DBCALLED(softs, 1);

	mutex_enter(&softs->io_lock);
	(void) aac_shutdown(softs);
	mutex_exit(&softs->io_lock);

	return (DDI_SUCCESS);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 * This function is called before detach or system shutdown.
 * Note: we can assume that the q_wait on the controller is empty, as we
 * won't allow shutdown if any device is open.
 */
static int
aac_shutdown(struct aac_softstate *softs)
{
	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
	struct aac_close_command *cc = (struct aac_close_command *)
	    &softs->sync_slot.fibp->data[0];
	int rval;

	ddi_put32(acc, &cc->Command, VM_CloseAll);
	ddi_put32(acc, &cc->ContainerId, 0xfffffffful);

	/* Flush all caches, set FW to write through mode */
	rval = aac_sync_fib(softs, ContainerCommand,
	    AAC_FIB_SIZEOF(struct aac_close_command));

	AACDB_PRINT(softs, CE_NOTE,
	    "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
	return (rval);
}

static uint_t
aac_softintr(caddr_t arg)
{
	struct aac_softstate *softs = (void *)arg;

	if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
		aac_drain_comp_q(softs);
		return (DDI_INTR_CLAIMED);
	} else {
		return (DDI_INTR_UNCLAIMED);
	}
}

/*
 * Setup auto sense data for pkt
 */
static void
aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
    uchar_t add_code, uchar_t qual_code, uint64_t info)
{
	struct scsi_arq_status *arqstat;

	pkt->pkt_state |= STATE_GOT_STATUS | STATE_ARQ_DONE;

	arqstat = (void *)(pkt->pkt_scbp);
	arqstat->sts_status.sts_chk = 1; /* CHECK CONDITION */
	arqstat->sts_rqpkt_reason = CMD_CMPLT;
	arqstat->sts_rqpkt_resid = 0;
	arqstat->sts_rqpkt_state =
	    STATE_GOT_BUS |
	    STATE_GOT_TARGET |
	    STATE_SENT_CMD |
	    STATE_XFERRED_DATA;
	arqstat->sts_rqpkt_statistics = 0;

	if (info <= 0xfffffffful) {
		arqstat->sts_sensedata.es_valid = 1;
		arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
		arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
		arqstat->sts_sensedata.es_key = key;
		arqstat->sts_sensedata.es_add_code = add_code;
		arqstat->sts_sensedata.es_qual_code = qual_code;

		arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
		arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
		arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF;
		arqstat->sts_sensedata.es_info_4 = info & 0xFF;
	} else { /* 64-bit LBA */
		struct scsi_descr_sense_hdr *dsp;
		struct scsi_information_sense_descr *isd;

		dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
		dsp->ds_class = CLASS_EXTENDED_SENSE;
		dsp->ds_code = CODE_FMT_DESCR_CURRENT;
		dsp->ds_key = key;
		dsp->ds_add_code = add_code;
		dsp->ds_qual_code = qual_code;
		dsp->ds_addl_sense_length =
		    sizeof (struct scsi_information_sense_descr);

		isd = (struct scsi_information_sense_descr *)(dsp+1);
		isd->isd_descr_type = DESCR_INFORMATION;
		isd->isd_valid = 1;
		isd->isd_information[0] = (info >> 56) & 0xFF;
		isd->isd_information[1] = (info >> 48) & 0xFF;
		isd->isd_information[2] = (info >> 40) & 0xFF;
		isd->isd_information[3] = (info >> 32) & 0xFF;
		isd->isd_information[4] = (info >> 24) & 0xFF;
		isd->isd_information[5] = (info >> 16) & 0xFF;
		isd->isd_information[6] = (info >> 8) & 0xFF;
		isd->isd_information[7] = (info) & 0xFF;
	}
}

/*
 * Setup auto sense data for HARDWARE ERROR
 */
static void
aac_set_arq_data_hwerr(struct aac_cmd *acp)
{
	union scsi_cdb *cdbp;
	uint64_t err_blkno;

	cdbp = (void *)acp->pkt->pkt_cdbp;
	err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
	aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
}

/*
 * Setup auto sense data for UNIT ATTENTION
 */
/*ARGSUSED*/
static void
aac_set_arq_data_reset(struct aac_softstate *softs, struct aac_cmd *acp)
{
	struct aac_container *dvp = acp->dvp;

	if (dvp->reset) {
		dvp->reset = 0;
		aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 0x29, 0x02, 0);
	}
}

/*
 * Send a command to the adapter in New Comm. interface
 */
static int
aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
{
	uint32_t index, device;

	index = PCI_MEM_GET32(softs, AAC_IQUE);
	if (index == 0xffffffffUL) {
		index = PCI_MEM_GET32(softs, AAC_IQUE);
		if (index == 0xffffffffUL)
			return (AACERR);
	}

	device = index;
	PCI_MEM_PUT32(softs, device,
	    (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
	device += 4;
	PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
	device += 4;
	PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
	PCI_MEM_PUT32(softs, AAC_IQUE, index);
	return (AACOK);
}

static void
aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
{
	struct aac_container *dvp = acp->dvp;
	int q = AAC_CMDQ(acp);

	if (acp->slotp) { /* outstanding cmd */
		aac_release_slot(softs, acp->slotp);
		acp->slotp = NULL;
		if (dvp) {
			dvp->ncmds[q]--;
			if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
			    dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
				aac_set_throttle(softs, dvp, q,
				    softs->total_slots);
		}
		softs->bus_ncmds[q]--;
		(void) aac_cmd_delete(&softs->q_busy, acp);
	} else { /* cmd in waiting queue */
		aac_cmd_delete(&softs->q_wait[q], acp);
	}

	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
		mutex_enter(&softs->q_comp_mutex);
		aac_cmd_enqueue(&softs->q_comp, acp);
		mutex_exit(&softs->q_comp_mutex);
	} else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
		cv_broadcast(&softs->event);
	}
}

static void
aac_handle_io(struct aac_softstate *softs, int index)
{
	struct aac_slot *slotp;
	struct aac_cmd *acp;
	uint32_t fast;

	fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
	index >>= 2;

	/* Make sure firmware reported index is valid */
	ASSERT(index >= 0 && index < softs->total_slots);
	slotp = &softs->io_slot[index];
	ASSERT(slotp->index == index);
	acp = slotp->acp;
	ASSERT(acp != NULL && acp->slotp ==
	    slotp);

	acp->flags |= AAC_CMD_CMPLT;
	(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);

	if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
		/*
		 * For fast response IO, the firmware does not return any FIB
		 * data, so we need to fill in the FIB status and state so that
		 * FIB users can handle it correctly.
		 */
		if (fast) {
			uint32_t state;

			state = ddi_get32(slotp->fib_acc_handle,
			    &slotp->fibp->Header.XferState);
			/*
			 * Update state for CPU not for device, no DMA sync
			 * needed
			 */
			ddi_put32(slotp->fib_acc_handle,
			    &slotp->fibp->Header.XferState,
			    state | AAC_FIBSTATE_DONEADAP);
			ddi_put32(slotp->fib_acc_handle,
			    (void *)&slotp->fibp->data[0], ST_OK);
		}

		/* Handle completed ac */
		acp->ac_comp(softs, acp);
	} else {
		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
		acp->flags |= AAC_CMD_ERR;
		if (acp->pkt) {
			acp->pkt->pkt_reason = CMD_TRAN_ERR;
			acp->pkt->pkt_statistics = 0;
		}
	}
	aac_end_io(softs, acp);
}

/*
 * Interrupt handler for the New Comm. interface
 * The New Comm. interface uses a different mechanism for interrupts. There are
 * no explicit message queues, and the driver need only access the mapped PCI
 * mem space to find the completed FIB or AIF.
 */
static int
aac_process_intr_new(struct aac_softstate *softs)
{
	uint32_t index;

	index = AAC_OUTB_GET(softs);
	if (index == 0xfffffffful)
		index = AAC_OUTB_GET(softs);
	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
		return (DDI_INTR_UNCLAIMED);
	}
	if (index != 0xfffffffful) {
		do {
			if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
				aac_handle_io(softs, index);
			} else if (index != 0xfffffffeul) {
				struct aac_fib *fibp;	/* FIB in AIF queue */
				uint16_t fib_size, fib_size0;

				/*
				 * 0xfffffffe means that the controller wants
				 * more work, ignore it for now. Otherwise,
				 * AIF received.
				 */
				index &= ~2;

				mutex_enter(&softs->aifq_mutex);
				/*
				 * Copy AIF from adapter to the empty AIF slot
				 */
				fibp = &softs->aifq[softs->aifq_idx].d;
				fib_size0 = PCI_MEM_GET16(softs, index +
				    offsetof(struct aac_fib, Header.Size));
				fib_size = (fib_size0 > AAC_FIB_SIZE) ?
				    AAC_FIB_SIZE : fib_size0;
				PCI_MEM_REP_GET8(softs, index, fibp,
				    fib_size);

				if (aac_check_acc_handle(softs->
				    pci_mem_handle) == DDI_SUCCESS)
					(void) aac_handle_aif(softs, fibp);
				else
					ddi_fm_service_impact(softs->devinfo_p,
					    DDI_SERVICE_UNAFFECTED);
				mutex_exit(&softs->aifq_mutex);

				/*
				 * AIF memory is owned by the adapter, so let it
				 * know that we are done with it.
				 */
				AAC_OUTB_SET(softs, index);
				AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
			}

			index = AAC_OUTB_GET(softs);
		} while (index != 0xfffffffful);

		/*
		 * Process waiting cmds before starting new ones to
		 * ensure first IOs are serviced first.
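		 * (Callers such as aac_intr_new() hold softs->io_lock
		 * across this processing.)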
		 */
		aac_start_waiting_io(softs);
		return (AAC_DB_COMMAND_READY);
	} else {
		return (0);
	}
}

static uint_t
aac_intr_new(caddr_t arg)
{
	struct aac_softstate *softs = (void *)arg;
	uint_t rval;

	mutex_enter(&softs->io_lock);
	if (aac_process_intr_new(softs))
		rval = DDI_INTR_CLAIMED;
	else
		rval = DDI_INTR_UNCLAIMED;
	mutex_exit(&softs->io_lock);

	aac_drain_comp_q(softs);
	return (rval);
}

/*
 * Interrupt handler for the old interface
 * Explicit message queues are used to send FIBs to and get completed FIBs from
 * the adapter. Driver and adapter maintain the queues in the producer/consumer
 * manner. The driver has to query the queues to find the completed FIB.
 */
static int
aac_process_intr_old(struct aac_softstate *softs)
{
	uint16_t status;

	status = AAC_STATUS_GET(softs);
	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
		return (DDI_INTR_UNCLAIMED);
	}
	if (status & AAC_DB_RESPONSE_READY) {
		int slot_idx;

		/* ACK the intr */
		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
		(void) AAC_STATUS_GET(softs);
		while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
		    &slot_idx) == AACOK)
			aac_handle_io(softs, slot_idx);

		/*
		 * Process waiting cmds before starting new ones to
		 * ensure first IOs are serviced first.
		 */
		aac_start_waiting_io(softs);
		return (AAC_DB_RESPONSE_READY);
	} else if (status & AAC_DB_COMMAND_READY) {
		int aif_idx;

		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
		(void) AAC_STATUS_GET(softs);
		if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
		    AACOK) {
			ddi_acc_handle_t acc = softs->comm_space_acc_handle;
			struct aac_fib *fibp;	/* FIB in AIF queue */
			struct aac_fib *fibp0;	/* FIB in communication space */
			uint16_t fib_size, fib_size0;
			uint32_t fib_xfer_state;
			uint32_t addr, size;

			ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));

#define AAC_SYNC_AIF(softs, aif_idx, type) \
	{ (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
	    offsetof(struct aac_comm_space, \
	    adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
	    (type)); }

			mutex_enter(&softs->aifq_mutex);
			/* Copy AIF from adapter to the empty AIF slot */
			fibp = &softs->aifq[softs->aifq_idx].d;
			AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
			fibp0 = &softs->comm_space->adapter_fibs[aif_idx];
			fib_size0 = ddi_get16(acc, &fibp0->Header.Size);
			fib_size = (fib_size0 > AAC_FIB_SIZE) ?
			    AAC_FIB_SIZE : fib_size0;
			ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0,
			    fib_size, DDI_DEV_AUTOINCR);

			(void) aac_handle_aif(softs, fibp);
			mutex_exit(&softs->aifq_mutex);

			/* Complete AIF back to adapter with good status */
			fib_xfer_state = LE_32(fibp->Header.XferState);
			if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
				ddi_put32(acc, &fibp0->Header.XferState,
				    fib_xfer_state | AAC_FIBSTATE_DONEHOST);
				ddi_put32(acc, (void *)&fibp0->data[0],
				    ST_OK);
				if (fib_size0 > AAC_FIB_SIZE)
					ddi_put16(acc, &fibp0->Header.Size,
					    AAC_FIB_SIZE);
				AAC_SYNC_AIF(softs, aif_idx,
				    DDI_DMA_SYNC_FORDEV);
			}

			/* Put the AIF response on the response queue */
			addr = ddi_get32(acc,
			    &softs->comm_space->adapter_fibs[aif_idx].
			    Header.SenderFibAddress);
			size = (uint32_t)ddi_get16(acc,
			    &softs->comm_space->adapter_fibs[aif_idx].
			    Header.Size);
			ddi_put32(acc,
			    &softs->comm_space->adapter_fibs[aif_idx].
			    Header.ReceiverFibAddress, addr);
			if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
			    addr, size) == AACERR)
				cmn_err(CE_NOTE, "!AIF ack failed");
		}
		return (AAC_DB_COMMAND_READY);
	} else if (status & AAC_DB_PRINTF_READY) {
		/* ACK the intr */
		AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
		(void) AAC_STATUS_GET(softs);
		(void) ddi_dma_sync(softs->comm_space_dma_handle,
		    offsetof(struct aac_comm_space, adapter_print_buf),
		    AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
		if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
		    DDI_SUCCESS)
			cmn_err(CE_NOTE, "MSG From Adapter: %s",
			    softs->comm_space->adapter_print_buf);
		else
			ddi_fm_service_impact(softs->devinfo_p,
			    DDI_SERVICE_UNAFFECTED);
		AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
		return (AAC_DB_PRINTF_READY);
	} else if (status & AAC_DB_COMMAND_NOT_FULL) {
		/*
		 * Without these two condition statements, the OS could hang
		 * after a while, especially if there are a lot of AIF's to
		 * handle, for instance if a drive is pulled from an array
		 * under heavy load.
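		 * Both "not full" doorbell bits are simply acknowledged
		 * with AAC_STATUS_CLR(); no further processing is needed.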
1437 */ 1438 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1439 return (AAC_DB_COMMAND_NOT_FULL); 1440 } else if (status & AAC_DB_RESPONSE_NOT_FULL) { 1441 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1442 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL); 1443 return (AAC_DB_RESPONSE_NOT_FULL); 1444 } else { 1445 return (0); 1446 } 1447 } 1448 1449 static uint_t 1450 aac_intr_old(caddr_t arg) 1451 { 1452 struct aac_softstate *softs = (void *)arg; 1453 int rval; 1454 1455 mutex_enter(&softs->io_lock); 1456 if (aac_process_intr_old(softs)) 1457 rval = DDI_INTR_CLAIMED; 1458 else 1459 rval = DDI_INTR_UNCLAIMED; 1460 mutex_exit(&softs->io_lock); 1461 1462 aac_drain_comp_q(softs); 1463 return (rval); 1464 } 1465 1466 /* 1467 * Query FIXED or MSI interrupts 1468 */ 1469 static int 1470 aac_query_intrs(struct aac_softstate *softs, int intr_type) 1471 { 1472 dev_info_t *dip = softs->devinfo_p; 1473 int avail, actual, intr_size, count; 1474 int i, flag, ret; 1475 1476 AACDB_PRINT(softs, CE_NOTE, 1477 "aac_query_intrs:interrupt type 0x%x", intr_type); 1478 1479 /* Get number of interrupts */ 1480 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 1481 if ((ret != DDI_SUCCESS) || (count == 0)) { 1482 AACDB_PRINT(softs, CE_WARN, 1483 "ddi_intr_get_nintrs() failed, ret %d count %d", 1484 ret, count); 1485 return (DDI_FAILURE); 1486 } 1487 1488 /* Get number of available interrupts */ 1489 ret = ddi_intr_get_navail(dip, intr_type, &avail); 1490 if ((ret != DDI_SUCCESS) || (avail == 0)) { 1491 AACDB_PRINT(softs, CE_WARN, 1492 "ddi_intr_get_navail() failed, ret %d avail %d", 1493 ret, avail); 1494 return (DDI_FAILURE); 1495 } 1496 1497 AACDB_PRINT(softs, CE_NOTE, 1498 "ddi_intr_get_nvail returned %d, navail() returned %d", 1499 count, avail); 1500 1501 /* Allocate an array of interrupt handles */ 1502 intr_size = count * sizeof (ddi_intr_handle_t); 1503 softs->htable = kmem_alloc(intr_size, KM_SLEEP); 1504 1505 if (intr_type == DDI_INTR_TYPE_MSI) { 1506 count = 1; /* only one vector needed by now */ 1507 flag = DDI_INTR_ALLOC_STRICT; 1508 } else { /* must be DDI_INTR_TYPE_FIXED */ 1509 flag = DDI_INTR_ALLOC_NORMAL; 1510 } 1511 1512 /* Call ddi_intr_alloc() */ 1513 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0, 1514 count, &actual, flag); 1515 1516 if ((ret != DDI_SUCCESS) || (actual == 0)) { 1517 AACDB_PRINT(softs, CE_WARN, 1518 "ddi_intr_alloc() failed, ret = %d", ret); 1519 actual = 0; 1520 goto error; 1521 } 1522 1523 if (actual < count) { 1524 AACDB_PRINT(softs, CE_NOTE, 1525 "Requested: %d, Received: %d", count, actual); 1526 goto error; 1527 } 1528 1529 softs->intr_cnt = actual; 1530 1531 /* Get priority for first msi, assume remaining are all the same */ 1532 if ((ret = ddi_intr_get_pri(softs->htable[0], 1533 &softs->intr_pri)) != DDI_SUCCESS) { 1534 AACDB_PRINT(softs, CE_WARN, 1535 "ddi_intr_get_pri() failed, ret = %d", ret); 1536 goto error; 1537 } 1538 1539 /* Test for high level mutex */ 1540 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) { 1541 AACDB_PRINT(softs, CE_WARN, 1542 "aac_query_intrs: Hi level interrupt not supported"); 1543 goto error; 1544 } 1545 1546 return (DDI_SUCCESS); 1547 1548 error: 1549 /* Free already allocated intr */ 1550 for (i = 0; i < actual; i++) 1551 (void) ddi_intr_free(softs->htable[i]); 1552 1553 kmem_free(softs->htable, intr_size); 1554 return (DDI_FAILURE); 1555 } 1556 1557 /* 1558 * Register FIXED or MSI interrupts, and enable them 1559 */ 1560 static int 1561 aac_add_intrs(struct aac_softstate *softs) 1562 { 1563 int i, ret; 1564 int 
	    intr_size, actual;
	ddi_intr_handler_t *aac_intr;

	actual = softs->intr_cnt;
	intr_size = actual * sizeof (ddi_intr_handle_t);
	aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
	    aac_intr_new : aac_intr_old);

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(softs->htable[i],
		    aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "ddi_intr_add_handler() failed ret = %d", ret);

			/* Free already allocated intr */
			for (i = 0; i < actual; i++)
				(void) ddi_intr_free(softs->htable[i]);

			kmem_free(softs->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(softs->htable[i]);

		kmem_free(softs->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Enable interrupts */
	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* for MSI block enable */
		(void) ddi_intr_block_enable(softs->htable, softs->intr_cnt);
	} else {
		/* Call ddi_intr_enable() for legacy/MSI non block enable */
		for (i = 0; i < softs->intr_cnt; i++)
			(void) ddi_intr_enable(softs->htable[i]);
	}

	return (DDI_SUCCESS);
}

/*
 * Unregister FIXED or MSI interrupts
 */
static void
aac_remove_intrs(struct aac_softstate *softs)
{
	int i;

	/* Disable all interrupts */
	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(softs->htable, softs->intr_cnt);
	} else {
		for (i = 0; i < softs->intr_cnt; i++)
			(void) ddi_intr_disable(softs->htable[i]);
	}

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < softs->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(softs->htable[i]);
		(void) ddi_intr_free(softs->htable[i]);
	}

	kmem_free(softs->htable, softs->intr_cnt * sizeof (ddi_intr_handle_t));
}

/*
 * Set pkt_reason and OR in pkt_statistics flag
 */
static void
aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
    uchar_t reason, uint_t stat)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(softs))
#endif
	AACDB_PRINT(softs, CE_NOTE, "acp=0x%p, reason=%x, stat=%x",
	    (void *)acp, reason, stat);
	if (acp->pkt->pkt_reason == CMD_CMPLT)
		acp->pkt->pkt_reason = reason;
	acp->pkt->pkt_statistics |= stat;
}

/*
 * Handle a finished pkt of soft SCMD
 */
static void
aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
{
	ASSERT(acp->pkt);

	acp->flags |= AAC_CMD_CMPLT;

	acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD;
	if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
		acp->pkt->pkt_resid = 0;

	/* AAC_CMD_NO_INTR means no complete callback */
	if (!(acp->flags & AAC_CMD_NO_INTR)) {
		mutex_enter(&softs->q_comp_mutex);
		aac_cmd_enqueue(&softs->q_comp, acp);
		mutex_exit(&softs->q_comp_mutex);
		ddi_trigger_softintr(softs->softint_id);
	}
}

/*
 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
 */

/*
 * Handle
 * completed logical device IO command
 */
/*ARGSUSED*/
static void
aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
	struct aac_slot *slotp = acp->slotp;
	struct aac_blockread_response *resp;
	uint32_t status;

	ASSERT(!(acp->flags & AAC_CMD_SYNC));
	ASSERT(!(acp->flags & AAC_CMD_NO_CB));

	/*
	 * block_read/write has a similar response header, use blockread
	 * response for both.
	 */
	resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
	status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
	if (status == ST_OK) {
		acp->pkt->pkt_resid = 0;
		acp->pkt->pkt_state |= STATE_XFERRED_DATA;
	} else {
		aac_set_arq_data_hwerr(acp);
	}
}

/*
 * Handle completed IOCTL command
 */
/*ARGSUSED*/
void
aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
	struct aac_slot *slotp = acp->slotp;

	/*
	 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
	 * may wait on softs->event, so use cv_broadcast() instead
	 * of cv_signal().
	 */
	ASSERT(acp->flags & AAC_CMD_SYNC);
	ASSERT(acp->flags & AAC_CMD_NO_CB);

	/* Get the size of the response FIB from its FIB.Header.Size field */
	acp->fib_size = ddi_get16(slotp->fib_acc_handle,
	    &slotp->fibp->Header.Size);

	ASSERT(acp->fib_size <= softs->aac_max_fib_size);
	ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
	    (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
}

/*
 * Handle completed Flush command
 */
/*ARGSUSED*/
static void
aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
	struct aac_slot *slotp = acp->slotp;
	ddi_acc_handle_t acc = slotp->fib_acc_handle;
	struct aac_synchronize_reply *resp;
	uint32_t status;

	ASSERT(!(acp->flags & AAC_CMD_SYNC));

	resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
	status = ddi_get32(acc, &resp->Status);
	if (status != CT_OK)
		aac_set_arq_data_hwerr(acp);
}

/*
 * Access PCI space to see if the driver can support the card
 */
static int
aac_check_card_type(struct aac_softstate *softs)
{
	ddi_acc_handle_t pci_config_handle;
	int card_index;
	uint32_t pci_cmd;

	/* Map pci configuration space */
	if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
	    DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
		return (AACERR);
	}

	softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
	softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
	softs->subvendid = pci_config_get16(pci_config_handle,
	    PCI_CONF_SUBVENID);
	softs->subsysid = pci_config_get16(pci_config_handle,
	    PCI_CONF_SUBSYSID);

	card_index = 0;
	while (!CARD_IS_UNKNOWN(card_index)) {
		if ((aac_cards[card_index].vendor == softs->vendid) &&
		    (aac_cards[card_index].device == softs->devid) &&
		    (aac_cards[card_index].subvendor == softs->subvendid) &&
		    (aac_cards[card_index].subsys == softs->subsysid)) {
			break;
		}
		card_index++;
	}

	softs->card = card_index;
	softs->hwif = aac_cards[card_index].hwif;

	/*
	 * Unknown aac card
	 * do a generic match based on the VendorID and DeviceID to
	 * support the new cards in the aac family
	 */
	if
(CARD_IS_UNKNOWN(card_index)) { 1802 if (softs->vendid != 0x9005) { 1803 AACDB_PRINT(softs, CE_WARN, 1804 "Unknown vendor 0x%x", softs->vendid); 1805 goto error; 1806 } 1807 switch (softs->devid) { 1808 case 0x285: 1809 softs->hwif = AAC_HWIF_I960RX; 1810 break; 1811 case 0x286: 1812 softs->hwif = AAC_HWIF_RKT; 1813 break; 1814 default: 1815 AACDB_PRINT(softs, CE_WARN, 1816 "Unknown device \"pci9005,%x\"", softs->devid); 1817 goto error; 1818 } 1819 } 1820 1821 /* Set hardware dependent interface */ 1822 switch (softs->hwif) { 1823 case AAC_HWIF_I960RX: 1824 softs->aac_if = aac_rx_interface; 1825 softs->map_size_min = AAC_MAP_SIZE_MIN_RX; 1826 break; 1827 case AAC_HWIF_RKT: 1828 softs->aac_if = aac_rkt_interface; 1829 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT; 1830 break; 1831 default: 1832 AACDB_PRINT(softs, CE_WARN, 1833 "Unknown hardware interface %d", softs->hwif); 1834 goto error; 1835 } 1836 1837 /* Set card names */ 1838 (void *)strncpy(softs->vendor_name, aac_cards[card_index].vid, 1839 AAC_VENDOR_LEN); 1840 (void *)strncpy(softs->product_name, aac_cards[card_index].desc, 1841 AAC_PRODUCT_LEN); 1842 1843 /* Set up quirks */ 1844 softs->flags = aac_cards[card_index].quirks; 1845 1846 /* Force the busmaster enable bit on */ 1847 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 1848 if ((pci_cmd & PCI_COMM_ME) == 0) { 1849 pci_cmd |= PCI_COMM_ME; 1850 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd); 1851 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 1852 if ((pci_cmd & PCI_COMM_ME) == 0) { 1853 cmn_err(CE_CONT, "?Cannot enable busmaster bit"); 1854 goto error; 1855 } 1856 } 1857 1858 /* Set memory base to map */ 1859 softs->pci_mem_base_paddr = 0xfffffff0UL & \ 1860 pci_config_get32(pci_config_handle, PCI_CONF_BASE0); 1861 1862 pci_config_teardown(&pci_config_handle); 1863 1864 return (AACOK); /* card type detected */ 1865 error: 1866 pci_config_teardown(&pci_config_handle); 1867 return (AACERR); /* no matched card found */ 1868 } 1869 1870 /* 1871 * Check the firmware to determine the features to support and the FIB 1872 * parameters to use. 1873 */ 1874 static int 1875 aac_check_firmware(struct aac_softstate *softs) 1876 { 1877 uint32_t options; 1878 uint32_t atu_size; 1879 ddi_acc_handle_t pci_handle; 1880 uint8_t *pci_mbr; 1881 uint32_t max_fibs; 1882 uint32_t max_fib_size; 1883 uint32_t sg_tablesize; 1884 uint32_t max_sectors; 1885 uint32_t status; 1886 1887 /* Get supported options */ 1888 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0, 1889 &status)) != AACOK) { 1890 if (status != SRB_STATUS_INVALID_REQUEST) { 1891 cmn_err(CE_CONT, 1892 "?Fatal error: request adapter info error"); 1893 return (AACERR); 1894 } 1895 options = 0; 1896 atu_size = 0; 1897 } else { 1898 options = AAC_MAILBOX_GET(softs, 1); 1899 atu_size = AAC_MAILBOX_GET(softs, 2); 1900 } 1901 1902 if (softs->state & AAC_STATE_RESET) { 1903 if ((softs->support_opt == options) && 1904 (softs->atu_size == atu_size)) 1905 return (AACOK); 1906 1907 cmn_err(CE_WARN, 1908 "?Fatal error: firmware changed, system needs reboot"); 1909 return (AACERR); 1910 } 1911 1912 /* 1913 * The following critical settings are initialized only once during 1914 * driver attachment. 
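* On a subsequent IOP reset the values saved below are only compared
* against what the firmware reports (the AAC_STATE_RESET check above);
* any mismatch is treated as a fatal firmware change.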
1915 */ 1916 softs->support_opt = options; 1917 softs->atu_size = atu_size; 1918 1919 /* Process supported options */ 1920 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 1921 (softs->flags & AAC_FLAGS_NO4GB) == 0) { 1922 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window"); 1923 softs->flags |= AAC_FLAGS_4GB_WINDOW; 1924 } else { 1925 /* 1926 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space 1927 * only. IO is handled by the DMA engine which does not suffer 1928 * from the ATU window programming workarounds necessary for 1929 * CPU copy operations. 1930 */ 1931 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull; 1932 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull; 1933 } 1934 1935 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) { 1936 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address"); 1937 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 1938 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull; 1939 softs->flags |= AAC_FLAGS_SG_64BIT; 1940 } 1941 1942 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) { 1943 softs->flags |= AAC_FLAGS_ARRAY_64BIT; 1944 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size"); 1945 } 1946 1947 /* Read preferred settings */ 1948 max_fib_size = 0; 1949 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF, 1950 0, 0, 0, 0, NULL)) == AACOK) { 1951 options = AAC_MAILBOX_GET(softs, 1); 1952 max_fib_size = (options & 0xffff); 1953 max_sectors = (options >> 16) << 1; 1954 options = AAC_MAILBOX_GET(softs, 2); 1955 sg_tablesize = (options >> 16); 1956 options = AAC_MAILBOX_GET(softs, 3); 1957 max_fibs = (options & 0xffff); 1958 } 1959 1960 /* Enable new comm. and rawio at the same time */ 1961 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) && 1962 (max_fib_size != 0)) { 1963 if ((atu_size > softs->map_size) && 1964 (ddi_regs_map_setup(softs->devinfo_p, 1, 1965 (caddr_t *)&pci_mbr, 0, atu_size, &aac_acc_attr, 1966 &pci_handle) == DDI_SUCCESS)) { 1967 ddi_regs_map_free(&softs->pci_mem_handle); 1968 softs->pci_mem_handle = pci_handle; 1969 softs->pci_mem_base_vaddr = pci_mbr; 1970 softs->map_size = atu_size; 1971 } 1972 if (atu_size == softs->map_size) { 1973 softs->flags |= AAC_FLAGS_NEW_COMM; 1974 AACDB_PRINT(softs, CE_NOTE, 1975 "!Enable New Comm. interface"); 1976 } 1977 } 1978 1979 /* Set FIB parameters */ 1980 if (softs->flags & AAC_FLAGS_NEW_COMM) { 1981 softs->aac_max_fibs = max_fibs; 1982 softs->aac_max_fib_size = max_fib_size; 1983 softs->aac_max_sectors = max_sectors; 1984 softs->aac_sg_tablesize = sg_tablesize; 1985 1986 softs->flags |= AAC_FLAGS_RAW_IO; 1987 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO"); 1988 } else { 1989 softs->aac_max_fibs = 1990 (softs->flags & AAC_FLAGS_256FIBS) ? 
256 : 512; 1991 softs->aac_max_fib_size = AAC_FIB_SIZE; 1992 softs->aac_max_sectors = 128; /* 64K */ 1993 if (softs->flags & AAC_FLAGS_17SG) 1994 softs->aac_sg_tablesize = 17; 1995 else if (softs->flags & AAC_FLAGS_34SG) 1996 softs->aac_sg_tablesize = 34; 1997 else if (softs->flags & AAC_FLAGS_SG_64BIT) 1998 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 1999 sizeof (struct aac_blockwrite64) + 2000 sizeof (struct aac_sg_entry64)) / 2001 sizeof (struct aac_sg_entry64); 2002 else 2003 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2004 sizeof (struct aac_blockwrite) + 2005 sizeof (struct aac_sg_entry)) / 2006 sizeof (struct aac_sg_entry); 2007 } 2008 2009 if ((softs->flags & AAC_FLAGS_RAW_IO) && 2010 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) { 2011 softs->flags |= AAC_FLAGS_LBA_64BIT; 2012 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array"); 2013 } 2014 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize; 2015 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9; 2016 /* 2017 * 64K maximum segment size in scatter gather list is controlled by 2018 * the NEW_COMM bit in the adapter information. If not set, the card 2019 * can only accept a maximum of 64K. It is not recommended to permit 2020 * more than 128KB of total transfer size to the adapters because 2021 * performance is negatively impacted. 2022 * 2023 * For new comm, segment size equals max xfer size. For old comm, 2024 * we use 64K for both. 2025 */ 2026 softs->buf_dma_attr.dma_attr_count_max = 2027 softs->buf_dma_attr.dma_attr_maxxfer - 1; 2028 2029 /* Setup FIB operations for logical devices */ 2030 if (softs->flags & AAC_FLAGS_RAW_IO) 2031 softs->aac_cmd_fib = aac_cmd_fib_rawio; 2032 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2033 softs->aac_cmd_fib = aac_cmd_fib_brw64; 2034 else 2035 softs->aac_cmd_fib = aac_cmd_fib_brw; 2036 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? 
\ 2037 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32; 2038 2039 /* 64-bit LBA needs descriptor format sense data */ 2040 softs->slen = sizeof (struct scsi_arq_status); 2041 if ((softs->flags & AAC_FLAGS_LBA_64BIT) && 2042 softs->slen < AAC_ARQ64_LENGTH) 2043 softs->slen = AAC_ARQ64_LENGTH; 2044 2045 AACDB_PRINT(softs, CE_NOTE, 2046 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d", 2047 softs->aac_max_fibs, softs->aac_max_fib_size, 2048 softs->aac_max_sectors, softs->aac_sg_tablesize); 2049 2050 return (AACOK); 2051 } 2052 2053 static void 2054 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0, 2055 struct FsaRev *fsarev1) 2056 { 2057 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 2058 2059 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash); 2060 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type); 2061 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor); 2062 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major); 2063 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber); 2064 } 2065 2066 /* 2067 * The following function comes from Adaptec: 2068 * 2069 * Query adapter information and supplement adapter information 2070 */ 2071 static int 2072 aac_get_adapter_info(struct aac_softstate *softs, 2073 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr) 2074 { 2075 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2076 struct aac_fib *fibp = softs->sync_slot.fibp; 2077 struct aac_adapter_info *ainfp; 2078 struct aac_supplement_adapter_info *sinfp; 2079 2080 ddi_put8(acc, &fibp->data[0], 0); 2081 if (aac_sync_fib(softs, RequestAdapterInfo, 2082 sizeof (struct aac_fib_header)) != AACOK) { 2083 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed"); 2084 return (AACERR); 2085 } 2086 ainfp = (struct aac_adapter_info *)fibp->data; 2087 if (ainfr) { 2088 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2089 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase); 2090 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture); 2091 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant); 2092 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed); 2093 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem); 2094 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem); 2095 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem); 2096 aac_fsa_rev(softs, &ainfp->KernelRevision, 2097 &ainfr->KernelRevision); 2098 aac_fsa_rev(softs, &ainfp->MonitorRevision, 2099 &ainfr->MonitorRevision); 2100 aac_fsa_rev(softs, &ainfp->HardwareRevision, 2101 &ainfr->HardwareRevision); 2102 aac_fsa_rev(softs, &ainfp->BIOSRevision, 2103 &ainfr->BIOSRevision); 2104 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled); 2105 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask); 2106 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber); 2107 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform); 2108 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2109 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant); 2110 } 2111 if (sinfr) { 2112 if (!(softs->support_opt & 2113 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) { 2114 AACDB_PRINT(softs, CE_WARN, 2115 "SupplementAdapterInfo not supported"); 2116 return (AACERR); 2117 } 2118 ddi_put8(acc, &fibp->data[0], 0); 2119 if (aac_sync_fib(softs, RequestSupplementAdapterInfo, 2120 sizeof (struct aac_fib_header)) != AACOK) { 2121 AACDB_PRINT(softs, CE_WARN, 2122 "RequestSupplementAdapterInfo failed"); 2123 return (AACERR); 2124 } 2125 sinfp = (struct aac_supplement_adapter_info *)fibp->data; 2126 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1); 2127 
AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2); 2128 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize); 2129 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId); 2130 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts); 2131 AAC_GET_FIELD32(acc, sinfr, sinfp, Version); 2132 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits); 2133 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber); 2134 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3); 2135 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12); 2136 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts); 2137 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo, 2138 sizeof (struct vpd_info)); 2139 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision, 2140 &sinfr->FlashFirmwareRevision); 2141 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions); 2142 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision, 2143 &sinfr->FlashFirmwareBootRevision); 2144 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo, 2145 MFG_PCBA_SERIAL_NUMBER_WIDTH); 2146 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0], 2147 MFG_WWN_WIDTH); 2148 AAC_REP_GET_FIELD32(acc, sinfr, sinfp, ReservedGrowth[0], 2); 2149 } 2150 return (AACOK); 2151 } 2152 2153 /* 2154 * The following function comes from Adaptec: 2155 * 2156 * Routine to be called during initialization of communications with 2157 * the adapter to handle possible adapter configuration issues. When 2158 * the adapter first boots up, it examines attached drives, etc, and 2159 * potentially comes up with a new or revised configuration (relative to 2160 * what's stored in it's NVRAM). Additionally it may discover problems 2161 * that make the current physical configuration unworkable (currently 2162 * applicable only to cluster configuration issues). 2163 * 2164 * If there are no configuration issues or the issues are considered 2165 * trival by the adapter, it will set it's configuration status to 2166 * "FSACT_CONTINUE" and execute the "commit confiuguration" action 2167 * automatically on it's own. 2168 * 2169 * However, if there are non-trivial issues, the adapter will set it's 2170 * internal configuration status to "FSACT_PAUSE" or "FASCT_ABORT" 2171 * and wait for some agent on the host to issue the "\ContainerCommand 2172 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the 2173 * adapter to commit the new/updated configuration and enable 2174 * un-inhibited operation. The host agent should first issue the 2175 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB 2176 * command to obtain information about config issues detected by 2177 * the adapter. 2178 * 2179 * Normally the adapter's PC BIOS will execute on the host following 2180 * adapter poweron and reset and will be responsible for querring the 2181 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG 2182 * command if appropriate. 2183 * 2184 * However, with the introduction of IOP reset support, the adapter may 2185 * boot up without the benefit of the adapter's PC BIOS host agent. 2186 * This routine is intended to take care of these issues in situations 2187 * where BIOS doesn't execute following adapter poweron or reset. The 2188 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so 2189 * there is no harm in doing this when it's already been done. 
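* In outline, the code below issues VM_ContainerConfig/CT_GET_CONFIG_STATUS,
* examines the action field of the returned aac_cf_status_header, and
* issues VM_ContainerConfig/CT_COMMIT_CONFIG only when that action is no
* more severe than CFACT_PAUSE; a failed commit or a more severe action
* is reported back to the caller as an error.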
2190 */ 2191 static int 2192 aac_handle_adapter_config_issues(struct aac_softstate *softs) 2193 { 2194 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2195 struct aac_fib *fibp = softs->sync_slot.fibp; 2196 struct aac_Container *cmd; 2197 struct aac_Container_resp *resp; 2198 struct aac_cf_status_header *cfg_sts_hdr; 2199 uint32_t resp_status; 2200 uint32_t ct_status; 2201 uint32_t cfg_stat_action; 2202 int rval; 2203 2204 /* Get adapter config status */ 2205 cmd = (struct aac_Container *)&fibp->data[0]; 2206 2207 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2208 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2209 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS); 2210 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE], 2211 sizeof (struct aac_cf_status_header)); 2212 rval = aac_sync_fib(softs, ContainerCommand, 2213 AAC_FIB_SIZEOF(struct aac_Container)); 2214 resp = (struct aac_Container_resp *)cmd; 2215 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data; 2216 2217 resp_status = ddi_get32(acc, &resp->Status); 2218 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2219 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) { 2220 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action); 2221 2222 /* Commit configuration if it's reasonable to do so. */ 2223 if (cfg_stat_action <= CFACT_PAUSE) { 2224 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2225 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2226 ddi_put32(acc, &cmd->CTCommand.command, 2227 CT_COMMIT_CONFIG); 2228 rval = aac_sync_fib(softs, ContainerCommand, 2229 AAC_FIB_SIZEOF(struct aac_Container)); 2230 2231 resp_status = ddi_get32(acc, &resp->Status); 2232 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2233 if ((rval == AACOK) && (resp_status == 0) && 2234 (ct_status == CT_OK)) 2235 /* Successful completion */ 2236 rval = AACMPE_OK; 2237 else 2238 /* Auto-commit aborted due to error(s). */ 2239 rval = AACMPE_COMMIT_CONFIG; 2240 } else { 2241 /* 2242 * Auto-commit aborted due to adapter indicating 2243 * configuration issue(s) too dangerous to auto-commit. 2244 */ 2245 rval = AACMPE_CONFIG_STATUS; 2246 } 2247 } else { 2248 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted"); 2249 rval = AACMPE_CONFIG_STATUS; 2250 } 2251 return (rval); 2252 } 2253 2254 /* 2255 * Hardware initialization and resource allocation 2256 */ 2257 static int 2258 aac_common_attach(struct aac_softstate *softs) 2259 { 2260 uint32_t status; 2261 int i; 2262 2263 DBCALLED(softs, 1); 2264 2265 /* 2266 * Do a little check here to make sure there aren't any outstanding 2267 * FIBs in the message queue. At this point there should not be and 2268 * if there are they are probably left over from another instance of 2269 * the driver like when the system crashes and the crash dump driver 2270 * gets loaded. 2271 */ 2272 while (AAC_OUTB_GET(softs) != 0xfffffffful) 2273 ; 2274 2275 /* 2276 * Wait the card to complete booting up before do anything that 2277 * attempts to communicate with it. 
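* The wait below is a busy-wait on the AAC_KERNEL_UP_AND_RUNNING bit of
* the firmware status register, bounded by AAC_FWUP_TIMEOUT seconds; a
* controller already reporting self-test failure or kernel panic is
* abandoned immediately.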
2278 */ 2279 status = AAC_FWSTATUS_GET(softs); 2280 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC) 2281 goto error; 2282 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */ 2283 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i); 2284 if (i == 0) { 2285 cmn_err(CE_CONT, "?Fatal error: controller not ready"); 2286 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2287 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2288 goto error; 2289 } 2290 2291 /* Read and set card supported options and settings */ 2292 if (aac_check_firmware(softs) == AACERR) { 2293 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2294 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2295 goto error; 2296 } 2297 2298 /* Clear out all interrupts */ 2299 AAC_STATUS_CLR(softs, ~0); 2300 2301 /* Setup communication space with the card */ 2302 if (softs->comm_space_dma_handle == NULL) { 2303 if (aac_alloc_comm_space(softs) != AACOK) 2304 goto error; 2305 } 2306 if (aac_setup_comm_space(softs) != AACOK) { 2307 cmn_err(CE_CONT, "?Setup communication space failed"); 2308 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2309 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2310 goto error; 2311 } 2312 2313 #ifdef DEBUG 2314 if (aac_get_fw_debug_buffer(softs) != AACOK) 2315 cmn_err(CE_CONT, "?firmware UART trace not supported"); 2316 #endif 2317 2318 /* Allocate slots */ 2319 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) { 2320 cmn_err(CE_CONT, "?Fatal error: slots allocate failed"); 2321 goto error; 2322 } 2323 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots); 2324 2325 /* Allocate FIBs */ 2326 if (softs->total_fibs < softs->total_slots) { 2327 aac_alloc_fibs(softs); 2328 if (softs->total_fibs == 0) 2329 goto error; 2330 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated", 2331 softs->total_fibs); 2332 } 2333 2334 /* Get adapter names */ 2335 if (CARD_IS_UNKNOWN(softs->card)) { 2336 struct aac_supplement_adapter_info sinf; 2337 2338 if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) { 2339 cmn_err(CE_CONT, "?Query adapter information failed"); 2340 } else { 2341 char *p, *p0, *p1; 2342 2343 /* 2344 * Now find the controller name in supp_adapter_info-> 2345 * AdapterTypeText. Use the first word as the vendor 2346 * and the other words as the product name. 
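* Purely as an illustration (not a real adapter string): an
* AdapterTypeText of "ADAPTEC  Example-RAID " would yield vendor
* "ADAPTEC" and product "Example-RAID", with the leading, separating
* and trailing blanks removed by the loops below.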
2347 */ 2348 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = " 2349 "\"%s\"", sinf.AdapterTypeText); 2350 p = sinf.AdapterTypeText; 2351 p0 = p1 = NULL; 2352 /* Skip heading spaces */ 2353 while (*p && (*p == ' ' || *p == '\t')) 2354 p++; 2355 p0 = p; 2356 while (*p && (*p != ' ' && *p != '\t')) 2357 p++; 2358 /* Remove middle spaces */ 2359 while (*p && (*p == ' ' || *p == '\t')) 2360 *p++ = 0; 2361 p1 = p; 2362 /* Remove trailing spaces */ 2363 p = p1 + strlen(p1) - 1; 2364 while (p > p1 && (*p == ' ' || *p == '\t')) 2365 *p-- = 0; 2366 if (*p0 && *p1) { 2367 (void *)strncpy(softs->vendor_name, p0, 2368 AAC_VENDOR_LEN); 2369 (void *)strncpy(softs->product_name, p1, 2370 AAC_PRODUCT_LEN); 2371 } else { 2372 cmn_err(CE_WARN, 2373 "?adapter name mis-formatted\n"); 2374 if (*p0) 2375 (void *)strncpy(softs->product_name, 2376 p0, AAC_PRODUCT_LEN); 2377 } 2378 } 2379 } 2380 2381 cmn_err(CE_NOTE, 2382 "!aac driver %d.%02d.%02d-%d, found card: " \ 2383 "%s %s(pci0x%x.%x.%x.%x) at 0x%x", 2384 AAC_DRIVER_MAJOR_VERSION, 2385 AAC_DRIVER_MINOR_VERSION, 2386 AAC_DRIVER_BUGFIX_LEVEL, 2387 AAC_DRIVER_BUILD, 2388 softs->vendor_name, softs->product_name, 2389 softs->vendid, softs->devid, softs->subvendid, softs->subsysid, 2390 softs->pci_mem_base_paddr); 2391 2392 /* Perform acceptance of adapter-detected config changes if possible */ 2393 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) { 2394 cmn_err(CE_CONT, "?Handle adapter config issues failed"); 2395 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2396 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2397 goto error; 2398 } 2399 2400 /* Setup containers */ 2401 bzero(softs->containers, sizeof (struct aac_container) * AAC_MAX_LD); 2402 softs->container_count = 0; 2403 if (aac_probe_containers(softs) != AACOK) { 2404 cmn_err(CE_CONT, "?Fatal error: get container info error"); 2405 goto error; 2406 } 2407 2408 /* Check dma & acc handles allocated in attach */ 2409 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) { 2410 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2411 goto error; 2412 } 2413 2414 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 2415 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2416 goto error; 2417 } 2418 2419 for (i = 0; i < softs->total_slots; i++) { 2420 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) != 2421 DDI_SUCCESS) { 2422 ddi_fm_service_impact(softs->devinfo_p, 2423 DDI_SERVICE_LOST); 2424 goto error; 2425 } 2426 } 2427 2428 return (AACOK); 2429 2430 error: 2431 if (softs->state & AAC_STATE_RESET) 2432 return (AACERR); 2433 if (softs->total_fibs > 0) 2434 aac_destroy_fibs(softs); 2435 if (softs->total_slots > 0) 2436 aac_destroy_slots(softs); 2437 if (softs->comm_space_dma_handle) 2438 aac_free_comm_space(softs); 2439 return (AACERR); 2440 } 2441 2442 /* 2443 * Hardware shutdown and resource release 2444 */ 2445 static void 2446 aac_common_detach(struct aac_softstate *softs) 2447 { 2448 DBCALLED(softs, 1); 2449 2450 (void) aac_shutdown(softs); 2451 2452 aac_destroy_fibs(softs); 2453 aac_destroy_slots(softs); 2454 aac_free_comm_space(softs); 2455 } 2456 2457 /* 2458 * Send a synchronous command to the controller and wait for a result. 2459 * Indicate if the controller completed the command with an error status. 
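* The exchange below is a plain mailbox handshake: fill the mailbox
* registers, clear and then raise the AAC_DB_SYNC_COMMAND doorbell,
* busy-wait up to AAC_IMMEDIATE_TIMEOUT seconds for the adapter to set
* the same doorbell bit in the status register, and finally read the
* command status back from mailbox register 0.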
2460 */ 2461 int 2462 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd, 2463 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, 2464 uint32_t *statusp) 2465 { 2466 int timeout; 2467 uint32_t status; 2468 2469 if (statusp != NULL) 2470 *statusp = SRB_STATUS_SUCCESS; 2471 2472 /* Fill in mailbox */ 2473 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3); 2474 2475 /* Ensure the sync command doorbell flag is cleared */ 2476 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 2477 2478 /* Then set it to signal the adapter */ 2479 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND); 2480 2481 /* Spin waiting for the command to complete */ 2482 timeout = AAC_IMMEDIATE_TIMEOUT * 1000; 2483 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout); 2484 if (!timeout) { 2485 AACDB_PRINT(softs, CE_WARN, 2486 "Sync command timed out after %d seconds (0x%x)!", 2487 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs)); 2488 return (AACERR); 2489 } 2490 2491 /* Clear the completion flag */ 2492 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 2493 2494 /* Get the command status */ 2495 status = AAC_MAILBOX_GET(softs, 0); 2496 if (statusp != NULL) 2497 *statusp = status; 2498 if (status != SRB_STATUS_SUCCESS) { 2499 AACDB_PRINT(softs, CE_WARN, 2500 "Sync command fail: status = 0x%x", status); 2501 return (AACERR); 2502 } 2503 2504 return (AACOK); 2505 } 2506 2507 /* 2508 * Send a synchronous FIB to the adapter and wait for its completion 2509 */ 2510 static int 2511 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize) 2512 { 2513 struct aac_slot *slotp = &softs->sync_slot; 2514 ddi_dma_handle_t dma = slotp->fib_dma_handle; 2515 uint32_t status; 2516 int rval; 2517 2518 /* Sync fib only supports 512 bytes */ 2519 if (fibsize > AAC_FIB_SIZE) 2520 return (AACERR); 2521 2522 /* 2523 * Setup sync fib 2524 * Need not reinitialize FIB header if it's already been filled 2525 * by others like aac_cmd_fib_scsi as aac_cmd. 2526 */ 2527 if (slotp->acp == NULL) 2528 aac_cmd_fib_header(softs, slotp, cmd, fibsize); 2529 2530 (void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib), 2531 fibsize, DDI_DMA_SYNC_FORDEV); 2532 2533 /* Give the FIB to the controller, wait for a response. 
*/ 2534 rval = aac_sync_mbcommand(softs, AAC_MONKER_SYNCFIB, 2535 slotp->fib_phyaddr, 0, 0, 0, &status); 2536 if (rval == AACERR) { 2537 AACDB_PRINT(softs, CE_WARN, 2538 "Send sync fib to controller failed"); 2539 return (AACERR); 2540 } 2541 2542 (void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib), 2543 AAC_FIB_SIZE, DDI_DMA_SYNC_FORCPU); 2544 2545 if ((aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) || 2546 (aac_check_dma_handle(dma) != DDI_SUCCESS)) { 2547 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 2548 return (AACERR); 2549 } 2550 2551 return (AACOK); 2552 } 2553 2554 static void 2555 aac_cmd_initq(struct aac_cmd_queue *q) 2556 { 2557 q->q_head = NULL; 2558 q->q_tail = (struct aac_cmd *)&q->q_head; 2559 } 2560 2561 /* 2562 * Remove a cmd from the head of q 2563 */ 2564 static struct aac_cmd * 2565 aac_cmd_dequeue(struct aac_cmd_queue *q) 2566 { 2567 struct aac_cmd *acp; 2568 2569 _NOTE(ASSUMING_PROTECTED(*q)) 2570 2571 if ((acp = q->q_head) != NULL) { 2572 if ((q->q_head = acp->next) != NULL) 2573 acp->next = NULL; 2574 else 2575 q->q_tail = (struct aac_cmd *)&q->q_head; 2576 acp->prev = NULL; 2577 } 2578 return (acp); 2579 } 2580 2581 /* 2582 * Add a cmd to the tail of q 2583 */ 2584 static void 2585 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp) 2586 { 2587 ASSERT(acp->next == NULL); 2588 acp->prev = q->q_tail; 2589 q->q_tail->next = acp; 2590 q->q_tail = acp; 2591 } 2592 2593 /* 2594 * Remove the cmd ac from q 2595 */ 2596 static void 2597 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp) 2598 { 2599 if (acp->prev) { 2600 if ((acp->prev->next = acp->next) != NULL) { 2601 acp->next->prev = acp->prev; 2602 acp->next = NULL; 2603 } else { 2604 q->q_tail = acp->prev; 2605 } 2606 acp->prev = NULL; 2607 } 2608 /* ac is not in the queue */ 2609 } 2610 2611 /* 2612 * Atomically insert an entry into the nominated queue, returns 0 on success or 2613 * AACERR if the queue is full. 2614 * 2615 * Note: it would be more efficient to defer notifying the controller in 2616 * the case where we may be inserting several entries in rapid succession, 2617 * but implementing this usefully may be difficult (it would involve a 2618 * separate queue/notify interface). 
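* The sequence used below: sync in the queue's producer/consumer indices,
* wrap the producer index if needed, fail with AACERR when the queue is
* full (producer + 1 == consumer), fill in the FIB size and address of
* the next entry, advance the producer index, and ring the queue's
* notify doorbell if it has one.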
2619 */ 2620 static int 2621 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr, 2622 uint32_t fib_size) 2623 { 2624 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 2625 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 2626 uint32_t pi, ci; 2627 2628 DBCALLED(softs, 2); 2629 2630 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q); 2631 2632 /* Get the producer/consumer indices */ 2633 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 2634 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 2635 DDI_DMA_SYNC_FORCPU); 2636 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 2637 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 2638 return (AACERR); 2639 } 2640 2641 pi = ddi_get32(acc, 2642 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 2643 ci = ddi_get32(acc, 2644 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 2645 2646 /* 2647 * Wrap the queue first before we check the queue to see 2648 * if it is full 2649 */ 2650 if (pi >= aac_qinfo[queue].size) 2651 pi = 0; 2652 2653 /* XXX queue full */ 2654 if ((pi + 1) == ci) 2655 return (AACERR); 2656 2657 /* Fill in queue entry */ 2658 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size); 2659 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr); 2660 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 2661 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 2662 DDI_DMA_SYNC_FORDEV); 2663 2664 /* Update producer index */ 2665 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX], 2666 pi + 1); 2667 (void) ddi_dma_sync(dma, 2668 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \ 2669 (uintptr_t)softs->comm_space, sizeof (uint32_t), 2670 DDI_DMA_SYNC_FORDEV); 2671 2672 if (aac_qinfo[queue].notify != 0) 2673 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 2674 return (AACOK); 2675 } 2676 2677 /* 2678 * Atomically remove one entry from the nominated queue, returns 0 on 2679 * success or AACERR if the queue is empty. 
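* Response queues hand back the FIB address stored in the entry as-is,
* while host command queues hand back that address divided by
* AAC_FIB_SIZE so the caller gets an adapter FIB index. If removing the
* entry takes the queue out of the full state, the adapter is notified
* so it may resume posting entries.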
2680 */ 2681 static int 2682 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp) 2683 { 2684 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 2685 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 2686 uint32_t pi, ci; 2687 int unfull = 0; 2688 2689 DBCALLED(softs, 2); 2690 2691 ASSERT(idxp); 2692 2693 /* Get the producer/consumer indices */ 2694 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 2695 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 2696 DDI_DMA_SYNC_FORCPU); 2697 pi = ddi_get32(acc, 2698 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 2699 ci = ddi_get32(acc, 2700 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 2701 2702 /* Check for queue empty */ 2703 if (ci == pi) 2704 return (AACERR); 2705 2706 if (pi >= aac_qinfo[queue].size) 2707 pi = 0; 2708 2709 /* Check for queue full */ 2710 if (ci == pi + 1) 2711 unfull = 1; 2712 2713 /* 2714 * The controller does not wrap the queue, 2715 * so we have to do it by ourselves 2716 */ 2717 if (ci >= aac_qinfo[queue].size) 2718 ci = 0; 2719 2720 /* Fetch the entry */ 2721 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 2722 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 2723 DDI_DMA_SYNC_FORCPU); 2724 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 2725 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 2726 return (AACERR); 2727 } 2728 2729 switch (queue) { 2730 case AAC_HOST_NORM_RESP_Q: 2731 case AAC_HOST_HIGH_RESP_Q: 2732 *idxp = ddi_get32(acc, 2733 &(softs->qentries[queue] + ci)->aq_fib_addr); 2734 break; 2735 2736 case AAC_HOST_NORM_CMD_Q: 2737 case AAC_HOST_HIGH_CMD_Q: 2738 *idxp = ddi_get32(acc, 2739 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE; 2740 break; 2741 2742 default: 2743 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()"); 2744 return (AACERR); 2745 } 2746 2747 /* Update consumer index */ 2748 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX], 2749 ci + 1); 2750 (void) ddi_dma_sync(dma, 2751 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \ 2752 (uintptr_t)softs->comm_space, sizeof (uint32_t), 2753 DDI_DMA_SYNC_FORDEV); 2754 2755 if (unfull && aac_qinfo[queue].notify != 0) 2756 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 2757 return (AACOK); 2758 } 2759 2760 /* 2761 * Request information of the container cid 2762 */ 2763 static struct aac_mntinforesp * 2764 aac_get_container_info(struct aac_softstate *softs, int cid) 2765 { 2766 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2767 struct aac_fib *fibp = softs->sync_slot.fibp; 2768 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0]; 2769 struct aac_mntinforesp *mir; 2770 2771 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */ 2772 (softs->flags & AAC_FLAGS_LBA_64BIT) ? 
2773 VM_NameServe64 : VM_NameServe); 2774 ddi_put32(acc, &mi->MntType, FT_FILESYS); 2775 ddi_put32(acc, &mi->MntCount, cid); 2776 2777 if (aac_sync_fib(softs, ContainerCommand, 2778 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) { 2779 AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid); 2780 return (NULL); 2781 } 2782 2783 mir = (struct aac_mntinforesp *)&fibp->data[0]; 2784 if (ddi_get32(acc, &mir->Status) == ST_OK) 2785 return (mir); 2786 return (NULL); 2787 } 2788 2789 static int 2790 aac_get_container_count(struct aac_softstate *softs, int *count) 2791 { 2792 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2793 struct aac_mntinforesp *mir; 2794 2795 if ((mir = aac_get_container_info(softs, 0)) == NULL) 2796 return (AACERR); 2797 *count = ddi_get32(acc, &mir->MntRespCount); 2798 if (*count > AAC_MAX_LD) { 2799 AACDB_PRINT(softs, CE_CONT, 2800 "container count(%d) > AAC_MAX_LD", *count); 2801 return (AACERR); 2802 } 2803 return (AACOK); 2804 } 2805 2806 static int 2807 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid) 2808 { 2809 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2810 struct aac_Container *ct = (struct aac_Container *) \ 2811 &softs->sync_slot.fibp->data[0]; 2812 2813 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE); 2814 ddi_put32(acc, &ct->Command, VM_ContainerConfig); 2815 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID); 2816 ddi_put32(acc, &ct->CTCommand.param[0], cid); 2817 2818 if (aac_sync_fib(softs, ContainerCommand, 2819 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR) 2820 return (AACERR); 2821 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK) 2822 return (AACERR); 2823 2824 *uid = ddi_get32(acc, &ct->CTCommand.param[1]); 2825 return (AACOK); 2826 } 2827 2828 static int 2829 aac_probe_container(struct aac_softstate *softs, uint32_t cid) 2830 { 2831 struct aac_container *dvp = &softs->containers[cid]; 2832 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2833 struct aac_mntinforesp *mir; 2834 uint64_t size; 2835 uint32_t uid; 2836 2837 /* Get container basic info */ 2838 if ((mir = aac_get_container_info(softs, cid)) == NULL) 2839 return (AACERR); 2840 2841 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) { 2842 if (dvp->valid) { 2843 AACDB_PRINT(softs, CE_NOTE, 2844 ">>> Container %d deleted", cid); 2845 dvp->valid = 0; 2846 } 2847 } else { 2848 size = AAC_MIR_SIZE(softs, acc, mir); 2849 AACDB_PRINT(softs, CE_NOTE, "Container #%d found: " \ 2850 "size=0x%x.%08x, type=%d, name=%s", 2851 cid, 2852 ddi_get32(acc, &mir->MntObj.CapacityHigh), 2853 ddi_get32(acc, &mir->MntObj.Capacity), 2854 ddi_get32(acc, &mir->MntObj.VolType), 2855 mir->MntObj.FileSystemName); 2856 2857 /* Get container UID */ 2858 if (aac_get_container_uid(softs, cid, &uid) == AACERR) { 2859 AACDB_PRINT(softs, CE_CONT, 2860 "query container %d uid failed", cid); 2861 return (AACERR); 2862 } 2863 AACDB_PRINT(softs, CE_CONT, "uid=0x%08x", uid); 2864 2865 if (dvp->valid) { 2866 if (dvp->uid != uid) { 2867 AACDB_PRINT(softs, CE_WARN, 2868 ">>> Container %u uid changed to %d", 2869 cid, uid); 2870 dvp->uid = uid; 2871 } 2872 if (dvp->size != size) { 2873 AACDB_PRINT(softs, CE_NOTE, 2874 ">>> Container %u size changed to %"PRIu64, 2875 cid, size); 2876 dvp->size = size; 2877 } 2878 } else { /* Init new container */ 2879 AACDB_PRINT(softs, CE_NOTE, 2880 ">>> Container %d added", cid); 2881 dvp->valid = 1; 2882 2883 dvp->cid = cid; 2884 dvp->uid = uid; 2885 dvp->size = size; 2886 dvp->locked = 0; 2887 dvp->deleted = 0; 2888 } 2889 } 
2890 return (AACOK); 2891 } 2892 2893 /* 2894 * Do a rescan of all the possible containers and update the container list 2895 * with newly online/offline containers. 2896 */ 2897 static int 2898 aac_probe_containers(struct aac_softstate *softs) 2899 { 2900 int i, count, total; 2901 2902 /* Loop over possible containers */ 2903 count = softs->container_count; 2904 if (aac_get_container_count(softs, &count) == AACERR) 2905 return (AACERR); 2906 for (i = total = 0; i < count; i++) { 2907 if (aac_probe_container(softs, i) == AACOK) 2908 total++; 2909 } 2910 if (count < softs->container_count) { 2911 struct aac_container *dvp; 2912 2913 for (dvp = &softs->containers[count]; 2914 dvp < &softs->containers[softs->container_count]; dvp++) { 2915 if (dvp->valid == 0) 2916 continue; 2917 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted", 2918 dvp->cid); 2919 dvp->valid = 0; 2920 } 2921 } 2922 softs->container_count = count; 2923 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total); 2924 return (AACOK); 2925 } 2926 2927 static int 2928 aac_alloc_comm_space(struct aac_softstate *softs) 2929 { 2930 size_t rlen; 2931 ddi_dma_cookie_t cookie; 2932 uint_t cookien; 2933 2934 /* Allocate DMA for comm. space */ 2935 if (ddi_dma_alloc_handle( 2936 softs->devinfo_p, 2937 &softs->addr_dma_attr, 2938 DDI_DMA_SLEEP, 2939 NULL, 2940 &softs->comm_space_dma_handle) != DDI_SUCCESS) { 2941 AACDB_PRINT(softs, CE_WARN, 2942 "Cannot alloc dma handle for communication area"); 2943 goto error; 2944 } 2945 if (ddi_dma_mem_alloc( 2946 softs->comm_space_dma_handle, 2947 sizeof (struct aac_comm_space), 2948 &aac_acc_attr, 2949 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2950 DDI_DMA_SLEEP, 2951 NULL, 2952 (caddr_t *)&softs->comm_space, 2953 &rlen, 2954 &softs->comm_space_acc_handle) != DDI_SUCCESS) { 2955 AACDB_PRINT(softs, CE_WARN, 2956 "Cannot alloc mem for communication area"); 2957 goto error; 2958 } 2959 if (ddi_dma_addr_bind_handle( 2960 softs->comm_space_dma_handle, 2961 NULL, 2962 (caddr_t)softs->comm_space, 2963 sizeof (struct aac_comm_space), 2964 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2965 DDI_DMA_SLEEP, 2966 NULL, 2967 &cookie, 2968 &cookien) != DDI_DMA_MAPPED) { 2969 AACDB_PRINT(softs, CE_WARN, 2970 "DMA bind failed for communication area"); 2971 goto error; 2972 } 2973 softs->comm_space_phyaddr = cookie.dmac_address; 2974 2975 /* Setup sync FIB space */ 2976 softs->sync_slot.fibp = &softs->comm_space->sync_fib; 2977 softs->sync_slot.fib_phyaddr = softs->comm_space_phyaddr + \ 2978 offsetof(struct aac_comm_space, sync_fib); 2979 softs->sync_slot.fib_acc_handle = softs->comm_space_acc_handle; 2980 softs->sync_slot.fib_dma_handle = softs->comm_space_dma_handle; 2981 2982 return (AACOK); 2983 error: 2984 if (softs->comm_space_acc_handle) { 2985 ddi_dma_mem_free(&softs->comm_space_acc_handle); 2986 softs->comm_space_acc_handle = NULL; 2987 } 2988 if (softs->comm_space_dma_handle) { 2989 ddi_dma_free_handle(&softs->comm_space_dma_handle); 2990 softs->comm_space_dma_handle = NULL; 2991 } 2992 return (AACERR); 2993 } 2994 2995 static void 2996 aac_free_comm_space(struct aac_softstate *softs) 2997 { 2998 softs->sync_slot.fibp = NULL; 2999 softs->sync_slot.fib_phyaddr = NULL; 3000 softs->sync_slot.fib_acc_handle = NULL; 3001 softs->sync_slot.fib_dma_handle = NULL; 3002 3003 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle); 3004 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3005 softs->comm_space_acc_handle = NULL; 3006 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3007 softs->comm_space_dma_handle = 
NULL; 3008 softs->comm_space_phyaddr = NULL; 3009 } 3010 3011 /* 3012 * Initialize the data structures that are required for the communication 3013 * interface to operate 3014 */ 3015 static int 3016 aac_setup_comm_space(struct aac_softstate *softs) 3017 { 3018 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3019 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3020 uint32_t comm_space_phyaddr; 3021 struct aac_adapter_init *initp; 3022 int qoffset; 3023 3024 comm_space_phyaddr = softs->comm_space_phyaddr; 3025 3026 /* Setup adapter init struct */ 3027 initp = &softs->comm_space->init_data; 3028 bzero(initp, sizeof (struct aac_adapter_init)); 3029 3030 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION); 3031 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time()); 3032 3033 /* Setup new/old comm. specific data */ 3034 if (softs->flags & AAC_FLAGS_RAW_IO) { 3035 ddi_put32(acc, &initp->InitStructRevision, 3036 AAC_INIT_STRUCT_REVISION_4); 3037 ddi_put32(acc, &initp->InitFlags, 3038 (softs->flags & AAC_FLAGS_NEW_COMM) ? 3039 AAC_INIT_FLAGS_NEW_COMM_SUPPORTED : 0); 3040 /* Setup the preferred settings */ 3041 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs); 3042 ddi_put32(acc, &initp->MaxIoSize, 3043 (softs->aac_max_sectors << 9)); 3044 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size); 3045 } else { 3046 /* 3047 * Tells the adapter about the physical location of various 3048 * important shared data structures 3049 */ 3050 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress, 3051 comm_space_phyaddr + \ 3052 offsetof(struct aac_comm_space, adapter_fibs)); 3053 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0); 3054 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE); 3055 ddi_put32(acc, &initp->AdapterFibsSize, 3056 AAC_ADAPTER_FIBS * AAC_FIB_SIZE); 3057 ddi_put32(acc, &initp->PrintfBufferAddress, 3058 comm_space_phyaddr + \ 3059 offsetof(struct aac_comm_space, adapter_print_buf)); 3060 ddi_put32(acc, &initp->PrintfBufferSize, 3061 AAC_ADAPTER_PRINT_BUFSIZE); 3062 ddi_put32(acc, &initp->MiniPortRevision, 3063 AAC_INIT_STRUCT_MINIPORT_REVISION); 3064 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN); 3065 3066 qoffset = (comm_space_phyaddr + \ 3067 offsetof(struct aac_comm_space, qtable)) % \ 3068 AAC_QUEUE_ALIGN; 3069 if (qoffset) 3070 qoffset = AAC_QUEUE_ALIGN - qoffset; 3071 softs->qtablep = (struct aac_queue_table *) \ 3072 ((char *)&softs->comm_space->qtable + qoffset); 3073 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \ 3074 offsetof(struct aac_comm_space, qtable) + qoffset); 3075 3076 /* Init queue table */ 3077 ddi_put32(acc, &softs->qtablep-> \ 3078 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3079 AAC_HOST_NORM_CMD_ENTRIES); 3080 ddi_put32(acc, &softs->qtablep-> \ 3081 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3082 AAC_HOST_NORM_CMD_ENTRIES); 3083 ddi_put32(acc, &softs->qtablep-> \ 3084 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3085 AAC_HOST_HIGH_CMD_ENTRIES); 3086 ddi_put32(acc, &softs->qtablep-> \ 3087 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3088 AAC_HOST_HIGH_CMD_ENTRIES); 3089 ddi_put32(acc, &softs->qtablep-> \ 3090 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3091 AAC_ADAP_NORM_CMD_ENTRIES); 3092 ddi_put32(acc, &softs->qtablep-> \ 3093 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3094 AAC_ADAP_NORM_CMD_ENTRIES); 3095 ddi_put32(acc, &softs->qtablep-> \ 3096 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3097 AAC_ADAP_HIGH_CMD_ENTRIES); 3098 
ddi_put32(acc, &softs->qtablep-> \ 3099 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3100 AAC_ADAP_HIGH_CMD_ENTRIES); 3101 ddi_put32(acc, &softs->qtablep-> \ 3102 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3103 AAC_HOST_NORM_RESP_ENTRIES); 3104 ddi_put32(acc, &softs->qtablep-> \ 3105 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3106 AAC_HOST_NORM_RESP_ENTRIES); 3107 ddi_put32(acc, &softs->qtablep-> \ 3108 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3109 AAC_HOST_HIGH_RESP_ENTRIES); 3110 ddi_put32(acc, &softs->qtablep-> \ 3111 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3112 AAC_HOST_HIGH_RESP_ENTRIES); 3113 ddi_put32(acc, &softs->qtablep-> \ 3114 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3115 AAC_ADAP_NORM_RESP_ENTRIES); 3116 ddi_put32(acc, &softs->qtablep-> \ 3117 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3118 AAC_ADAP_NORM_RESP_ENTRIES); 3119 ddi_put32(acc, &softs->qtablep-> \ 3120 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3121 AAC_ADAP_HIGH_RESP_ENTRIES); 3122 ddi_put32(acc, &softs->qtablep-> \ 3123 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3124 AAC_ADAP_HIGH_RESP_ENTRIES); 3125 3126 /* Init queue entries */ 3127 softs->qentries[AAC_HOST_NORM_CMD_Q] = 3128 &softs->qtablep->qt_HostNormCmdQueue[0]; 3129 softs->qentries[AAC_HOST_HIGH_CMD_Q] = 3130 &softs->qtablep->qt_HostHighCmdQueue[0]; 3131 softs->qentries[AAC_ADAP_NORM_CMD_Q] = 3132 &softs->qtablep->qt_AdapNormCmdQueue[0]; 3133 softs->qentries[AAC_ADAP_HIGH_CMD_Q] = 3134 &softs->qtablep->qt_AdapHighCmdQueue[0]; 3135 softs->qentries[AAC_HOST_NORM_RESP_Q] = 3136 &softs->qtablep->qt_HostNormRespQueue[0]; 3137 softs->qentries[AAC_HOST_HIGH_RESP_Q] = 3138 &softs->qtablep->qt_HostHighRespQueue[0]; 3139 softs->qentries[AAC_ADAP_NORM_RESP_Q] = 3140 &softs->qtablep->qt_AdapNormRespQueue[0]; 3141 softs->qentries[AAC_ADAP_HIGH_RESP_Q] = 3142 &softs->qtablep->qt_AdapHighRespQueue[0]; 3143 } 3144 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV); 3145 3146 /* Send init structure to the card */ 3147 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT, 3148 comm_space_phyaddr + \ 3149 offsetof(struct aac_comm_space, init_data), 3150 0, 0, 0, NULL) == AACERR) { 3151 AACDB_PRINT(softs, CE_WARN, 3152 "Cannot send init structure to adapter"); 3153 return (AACERR); 3154 } 3155 3156 return (AACOK); 3157 } 3158 3159 static uchar_t * 3160 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf) 3161 { 3162 (void) memset(buf, ' ', AAC_VENDOR_LEN); 3163 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name)); 3164 return (buf + AAC_VENDOR_LEN); 3165 } 3166 3167 static uchar_t * 3168 aac_product_id(struct aac_softstate *softs, uchar_t *buf) 3169 { 3170 (void) memset(buf, ' ', AAC_PRODUCT_LEN); 3171 bcopy(softs->product_name, buf, strlen(softs->product_name)); 3172 return (buf + AAC_PRODUCT_LEN); 3173 } 3174 3175 /* 3176 * Construct unit serial number from container uid 3177 */ 3178 static uchar_t * 3179 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf) 3180 { 3181 int i, d; 3182 uint32_t uid = softs->containers[tgt].uid; 3183 3184 for (i = 7; i >= 0; i--) { 3185 d = uid & 0xf; 3186 buf[i] = d > 9 ? 
'A' + (d - 0xa) : '0' + d; 3187 uid >>= 4; 3188 } 3189 return (buf + 8); 3190 } 3191 3192 /* 3193 * SPC-3 7.5 INQUIRY command implementation 3194 */ 3195 static void 3196 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt, 3197 union scsi_cdb *cdbp, struct buf *bp) 3198 { 3199 int tgt = pkt->pkt_address.a_target; 3200 char *b_addr = NULL; 3201 uchar_t page = cdbp->cdb_opaque[2]; 3202 3203 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) { 3204 /* Command Support Data is not supported */ 3205 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0); 3206 return; 3207 } 3208 3209 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3210 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3211 bp_mapin(bp); 3212 b_addr = bp->b_un.b_addr; 3213 } 3214 3215 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) { 3216 uchar_t *vpdp = (uchar_t *)b_addr; 3217 uchar_t *idp, *sp; 3218 3219 /* SPC-3 8.4 Vital product data parameters */ 3220 switch (page) { 3221 case 0x00: 3222 /* Supported VPD pages */ 3223 if (vpdp == NULL || 3224 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3)) 3225 return; 3226 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3227 vpdp[AAC_VPD_PAGE_CODE] = 0x00; 3228 vpdp[AAC_VPD_PAGE_LENGTH] = 3; 3229 3230 vpdp[AAC_VPD_PAGE_DATA] = 0x00; 3231 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80; 3232 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83; 3233 3234 pkt->pkt_state |= STATE_XFERRED_DATA; 3235 break; 3236 3237 case 0x80: 3238 /* Unit serial number page */ 3239 if (vpdp == NULL || 3240 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8)) 3241 return; 3242 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3243 vpdp[AAC_VPD_PAGE_CODE] = 0x80; 3244 vpdp[AAC_VPD_PAGE_LENGTH] = 8; 3245 3246 sp = &vpdp[AAC_VPD_PAGE_DATA]; 3247 (void) aac_lun_serialno(softs, tgt, sp); 3248 3249 pkt->pkt_state |= STATE_XFERRED_DATA; 3250 break; 3251 3252 case 0x83: 3253 /* Device identification page */ 3254 if (vpdp == NULL || 3255 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32)) 3256 return; 3257 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3258 vpdp[AAC_VPD_PAGE_CODE] = 0x83; 3259 3260 idp = &vpdp[AAC_VPD_PAGE_DATA]; 3261 bzero(idp, AAC_VPD_ID_LENGTH); 3262 idp[AAC_VPD_ID_CODESET] = 0x02; 3263 idp[AAC_VPD_ID_TYPE] = 0x01; 3264 3265 /* 3266 * SPC-3 Table 111 - Identifier type 3267 * One recommanded method of constructing the remainder 3268 * of identifier field is to concatenate the product 3269 * identification field from the standard INQUIRY data 3270 * field and the product serial number field from the 3271 * unit serial number page. 
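* That is what the code below does: the identifier data is simply the
* vendor id, product id and container serial number written back to
* back, with the identifier and page lengths computed from the final
* buffer pointer rather than hard-coded.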
3272 */ 3273 sp = &idp[AAC_VPD_ID_DATA]; 3274 sp = aac_vendor_id(softs, sp); 3275 sp = aac_product_id(softs, sp); 3276 sp = aac_lun_serialno(softs, tgt, sp); 3277 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \ 3278 (uintptr_t)&idp[AAC_VPD_ID_DATA]; 3279 3280 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \ 3281 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA]; 3282 pkt->pkt_state |= STATE_XFERRED_DATA; 3283 break; 3284 3285 default: 3286 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3287 0x24, 0x00, 0); 3288 break; 3289 } 3290 } else { 3291 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr; 3292 size_t len = sizeof (struct scsi_inquiry); 3293 3294 if (page != 0) { 3295 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3296 0x24, 0x00, 0); 3297 return; 3298 } 3299 if (inqp == NULL || bp->b_bcount < len) 3300 return; 3301 3302 bzero(inqp, len); 3303 inqp->inq_len = AAC_ADDITIONAL_LEN; 3304 inqp->inq_ansi = AAC_ANSI_VER; 3305 inqp->inq_rdf = AAC_RESP_DATA_FORMAT; 3306 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid); 3307 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid); 3308 bcopy("V1.0", inqp->inq_revision, 4); 3309 inqp->inq_cmdque = 1; /* enable tagged-queuing */ 3310 /* 3311 * For "sd-max-xfer-size" property which may impact performance 3312 * when IO threads increase. 3313 */ 3314 inqp->inq_wbus32 = 1; 3315 3316 pkt->pkt_state |= STATE_XFERRED_DATA; 3317 } 3318 } 3319 3320 /* 3321 * SPC-3 7.10 MODE SENSE command implementation 3322 */ 3323 static void 3324 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt, 3325 union scsi_cdb *cdbp, struct buf *bp, int capacity) 3326 { 3327 uchar_t pagecode; 3328 struct mode_header *headerp; 3329 struct mode_header_g1 *g1_headerp; 3330 unsigned int ncyl; 3331 caddr_t sense_data; 3332 caddr_t next_page; 3333 size_t sdata_size; 3334 size_t pages_size; 3335 int unsupport_page = 0; 3336 3337 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE || 3338 cdbp->scc_cmd == SCMD_MODE_SENSE_G1); 3339 3340 if (!(bp && bp->b_un.b_addr && bp->b_bcount)) 3341 return; 3342 3343 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3344 bp_mapin(bp); 3345 pkt->pkt_state |= STATE_XFERRED_DATA; 3346 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F; 3347 3348 /* calculate the size of needed buffer */ 3349 if (cdbp->scc_cmd == SCMD_MODE_SENSE) 3350 sdata_size = MODE_HEADER_LENGTH; 3351 else /* must be SCMD_MODE_SENSE_G1 */ 3352 sdata_size = MODE_HEADER_LENGTH_G1; 3353 3354 pages_size = 0; 3355 switch (pagecode) { 3356 case SD_MODE_SENSE_PAGE3_CODE: 3357 pages_size += sizeof (struct mode_format); 3358 break; 3359 3360 case SD_MODE_SENSE_PAGE4_CODE: 3361 pages_size += sizeof (struct mode_geometry); 3362 break; 3363 3364 case MODEPAGE_CTRL_MODE: 3365 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3366 pages_size += sizeof (struct mode_control_scsi3); 3367 } else { 3368 unsupport_page = 1; 3369 } 3370 break; 3371 3372 case MODEPAGE_ALLPAGES: 3373 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3374 pages_size += sizeof (struct mode_format) + 3375 sizeof (struct mode_geometry) + 3376 sizeof (struct mode_control_scsi3); 3377 } else { 3378 pages_size += sizeof (struct mode_format) + 3379 sizeof (struct mode_geometry); 3380 } 3381 break; 3382 3383 default: 3384 /* unsupported pages */ 3385 unsupport_page = 1; 3386 } 3387 3388 /* allocate buffer to fill the send data */ 3389 sdata_size += pages_size; 3390 sense_data = kmem_zalloc(sdata_size, KM_SLEEP); 3391 3392 if (cdbp->scc_cmd == SCMD_MODE_SENSE) { 3393 headerp = (struct mode_header *)sense_data; 3394 headerp->length = MODE_HEADER_LENGTH + pages_size - 3395 sizeof 
(headerp->length); 3396 headerp->bdesc_length = 0; 3397 next_page = sense_data + sizeof (struct mode_header); 3398 } else { 3399 g1_headerp = (void *)sense_data; 3400 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size - 3401 sizeof (g1_headerp->length)); 3402 g1_headerp->bdesc_length = 0; 3403 next_page = sense_data + sizeof (struct mode_header_g1); 3404 } 3405 3406 if (unsupport_page) 3407 goto finish; 3408 3409 if (pagecode == SD_MODE_SENSE_PAGE3_CODE || 3410 pagecode == MODEPAGE_ALLPAGES) { 3411 /* SBC-3 7.1.3.3 Format device page */ 3412 struct mode_format *page3p; 3413 3414 page3p = (void *)next_page; 3415 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE; 3416 page3p->mode_page.length = sizeof (struct mode_format); 3417 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE); 3418 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK); 3419 3420 next_page += sizeof (struct mode_format); 3421 } 3422 3423 if (pagecode == SD_MODE_SENSE_PAGE4_CODE || 3424 pagecode == MODEPAGE_ALLPAGES) { 3425 /* SBC-3 7.1.3.8 Rigid disk device geometry page */ 3426 struct mode_geometry *page4p; 3427 3428 page4p = (void *)next_page; 3429 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE; 3430 page4p->mode_page.length = sizeof (struct mode_geometry); 3431 page4p->heads = AAC_NUMBER_OF_HEADS; 3432 page4p->rpm = BE_16(AAC_ROTATION_SPEED); 3433 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK); 3434 page4p->cyl_lb = ncyl & 0xff; 3435 page4p->cyl_mb = (ncyl >> 8) & 0xff; 3436 page4p->cyl_ub = (ncyl >> 16) & 0xff; 3437 3438 next_page += sizeof (struct mode_geometry); 3439 } 3440 3441 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) && 3442 softs->flags & AAC_FLAGS_LBA_64BIT) { 3443 /* 64-bit LBA need large sense data */ 3444 struct mode_control_scsi3 *mctl; 3445 3446 mctl = (void *)next_page; 3447 mctl->mode_page.code = MODEPAGE_CTRL_MODE; 3448 mctl->mode_page.length = 3449 sizeof (struct mode_control_scsi3) - 3450 sizeof (struct mode_page); 3451 mctl->d_sense = 1; 3452 } 3453 3454 finish: 3455 /* copyout the valid data. */ 3456 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount)); 3457 kmem_free(sense_data, sdata_size); 3458 } 3459 3460 /*ARGSUSED*/ 3461 static int 3462 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 3463 scsi_hba_tran_t *tran, struct scsi_device *sd) 3464 { 3465 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 3466 #if defined(DEBUG) || defined(__lock_lint) 3467 int ctl = ddi_get_instance(softs->devinfo_p); 3468 #endif 3469 int tgt = sd->sd_address.a_target; 3470 int lun = sd->sd_address.a_lun; 3471 struct aac_container *dvp; 3472 3473 DBCALLED(softs, 2); 3474 3475 if ((0 > tgt) || (tgt >= AAC_MAX_LD)) { 3476 AACDB_PRINT(softs, CE_NOTE, 3477 "aac_tran_tgt_init: c%t%dL%d out", ctl, tgt, lun); 3478 return (DDI_FAILURE); 3479 } 3480 3481 /* 3482 * Only support container that has been detected and valid 3483 */ 3484 mutex_enter(&softs->io_lock); 3485 dvp = &softs->containers[tgt]; 3486 if (dvp->valid && lun == 0) { 3487 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%t%dL%d ok", 3488 ctl, tgt, lun); 3489 mutex_exit(&softs->io_lock); 3490 return (DDI_SUCCESS); 3491 } else { 3492 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%t%dL%d", 3493 ctl, tgt, lun); 3494 mutex_exit(&softs->io_lock); 3495 return (DDI_FAILURE); 3496 } 3497 } 3498 3499 /* 3500 * Check if the firmware is Up And Running. If it is in the Kernel Panic 3501 * state, (BlinkLED code + 1) is returned. 
3502 * 0 -- firmware up and running 3503 * -1 -- firmware dead 3504 * >0 -- firmware kernel panic 3505 */ 3506 static int 3507 aac_check_adapter_health(struct aac_softstate *softs) 3508 { 3509 int rval; 3510 3511 rval = PCI_MEM_GET32(softs, AAC_OMR0); 3512 3513 if (rval & AAC_KERNEL_UP_AND_RUNNING) { 3514 rval = 0; 3515 } else if (rval & AAC_KERNEL_PANIC) { 3516 cmn_err(CE_WARN, "firmware panic"); 3517 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */ 3518 } else { 3519 cmn_err(CE_WARN, "firmware dead"); 3520 rval = -1; 3521 } 3522 return (rval); 3523 } 3524 3525 static void 3526 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp, 3527 uchar_t reason) 3528 { 3529 acp->flags |= AAC_CMD_ABORT; 3530 3531 if (acp->pkt) { 3532 /* 3533 * Each lun should generate a unit attention 3534 * condition when reset. 3535 * Phys. drives are treated as logical ones 3536 * during error recovery. 3537 */ 3538 if (softs->flags & AAC_STATE_RESET) 3539 aac_set_arq_data_reset(softs, acp); 3540 3541 switch (reason) { 3542 case CMD_TIMEOUT: 3543 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 3544 STAT_TIMEOUT | STAT_BUS_RESET); 3545 break; 3546 case CMD_RESET: 3547 /* aac support only RESET_ALL */ 3548 aac_set_pkt_reason(softs, acp, CMD_RESET, 3549 STAT_BUS_RESET); 3550 break; 3551 case CMD_ABORTED: 3552 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 3553 STAT_ABORTED); 3554 break; 3555 } 3556 } 3557 aac_end_io(softs, acp); 3558 } 3559 3560 /* 3561 * Abort all the pending commands of type iocmd or just the command pkt 3562 * corresponding to pkt 3563 */ 3564 static void 3565 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt, 3566 int reason) 3567 { 3568 struct aac_cmd *ac_arg, *acp; 3569 int i; 3570 3571 if (pkt == NULL) { 3572 ac_arg = NULL; 3573 } else { 3574 ac_arg = PKT2AC(pkt); 3575 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ? 3576 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC; 3577 } 3578 3579 /* 3580 * a) outstanding commands on the controller 3581 * Note: should abort outstanding commands only after one 3582 * IOP reset has been done. 3583 */ 3584 if (iocmd & AAC_IOCMD_OUTSTANDING) { 3585 struct aac_cmd *acp; 3586 3587 for (i = 0; i < AAC_MAX_LD; i++) { 3588 if (softs->containers[i].valid) 3589 softs->containers[i].reset = 1; 3590 } 3591 while ((acp = softs->q_busy.q_head) != NULL) 3592 aac_abort_iocmd(softs, acp, reason); 3593 } 3594 3595 /* b) commands in the waiting queues */ 3596 for (i = 0; i < AAC_CMDQ_NUM; i++) { 3597 if (iocmd & (1 << i)) { 3598 if (ac_arg) { 3599 aac_abort_iocmd(softs, ac_arg, reason); 3600 } else { 3601 while ((acp = softs->q_wait[i].q_head) != NULL) 3602 aac_abort_iocmd(softs, acp, reason); 3603 } 3604 } 3605 } 3606 } 3607 3608 /* 3609 * The draining thread is shared among quiesce threads. It terminates 3610 * when the adapter is quiesced or stopped by aac_stop_drain(). 3611 */ 3612 static void 3613 aac_check_drain(void *arg) 3614 { 3615 struct aac_softstate *softs = arg; 3616 3617 mutex_enter(&softs->io_lock); 3618 if (softs->ndrains) { 3619 /* 3620 * If both ASYNC and SYNC bus throttle are held, 3621 * wake up threads only when both are drained out. 
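* Otherwise the timeout is simply re-armed, so this check repeats every
* AAC_QUIESCE_TICK seconds until the queues drain or aac_stop_drain()
* cancels the pending timeout.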
3622 */ 3623 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 || 3624 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) && 3625 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 || 3626 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0)) 3627 cv_broadcast(&softs->drain_cv); 3628 else 3629 softs->drain_timeid = timeout(aac_check_drain, softs, 3630 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 3631 } 3632 mutex_exit(&softs->io_lock); 3633 } 3634 3635 /* 3636 * If the outstanding cmds are not already being drained, start draining 3637 * them; otherwise, only update ndrains. 3638 */ 3639 static void 3640 aac_start_drain(struct aac_softstate *softs) 3641 { 3642 if (softs->ndrains == 0) { 3643 softs->drain_timeid = timeout(aac_check_drain, softs, 3644 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 3645 } 3646 softs->ndrains++; 3647 } 3648 3649 /* 3650 * Stop the draining thread when no other thread uses it any longer. 3651 * Side effect: io_lock may be released in the middle. 3652 */ 3653 static void 3654 aac_stop_drain(struct aac_softstate *softs) 3655 { 3656 softs->ndrains--; 3657 if (softs->ndrains == 0) { 3658 if (softs->drain_timeid != 0) { 3659 timeout_id_t tid = softs->drain_timeid; 3660 3661 softs->drain_timeid = 0; 3662 mutex_exit(&softs->io_lock); 3663 (void) untimeout(tid); 3664 mutex_enter(&softs->io_lock); 3665 } 3666 } 3667 } 3668 3669 /* 3670 * The following function comes from Adaptec: 3671 * 3672 * After an IOP reset, the driver basically has to re-initialize the card 3673 * as if coming up from a cold boot, and the driver is responsible for any 3674 * IO that was outstanding to the adapter at the time of the IOP RESET. 3675 * Prepare for the IOP RESET by making the init code modular with the 3676 * ability to call it from multiple places. 3677 */ 3678 static int 3679 aac_reset_adapter(struct aac_softstate *softs) 3680 { 3681 int health; 3682 uint32_t status; 3683 int rval = AACERR; 3684 3685 DBCALLED(softs, 1); 3686 3687 ASSERT(softs->state & AAC_STATE_RESET); 3688 3689 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0); 3690 /* Disable interrupt */ 3691 AAC_DISABLE_INTR(softs); 3692 3693 health = aac_check_adapter_health(softs); 3694 if (health == -1) { 3695 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 3696 goto finish; 3697 } 3698 if (health == 0) /* flush drives if possible */ 3699 (void) aac_shutdown(softs); 3700 3701 /* Execute IOP reset */ 3702 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0, 3703 &status)) != AACOK) { 3704 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3705 struct aac_fib *fibp; 3706 struct aac_pause_command *pc; 3707 3708 if ((status & 0xf) == 0xf) { 3709 uint32_t wait_count; 3710 3711 /* 3712 * Sunrise Lake has dual cores and we must drag the 3713 * other core with us to reset simultaneously. There 3714 * are 2 bits in the Inbound Reset Control and Status 3715 * Register (offset 0x38) of the Sunrise Lake to reset 3716 * the chip without clearing out the PCI configuration 3717 * info (COMMAND & BARS). 
3718 */ 3719 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST); 3720 3721 /* 3722 * We need to wait 5 seconds before accessing the MU again; 3723 * 5 * 10000 iterations of 100us each = 5,000,000us = 5s 3724 */ 3725 wait_count = 5 * 10000; 3726 while (wait_count) { 3727 drv_usecwait(100); /* delay 100 microseconds */ 3728 wait_count--; 3729 } 3730 } else { 3731 if (status == SRB_STATUS_INVALID_REQUEST) 3732 cmn_err(CE_WARN, "!IOP_RESET not supported"); 3733 else /* probably timeout */ 3734 cmn_err(CE_WARN, "!IOP_RESET failed"); 3735 3736 /* Unwind aac_shutdown() */ 3737 fibp = softs->sync_slot.fibp; 3738 pc = (struct aac_pause_command *)&fibp->data[0]; 3739 3740 bzero(pc, sizeof (*pc)); 3741 ddi_put32(acc, &pc->Command, VM_ContainerConfig); 3742 ddi_put32(acc, &pc->Type, CT_PAUSE_IO); 3743 ddi_put32(acc, &pc->Timeout, 1); 3744 ddi_put32(acc, &pc->Min, 1); 3745 ddi_put32(acc, &pc->NoRescan, 1); 3746 3747 (void) aac_sync_fib(softs, ContainerCommand, 3748 AAC_FIB_SIZEOF(struct aac_pause_command)); 3749 3750 ddi_fm_service_impact(softs->devinfo_p, 3751 DDI_SERVICE_LOST); 3752 goto finish; 3753 } 3754 } 3755 3756 /* 3757 * Re-read and renegotiate the FIB parameters, as one of the actions 3758 * that can result from an IOP reset is the running of a new firmware 3759 * image. 3760 */ 3761 if (aac_common_attach(softs) != AACOK) 3762 goto finish; 3763 3764 rval = AACOK; 3765 3766 finish: 3767 AAC_ENABLE_INTR(softs); 3768 return (rval); 3769 } 3770 3771 static void 3772 aac_set_throttle(struct aac_softstate *softs, struct aac_container *dvp, int q, 3773 int throttle) 3774 { 3775 /* 3776 * If the bus is draining/quiesced, no changes to the throttles 3777 * are allowed. All throttles should have been set to 0. 3778 */ 3779 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains) 3780 return; 3781 dvp->throttle[q] = throttle; 3782 } 3783 3784 static void 3785 aac_hold_bus(struct aac_softstate *softs, int iocmds) 3786 { 3787 int i, q; 3788 3789 /* Hold bus by holding every device on the bus */ 3790 for (q = 0; q < AAC_CMDQ_NUM; q++) { 3791 if (iocmds & (1 << q)) { 3792 softs->bus_throttle[q] = 0; 3793 for (i = 0; i < AAC_MAX_LD; i++) 3794 aac_set_throttle(softs, &softs->containers[i], 3795 q, 0); 3796 } 3797 } 3798 } 3799 3800 static void 3801 aac_unhold_bus(struct aac_softstate *softs, int iocmds) 3802 { 3803 int i, q; 3804 3805 for (q = 0; q < AAC_CMDQ_NUM; q++) { 3806 if (iocmds & (1 << q)) { 3807 /* 3808 * Do not unhold the AAC_IOCMD_ASYNC bus if it has been 3809 * quiesced or is still being drained by quiesce 3810 * threads. 3811 */ 3812 if (q == AAC_CMDQ_ASYNC && ((softs->state & 3813 AAC_STATE_QUIESCED) || softs->ndrains)) 3814 continue; 3815 softs->bus_throttle[q] = softs->total_slots; 3816 for (i = 0; i < AAC_MAX_LD; i++) 3817 aac_set_throttle(softs, &softs->containers[i], 3818 q, softs->total_slots); 3819 } 3820 } 3821 } 3822 3823 static int 3824 aac_do_reset(struct aac_softstate *softs) 3825 { 3826 int health; 3827 int rval; 3828 3829 softs->state |= AAC_STATE_RESET; 3830 health = aac_check_adapter_health(softs); 3831 3832 /* 3833 * Hold off new io commands and wait for all outstanding io 3834 * commands to complete. 
3835 */ 3836 if (health == 0 && (softs->bus_ncmds[AAC_CMDQ_SYNC] || 3837 softs->bus_ncmds[AAC_CMDQ_ASYNC])) { 3838 /* 3839 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds 3840 * to complete the outstanding io commands 3841 */ 3842 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10; 3843 int (*intr_handler)(struct aac_softstate *); 3844 3845 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 3846 /* 3847 * Poll the adapter by ourselves in case interrupt is disabled 3848 * and to avoid releasing the io_lock. 3849 */ 3850 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 3851 aac_process_intr_new : aac_process_intr_old; 3852 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] || 3853 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) { 3854 drv_usecwait(100); 3855 (void) intr_handler(softs); 3856 timeout--; 3857 } 3858 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 3859 } 3860 3861 /* 3862 * If a longer waiting time still can't drain all outstanding io 3863 * commands, do IOP reset. 3864 */ 3865 if (softs->bus_ncmds[AAC_CMDQ_SYNC] || 3866 softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 3867 if ((rval = aac_reset_adapter(softs)) != AACOK) 3868 softs->state |= AAC_STATE_DEAD; 3869 } else { 3870 rval = AACOK; 3871 } 3872 3873 softs->state &= ~AAC_STATE_RESET; 3874 return (rval); 3875 } 3876 3877 static int 3878 aac_tran_reset(struct scsi_address *ap, int level) 3879 { 3880 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 3881 int rval; 3882 3883 DBCALLED(softs, 1); 3884 3885 if (level != RESET_ALL) { 3886 cmn_err(CE_NOTE, "!reset target/lun not supported"); 3887 return (0); 3888 } 3889 3890 mutex_enter(&softs->io_lock); 3891 rval = (aac_do_reset(softs) == AACOK) ? 1 : 0; 3892 if (rval == 1 && !ddi_in_panic()) { 3893 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC, 3894 NULL, CMD_RESET); 3895 aac_start_waiting_io(softs); 3896 } else { 3897 /* Abort IOCTL cmds when system panic or adapter dead */ 3898 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET); 3899 } 3900 mutex_exit(&softs->io_lock); 3901 3902 aac_drain_comp_q(softs); 3903 return (rval); 3904 } 3905 3906 static int 3907 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 3908 { 3909 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 3910 3911 DBCALLED(softs, 1); 3912 3913 mutex_enter(&softs->io_lock); 3914 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED); 3915 mutex_exit(&softs->io_lock); 3916 3917 aac_drain_comp_q(softs); 3918 return (1); 3919 } 3920 3921 void 3922 aac_free_dmamap(struct aac_cmd *acp) 3923 { 3924 /* Free dma mapping */ 3925 if (acp->flags & AAC_CMD_DMA_VALID) { 3926 ASSERT(acp->buf_dma_handle); 3927 (void) ddi_dma_unbind_handle(acp->buf_dma_handle); 3928 acp->flags &= ~AAC_CMD_DMA_VALID; 3929 } 3930 3931 if (acp->abp != NULL) { /* free non-aligned buf DMA */ 3932 ASSERT(acp->buf_dma_handle); 3933 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp) 3934 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr, 3935 (uint8_t *)acp->abp, acp->bp->b_bcount, 3936 DDI_DEV_AUTOINCR); 3937 ddi_dma_mem_free(&acp->abh); 3938 acp->abp = NULL; 3939 } 3940 3941 if (acp->buf_dma_handle) { 3942 ddi_dma_free_handle(&acp->buf_dma_handle); 3943 acp->buf_dma_handle = NULL; 3944 } 3945 } 3946 3947 static void 3948 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 3949 { 3950 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported", 3951 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd); 3952 aac_free_dmamap(acp); 3953 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 
0); 3954 aac_soft_callback(softs, acp); 3955 } 3956 3957 /* 3958 * Handle command to logical device 3959 */ 3960 static int 3961 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp) 3962 { 3963 struct aac_container *dvp; 3964 struct scsi_pkt *pkt; 3965 union scsi_cdb *cdbp; 3966 struct buf *bp; 3967 int rval; 3968 3969 dvp = acp->dvp; 3970 pkt = acp->pkt; 3971 cdbp = (void *)pkt->pkt_cdbp; 3972 bp = acp->bp; 3973 3974 switch (cdbp->scc_cmd) { 3975 case SCMD_INQUIRY: /* inquiry */ 3976 aac_free_dmamap(acp); 3977 aac_inquiry(softs, pkt, cdbp, bp); 3978 aac_soft_callback(softs, acp); 3979 rval = TRAN_ACCEPT; 3980 break; 3981 3982 case SCMD_READ_CAPACITY: /* read capacity */ 3983 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3984 struct scsi_capacity cap; 3985 uint64_t last_lba; 3986 3987 /* check 64-bit LBA */ 3988 last_lba = dvp->size - 1; 3989 if (last_lba > 0xffffffffull) { 3990 cap.capacity = 0xfffffffful; 3991 } else { 3992 cap.capacity = BE_32(last_lba); 3993 } 3994 cap.lbasize = BE_32(AAC_SECTOR_SIZE); 3995 3996 aac_free_dmamap(acp); 3997 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 3998 bp_mapin(bp); 3999 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8)); 4000 pkt->pkt_state |= STATE_XFERRED_DATA; 4001 } 4002 aac_soft_callback(softs, acp); 4003 rval = TRAN_ACCEPT; 4004 break; 4005 4006 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */ 4007 /* Check if containers need 64-bit LBA support */ 4008 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) { 4009 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4010 struct scsi_capacity_16 cap16; 4011 int cap_len = sizeof (struct scsi_capacity_16); 4012 4013 bzero(&cap16, cap_len); 4014 cap16.sc_capacity = BE_64(dvp->size - 1); 4015 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE); 4016 4017 aac_free_dmamap(acp); 4018 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4019 bp_mapin(bp); 4020 bcopy(&cap16, bp->b_un.b_addr, 4021 min(bp->b_bcount, cap_len)); 4022 pkt->pkt_state |= STATE_XFERRED_DATA; 4023 } 4024 aac_soft_callback(softs, acp); 4025 } else { 4026 aac_unknown_scmd(softs, acp); 4027 } 4028 rval = TRAN_ACCEPT; 4029 break; 4030 4031 case SCMD_READ_G4: /* read_16 */ 4032 case SCMD_WRITE_G4: /* write_16 */ 4033 if (softs->flags & AAC_FLAGS_RAW_IO) { 4034 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 4035 acp->blkno = ((uint64_t) \ 4036 GETG4ADDR(cdbp) << 32) | \ 4037 (uint32_t)GETG4ADDRTL(cdbp); 4038 goto do_io; 4039 } 4040 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported"); 4041 aac_unknown_scmd(softs, acp); 4042 rval = TRAN_ACCEPT; 4043 break; 4044 4045 case SCMD_READ: /* read_6 */ 4046 case SCMD_WRITE: /* write_6 */ 4047 acp->blkno = GETG0ADDR(cdbp); 4048 goto do_io; 4049 4050 case SCMD_READ_G1: /* read_10 */ 4051 case SCMD_WRITE_G1: /* write_10 */ 4052 acp->blkno = (uint32_t)GETG1ADDR(cdbp); 4053 do_io: 4054 if (acp->flags & AAC_CMD_DMA_VALID) { 4055 uint64_t cnt_size = dvp->size; 4056 4057 /* 4058 * If LBA > array size AND rawio, the 4059 * adapter may hang. So check it before 4060 * sending. 4061 * NOTE: (blkno + blkcnt) may overflow 4062 */ 4063 if ((acp->blkno < cnt_size) && 4064 ((acp->blkno + acp->bcount / 4065 AAC_BLK_SIZE) <= cnt_size)) { 4066 rval = aac_do_io(softs, acp); 4067 } else { 4068 /* 4069 * Request exceeds the capacity of disk, 4070 * set error block number to last LBA 4071 * + 1. 
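 * (sense key ILLEGAL REQUEST with ASC 0x21, LOGICAL BLOCK ADDRESS OUT OF RANGE)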
4072 */ 4073 aac_set_arq_data(pkt, 4074 KEY_ILLEGAL_REQUEST, 0x21, 4075 0x00, cnt_size); 4076 aac_soft_callback(softs, acp); 4077 rval = TRAN_ACCEPT; 4078 } 4079 } else if (acp->bcount == 0) { 4080 /* For 0 length IO, just return ok */ 4081 aac_soft_callback(softs, acp); 4082 rval = TRAN_ACCEPT; 4083 } else { 4084 rval = TRAN_BADPKT; 4085 } 4086 break; 4087 4088 case SCMD_MODE_SENSE: /* mode_sense_6 */ 4089 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */ 4090 int capacity; 4091 4092 aac_free_dmamap(acp); 4093 if (dvp->size > 0xffffffffull) 4094 capacity = 0xfffffffful; /* 64-bit LBA */ 4095 else 4096 capacity = dvp->size; 4097 aac_mode_sense(softs, pkt, cdbp, bp, capacity); 4098 aac_soft_callback(softs, acp); 4099 rval = TRAN_ACCEPT; 4100 break; 4101 } 4102 4103 case SCMD_TEST_UNIT_READY: 4104 case SCMD_REQUEST_SENSE: 4105 case SCMD_FORMAT: 4106 case SCMD_START_STOP: 4107 aac_free_dmamap(acp); 4108 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4109 if (acp->flags & AAC_CMD_BUF_READ) { 4110 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4111 bp_mapin(bp); 4112 bzero(bp->b_un.b_addr, bp->b_bcount); 4113 } 4114 pkt->pkt_state |= STATE_XFERRED_DATA; 4115 } 4116 aac_soft_callback(softs, acp); 4117 rval = TRAN_ACCEPT; 4118 break; 4119 4120 case SCMD_SYNCHRONIZE_CACHE: 4121 acp->flags |= AAC_CMD_NTAG; 4122 acp->aac_cmd_fib = aac_cmd_fib_sync; 4123 acp->ac_comp = aac_synccache_complete; 4124 rval = aac_do_io(softs, acp); 4125 break; 4126 4127 case SCMD_DOORLOCK: 4128 aac_free_dmamap(acp); 4129 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0; 4130 aac_soft_callback(softs, acp); 4131 rval = TRAN_ACCEPT; 4132 break; 4133 4134 default: /* unknown command */ 4135 aac_unknown_scmd(softs, acp); 4136 rval = TRAN_ACCEPT; 4137 break; 4138 } 4139 4140 return (rval); 4141 } 4142 4143 static int 4144 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) 4145 { 4146 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4147 struct aac_cmd *acp = PKT2AC(pkt); 4148 struct aac_container *dvp = acp->dvp; 4149 int rval; 4150 4151 DBCALLED(softs, 2); 4152 4153 /* 4154 * Reinitialize some fields of ac and pkt; the packet may 4155 * have been resubmitted 4156 */ 4157 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \ 4158 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID; 4159 acp->timeout = acp->pkt->pkt_time; 4160 if (pkt->pkt_flags & FLAG_NOINTR) 4161 acp->flags |= AAC_CMD_NO_INTR; 4162 pkt->pkt_reason = CMD_CMPLT; 4163 pkt->pkt_state = 0; 4164 pkt->pkt_statistics = 0; 4165 *pkt->pkt_scbp = 0; /* clear arq scsi_status */ 4166 4167 if (acp->flags & AAC_CMD_DMA_VALID) { 4168 pkt->pkt_resid = acp->bcount; 4169 /* Consistent packets need to be sync'ed first */ 4170 if ((acp->flags & AAC_CMD_CONSISTENT) && 4171 (acp->flags & AAC_CMD_BUF_WRITE)) 4172 if (aac_dma_sync_ac(acp) != AACOK) { 4173 ddi_fm_service_impact(softs->devinfo_p, 4174 DDI_SERVICE_UNAFFECTED); 4175 return (TRAN_BADPKT); 4176 } 4177 } else { 4178 pkt->pkt_resid = 0; 4179 } 4180 4181 mutex_enter(&softs->io_lock); 4182 AACDB_PRINT_SCMD(softs, acp); 4183 if (dvp->valid && ap->a_lun == 0 && !(softs->state & AAC_STATE_DEAD)) { 4184 rval = aac_tran_start_ld(softs, acp); 4185 } else { 4186 AACDB_PRINT(softs, CE_WARN, 4187 "Cannot send cmd to target t%dL%d: %s", 4188 ap->a_target, ap->a_lun, 4189 (softs->state & AAC_STATE_DEAD) ? 
4190 "adapter dead" : "target invalid"); 4191 rval = TRAN_FATAL_ERROR; 4192 } 4193 mutex_exit(&softs->io_lock); 4194 return (rval); 4195 } 4196 4197 static int 4198 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom) 4199 { 4200 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4201 struct aac_container *dvp; 4202 int rval; 4203 4204 DBCALLED(softs, 2); 4205 4206 /* We don't allow inquiring about capabilities for other targets */ 4207 if (cap == NULL || whom == 0) { 4208 AACDB_PRINT(softs, CE_WARN, 4209 "GetCap> %s not supported: whom=%d", cap, whom); 4210 return (-1); 4211 } 4212 4213 mutex_enter(&softs->io_lock); 4214 dvp = &softs->containers[ap->a_target]; 4215 if (!dvp->valid || (ap->a_lun != 0)) { 4216 mutex_exit(&softs->io_lock); 4217 AACDB_PRINT(softs, CE_WARN, "Bad target t%dL%d to getcap", 4218 ap->a_target, ap->a_lun); 4219 return (-1); 4220 } 4221 4222 switch (scsi_hba_lookup_capstr(cap)) { 4223 case SCSI_CAP_ARQ: /* auto request sense */ 4224 rval = 1; 4225 break; 4226 case SCSI_CAP_UNTAGGED_QING: 4227 case SCSI_CAP_TAGGED_QING: 4228 rval = 1; 4229 break; 4230 case SCSI_CAP_DMA_MAX: 4231 rval = softs->buf_dma_attr.dma_attr_maxxfer; 4232 break; 4233 default: 4234 rval = -1; 4235 break; 4236 } 4237 mutex_exit(&softs->io_lock); 4238 4239 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d", 4240 cap, ap->a_target, ap->a_lun, rval); 4241 return (rval); 4242 } 4243 4244 /*ARGSUSED*/ 4245 static int 4246 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 4247 { 4248 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4249 struct aac_container *dvp; 4250 int rval; 4251 4252 DBCALLED(softs, 2); 4253 4254 /* We don't allow inquiring about capabilities for other targets */ 4255 if (cap == NULL || whom == 0) { 4256 AACDB_PRINT(softs, CE_WARN, 4257 "SetCap> %s not supported: whom=%d", cap, whom); 4258 return (-1); 4259 } 4260 4261 mutex_enter(&softs->io_lock); 4262 dvp = &softs->containers[ap->a_target]; 4263 if (!dvp->valid || (ap->a_lun != 0)) { 4264 mutex_exit(&softs->io_lock); 4265 AACDB_PRINT(softs, CE_WARN, "Bad target t%dL%d to setcap", 4266 ap->a_target, ap->a_lun); 4267 return (-1); 4268 } 4269 4270 switch (scsi_hba_lookup_capstr(cap)) { 4271 case SCSI_CAP_ARQ: 4272 /* Force auto request sense */ 4273 rval = (value == 1) ? 1 : 0; 4274 break; 4275 case SCSI_CAP_UNTAGGED_QING: 4276 case SCSI_CAP_TAGGED_QING: 4277 rval = (value == 1) ? 1 : 0; 4278 break; 4279 default: 4280 rval = -1; 4281 break; 4282 } 4283 mutex_exit(&softs->io_lock); 4284 4285 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d", 4286 cap, ap->a_target, ap->a_lun, value, rval); 4287 return (rval); 4288 } 4289 4290 static void 4291 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 4292 { 4293 struct aac_cmd *acp = PKT2AC(pkt); 4294 4295 DBCALLED(NULL, 2); 4296 4297 if (acp->sgt) { 4298 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 4299 acp->left_cookien); 4300 } 4301 aac_free_dmamap(acp); 4302 ASSERT(acp->slotp == NULL); 4303 scsi_hba_pkt_free(ap, pkt); 4304 } 4305 4306 int 4307 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp, 4308 struct buf *bp, int flags, int (*cb)(), caddr_t arg) 4309 { 4310 int kf = (cb == SLEEP_FUNC) ? 
KM_SLEEP : KM_NOSLEEP; 4311 uint_t oldcookiec; 4312 int bioerr; 4313 int rval; 4314 4315 oldcookiec = acp->left_cookien; 4316 4317 /* Move window to build s/g map */ 4318 if (acp->total_nwin > 0) { 4319 if (++acp->cur_win < acp->total_nwin) { 4320 off_t off; 4321 size_t len; 4322 4323 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win, 4324 &off, &len, &acp->cookie, &acp->left_cookien); 4325 if (rval == DDI_SUCCESS) 4326 goto get_dma_cookies; 4327 AACDB_PRINT(softs, CE_WARN, 4328 "ddi_dma_getwin() fail %d", rval); 4329 return (NULL); 4330 } 4331 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer"); 4332 return (NULL); 4333 } 4334 4335 /* We need to transfer data, so we alloc DMA resources for this pkt */ 4336 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) { 4337 uint_t dma_flags = 0; 4338 struct aac_sge *sge; 4339 4340 /* 4341 * We will still use this point to fake some 4342 * infomation in tran_start 4343 */ 4344 acp->bp = bp; 4345 4346 /* Set dma flags */ 4347 if (BUF_IS_READ(bp)) { 4348 dma_flags |= DDI_DMA_READ; 4349 acp->flags |= AAC_CMD_BUF_READ; 4350 } else { 4351 dma_flags |= DDI_DMA_WRITE; 4352 acp->flags |= AAC_CMD_BUF_WRITE; 4353 } 4354 if (flags & PKT_CONSISTENT) 4355 dma_flags |= DDI_DMA_CONSISTENT; 4356 if (flags & PKT_DMA_PARTIAL) 4357 dma_flags |= DDI_DMA_PARTIAL; 4358 4359 /* Alloc buf dma handle */ 4360 if (!acp->buf_dma_handle) { 4361 rval = ddi_dma_alloc_handle(softs->devinfo_p, 4362 &softs->buf_dma_attr, cb, arg, 4363 &acp->buf_dma_handle); 4364 if (rval != DDI_SUCCESS) { 4365 AACDB_PRINT(softs, CE_WARN, 4366 "Can't allocate DMA handle, errno=%d", 4367 rval); 4368 goto error_out; 4369 } 4370 } 4371 4372 /* Bind buf */ 4373 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) { 4374 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle, 4375 bp, dma_flags, cb, arg, &acp->cookie, 4376 &acp->left_cookien); 4377 } else { 4378 size_t bufsz; 4379 4380 AACDB_PRINT_TRAN(softs, 4381 "non-aligned buffer: addr=0x%p, cnt=%lu", 4382 (void *)bp->b_un.b_addr, bp->b_bcount); 4383 if (bp->b_flags & (B_PAGEIO|B_PHYS)) 4384 bp_mapin(bp); 4385 4386 rval = ddi_dma_mem_alloc(acp->buf_dma_handle, 4387 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN), 4388 &aac_acc_attr, DDI_DMA_STREAMING, 4389 cb, arg, &acp->abp, &bufsz, &acp->abh); 4390 4391 if (rval != DDI_SUCCESS) { 4392 AACDB_PRINT(softs, CE_NOTE, 4393 "Cannot alloc DMA to non-aligned buf"); 4394 bioerr = 0; 4395 goto error_out; 4396 } 4397 4398 if (acp->flags & AAC_CMD_BUF_WRITE) 4399 ddi_rep_put8(acp->abh, 4400 (uint8_t *)bp->b_un.b_addr, 4401 (uint8_t *)acp->abp, bp->b_bcount, 4402 DDI_DEV_AUTOINCR); 4403 4404 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle, 4405 NULL, acp->abp, bufsz, dma_flags, cb, arg, 4406 &acp->cookie, &acp->left_cookien); 4407 } 4408 4409 switch (rval) { 4410 case DDI_DMA_PARTIAL_MAP: 4411 if (ddi_dma_numwin(acp->buf_dma_handle, 4412 &acp->total_nwin) == DDI_FAILURE) { 4413 AACDB_PRINT(softs, CE_WARN, 4414 "Cannot get number of DMA windows"); 4415 bioerr = 0; 4416 goto error_out; 4417 } 4418 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 4419 acp->left_cookien); 4420 acp->cur_win = 0; 4421 break; 4422 4423 case DDI_DMA_MAPPED: 4424 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 4425 acp->left_cookien); 4426 acp->cur_win = 0; 4427 acp->total_nwin = 1; 4428 break; 4429 4430 case DDI_DMA_NORESOURCES: 4431 bioerr = 0; 4432 AACDB_PRINT(softs, CE_WARN, 4433 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES"); 4434 goto error_out; 4435 case DDI_DMA_BADATTR: 4436 case DDI_DMA_NOMAPPING: 4437 bioerr = 
EFAULT; 4438 AACDB_PRINT(softs, CE_WARN, 4439 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING"); 4440 goto error_out; 4441 case DDI_DMA_TOOBIG: 4442 bioerr = EINVAL; 4443 AACDB_PRINT(softs, CE_WARN, 4444 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)", 4445 bp->b_bcount); 4446 goto error_out; 4447 default: 4448 bioerr = EINVAL; 4449 AACDB_PRINT(softs, CE_WARN, 4450 "Cannot bind buf for DMA: %d", rval); 4451 goto error_out; 4452 } 4453 acp->flags |= AAC_CMD_DMA_VALID; 4454 4455 get_dma_cookies: 4456 ASSERT(acp->left_cookien > 0); 4457 if (acp->left_cookien > softs->aac_sg_tablesize) { 4458 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d", 4459 acp->left_cookien); 4460 bioerr = EINVAL; 4461 goto error_out; 4462 } 4463 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) { 4464 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 4465 oldcookiec); 4466 acp->sgt = NULL; 4467 } 4468 if (acp->sgt == NULL) { 4469 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \ 4470 acp->left_cookien, kf); 4471 if (acp->sgt == NULL) { 4472 AACDB_PRINT(softs, CE_WARN, 4473 "sgt kmem_alloc fail"); 4474 bioerr = ENOMEM; 4475 goto error_out; 4476 } 4477 } 4478 4479 sge = &acp->sgt[0]; 4480 sge->bcount = acp->cookie.dmac_size; 4481 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 4482 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 4483 acp->bcount = acp->cookie.dmac_size; 4484 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) { 4485 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie); 4486 sge->bcount = acp->cookie.dmac_size; 4487 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 4488 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 4489 acp->bcount += acp->cookie.dmac_size; 4490 } 4491 4492 /* 4493 * Note: The old DMA engine does not correctly handle the 4494 * dma_attr_maxxfer attribute, so we have to enforce 4495 * it ourselves. 4496 */ 4497 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) { 4498 AACDB_PRINT(softs, CE_NOTE, 4499 "large xfer size received %d\n", acp->bcount); 4500 bioerr = EINVAL; 4501 goto error_out; 4502 } 4503 4504 acp->total_xfer += acp->bcount; 4505 4506 if (acp->pkt) { 4507 /* Return remaining byte count */ 4508 acp->pkt->pkt_resid = bp->b_bcount - acp->total_xfer; 4509 4510 AACDB_PRINT_TRAN(softs, 4511 "bp=0x%p, xfered=%d/%d, resid=%d", 4512 (void *)bp->b_un.b_addr, (int)acp->total_xfer, 4513 (int)bp->b_bcount, (int)acp->pkt->pkt_resid); 4514 4515 ASSERT(acp->pkt->pkt_resid >= 0); 4516 } 4517 } 4518 return (AACOK); 4519 4520 error_out: 4521 bioerror(bp, bioerr); 4522 return (AACERR); 4523 } 4524 4525 static struct scsi_pkt * 4526 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 4527 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags, 4528 int (*callback)(), caddr_t arg) 4529 { 4530 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4531 struct aac_cmd *acp, *new_acp; 4532 4533 DBCALLED(softs, 2); 4534 4535 /* Allocate pkt */ 4536 if (pkt == NULL) { 4537 int slen; 4538 4539 /* Force auto request sense */ 4540 slen = (statuslen > softs->slen) ? 
statuslen : softs->slen; 4541 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen, 4542 slen, tgtlen, sizeof (struct aac_cmd), callback, arg); 4543 if (pkt == NULL) { 4544 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed"); 4545 return (NULL); 4546 } 4547 acp = new_acp = PKT2AC(pkt); 4548 acp->pkt = pkt; 4549 acp->cmdlen = cmdlen; 4550 4551 acp->dvp = &softs->containers[ap->a_target]; 4552 acp->aac_cmd_fib = softs->aac_cmd_fib; 4553 acp->ac_comp = aac_ld_complete; 4554 } else { 4555 acp = PKT2AC(pkt); 4556 new_acp = NULL; 4557 } 4558 4559 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK) 4560 return (pkt); 4561 4562 if (new_acp) 4563 aac_tran_destroy_pkt(ap, pkt); 4564 return (NULL); 4565 } 4566 4567 /* 4568 * tran_sync_pkt(9E) - explicit DMA synchronization 4569 */ 4570 /*ARGSUSED*/ 4571 static void 4572 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 4573 { 4574 struct aac_cmd *acp = PKT2AC(pkt); 4575 4576 DBCALLED(NULL, 2); 4577 4578 if (aac_dma_sync_ac(acp) != AACOK) 4579 ddi_fm_service_impact( 4580 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p, 4581 DDI_SERVICE_UNAFFECTED); 4582 } 4583 4584 /* 4585 * tran_dmafree(9E) - deallocate DMA resources allocated for command 4586 */ 4587 /*ARGSUSED*/ 4588 static void 4589 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 4590 { 4591 struct aac_cmd *acp = PKT2AC(pkt); 4592 4593 DBCALLED(NULL, 2); 4594 4595 aac_free_dmamap(acp); 4596 } 4597 4598 static int 4599 aac_do_quiesce(struct aac_softstate *softs) 4600 { 4601 aac_hold_bus(softs, AAC_IOCMD_ASYNC); 4602 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 4603 aac_start_drain(softs); 4604 do { 4605 if (cv_wait_sig(&softs->drain_cv, 4606 &softs->io_lock) == 0) { 4607 /* Quiesce has been interrupted */ 4608 aac_stop_drain(softs); 4609 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 4610 aac_start_waiting_io(softs); 4611 return (AACERR); 4612 } 4613 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]); 4614 aac_stop_drain(softs); 4615 } 4616 4617 softs->state |= AAC_STATE_QUIESCED; 4618 return (AACOK); 4619 } 4620 4621 static int 4622 aac_tran_quiesce(dev_info_t *dip) 4623 { 4624 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 4625 int rval; 4626 4627 DBCALLED(softs, 1); 4628 4629 mutex_enter(&softs->io_lock); 4630 if (aac_do_quiesce(softs) == AACOK) 4631 rval = 0; 4632 else 4633 rval = 1; 4634 mutex_exit(&softs->io_lock); 4635 return (rval); 4636 } 4637 4638 static int 4639 aac_do_unquiesce(struct aac_softstate *softs) 4640 { 4641 softs->state &= ~AAC_STATE_QUIESCED; 4642 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 4643 4644 aac_start_waiting_io(softs); 4645 return (AACOK); 4646 } 4647 4648 static int 4649 aac_tran_unquiesce(dev_info_t *dip) 4650 { 4651 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 4652 int rval; 4653 4654 DBCALLED(softs, 1); 4655 4656 mutex_enter(&softs->io_lock); 4657 if (aac_do_unquiesce(softs) == AACOK) 4658 rval = 0; 4659 else 4660 rval = 1; 4661 mutex_exit(&softs->io_lock); 4662 return (rval); 4663 } 4664 4665 static int 4666 aac_hba_setup(struct aac_softstate *softs) 4667 { 4668 scsi_hba_tran_t *hba_tran; 4669 int rval; 4670 4671 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP); 4672 if (hba_tran == NULL) 4673 return (AACERR); 4674 hba_tran->tran_hba_private = softs; 4675 hba_tran->tran_tgt_init = aac_tran_tgt_init; 4676 hba_tran->tran_tgt_probe = scsi_hba_probe; 4677 hba_tran->tran_start = aac_tran_start; 4678 hba_tran->tran_getcap = aac_tran_getcap; 4679 hba_tran->tran_setcap = aac_tran_setcap; 4680 hba_tran->tran_init_pkt = 
aac_tran_init_pkt; 4681 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt; 4682 hba_tran->tran_reset = aac_tran_reset; 4683 hba_tran->tran_abort = aac_tran_abort; 4684 hba_tran->tran_sync_pkt = aac_tran_sync_pkt; 4685 hba_tran->tran_dmafree = aac_tran_dmafree; 4686 hba_tran->tran_quiesce = aac_tran_quiesce; 4687 hba_tran->tran_unquiesce = aac_tran_unquiesce; 4688 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr, 4689 hba_tran, 0); 4690 if (rval != DDI_SUCCESS) { 4691 scsi_hba_tran_free(hba_tran); 4692 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed"); 4693 return (AACERR); 4694 } 4695 4696 return (AACOK); 4697 } 4698 4699 /* 4700 * FIB setup operations 4701 */ 4702 4703 /* 4704 * Init FIB header 4705 */ 4706 static void 4707 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_slot *slotp, 4708 uint16_t cmd, uint16_t fib_size) 4709 { 4710 ddi_acc_handle_t acc = slotp->fib_acc_handle; 4711 struct aac_fib *fibp = slotp->fibp; 4712 uint32_t xfer_state; 4713 4714 xfer_state = 4715 AAC_FIBSTATE_HOSTOWNED | 4716 AAC_FIBSTATE_INITIALISED | 4717 AAC_FIBSTATE_EMPTY | 4718 AAC_FIBSTATE_FROMHOST | 4719 AAC_FIBSTATE_REXPECTED | 4720 AAC_FIBSTATE_NORM; 4721 if (slotp->acp && !(slotp->acp->flags & AAC_CMD_SYNC)) { 4722 xfer_state |= 4723 AAC_FIBSTATE_ASYNC | 4724 AAC_FIBSTATE_FAST_RESPONSE /* enable fast io */; 4725 ddi_put16(acc, &fibp->Header.SenderSize, 4726 softs->aac_max_fib_size); 4727 } else { 4728 ddi_put16(acc, &fibp->Header.SenderSize, AAC_FIB_SIZE); 4729 } 4730 4731 ddi_put32(acc, &fibp->Header.XferState, xfer_state); 4732 ddi_put16(acc, &fibp->Header.Command, cmd); 4733 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB); 4734 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */ 4735 ddi_put16(acc, &fibp->Header.Size, fib_size); 4736 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2)); 4737 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 4738 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */ 4739 } 4740 4741 /* 4742 * Init FIB for raw IO command 4743 */ 4744 static void 4745 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp) 4746 { 4747 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 4748 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0]; 4749 struct aac_sg_entryraw *sgp; 4750 struct aac_sge *sge; 4751 4752 /* Calculate FIB size */ 4753 acp->fib_size = sizeof (struct aac_fib_header) + \ 4754 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \ 4755 sizeof (struct aac_sg_entryraw); 4756 4757 aac_cmd_fib_header(softs, acp->slotp, RawIo, acp->fib_size); 4758 4759 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 
1 : 0); 4760 ddi_put16(acc, &io->BpTotal, 0); 4761 ddi_put16(acc, &io->BpComplete, 0); 4762 4763 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno)); 4764 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno)); 4765 ddi_put16(acc, &io->ContainerId, 4766 ((struct aac_container *)acp->dvp)->cid); 4767 4768 /* Fill SG table */ 4769 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien); 4770 ddi_put32(acc, &io->ByteCount, acp->bcount); 4771 4772 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0]; 4773 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 4774 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 4775 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 4776 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 4777 sgp->Next = 0; 4778 sgp->Prev = 0; 4779 sgp->Flags = 0; 4780 } 4781 } 4782 4783 /* Init FIB for 64-bit block IO command */ 4784 static void 4785 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp) 4786 { 4787 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 4788 struct aac_blockread64 *br = (struct aac_blockread64 *) \ 4789 &acp->slotp->fibp->data[0]; 4790 struct aac_sg_entry64 *sgp; 4791 struct aac_sge *sge; 4792 4793 acp->fib_size = sizeof (struct aac_fib_header) + \ 4794 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \ 4795 sizeof (struct aac_sg_entry64); 4796 4797 aac_cmd_fib_header(softs, acp->slotp, ContainerCommand64, 4798 acp->fib_size); 4799 4800 /* 4801 * The definitions for aac_blockread64 and aac_blockwrite64 4802 * are the same. 4803 */ 4804 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 4805 ddi_put16(acc, &br->ContainerId, 4806 ((struct aac_container *)acp->dvp)->cid); 4807 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ? 
4808 VM_CtHostRead64 : VM_CtHostWrite64); 4809 ddi_put16(acc, &br->Pad, 0); 4810 ddi_put16(acc, &br->Flags, 0); 4811 4812 /* Fill SG table */ 4813 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien); 4814 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE); 4815 4816 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0]; 4817 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 4818 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 4819 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 4820 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 4821 } 4822 } 4823 4824 /* Init FIB for block IO command */ 4825 static void 4826 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp) 4827 { 4828 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 4829 struct aac_blockread *br = (struct aac_blockread *) \ 4830 &acp->slotp->fibp->data[0]; 4831 struct aac_sg_entry *sgp; 4832 struct aac_sge *sge = &acp->sgt[0]; 4833 4834 if (acp->flags & AAC_CMD_BUF_READ) { 4835 acp->fib_size = sizeof (struct aac_fib_header) + \ 4836 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \ 4837 sizeof (struct aac_sg_entry); 4838 4839 ddi_put32(acc, &br->Command, VM_CtBlockRead); 4840 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien); 4841 sgp = &br->SgMap.SgEntry[0]; 4842 } else { 4843 struct aac_blockwrite *bw = (struct aac_blockwrite *)br; 4844 4845 acp->fib_size = sizeof (struct aac_fib_header) + \ 4846 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \ 4847 sizeof (struct aac_sg_entry); 4848 4849 ddi_put32(acc, &bw->Command, VM_CtBlockWrite); 4850 ddi_put32(acc, &bw->Stable, CUNSTABLE); 4851 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien); 4852 sgp = &bw->SgMap.SgEntry[0]; 4853 } 4854 aac_cmd_fib_header(softs, acp->slotp, ContainerCommand, acp->fib_size); 4855 4856 /* 4857 * aac_blockread and aac_blockwrite have the similar 4858 * structure head, so use br for bw here 4859 */ 4860 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 4861 ddi_put32(acc, &br->ContainerId, 4862 ((struct aac_container *)acp->dvp)->cid); 4863 ddi_put32(acc, &br->ByteCount, acp->bcount); 4864 4865 /* Fill SG table */ 4866 for (sge = &acp->sgt[0]; 4867 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 4868 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 4869 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 4870 } 4871 } 4872 4873 /*ARGSUSED*/ 4874 void 4875 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp) 4876 { 4877 struct aac_slot *slotp = acp->slotp; 4878 struct aac_fib *fibp = slotp->fibp; 4879 ddi_acc_handle_t acc = slotp->fib_acc_handle; 4880 4881 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp, 4882 acp->fib_size, /* only copy data of needed length */ 4883 DDI_DEV_AUTOINCR); 4884 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 4885 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2); 4886 } 4887 4888 static void 4889 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp) 4890 { 4891 struct aac_slot *slotp = acp->slotp; 4892 ddi_acc_handle_t acc = slotp->fib_acc_handle; 4893 struct aac_synchronize_command *sync = 4894 (struct aac_synchronize_command *)&slotp->fibp->data[0]; 4895 4896 acp->fib_size = sizeof (struct aac_fib_header) + \ 4897 sizeof (struct aac_synchronize_command); 4898 4899 aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size); 4900 ddi_put32(acc, &sync->Command, VM_ContainerConfig); 4901 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE); 4902 
ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid); 4903 ddi_put32(acc, &sync->Count, 4904 sizeof (((struct aac_synchronize_reply *)0)->Data)); 4905 } 4906 4907 /* 4908 * Init FIB for pass-through SCMD 4909 */ 4910 static void 4911 aac_cmd_fib_srb(struct aac_cmd *acp) 4912 { 4913 struct aac_slot *slotp = acp->slotp; 4914 ddi_acc_handle_t acc = slotp->fib_acc_handle; 4915 struct aac_srb *srb = (struct aac_srb *)&slotp->fibp->data[0]; 4916 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0]; 4917 4918 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi); 4919 ddi_put32(acc, &srb->retry_limit, 0); 4920 ddi_put32(acc, &srb->cdb_size, acp->cmdlen); 4921 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */ 4922 4923 ddi_put32(acc, &srb->flags, srb0->flags); 4924 ddi_put32(acc, &srb->channel, srb0->channel); 4925 ddi_put32(acc, &srb->id, srb0->id); 4926 ddi_put32(acc, &srb->lun, srb0->lun); 4927 ddi_rep_put8(acc, srb0->cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR); 4928 } 4929 4930 static void 4931 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp) 4932 { 4933 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 4934 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 4935 struct aac_sg_entry *sgp; 4936 struct aac_sge *sge; 4937 4938 acp->fib_size = sizeof (struct aac_fib_header) + \ 4939 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 4940 acp->left_cookien * sizeof (struct aac_sg_entry); 4941 4942 /* Fill FIB and SRB headers, and copy cdb */ 4943 aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommand, acp->fib_size); 4944 aac_cmd_fib_srb(acp); 4945 4946 /* Fill SG table */ 4947 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 4948 ddi_put32(acc, &srb->count, acp->bcount); 4949 4950 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0]; 4951 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 4952 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 4953 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 4954 } 4955 } 4956 4957 static void 4958 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp) 4959 { 4960 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 4961 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 4962 struct aac_sg_entry64 *sgp; 4963 struct aac_sge *sge; 4964 4965 acp->fib_size = sizeof (struct aac_fib_header) + \ 4966 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 4967 acp->left_cookien * sizeof (struct aac_sg_entry64); 4968 4969 /* Fill FIB and SRB headers, and copy cdb */ 4970 aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommandU64, 4971 acp->fib_size); 4972 aac_cmd_fib_srb(acp); 4973 4974 /* Fill SG table */ 4975 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 4976 ddi_put32(acc, &srb->count, acp->bcount); 4977 4978 for (sge = &acp->sgt[0], 4979 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0]; 4980 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 4981 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 4982 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 4983 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 4984 } 4985 } 4986 4987 static int 4988 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 4989 { 4990 struct aac_slot *slotp; 4991 4992 if (slotp = aac_get_slot(softs)) { 4993 acp->slotp = slotp; 4994 slotp->acp = acp; 4995 acp->aac_cmd_fib(softs, acp); 4996 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, 4997 DDI_DMA_SYNC_FORDEV); 4998 return (AACOK); 4999 } 5000 return (AACERR); 5001 } 5002 5003 static 
int 5004 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp) 5005 { 5006 struct aac_container *dvp = acp->dvp; 5007 int q = AAC_CMDQ(acp); 5008 5009 if (dvp) { 5010 if (dvp->ncmds[q] < dvp->throttle[q]) { 5011 if (!(acp->flags & AAC_CMD_NTAG) || 5012 dvp->ncmds[q] == 0) { 5013 do_bind: 5014 return (aac_cmd_slot_bind(softs, acp)); 5015 } 5016 ASSERT(q == AAC_CMDQ_ASYNC); 5017 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC, 5018 AAC_THROTTLE_DRAIN); 5019 } 5020 } else { 5021 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) 5022 goto do_bind; 5023 } 5024 return (AACERR); 5025 } 5026 5027 static void 5028 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp) 5029 { 5030 struct aac_slot *slotp = acp->slotp; 5031 int q = AAC_CMDQ(acp); 5032 int rval; 5033 5034 /* Set ac and pkt */ 5035 if (acp->pkt) { /* ac from ioctl has no pkt */ 5036 acp->pkt->pkt_state |= 5037 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD; 5038 } 5039 if (acp->timeout) /* 0 indicates no timeout */ 5040 acp->timeout += aac_timebase + aac_tick; 5041 5042 if (acp->dvp) 5043 acp->dvp->ncmds[q]++; 5044 softs->bus_ncmds[q]++; 5045 aac_cmd_enqueue(&softs->q_busy, acp); 5046 5047 if (softs->flags & AAC_FLAGS_NEW_COMM) { 5048 rval = aac_send_command(softs, slotp); 5049 } else { 5050 /* 5051 * If the fib cannot be enqueued, the adapter is in an abnormal 5052 * state and there will be no interrupt to us. 5053 */ 5054 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q, 5055 slotp->fib_phyaddr, acp->fib_size); 5056 } 5057 5058 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) 5059 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 5060 5061 /* 5062 * NOTE: We send commands only when slots are available, so we 5063 * should never reach here. 5064 */ 5065 if (rval != AACOK) { 5066 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed"); 5067 if (acp->pkt) { 5068 acp->pkt->pkt_state &= ~STATE_SENT_CMD; 5069 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0); 5070 } 5071 aac_end_io(softs, acp); 5072 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB))) 5073 ddi_trigger_softintr(softs->softint_id); 5074 } 5075 } 5076 5077 static void 5078 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q) 5079 { 5080 struct aac_cmd *acp, *next_acp; 5081 5082 /* Serve as many waiting io's as possible */ 5083 for (acp = q->q_head; acp; acp = next_acp) { 5084 next_acp = acp->next; 5085 if (aac_bind_io(softs, acp) == AACOK) { 5086 aac_cmd_delete(q, acp); 5087 aac_start_io(softs, acp); 5088 } 5089 if (softs->free_io_slot_head == NULL) 5090 break; 5091 } 5092 } 5093 5094 static void 5095 aac_start_waiting_io(struct aac_softstate *softs) 5096 { 5097 /* 5098 * Sync FIB io is served before async FIB io so that io requests 5099 * sent by interactive userland commands are responded to asap. 
5100 */ 5101 if (softs->q_wait[AAC_CMDQ_SYNC].q_head) 5102 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]); 5103 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head) 5104 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]); 5105 } 5106 5107 static void 5108 aac_drain_comp_q(struct aac_softstate *softs) 5109 { 5110 struct aac_cmd *acp; 5111 struct scsi_pkt *pkt; 5112 5113 /*CONSTCOND*/ 5114 while (1) { 5115 mutex_enter(&softs->q_comp_mutex); 5116 acp = aac_cmd_dequeue(&softs->q_comp); 5117 mutex_exit(&softs->q_comp_mutex); 5118 if (acp != NULL) { 5119 ASSERT(acp->pkt != NULL); 5120 pkt = acp->pkt; 5121 5122 if (pkt->pkt_reason == CMD_CMPLT) { 5123 /* 5124 * Consistent packets need to be sync'ed first 5125 */ 5126 if ((acp->flags & AAC_CMD_CONSISTENT) && 5127 (acp->flags & AAC_CMD_BUF_READ)) { 5128 if (aac_dma_sync_ac(acp) != AACOK) { 5129 ddi_fm_service_impact( 5130 softs->devinfo_p, 5131 DDI_SERVICE_UNAFFECTED); 5132 pkt->pkt_reason = CMD_TRAN_ERR; 5133 pkt->pkt_statistics = 0; 5134 } 5135 } 5136 if ((aac_check_acc_handle(softs-> \ 5137 comm_space_acc_handle) != DDI_SUCCESS) || 5138 (aac_check_acc_handle(softs-> \ 5139 pci_mem_handle) != DDI_SUCCESS)) { 5140 ddi_fm_service_impact(softs->devinfo_p, 5141 DDI_SERVICE_UNAFFECTED); 5142 ddi_fm_acc_err_clear(softs-> \ 5143 pci_mem_handle, DDI_FME_VER0); 5144 pkt->pkt_reason = CMD_TRAN_ERR; 5145 pkt->pkt_statistics = 0; 5146 } 5147 if (aac_check_dma_handle(softs-> \ 5148 comm_space_dma_handle) != DDI_SUCCESS) { 5149 ddi_fm_service_impact(softs->devinfo_p, 5150 DDI_SERVICE_UNAFFECTED); 5151 pkt->pkt_reason = CMD_TRAN_ERR; 5152 pkt->pkt_statistics = 0; 5153 } 5154 } 5155 (*pkt->pkt_comp)(pkt); 5156 } else { 5157 break; 5158 } 5159 } 5160 } 5161 5162 static int 5163 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp) 5164 { 5165 size_t rlen; 5166 ddi_dma_cookie_t cookie; 5167 uint_t cookien; 5168 5169 /* Allocate FIB dma resource */ 5170 if (ddi_dma_alloc_handle( 5171 softs->devinfo_p, 5172 &softs->addr_dma_attr, 5173 DDI_DMA_SLEEP, 5174 NULL, 5175 &slotp->fib_dma_handle) != DDI_SUCCESS) { 5176 AACDB_PRINT(softs, CE_WARN, 5177 "Cannot alloc dma handle for slot fib area"); 5178 goto error; 5179 } 5180 if (ddi_dma_mem_alloc( 5181 slotp->fib_dma_handle, 5182 softs->aac_max_fib_size, 5183 &aac_acc_attr, 5184 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 5185 DDI_DMA_SLEEP, 5186 NULL, 5187 (caddr_t *)&slotp->fibp, 5188 &rlen, 5189 &slotp->fib_acc_handle) != DDI_SUCCESS) { 5190 AACDB_PRINT(softs, CE_WARN, 5191 "Cannot alloc mem for slot fib area"); 5192 goto error; 5193 } 5194 if (ddi_dma_addr_bind_handle( 5195 slotp->fib_dma_handle, 5196 NULL, 5197 (caddr_t)slotp->fibp, 5198 softs->aac_max_fib_size, 5199 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 5200 DDI_DMA_SLEEP, 5201 NULL, 5202 &cookie, 5203 &cookien) != DDI_DMA_MAPPED) { 5204 AACDB_PRINT(softs, CE_WARN, 5205 "dma bind failed for slot fib area"); 5206 goto error; 5207 } 5208 5209 /* Check dma handles allocated in fib attach */ 5210 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) { 5211 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 5212 goto error; 5213 } 5214 5215 /* Check acc handles allocated in fib attach */ 5216 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) { 5217 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 5218 goto error; 5219 } 5220 5221 slotp->fib_phyaddr = cookie.dmac_laddress; 5222 return (AACOK); 5223 5224 error: 5225 if (slotp->fib_acc_handle) { 5226 ddi_dma_mem_free(&slotp->fib_acc_handle); 5227 slotp->fib_acc_handle = NULL; 5228 } 
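 /* Also free the FIB DMA handle if it had already been allocated */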
5229 if (slotp->fib_dma_handle) { 5230 ddi_dma_free_handle(&slotp->fib_dma_handle); 5231 slotp->fib_dma_handle = NULL; 5232 } 5233 return (AACERR); 5234 } 5235 5236 static void 5237 aac_free_fib(struct aac_slot *slotp) 5238 { 5239 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle); 5240 ddi_dma_mem_free(&slotp->fib_acc_handle); 5241 slotp->fib_acc_handle = NULL; 5242 ddi_dma_free_handle(&slotp->fib_dma_handle); 5243 slotp->fib_dma_handle = NULL; 5244 slotp->fib_phyaddr = 0; 5245 } 5246 5247 static void 5248 aac_alloc_fibs(struct aac_softstate *softs) 5249 { 5250 int i; 5251 struct aac_slot *slotp; 5252 5253 for (i = 0; i < softs->total_slots && 5254 softs->total_fibs < softs->total_slots; i++) { 5255 slotp = &(softs->io_slot[i]); 5256 if (slotp->fib_phyaddr) 5257 continue; 5258 if (aac_alloc_fib(softs, slotp) != AACOK) 5259 break; 5260 5261 /* Insert the slot to the free slot list */ 5262 aac_release_slot(softs, slotp); 5263 softs->total_fibs++; 5264 } 5265 } 5266 5267 static void 5268 aac_destroy_fibs(struct aac_softstate *softs) 5269 { 5270 struct aac_slot *slotp; 5271 5272 while ((slotp = softs->free_io_slot_head) != NULL) { 5273 ASSERT(slotp->fib_phyaddr); 5274 softs->free_io_slot_head = slotp->next; 5275 aac_free_fib(slotp); 5276 ASSERT(slotp->index == (slotp - softs->io_slot)); 5277 softs->total_fibs--; 5278 } 5279 ASSERT(softs->total_fibs == 0); 5280 } 5281 5282 static int 5283 aac_create_slots(struct aac_softstate *softs) 5284 { 5285 int i; 5286 5287 softs->total_slots = softs->aac_max_fibs; 5288 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \ 5289 softs->total_slots, KM_SLEEP); 5290 if (softs->io_slot == NULL) { 5291 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot"); 5292 return (AACERR); 5293 } 5294 for (i = 0; i < softs->total_slots; i++) 5295 softs->io_slot[i].index = i; 5296 softs->free_io_slot_head = NULL; 5297 softs->total_fibs = 0; 5298 return (AACOK); 5299 } 5300 5301 static void 5302 aac_destroy_slots(struct aac_softstate *softs) 5303 { 5304 ASSERT(softs->free_io_slot_head == NULL); 5305 5306 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \ 5307 softs->total_slots); 5308 softs->io_slot = NULL; 5309 softs->total_slots = 0; 5310 } 5311 5312 struct aac_slot * 5313 aac_get_slot(struct aac_softstate *softs) 5314 { 5315 struct aac_slot *slotp; 5316 5317 if ((slotp = softs->free_io_slot_head) != NULL) { 5318 softs->free_io_slot_head = slotp->next; 5319 slotp->next = NULL; 5320 } 5321 return (slotp); 5322 } 5323 5324 static void 5325 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp) 5326 { 5327 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots)); 5328 ASSERT(slotp == &softs->io_slot[slotp->index]); 5329 5330 slotp->acp = NULL; 5331 slotp->next = softs->free_io_slot_head; 5332 softs->free_io_slot_head = slotp; 5333 } 5334 5335 int 5336 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp) 5337 { 5338 if (aac_bind_io(softs, acp) == AACOK) 5339 aac_start_io(softs, acp); 5340 else 5341 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp); 5342 5343 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) 5344 return (TRAN_ACCEPT); 5345 /* 5346 * Because sync FIB is always 512 bytes and used for critical 5347 * functions, async FIB is used for poll IO. 
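 * (poll IO here means FLAG_NOINTR packets, which are completed by polling the adapter instead of waiting for an interrupt)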
5348 */ 5349 if (acp->flags & AAC_CMD_NO_INTR) { 5350 if (aac_do_poll_io(softs, acp) == AACOK) 5351 return (TRAN_ACCEPT); 5352 } else { 5353 if (aac_do_sync_io(softs, acp) == AACOK) 5354 return (TRAN_ACCEPT); 5355 } 5356 return (TRAN_BADPKT); 5357 } 5358 5359 static int 5360 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp) 5361 { 5362 int (*intr_handler)(struct aac_softstate *); 5363 5364 /* 5365 * Interrupts are disabled, so we have to poll the adapter ourselves. 5366 */ 5367 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 5368 aac_process_intr_new : aac_process_intr_old; 5369 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) { 5370 int i = AAC_POLL_TIME * 1000; 5371 5372 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i); 5373 if (i == 0) 5374 aac_cmd_timeout(softs); 5375 } 5376 5377 ddi_trigger_softintr(softs->softint_id); 5378 5379 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR)) 5380 return (AACOK); 5381 return (AACERR); 5382 } 5383 5384 static int 5385 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp) 5386 { 5387 ASSERT(softs && acp); 5388 5389 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) 5390 cv_wait(&softs->event, &softs->io_lock); 5391 5392 if (acp->flags & AAC_CMD_CMPLT) 5393 return (AACOK); 5394 return (AACERR); 5395 } 5396 5397 static int 5398 aac_dma_sync_ac(struct aac_cmd *acp) 5399 { 5400 if (acp->buf_dma_handle) { 5401 if (acp->flags & AAC_CMD_BUF_WRITE) { 5402 if (acp->abp != NULL) 5403 ddi_rep_put8(acp->abh, 5404 (uint8_t *)acp->bp->b_un.b_addr, 5405 (uint8_t *)acp->abp, acp->bp->b_bcount, 5406 DDI_DEV_AUTOINCR); 5407 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 5408 DDI_DMA_SYNC_FORDEV); 5409 } else { 5410 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 5411 DDI_DMA_SYNC_FORCPU); 5412 if (aac_check_dma_handle(acp->buf_dma_handle) != 5413 DDI_SUCCESS) 5414 return (AACERR); 5415 if (acp->abp != NULL) 5416 ddi_rep_get8(acp->abh, 5417 (uint8_t *)acp->bp->b_un.b_addr, 5418 (uint8_t *)acp->abp, acp->bp->b_bcount, 5419 DDI_DEV_AUTOINCR); 5420 } 5421 } 5422 return (AACOK); 5423 } 5424 5425 /* 5426 * The following function comes from Adaptec: 5427 * 5428 * When the driver sees a particular event indicating that the containers 5429 * have changed, it will rescan the containers. However, a change may not be 5430 * complete until some other event is received. For example, creating or 5431 * deleting an array can incur as many as six AifEnConfigChange events, which 5432 * would generate six container rescans. To reduce rescans, the driver sets a 5433 * flag to wait for another particular event; when that event comes in, it does the rescan. 
5434 */ 5435 static int 5436 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp) 5437 { 5438 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 5439 uint16_t fib_command; 5440 struct aac_aif_command *aif; 5441 int en_type; 5442 int devcfg_needed; 5443 int current, next; 5444 5445 fib_command = LE_16(fibp->Header.Command); 5446 if (fib_command != AifRequest) { 5447 cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x", 5448 fib_command); 5449 return (AACERR); 5450 } 5451 5452 /* Update internal container state */ 5453 aif = (struct aac_aif_command *)&fibp->data[0]; 5454 5455 AACDB_PRINT_AIF(softs, aif); 5456 devcfg_needed = 0; 5457 en_type = LE_32((uint32_t)aif->data.EN.type); 5458 5459 switch (LE_32((uint32_t)aif->command)) { 5460 case AifCmdDriverNotify: { 5461 int cid = LE_32(aif->data.EN.data.ECC.container[0]); 5462 5463 switch (en_type) { 5464 case AifDenMorphComplete: 5465 case AifDenVolumeExtendComplete: 5466 if (softs->containers[cid].valid) 5467 softs->devcfg_wait_on = AifEnConfigChange; 5468 break; 5469 } 5470 if (softs->devcfg_wait_on == en_type) 5471 devcfg_needed = 1; 5472 break; 5473 } 5474 5475 case AifCmdEventNotify: 5476 switch (en_type) { 5477 case AifEnAddContainer: 5478 case AifEnDeleteContainer: 5479 softs->devcfg_wait_on = AifEnConfigChange; 5480 break; 5481 case AifEnContainerChange: 5482 if (!softs->devcfg_wait_on) 5483 softs->devcfg_wait_on = AifEnConfigChange; 5484 break; 5485 case AifEnContainerEvent: 5486 if (ddi_get32(acc, &aif-> \ 5487 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE) 5488 devcfg_needed = 1; 5489 break; 5490 } 5491 if (softs->devcfg_wait_on == en_type) 5492 devcfg_needed = 1; 5493 break; 5494 5495 case AifCmdJobProgress: 5496 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) { 5497 int pr_status; 5498 uint32_t pr_ftick, pr_ctick; 5499 5500 pr_status = LE_32((uint32_t)aif->data.PR[0].status); 5501 pr_ctick = LE_32(aif->data.PR[0].currentTick); 5502 pr_ftick = LE_32(aif->data.PR[0].finalTick); 5503 5504 if ((pr_ctick == pr_ftick) || 5505 (pr_status == AifJobStsSuccess)) 5506 softs->devcfg_wait_on = AifEnContainerChange; 5507 else if ((pr_ctick == 0) && 5508 (pr_status == AifJobStsRunning)) 5509 softs->devcfg_wait_on = AifEnContainerChange; 5510 } 5511 break; 5512 } 5513 5514 if (devcfg_needed) 5515 (void) aac_probe_containers(softs); 5516 5517 /* Modify AIF contexts */ 5518 current = softs->aifq_idx; 5519 next = (current + 1) % AAC_AIFQ_LENGTH; 5520 if (next == 0) { 5521 struct aac_fib_context *ctx; 5522 5523 softs->aifq_wrap = 1; 5524 for (ctx = softs->fibctx; ctx; ctx = ctx->next) { 5525 if (next == ctx->ctx_idx) { 5526 ctx->ctx_filled = 1; 5527 } else if (current == ctx->ctx_idx && ctx->ctx_filled) { 5528 ctx->ctx_idx = next; 5529 AACDB_PRINT(softs, CE_NOTE, 5530 "-- AIF queue(%x) overrun", ctx->unique); 5531 } 5532 } 5533 } 5534 softs->aifq_idx = next; 5535 5536 /* Wakeup applications */ 5537 cv_broadcast(&softs->aifv); 5538 return (AACOK); 5539 } 5540 5541 /* 5542 * Timeout recovery 5543 */ 5544 static void 5545 aac_cmd_timeout(struct aac_softstate *softs) 5546 { 5547 /* 5548 * Besides the firmware being in an unhealthy state, an overloaded 5549 * adapter may also incur pkt timeouts. 5550 * There is a chance for an adapter with a slower IOP to take 5551 * longer than 60 seconds to process commands, such as when 5552 * the adapter is doing a build on a RAID-5 while being required 5553 * to perform IOs, so longer completion times should be 5554 * tolerated. 
5555 */ 5556 if (aac_do_reset(softs) == AACOK) { 5557 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, 5558 CMD_RESET); 5559 aac_start_waiting_io(softs); 5560 } else { 5561 /* Abort all waiting cmds when adapter is dead */ 5562 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, 5563 CMD_TIMEOUT); 5564 } 5565 } 5566 5567 /* 5568 * The following function comes from Adaptec: 5569 * 5570 * Time sync. command added to synchronize time with firmware every 30 5571 * minutes (required for correct AIF timestamps etc.) 5572 */ 5573 static int 5574 aac_sync_tick(struct aac_softstate *softs) 5575 { 5576 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 5577 struct aac_fib *fibp = softs->sync_slot.fibp; 5578 5579 ddi_put32(acc, (void *)&fibp->data[0], ddi_get_time()); 5580 return (aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t))); 5581 } 5582 5583 static void 5584 aac_daemon(void *arg) 5585 { 5586 struct aac_softstate *softs = (struct aac_softstate *)arg; 5587 struct aac_cmd *acp; 5588 5589 DBCALLED(softs, 2); 5590 5591 mutex_enter(&softs->io_lock); 5592 /* Check slot for timeout pkts */ 5593 aac_timebase += aac_tick; 5594 for (acp = softs->q_busy.q_head; acp; acp = acp->next) { 5595 if (acp->timeout) { 5596 if (acp->timeout <= aac_timebase) { 5597 aac_cmd_timeout(softs); 5598 ddi_trigger_softintr(softs->softint_id); 5599 } 5600 break; 5601 } 5602 } 5603 5604 /* Time sync. with firmware every AAC_SYNC_TICK */ 5605 if (aac_sync_time <= aac_timebase) { 5606 aac_sync_time = aac_timebase; 5607 if (aac_sync_tick(softs) != AACOK) 5608 aac_sync_time += aac_tick << 1; /* retry shortly */ 5609 else 5610 aac_sync_time += AAC_SYNC_TICK; 5611 } 5612 5613 if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0)) 5614 softs->timeout_id = timeout(aac_daemon, (void *)softs, 5615 (aac_tick * drv_usectohz(1000000))); 5616 mutex_exit(&softs->io_lock); 5617 } 5618 5619 /* 5620 * Architecture dependent functions 5621 */ 5622 static int 5623 aac_rx_get_fwstatus(struct aac_softstate *softs) 5624 { 5625 return (PCI_MEM_GET32(softs, AAC_OMR0)); 5626 } 5627 5628 static int 5629 aac_rx_get_mailbox(struct aac_softstate *softs, int mb) 5630 { 5631 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4)); 5632 } 5633 5634 static void 5635 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 5636 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 5637 { 5638 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd); 5639 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0); 5640 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1); 5641 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2); 5642 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3); 5643 } 5644 5645 static int 5646 aac_rkt_get_fwstatus(struct aac_softstate *softs) 5647 { 5648 return (PCI_MEM_GET32(softs, AAC_OMR0)); 5649 } 5650 5651 static int 5652 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb) 5653 { 5654 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4)); 5655 } 5656 5657 static void 5658 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 5659 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 5660 { 5661 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd); 5662 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0); 5663 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1); 5664 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2); 5665 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3); 5666 } 5667 5668 /* 5669 * cb_ops functions 5670 */ 5671 static int 5672 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred) 5673 { 5674 struct aac_softstate *softs; 
5675 int minor0, minor; 5676 int instance; 5677 5678 DBCALLED(NULL, 2); 5679 5680 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 5681 return (EINVAL); 5682 5683 minor0 = getminor(*devp); 5684 minor = AAC_SCSA_MINOR(minor0); 5685 5686 if (AAC_IS_SCSA_NODE(minor)) 5687 return (scsi_hba_open(devp, flag, otyp, cred)); 5688 5689 instance = MINOR2INST(minor0); 5690 if (instance >= AAC_MAX_ADAPTERS) 5691 return (ENXIO); 5692 5693 softs = ddi_get_soft_state(aac_softstatep, instance); 5694 if (softs == NULL) 5695 return (ENXIO); 5696 5697 return (0); 5698 } 5699 5700 /*ARGSUSED*/ 5701 static int 5702 aac_close(dev_t dev, int flag, int otyp, cred_t *cred) 5703 { 5704 int minor0, minor; 5705 int instance; 5706 5707 DBCALLED(NULL, 2); 5708 5709 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 5710 return (EINVAL); 5711 5712 minor0 = getminor(dev); 5713 minor = AAC_SCSA_MINOR(minor0); 5714 5715 if (AAC_IS_SCSA_NODE(minor)) 5716 return (scsi_hba_close(dev, flag, otyp, cred)); 5717 5718 instance = MINOR2INST(minor0); 5719 if (instance >= AAC_MAX_ADAPTERS) 5720 return (ENXIO); 5721 5722 return (0); 5723 } 5724 5725 static int 5726 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, 5727 int *rval_p) 5728 { 5729 struct aac_softstate *softs; 5730 int minor0, minor; 5731 int instance; 5732 5733 DBCALLED(NULL, 2); 5734 5735 if (drv_priv(cred_p) != 0) 5736 return (EPERM); 5737 5738 minor0 = getminor(dev); 5739 minor = AAC_SCSA_MINOR(minor0); 5740 5741 if (AAC_IS_SCSA_NODE(minor)) 5742 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p)); 5743 5744 instance = MINOR2INST(minor0); 5745 if (instance < AAC_MAX_ADAPTERS) { 5746 softs = ddi_get_soft_state(aac_softstatep, instance); 5747 return (aac_do_ioctl(softs, dev, cmd, arg, flag)); 5748 } 5749 return (ENXIO); 5750 } 5751 5752 /* 5753 * The IO fault service error handling callback function 5754 */ 5755 /*ARGSUSED*/ 5756 static int 5757 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5758 { 5759 /* 5760 * as the driver can always deal with an error in any dma or 5761 * access handle, we can just return the fme_status value. 5762 */ 5763 pci_ereport_post(dip, err, NULL); 5764 return (err->fme_status); 5765 } 5766 5767 /* 5768 * aac_fm_init - initialize fma capabilities and register with IO 5769 * fault services. 5770 */ 5771 static void 5772 aac_fm_init(struct aac_softstate *softs) 5773 { 5774 /* 5775 * Need to change iblock to priority for new MSI intr 5776 */ 5777 ddi_iblock_cookie_t fm_ibc; 5778 5779 /* Only register with IO Fault Services if we have some capability */ 5780 if (softs->fm_capabilities) { 5781 /* Adjust access and dma attributes for FMA */ 5782 aac_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 5783 softs->buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 5784 softs->addr_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 5785 5786 /* 5787 * Register capabilities with IO Fault Services. 5788 * fm_capabilities will be updated to indicate 5789 * capabilities actually supported (not requested.) 5790 */ 5791 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc); 5792 5793 /* 5794 * Initialize pci ereport capabilities if ereport 5795 * capable (should always be.) 5796 */ 5797 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 5798 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 5799 pci_ereport_setup(softs->devinfo_p); 5800 } 5801 5802 /* 5803 * Register error callback if error callback capable. 
5804 */ 5805 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 5806 ddi_fm_handler_register(softs->devinfo_p, 5807 aac_fm_error_cb, (void *) softs); 5808 } 5809 } else { 5810 /* Clear FMA if no capabilities */ 5811 aac_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5812 softs->buf_dma_attr.dma_attr_flags = 0; 5813 softs->addr_dma_attr.dma_attr_flags = 0; 5814 } 5815 } 5816 5817 /* 5818 * aac_fm_fini - Releases fma capabilities and un-registers with IO 5819 * fault services. 5820 */ 5821 static void 5822 aac_fm_fini(struct aac_softstate *softs) 5823 { 5824 /* Only unregister FMA capabilities if registered */ 5825 if (softs->fm_capabilities) { 5826 /* 5827 * Un-register error callback if error callback capable. 5828 */ 5829 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 5830 ddi_fm_handler_unregister(softs->devinfo_p); 5831 } 5832 5833 /* 5834 * Release any resources allocated by pci_ereport_setup() 5835 */ 5836 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 5837 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 5838 pci_ereport_teardown(softs->devinfo_p); 5839 } 5840 5841 /* Unregister from IO Fault Services */ 5842 ddi_fm_fini(softs->devinfo_p); 5843 } 5844 } 5845 5846 int 5847 aac_check_acc_handle(ddi_acc_handle_t handle) 5848 { 5849 ddi_fm_error_t de; 5850 5851 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5852 return (de.fme_status); 5853 } 5854 5855 int 5856 aac_check_dma_handle(ddi_dma_handle_t handle) 5857 { 5858 ddi_fm_error_t de; 5859 5860 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5861 return (de.fme_status); 5862 } 5863 5864 void 5865 aac_fm_ereport(struct aac_softstate *softs, char *detail) 5866 { 5867 uint64_t ena; 5868 char buf[FM_MAX_CLASS]; 5869 5870 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 5871 ena = fm_ena_generate(0, FM_ENA_FMT1); 5872 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) { 5873 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP, 5874 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 5875 } 5876 } 5877 5878 #ifdef DEBUG 5879 5880 /* -------------------------debug aid functions-------------------------- */ 5881 5882 #define AAC_FIB_CMD_KEY_STRINGS \ 5883 TestCommandResponse, "TestCommandResponse", \ 5884 TestAdapterCommand, "TestAdapterCommand", \ 5885 LastTestCommand, "LastTestCommand", \ 5886 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \ 5887 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \ 5888 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \ 5889 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \ 5890 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \ 5891 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \ 5892 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \ 5893 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \ 5894 InterfaceShutdown, "InterfaceShutdown", \ 5895 DmaCommandFib, "DmaCommandFib", \ 5896 StartProfile, "StartProfile", \ 5897 TermProfile, "TermProfile", \ 5898 SpeedTest, "SpeedTest", \ 5899 TakeABreakPt, "TakeABreakPt", \ 5900 RequestPerfData, "RequestPerfData", \ 5901 SetInterruptDefTimer, "SetInterruptDefTimer", \ 5902 SetInterruptDefCount, "SetInterruptDefCount", \ 5903 GetInterruptDefStatus, "GetInterruptDefStatus", \ 5904 LastCommCommand, "LastCommCommand", \ 5905 NuFileSystem, "NuFileSystem", \ 5906 UFS, "UFS", \ 5907 HostFileSystem, "HostFileSystem", \ 5908 LastFileSystemCommand, "LastFileSystemCommand", \ 5909 ContainerCommand, "ContainerCommand", \ 5910 ContainerCommand64, "ContainerCommand64", \ 5911 ClusterCommand, 
"ClusterCommand", \ 5912 ScsiPortCommand, "ScsiPortCommand", \ 5913 ScsiPortCommandU64, "ScsiPortCommandU64", \ 5914 AifRequest, "AifRequest", \ 5915 CheckRevision, "CheckRevision", \ 5916 FsaHostShutdown, "FsaHostShutdown", \ 5917 RequestAdapterInfo, "RequestAdapterInfo", \ 5918 IsAdapterPaused, "IsAdapterPaused", \ 5919 SendHostTime, "SendHostTime", \ 5920 LastMiscCommand, "LastMiscCommand" 5921 5922 #define AAC_CTVM_SUBCMD_KEY_STRINGS \ 5923 VM_Null, "VM_Null", \ 5924 VM_NameServe, "VM_NameServe", \ 5925 VM_ContainerConfig, "VM_ContainerConfig", \ 5926 VM_Ioctl, "VM_Ioctl", \ 5927 VM_FilesystemIoctl, "VM_FilesystemIoctl", \ 5928 VM_CloseAll, "VM_CloseAll", \ 5929 VM_CtBlockRead, "VM_CtBlockRead", \ 5930 VM_CtBlockWrite, "VM_CtBlockWrite", \ 5931 VM_SliceBlockRead, "VM_SliceBlockRead", \ 5932 VM_SliceBlockWrite, "VM_SliceBlockWrite", \ 5933 VM_DriveBlockRead, "VM_DriveBlockRead", \ 5934 VM_DriveBlockWrite, "VM_DriveBlockWrite", \ 5935 VM_EnclosureMgt, "VM_EnclosureMgt", \ 5936 VM_Unused, "VM_Unused", \ 5937 VM_CtBlockVerify, "VM_CtBlockVerify", \ 5938 VM_CtPerf, "VM_CtPerf", \ 5939 VM_CtBlockRead64, "VM_CtBlockRead64", \ 5940 VM_CtBlockWrite64, "VM_CtBlockWrite64", \ 5941 VM_CtBlockVerify64, "VM_CtBlockVerify64", \ 5942 VM_CtHostRead64, "VM_CtHostRead64", \ 5943 VM_CtHostWrite64, "VM_CtHostWrite64", \ 5944 VM_NameServe64, "VM_NameServe64" 5945 5946 #define AAC_CT_SUBCMD_KEY_STRINGS \ 5947 CT_Null, "CT_Null", \ 5948 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \ 5949 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \ 5950 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \ 5951 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \ 5952 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \ 5953 CT_WRITE_MBR, "CT_WRITE_MBR", \ 5954 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \ 5955 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \ 5956 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \ 5957 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \ 5958 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \ 5959 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \ 5960 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \ 5961 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \ 5962 CT_READ_MBR, "CT_READ_MBR", \ 5963 CT_READ_PARTITION, "CT_READ_PARTITION", \ 5964 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \ 5965 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \ 5966 CT_SLICE_SIZE, "CT_SLICE_SIZE", \ 5967 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \ 5968 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \ 5969 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \ 5970 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \ 5971 CT_UNMIRROR, "CT_UNMIRROR", \ 5972 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \ 5973 CT_GEN_MIRROR, "CT_GEN_MIRROR", \ 5974 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \ 5975 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \ 5976 CT_MOVE2, "CT_MOVE2", \ 5977 CT_SPLIT, "CT_SPLIT", \ 5978 CT_SPLIT2, "CT_SPLIT2", \ 5979 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \ 5980 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \ 5981 CT_RECONFIG, "CT_RECONFIG", \ 5982 CT_BREAK2, "CT_BREAK2", \ 5983 CT_BREAK, "CT_BREAK", \ 5984 CT_MERGE2, "CT_MERGE2", \ 5985 CT_MERGE, "CT_MERGE", \ 5986 CT_FORCE_ERROR, "CT_FORCE_ERROR", \ 5987 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \ 5988 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \ 5989 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \ 5990 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \ 5991 CT_VOLUME_ADD, "CT_VOLUME_ADD", \ 5992 CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \ 5993 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \ 5994 CT_COPY_STATUS, "CT_COPY_STATUS", \ 5995 CT_COPY, "CT_COPY", \ 
5996 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \ 5997 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \ 5998 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \ 5999 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \ 6000 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \ 6001 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \ 6002 CT_SET, "CT_SET", \ 6003 CT_GET, "CT_GET", \ 6004 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \ 6005 CT_GET_DELAY, "CT_GET_DELAY", \ 6006 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \ 6007 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \ 6008 CT_SCRUB, "CT_SCRUB", \ 6009 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \ 6010 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \ 6011 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \ 6012 CT_PAUSE_IO, "CT_PAUSE_IO", \ 6013 CT_RELEASE_IO, "CT_RELEASE_IO", \ 6014 CT_SCRUB2, "CT_SCRUB2", \ 6015 CT_MCHECK, "CT_MCHECK", \ 6016 CT_CORRUPT, "CT_CORRUPT", \ 6017 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \ 6018 CT_PROMOTE, "CT_PROMOTE", \ 6019 CT_SET_DEAD, "CT_SET_DEAD", \ 6020 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \ 6021 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \ 6022 CT_GET_PARAM, "CT_GET_PARAM", \ 6023 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \ 6024 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \ 6025 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \ 6026 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \ 6027 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \ 6028 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \ 6029 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \ 6030 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \ 6031 CT_STOP_DATA, "CT_STOP_DATA", \ 6032 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \ 6033 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \ 6034 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \ 6035 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \ 6036 CT_GET_TIME, "CT_GET_TIME", \ 6037 CT_READ_DATA, "CT_READ_DATA", \ 6038 CT_CTR, "CT_CTR", \ 6039 CT_CTL, "CT_CTL", \ 6040 CT_DRAINIO, "CT_DRAINIO", \ 6041 CT_RELEASEIO, "CT_RELEASEIO", \ 6042 CT_GET_NVRAM, "CT_GET_NVRAM", \ 6043 CT_GET_MEMORY, "CT_GET_MEMORY", \ 6044 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \ 6045 CT_ADD_LEVEL, "CT_ADD_LEVEL", \ 6046 CT_NV_ZERO, "CT_NV_ZERO", \ 6047 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \ 6048 CT_THROTTLE_ON, "CT_THROTTLE_ON", \ 6049 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \ 6050 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \ 6051 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \ 6052 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \ 6053 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \ 6054 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \ 6055 CT_MONITOR, "CT_MONITOR", \ 6056 CT_GEN_MORPH, "CT_GEN_MORPH", \ 6057 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \ 6058 CT_CACHE_SET, "CT_CACHE_SET", \ 6059 CT_CACHE_STAT, "CT_CACHE_STAT", \ 6060 CT_TRACE_START, "CT_TRACE_START", \ 6061 CT_TRACE_STOP, "CT_TRACE_STOP", \ 6062 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \ 6063 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \ 6064 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \ 6065 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \ 6066 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \ 6067 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \ 6068 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \ 6069 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \ 6070 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \ 6071 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \ 6072 CT_STOP_DUMPS, "CT_STOP_DUMPS", \ 6073 CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \ 6074 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \ 6075 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \ 6076 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \ 
6077 CT_READ_NAME, "CT_READ_NAME", \ 6078 CT_WRITE_NAME, "CT_WRITE_NAME", \ 6079 CT_TOSS_CACHE, "CT_TOSS_CACHE", \ 6080 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \ 6081 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \ 6082 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \ 6083 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \ 6084 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \ 6085 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \ 6086 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \ 6087 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \ 6088 CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \ 6089 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \ 6090 CT_FLUSH, "CT_FLUSH", \ 6091 CT_REBUILD, "CT_REBUILD", \ 6092 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \ 6093 CT_RESTART, "CT_RESTART", \ 6094 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \ 6095 CT_TRACE_FLAG, "CT_TRACE_FLAG", \ 6096 CT_RESTART_MORPH, "CT_RESTART_MORPH", \ 6097 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \ 6098 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \ 6099 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \ 6100 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \ 6101 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \ 6102 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \ 6103 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \ 6104 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \ 6105 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \ 6106 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \ 6107 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \ 6108 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \ 6109 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \ 6110 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \ 6111 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \ 6112 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \ 6113 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \ 6114 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \ 6115 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \ 6116 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \ 6117 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \ 6118 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \ 6119 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \ 6120 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \ 6121 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \ 6122 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \ 6123 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \ 6124 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \ 6125 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \ 6126 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \ 6127 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \ 6128 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \ 6129 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \ 6130 CT_IS_CONTAINER_MEATADATA_STANDARD, \ 6131 "CT_IS_CONTAINER_MEATADATA_STANDARD", \ 6132 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \ 6133 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \ 6134 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \ 6135 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \ 6136 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \ 6137 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \ 6138 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \ 6139 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \ 6140 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \ 6141 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \ 6142 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \ 6143 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \ 6144 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \ 6145 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \ 6146 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \ 6147 CT_GET_PARTITION_INFO_2, 
"CT_GET_PARTITION_INFO_2", \ 6148 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \ 6149 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \ 6150 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE" 6151 6152 #define AAC_CL_SUBCMD_KEY_STRINGS \ 6153 CL_NULL, "CL_NULL", \ 6154 DS_INIT, "DS_INIT", \ 6155 DS_RESCAN, "DS_RESCAN", \ 6156 DS_CREATE, "DS_CREATE", \ 6157 DS_DELETE, "DS_DELETE", \ 6158 DS_ADD_DISK, "DS_ADD_DISK", \ 6159 DS_REMOVE_DISK, "DS_REMOVE_DISK", \ 6160 DS_MOVE_DISK, "DS_MOVE_DISK", \ 6161 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \ 6162 DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \ 6163 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \ 6164 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \ 6165 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \ 6166 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \ 6167 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \ 6168 DS_GET_DRIVES, "DS_GET_DRIVES", \ 6169 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \ 6170 DS_ONLINE, "DS_ONLINE", \ 6171 DS_OFFLINE, "DS_OFFLINE", \ 6172 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \ 6173 DS_FSAPRINT, "DS_FSAPRINT", \ 6174 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \ 6175 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \ 6176 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \ 6177 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \ 6178 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \ 6179 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \ 6180 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \ 6181 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \ 6182 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \ 6183 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \ 6184 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \ 6185 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \ 6186 CC_GET_BUSINFO, "CC_GET_BUSINFO", \ 6187 CC_GET_PORTINFO, "CC_GET_PORTINFO", \ 6188 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \ 6189 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \ 6190 CQ_QUORUM_OP, "CQ_QUORUM_OP" 6191 6192 #define AAC_AIF_SUBCMD_KEY_STRINGS \ 6193 AifCmdEventNotify, "AifCmdEventNotify", \ 6194 AifCmdJobProgress, "AifCmdJobProgress", \ 6195 AifCmdAPIReport, "AifCmdAPIReport", \ 6196 AifCmdDriverNotify, "AifCmdDriverNotify", \ 6197 AifReqJobList, "AifReqJobList", \ 6198 AifReqJobsForCtr, "AifReqJobsForCtr", \ 6199 AifReqJobsForScsi, "AifReqJobsForScsi", \ 6200 AifReqJobReport, "AifReqJobReport", \ 6201 AifReqTerminateJob, "AifReqTerminateJob", \ 6202 AifReqSuspendJob, "AifReqSuspendJob", \ 6203 AifReqResumeJob, "AifReqResumeJob", \ 6204 AifReqSendAPIReport, "AifReqSendAPIReport", \ 6205 AifReqAPIJobStart, "AifReqAPIJobStart", \ 6206 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \ 6207 AifReqAPIJobFinish, "AifReqAPIJobFinish" 6208 6209 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \ 6210 Reserved_IOCTL, "Reserved_IOCTL", \ 6211 GetDeviceHandle, "GetDeviceHandle", \ 6212 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \ 6213 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \ 6214 RescanBus, "RescanBus", \ 6215 GetDeviceProbeInfo, "GetDeviceProbeInfo", \ 6216 GetDeviceCapacity, "GetDeviceCapacity", \ 6217 GetContainerProbeInfo, "GetContainerProbeInfo", \ 6218 GetRequestedMemorySize, "GetRequestedMemorySize", \ 6219 GetBusInfo, "GetBusInfo", \ 6220 GetVendorSpecific, "GetVendorSpecific", \ 6221 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \ 6222 EnhancedGetBusInfo, "EnhancedGetBusInfo", \ 6223 SetupExtendedCounters, "SetupExtendedCounters", \ 6224 GetPerformanceCounters, "GetPerformanceCounters", \ 6225 
ResetPerformanceCounters, "ResetPerformanceCounters", \ 6226 ReadModePage, "ReadModePage", \ 6227 WriteModePage, "WriteModePage", \ 6228 ReadDriveParameter, "ReadDriveParameter", \ 6229 WriteDriveParameter, "WriteDriveParameter", \ 6230 ResetAdapter, "ResetAdapter", \ 6231 ResetBus, "ResetBus", \ 6232 ResetBusDevice, "ResetBusDevice", \ 6233 ExecuteSrb, "ExecuteSrb", \ 6234 Create_IO_Task, "Create_IO_Task", \ 6235 Delete_IO_Task, "Delete_IO_Task", \ 6236 Get_IO_Task_Info, "Get_IO_Task_Info", \ 6237 Check_Task_Progress, "Check_Task_Progress", \ 6238 InjectError, "InjectError", \ 6239 GetDeviceDefectCounts, "GetDeviceDefectCounts", \ 6240 GetDeviceDefectInfo, "GetDeviceDefectInfo", \ 6241 GetDeviceStatus, "GetDeviceStatus", \ 6242 ClearDeviceStatus, "ClearDeviceStatus", \ 6243 DiskSpinControl, "DiskSpinControl", \ 6244 DiskSmartControl, "DiskSmartControl", \ 6245 WriteSame, "WriteSame", \ 6246 ReadWriteLong, "ReadWriteLong", \ 6247 FormatUnit, "FormatUnit", \ 6248 TargetDeviceControl, "TargetDeviceControl", \ 6249 TargetChannelControl, "TargetChannelControl", \ 6250 FlashNewCode, "FlashNewCode", \ 6251 DiskCheck, "DiskCheck", \ 6252 RequestSense, "RequestSense", \ 6253 DiskPERControl, "DiskPERControl", \ 6254 Read10, "Read10", \ 6255 Write10, "Write10" 6256 6257 #define AAC_AIFEN_KEY_STRINGS \ 6258 AifEnGeneric, "Generic", \ 6259 AifEnTaskComplete, "TaskComplete", \ 6260 AifEnConfigChange, "Config change", \ 6261 AifEnContainerChange, "Container change", \ 6262 AifEnDeviceFailure, "device failed", \ 6263 AifEnMirrorFailover, "Mirror failover", \ 6264 AifEnContainerEvent, "container event", \ 6265 AifEnFileSystemChange, "File system changed", \ 6266 AifEnConfigPause, "Container pause event", \ 6267 AifEnConfigResume, "Container resume event", \ 6268 AifEnFailoverChange, "Failover space assignment changed", \ 6269 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \ 6270 AifEnEnclosureManagement, "Enclosure management event", \ 6271 AifEnBatteryEvent, "battery event", \ 6272 AifEnAddContainer, "Add container", \ 6273 AifEnDeleteContainer, "Delete container", \ 6274 AifEnSMARTEvent, "SMART Event", \ 6275 AifEnBatteryNeedsRecond, "battery needs reconditioning", \ 6276 AifEnClusterEvent, "cluster event", \ 6277 AifEnDiskSetEvent, "disk set event occured", \ 6278 AifDenMorphComplete, "morph operation completed", \ 6279 AifDenVolumeExtendComplete, "VolumeExtendComplete" 6280 6281 struct aac_key_strings { 6282 int key; 6283 char *message; 6284 }; 6285 6286 extern struct scsi_key_strings scsi_cmds[]; 6287 6288 static struct aac_key_strings aac_fib_cmds[] = { 6289 AAC_FIB_CMD_KEY_STRINGS, 6290 -1, NULL 6291 }; 6292 6293 static struct aac_key_strings aac_ctvm_subcmds[] = { 6294 AAC_CTVM_SUBCMD_KEY_STRINGS, 6295 -1, NULL 6296 }; 6297 6298 static struct aac_key_strings aac_ct_subcmds[] = { 6299 AAC_CT_SUBCMD_KEY_STRINGS, 6300 -1, NULL 6301 }; 6302 6303 static struct aac_key_strings aac_cl_subcmds[] = { 6304 AAC_CL_SUBCMD_KEY_STRINGS, 6305 -1, NULL 6306 }; 6307 6308 static struct aac_key_strings aac_aif_subcmds[] = { 6309 AAC_AIF_SUBCMD_KEY_STRINGS, 6310 -1, NULL 6311 }; 6312 6313 static struct aac_key_strings aac_ioctl_subcmds[] = { 6314 AAC_IOCTL_SUBCMD_KEY_STRINGS, 6315 -1, NULL 6316 }; 6317 6318 static struct aac_key_strings aac_aifens[] = { 6319 AAC_AIFEN_KEY_STRINGS, 6320 -1, NULL 6321 }; 6322 6323 /* 6324 * The following function comes from Adaptec: 6325 * 6326 * Get the firmware print buffer parameters from the firmware, 6327 * if the command was successful map in the address. 
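 *
 * A sketch of the layout assumed by the code below: mailboxes 1 and 2
 * return the low and high 32 bits of the buffer's physical address,
 * mailbox 3 its size and mailbox 4 the header size; the buffer is used
 * only when it lies within the BAR already mapped at pci_mem_base_paddr.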
6328 */ 6329 static int 6330 aac_get_fw_debug_buffer(struct aac_softstate *softs) 6331 { 6332 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP, 6333 0, 0, 0, 0, NULL) == AACOK) { 6334 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1); 6335 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2); 6336 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3); 6337 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4); 6338 6339 if (mondrv_buf_size) { 6340 uint32_t offset = mondrv_buf_paddrl - \ 6341 softs->pci_mem_base_paddr; 6342 6343 /* 6344 * See if the address is already mapped in, and 6345 * if so set it up from the base address 6346 */ 6347 if ((mondrv_buf_paddrh == 0) && 6348 (offset + mondrv_buf_size < softs->map_size)) { 6349 mutex_enter(&aac_prt_mutex); 6350 softs->debug_buf_offset = offset; 6351 softs->debug_header_size = mondrv_hdr_size; 6352 softs->debug_buf_size = mondrv_buf_size; 6353 softs->debug_fw_flags = 0; 6354 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 6355 mutex_exit(&aac_prt_mutex); 6356 6357 return (AACOK); 6358 } 6359 } 6360 } 6361 return (AACERR); 6362 } 6363 6364 int 6365 aac_dbflag_on(struct aac_softstate *softs, int flag) 6366 { 6367 int debug_flags = softs ? softs->debug_flags : aac_debug_flags; 6368 6369 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \ 6370 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag)); 6371 } 6372 6373 static void 6374 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader) 6375 { 6376 if (noheader) { 6377 if (sl) { 6378 aac_fmt[0] = sl; 6379 cmn_err(lev, aac_fmt, aac_prt_buf); 6380 } else { 6381 cmn_err(lev, &aac_fmt[1], aac_prt_buf); 6382 } 6383 } else { 6384 if (sl) { 6385 aac_fmt_header[0] = sl; 6386 cmn_err(lev, aac_fmt_header, 6387 softs->vendor_name, softs->instance, 6388 aac_prt_buf); 6389 } else { 6390 cmn_err(lev, &aac_fmt_header[1], 6391 softs->vendor_name, softs->instance, 6392 aac_prt_buf); 6393 } 6394 } 6395 } 6396 6397 /* 6398 * The following function comes from Adaptec: 6399 * 6400 * Format and print out the data passed in to UART or console 6401 * as specified by debug flags. 6402 */ 6403 void 6404 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...) 6405 { 6406 va_list args; 6407 char sl; /* system log character */ 6408 6409 mutex_enter(&aac_prt_mutex); 6410 /* Set up parameters and call sprintf function to format the data */ 6411 if (strchr("^!?", fmt[0]) == NULL) { 6412 sl = 0; 6413 } else { 6414 sl = fmt[0]; 6415 fmt++; 6416 } 6417 va_start(args, fmt); 6418 (void) vsprintf(aac_prt_buf, fmt, args); 6419 va_end(args); 6420 6421 /* Make sure the softs structure has been passed in for this section */ 6422 if (softs) { 6423 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) && 6424 /* If we are set up for a Firmware print */ 6425 (softs->debug_buf_size)) { 6426 uint32_t count, i; 6427 6428 /* Make sure the string size is within boundaries */ 6429 count = strlen(aac_prt_buf); 6430 if (count > softs->debug_buf_size) 6431 count = (uint16_t)softs->debug_buf_size; 6432 6433 /* 6434 * Wait for no more than AAC_PRINT_TIMEOUT for the 6435 * previous message length to clear (the handshake). 6436 */ 6437 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) { 6438 if (!PCI_MEM_GET32(softs, 6439 softs->debug_buf_offset + \ 6440 AAC_FW_DBG_STRLEN_OFFSET)) 6441 break; 6442 6443 drv_usecwait(1000); 6444 } 6445 6446 /* 6447 * If the length is clear, copy over the message, the 6448 * flags, and the length. Make sure the length is the 6449 * last because that is the signal for the Firmware to 6450 * pick it up. 
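			 *
			 * In other words, the length word at
			 * AAC_FW_DBG_STRLEN_OFFSET acts as the ownership flag
			 * for the buffer: the driver writes the text and the
			 * flags first and stores the non-zero length last,
			 * which is what hands the message to the firmware.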
6451 */ 6452 if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \ 6453 AAC_FW_DBG_STRLEN_OFFSET)) { 6454 PCI_MEM_REP_PUT8(softs, 6455 softs->debug_buf_offset + \ 6456 softs->debug_header_size, 6457 aac_prt_buf, count); 6458 PCI_MEM_PUT32(softs, 6459 softs->debug_buf_offset + \ 6460 AAC_FW_DBG_FLAGS_OFFSET, 6461 softs->debug_fw_flags); 6462 PCI_MEM_PUT32(softs, 6463 softs->debug_buf_offset + \ 6464 AAC_FW_DBG_STRLEN_OFFSET, count); 6465 } else { 6466 cmn_err(CE_WARN, "UART output fail"); 6467 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 6468 } 6469 } 6470 6471 /* 6472 * If the Kernel Debug Print flag is set, send it off 6473 * to the Kernel Debugger 6474 */ 6475 if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT) 6476 aac_cmn_err(softs, lev, sl, 6477 (softs->debug_flags & AACDB_FLAGS_NO_HEADERS)); 6478 } else { 6479 /* Driver not initialized yet, no firmware or header output */ 6480 if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT) 6481 aac_cmn_err(softs, lev, sl, 1); 6482 } 6483 mutex_exit(&aac_prt_mutex); 6484 } 6485 6486 /* 6487 * Translate command number to description string 6488 */ 6489 static char * 6490 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist) 6491 { 6492 int i; 6493 6494 for (i = 0; cmdlist[i].key != -1; i++) { 6495 if (cmd == cmdlist[i].key) 6496 return (cmdlist[i].message); 6497 } 6498 return (NULL); 6499 } 6500 6501 static void 6502 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 6503 { 6504 struct scsi_pkt *pkt = acp->pkt; 6505 struct scsi_address *ap = &pkt->pkt_address; 6506 int ctl = ddi_get_instance(softs->devinfo_p); 6507 int tgt = ap->a_target; 6508 int lun = ap->a_lun; 6509 union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp; 6510 uchar_t cmd = cdbp->scc_cmd; 6511 char *desc; 6512 6513 if ((desc = aac_cmd_name(cmd, 6514 (struct aac_key_strings *)scsi_cmds)) == NULL) { 6515 aac_printf(softs, CE_NOTE, 6516 "SCMD> Unknown(0x%2x) --> c%dt%dL%d", 6517 cmd, ctl, tgt, lun); 6518 return; 6519 } 6520 6521 switch (cmd) { 6522 case SCMD_READ: 6523 case SCMD_WRITE: 6524 aac_printf(softs, CE_NOTE, 6525 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d", 6526 desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp), 6527 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 6528 ctl, tgt, lun); 6529 break; 6530 case SCMD_READ_G1: 6531 case SCMD_WRITE_G1: 6532 aac_printf(softs, CE_NOTE, 6533 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d", 6534 desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp), 6535 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 6536 ctl, tgt, lun); 6537 break; 6538 case SCMD_READ_G4: 6539 case SCMD_WRITE_G4: 6540 aac_printf(softs, CE_NOTE, 6541 "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d", 6542 desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp), 6543 GETG4COUNT(cdbp), 6544 (acp->flags & AAC_CMD_NO_INTR) ? 
"poll" : "intr", 6545 ctl, tgt, lun); 6546 break; 6547 default: 6548 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d", 6549 desc, ctl, tgt, lun); 6550 } 6551 } 6552 6553 void 6554 aac_print_fib(struct aac_softstate *softs, struct aac_fib *fibp) 6555 { 6556 uint16_t fib_size; 6557 int32_t fib_cmd, sub_cmd; 6558 char *cmdstr, *subcmdstr; 6559 struct aac_Container *pContainer; 6560 6561 fib_cmd = LE_16(fibp->Header.Command); 6562 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds); 6563 sub_cmd = -1; 6564 subcmdstr = NULL; 6565 6566 switch (fib_cmd) { 6567 case ContainerCommand: 6568 pContainer = (struct aac_Container *)fibp->data; 6569 sub_cmd = LE_32(pContainer->Command); 6570 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds); 6571 if (subcmdstr == NULL) 6572 break; 6573 fib_cmd = sub_cmd; 6574 cmdstr = subcmdstr; 6575 sub_cmd = -1; 6576 subcmdstr = NULL; 6577 6578 switch (pContainer->Command) { 6579 case VM_ContainerConfig: 6580 sub_cmd = LE_32(pContainer->CTCommand.command); 6581 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds); 6582 if (subcmdstr == NULL) 6583 break; 6584 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)", 6585 subcmdstr, 6586 LE_32(pContainer->CTCommand.param[0]), 6587 LE_32(pContainer->CTCommand.param[1]), 6588 LE_32(pContainer->CTCommand.param[2])); 6589 return; 6590 case VM_Ioctl: 6591 sub_cmd = LE_32(((int32_t *)(void *)pContainer)[4]); 6592 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds); 6593 break; 6594 } 6595 break; 6596 6597 case ClusterCommand: 6598 sub_cmd = LE_32(((int32_t *)(void *)fibp->data)[0]); 6599 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds); 6600 break; 6601 6602 case AifRequest: 6603 sub_cmd = LE_32(((int32_t *)(void *)fibp->data)[0]); 6604 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds); 6605 break; 6606 6607 default: 6608 break; 6609 } 6610 6611 fib_size = LE_16(fibp->Header.Size); 6612 if (subcmdstr) 6613 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 6614 subcmdstr, fib_size); 6615 else if (cmdstr && sub_cmd == -1) 6616 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 6617 cmdstr, fib_size); 6618 else if (cmdstr) 6619 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d", 6620 cmdstr, sub_cmd, fib_size); 6621 else 6622 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d", 6623 fib_cmd, fib_size); 6624 } 6625 6626 static void 6627 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif) 6628 { 6629 int aif_command; 6630 uint32_t aif_seqnumber; 6631 int aif_en_type; 6632 char *str; 6633 6634 aif_command = LE_32(aif->command); 6635 aif_seqnumber = LE_32(aif->seqNumber); 6636 aif_en_type = LE_32(aif->data.EN.type); 6637 6638 switch (aif_command) { 6639 case AifCmdEventNotify: 6640 str = aac_cmd_name(aif_en_type, aac_aifens); 6641 if (str) 6642 aac_printf(softs, CE_NOTE, "AIF! %s", str); 6643 else 6644 aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)", 6645 aif_en_type); 6646 break; 6647 6648 case AifCmdJobProgress: 6649 switch (LE_32(aif->data.PR[0].status)) { 6650 case AifJobStsSuccess: 6651 str = "success"; break; 6652 case AifJobStsFinished: 6653 str = "finished"; break; 6654 case AifJobStsAborted: 6655 str = "aborted"; break; 6656 case AifJobStsFailed: 6657 str = "failed"; break; 6658 case AifJobStsSuspended: 6659 str = "suspended"; break; 6660 case AifJobStsRunning: 6661 str = "running"; break; 6662 default: 6663 str = "unknown"; break; 6664 } 6665 aac_printf(softs, CE_NOTE, 6666 "AIF! 
JobProgress (%d) - %s (%d, %d)", 6667 aif_seqnumber, str, 6668 LE_32(aif->data.PR[0].currentTick), 6669 LE_32(aif->data.PR[0].finalTick)); 6670 break; 6671 6672 case AifCmdAPIReport: 6673 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)", 6674 aif_seqnumber); 6675 break; 6676 6677 case AifCmdDriverNotify: 6678 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)", 6679 aif_seqnumber); 6680 break; 6681 6682 default: 6683 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)", 6684 aif_command, aif_seqnumber); 6685 break; 6686 } 6687 } 6688 6689 #endif /* DEBUG */ 6690