/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2005-06 Adaptec, Inc.
 * Copyright (c) 2005-06 Adaptec Inc., Achim Leubner
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ksynch.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#include "aac_regs.h"
#include "aac.h"

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

/*
 * For minor nodes created by the SCSA framework, minor numbers are
 * formed by left-shifting the instance by INST_MINOR_SHIFT and ORing in
 * a number less than 64.
 *
 * To support cfgadm, the driver conforms to the SCSA framework by creating
 * devctl/scsi and driver-specific minor nodes in SCSA format, and by
 * calling the scsi_hba_xxx() functions accordingly.
 */
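/*
 * Illustrative example (not from the original source; it assumes
 * INST_MINOR_SHIFT is 6, consistent with the "number less than 64" rule
 * above): for instance 2, the SCSA nodes get minors
 * (2 << 6) | DEVCTL_MINOR and (2 << 6) | SCSI_MINOR, while the driver's
 * own ioctl node gets INST2AAC(2) = (2 << 6) | AAC_MINOR = 160.
 * AAC_SCSA_MINOR() masks off the instance bits, and AAC_IS_SCSA_NODE()
 * then checks whether the remainder is DEVCTL_MINOR or SCSI_MINOR.
 */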

#define AAC_MINOR		32
#define INST2AAC(x)		(((x) << INST_MINOR_SHIFT) | AAC_MINOR)
#define AAC_SCSA_MINOR(x)	((x) & TRAN_MINOR_MASK)
#define AAC_IS_SCSA_NODE(x)	((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)

#define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
#define AAC_DIP2TRAN(dip)	((scsi_hba_tran_t *)ddi_get_driver_private(dip))
#define AAC_DIP2SOFTS(dip)	(AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
#define PKT2AC(pkt)		((struct aac_cmd *)(pkt)->pkt_ha_private)
#define AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \
		if (!(cond)) { \
			int count = (timeout) * 10; \
			while (count) { \
				drv_usecwait(100); \
				if (cond) \
					break; \
				count--; \
			} \
			(timeout) = (count + 9) / 10; \
		} \
	}

#define AAC_SENSE_DATA_DESCR_LEN \
	(sizeof (struct scsi_descr_sense_hdr) + \
	sizeof (struct scsi_information_sense_descr))
#define AAC_ARQ64_LENGTH \
	(sizeof (struct scsi_arq_status) + \
	AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)

/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
#define AAC_GETGXADDR(cmdlen, cdbp) \
	((cmdlen == 6) ? GETG0ADDR(cdbp) : \
	(cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
	((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))

#define AAC_CDB_INQUIRY_CMDDT	0x02
#define AAC_CDB_INQUIRY_EVPD	0x01
#define AAC_VPD_PAGE_CODE	1
#define AAC_VPD_PAGE_LENGTH	3
#define AAC_VPD_PAGE_DATA	4
#define AAC_VPD_ID_CODESET	0
#define AAC_VPD_ID_TYPE		1
#define AAC_VPD_ID_LENGTH	3
#define AAC_VPD_ID_DATA		4

/* Return the size of FIB with data part type data_type */
#define AAC_FIB_SIZEOF(data_type) \
	(sizeof (struct aac_fib_header) + sizeof (data_type))
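/*
 * Usage sketch for AAC_BUSYWAIT() (illustrative only; ready_condition() is a
 * placeholder, not a symbol from this driver):
 *
 *	int timeout = 100;			-- millisecond budget
 *	AAC_BUSYWAIT(ready_condition(softs), timeout);
 *	if (timeout == 0)
 *		-- the condition was not seen true within the budget;
 *		-- otherwise 'timeout' now holds the unused milliseconds,
 *		-- rounded up to a whole millisecond.
 *
 * AAC_FIB_SIZEOF() is used when building sync FIBs, e.g.
 * AAC_FIB_SIZEOF(struct aac_close_command) in aac_shutdown() below.
 */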
/* Return the container size defined in mir */
#define AAC_MIR_SIZE(softs, acc, mir) \
	(((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
	((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))

/* The last entry of aac_cards[] is for unknown cards */
#define AAC_UNKNOWN_CARD \
	(sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
#define CARD_IS_UNKNOWN(i)	(i == AAC_UNKNOWN_CARD)
#define BUF_IS_READ(bp)		((bp)->b_flags & B_READ)
#define AAC_IS_Q_EMPTY(q)	((q)->q_head == NULL)
#define AAC_CMDQ(acp)		(!((acp)->flags & AAC_CMD_SYNC))

#define PCI_MEM_GET32(softs, off) \
	ddi_get32((softs)->pci_mem_handle, \
	(uint32_t *)((softs)->pci_mem_base_vaddr + (off)))
#define PCI_MEM_PUT32(softs, off, val) \
	ddi_put32((softs)->pci_mem_handle, \
	(uint32_t *)((softs)->pci_mem_base_vaddr + (off)), \
	(uint32_t)(val))
#define PCI_MEM_GET16(softs, off) \
	ddi_get16((softs)->pci_mem_handle, \
	(uint16_t *)((softs)->pci_mem_base_vaddr + (off)))
#define PCI_MEM_PUT16(softs, off, val) \
	ddi_put16((softs)->pci_mem_handle, \
	(uint16_t *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
/* Write host data at valp to device mem[off] repeatedly count times */
#define PCI_MEM_REP_PUT8(softs, off, valp, count) \
	ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
	(uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
	count, DDI_DEV_AUTOINCR)
/* Read device data at mem[off] to host addr valp repeatedly count times */
#define PCI_MEM_REP_GET8(softs, off, valp, count) \
	ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
	(uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
	count, DDI_DEV_AUTOINCR)
#define AAC_GET_FIELD8(acc, d, s, field) \
	(d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
#define AAC_GET_FIELD32(acc, d, s, field) \
	(d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
#define AAC_GET_FIELD64(acc, d, s, field) \
	(d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
#define AAC_REP_GET_FIELD8(acc, d, s, field, r) \
	ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
	(uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
#define AAC_REP_GET_FIELD32(acc, d, s, field, r) \
	ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
	(uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)

#define AAC_ENABLE_INTR(softs) { \
		if (softs->flags & AAC_FLAGS_NEW_COMM) \
			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
		else \
			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
	}

#define AAC_DISABLE_INTR(softs)		PCI_MEM_PUT32(softs, AAC_OIMR, ~0)
#define AAC_STATUS_CLR(softs, mask)	PCI_MEM_PUT32(softs, AAC_ODBR, mask)
#define AAC_STATUS_GET(softs)		PCI_MEM_GET32(softs, AAC_ODBR)
#define AAC_NOTIFY(softs, val)		PCI_MEM_PUT32(softs, AAC_IDBR, val)
#define AAC_OUTB_GET(softs)		PCI_MEM_GET32(softs, AAC_OQUE)
#define AAC_OUTB_SET(softs, val)	PCI_MEM_PUT32(softs, AAC_OQUE, val)
#define AAC_FWSTATUS_GET(softs)	\
	((softs)->aac_if.aif_get_fwstatus(softs))
#define AAC_MAILBOX_GET(softs, mb) \
	((softs)->aac_if.aif_get_mailbox((softs), (mb)))
#define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
	((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
	(arg0), (arg1), (arg2), (arg3)))
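/*
 * Note: AAC_FWSTATUS_GET()/AAC_MAILBOX_GET()/AAC_MAILBOX_SET() dispatch
 * through softs->aac_if, which aac_check_card_type() points at either
 * aac_rx_interface (i960 Rx based cards) or aac_rkt_interface (Rocket based
 * cards), so the callers stay independent of the register layout.
 */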

#define AAC_THROTTLE_DRAIN	-1

#define AAC_QUIESCE_TICK	1	/* 1 second */
#define AAC_QUIESCE_TIMEOUT	60	/* 60 seconds */
#define AAC_DEFAULT_TICK	10	/* 10 seconds */
#define AAC_SYNC_TICK		(30*60)	/* 30 minutes */

/* Poll time for aac_do_poll_io() */
#define AAC_POLL_TIME		60	/* 60 seconds */

/*
 * Hardware access functions
 */
static int aac_rx_get_fwstatus(struct aac_softstate *);
static int aac_rx_get_mailbox(struct aac_softstate *, int);
static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
    uint32_t, uint32_t, uint32_t);
static int aac_rkt_get_fwstatus(struct aac_softstate *);
static int aac_rkt_get_mailbox(struct aac_softstate *, int);
static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
    uint32_t, uint32_t, uint32_t);

/*
 * SCSA function prototypes
 */
static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
static int aac_reset(dev_info_t *, ddi_reset_cmd_t);

/*
 * Interrupt handler functions
 */
static uint_t aac_intr_old(caddr_t);
static uint_t aac_intr_new(caddr_t);
static uint_t aac_softintr(caddr_t);

/*
 * Internal functions in attach
 */
static int aac_check_card_type(struct aac_softstate *);
static int aac_check_firmware(struct aac_softstate *);
static int aac_common_attach(struct aac_softstate *);
static void aac_common_detach(struct aac_softstate *);
static int aac_probe_containers(struct aac_softstate *);
static int aac_alloc_comm_space(struct aac_softstate *);
static int aac_setup_comm_space(struct aac_softstate *);
static void aac_free_comm_space(struct aac_softstate *);
static int aac_hba_setup(struct aac_softstate *);

/*
 * Sync FIB operation functions
 */
int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
    uint32_t, uint32_t, uint32_t, uint32_t *);
static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);

/*
 * Command queue operation functions
 */
static void aac_cmd_initq(struct aac_cmd_queue *);
static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);

/*
 * FIB queue operation functions
 */
static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
static int aac_fib_dequeue(struct aac_softstate *, int, int *);

/*
 * Slot operation functions
 */
static int aac_create_slots(struct aac_softstate *);
static void aac_destroy_slots(struct aac_softstate *);
static void aac_alloc_fibs(struct aac_softstate *);
static void aac_destroy_fibs(struct aac_softstate *);
static struct aac_slot *aac_get_slot(struct aac_softstate *);
static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
static void aac_free_fib(struct aac_slot *);

/*
 * Internal functions
 */
static void aac_cmd_fib_header(struct aac_softstate *, struct aac_slot *,
    uint16_t, uint16_t);
static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
static void aac_start_waiting_io(struct aac_softstate *);
static void aac_drain_comp_q(struct aac_softstate *);
int aac_do_io(struct aac_softstate *, struct aac_cmd *);
static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
static int aac_send_command(struct aac_softstate *, struct aac_slot *);
static void aac_cmd_timeout(struct aac_softstate *);
static int aac_dma_sync_ac(struct aac_cmd *);
static int aac_shutdown(struct aac_softstate *);
static int aac_reset_adapter(struct aac_softstate *);
static int aac_do_quiesce(struct aac_softstate *softs);
static int aac_do_unquiesce(struct aac_softstate *softs);
static void aac_unhold_bus(struct aac_softstate *, int);
static void aac_set_throttle(struct aac_softstate *, struct aac_container *,
    int, int);

/*
 * Adapter Initiated FIB handling function
 */
static int aac_handle_aif(struct aac_softstate *, struct aac_fib *);

/*
 * Timeout handling thread function
 */
static void aac_daemon(void *);

/*
 * IOCTL interface related functions
 */
static int aac_open(dev_t *, int, int, cred_t *);
static int aac_close(dev_t, int, int, cred_t *);
static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);

/*
 * FMA Prototypes
 */
static void aac_fm_init(struct aac_softstate *);
static void aac_fm_fini(struct aac_softstate *);
static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
int aac_check_acc_handle(ddi_acc_handle_t);
int aac_check_dma_handle(ddi_dma_handle_t);
void aac_fm_ereport(struct aac_softstate *, char *);

#ifdef DEBUG
/*
 * UART debug output support
 */

#define AAC_PRINT_BUFFER_SIZE	512
#define AAC_PRINT_TIMEOUT	250	/* 1/4 sec. = 250 msec. */

#define AAC_FW_DBG_STRLEN_OFFSET	0x00
#define AAC_FW_DBG_FLAGS_OFFSET		0x04
#define AAC_FW_DBG_BLED_OFFSET		0x08

static int aac_get_fw_debug_buffer(struct aac_softstate *);
static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);

static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
static char aac_fmt[] = " %s";
static char aac_fmt_header[] = " %s.%d: %s";
static kmutex_t aac_prt_mutex;

/*
 * Debug flags to be put into the softstate flags field
 * when initialized
 */
uint32_t aac_debug_flags =
/*    AACDB_FLAGS_KERNEL_PRINT | */
/*    AACDB_FLAGS_FW_PRINT | */
/*    AACDB_FLAGS_MISC | */
/*    AACDB_FLAGS_FUNC1 | */
/*    AACDB_FLAGS_FUNC2 | */
/*    AACDB_FLAGS_SCMD | */
/*    AACDB_FLAGS_AIF | */
/*    AACDB_FLAGS_FIB | */
/*    AACDB_FLAGS_IOCTL | */
0;

#endif /* DEBUG */

static struct cb_ops aac_cb_ops = {
	aac_open,	/* open */
	aac_close,	/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	aac_ioctl,	/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	nochpoll,	/* poll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* streamtab */
	D_64BIT | D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,		/* cb_rev */
	nodev,		/* async I/O read entry point */
	nodev		/* async I/O write entry point */
};

static struct dev_ops aac_dev_ops = {
	DEVO_REV,
	0,
	nodev,
	nulldev,
	nulldev,
	aac_attach,
	aac_detach,
	aac_reset,
	&aac_cb_ops,
	NULL,
	NULL
};

static struct modldrv aac_modldrv = {
	&mod_driverops,
	"AAC Driver " AAC_DRIVER_VERSION,
	&aac_dev_ops,
};

static struct modlinkage aac_modlinkage = {
	MODREV_1,
	&aac_modldrv,
	NULL
};

static struct aac_softstate *aac_softstatep;

/*
 * Supported card list
 * ordered by vendor id, subvendor id, subdevice id, and device id
 */
static struct aac_card_type aac_cards[] = {
	{0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Si"},
	{0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Si"},
	{0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Dell", "PERC 320/DC"},
	{0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},

	{0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
	{0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
	{0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},

	{0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
	{0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},

	{0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Adaptec", "2200S"},
	{0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Adaptec", "2120S"},
	{0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Adaptec", "2200S"},
	{0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
	{0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
	{0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
	{0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
	{0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
	    0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
	{0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
	    0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
	{0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
	{0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
	{0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
	{0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
	{0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
	{0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
	{0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
	{0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
	{0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
	{0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
	{0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
	{0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
	{0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
	{0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
	{0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9024RO"},
	{0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9014RO"},
	{0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9047MA"},
	{0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9087MA"},
	{0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "9085LI"},
	{0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "5085BR"},
	{0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9067MA"},
	{0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
	{0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
	{0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
	{0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
	{0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
	{0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},

	{0, 0, 0, 0, AAC_HWIF_UNKNOWN,
	    0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
};

/*
 * Hardware access functions for i960 based cards
 */
static struct aac_interface aac_rx_interface = {
	aac_rx_get_fwstatus,
	aac_rx_get_mailbox,
	aac_rx_set_mailbox
};

/*
 * Hardware access functions for Rocket based cards
 */
static struct aac_interface aac_rkt_interface = {
	aac_rkt_get_fwstatus,
	aac_rkt_get_mailbox,
	aac_rkt_set_mailbox
};

ddi_device_acc_attr_t aac_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

static struct {
	int size;
	int notify;
} aac_qinfo[] = {
	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
};

/*
 * Default aac dma attributes
 */
static ddi_dma_attr_t aac_dma_attr = {
	DMA_ATTR_V0,
	0,		/* lowest usable address */
	0xffffffffull,	/* high DMA address range */
	0xffffffffull,	/* DMA counter register */
	AAC_DMA_ALIGN,	/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xffffffffull,	/* max DMA xfer size */
	0xffffffffull,	/* segment boundary */
	1,		/* s/g list length */
	AAC_BLK_SIZE,	/* granularity of device */
	DDI_DMA_FLAGERR	/* DMA transfer flags */
};

static int aac_tick = AAC_DEFAULT_TICK;	/* tick for the internal timer */
static uint32_t aac_timebase = 0;	/* internal timer in seconds */
static uint32_t aac_sync_time = 0;	/* next time to sync. with firmware */

/*
 * Warlock directives
 *
 * Different variables with the same types have to be protected by the
 * same mutex; otherwise, warlock will complain with "variables don't
 * seem to be protected consistently". For example,
 * aac_softstate::{q_wait, q_comp} are of type aac_cmd_queue, and are
 * protected by aac_softstate::{io_lock, q_comp_mutex} respectively. We
 * have to declare them as protected explicitly at aac_cmd_dequeue().
 */
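/*
 * Lock usage summary (restating the scheme above): softs->io_lock protects
 * the per-controller wait and busy queues (q_wait[], q_busy) and is held by
 * the hard interrupt handlers; q_comp is protected by q_comp_mutex so that
 * completed commands can be handed to their scsi_pkt callbacks without
 * holding io_lock, and aifq_mutex serializes access to the AIF queue.
 */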
_NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
    scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
    mode_format mode_geometry mode_header aac_cmd))
_NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
    aac_sge))
_NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
    aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
    aac_sg_table aac_srb))
_NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
_NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))

int
_init(void)
{
    int rval = 0;

#ifdef DEBUG
    mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
#endif
    DBCALLED(NULL, 1);

    if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
        sizeof (struct aac_softstate), 0)) != 0)
        goto error;

    if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
        ddi_soft_state_fini((void *)&aac_softstatep);
        goto error;
    }

    if ((rval = mod_install(&aac_modlinkage)) != 0) {
        ddi_soft_state_fini((void *)&aac_softstatep);
        scsi_hba_fini(&aac_modlinkage);
        goto error;
    }
    return (rval);

error:
    AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
#ifdef DEBUG
    mutex_destroy(&aac_prt_mutex);
#endif
    return (rval);
}

int
_info(struct modinfo *modinfop)
{
    DBCALLED(NULL, 1);
    return (mod_info(&aac_modlinkage, modinfop));
}

/*
 * An HBA driver cannot be unloaded without a reboot,
 * so this function is effectively unused.
 */
int
_fini(void)
{
    int rval;

    DBCALLED(NULL, 1);

    if ((rval = mod_remove(&aac_modlinkage)) != 0)
        goto error;

    scsi_hba_fini(&aac_modlinkage);
    ddi_soft_state_fini((void *)&aac_softstatep);
#ifdef DEBUG
    mutex_destroy(&aac_prt_mutex);
#endif
    return (0);

error:
    AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
    return (rval);
}

static int
aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    int instance, i;
    struct aac_softstate *softs = NULL;
    int attach_state = 0;

    DBCALLED(NULL, 1);

    switch (cmd) {
    case DDI_ATTACH:
        break;
    case DDI_RESUME:
        return (DDI_FAILURE);
    default:
        return (DDI_FAILURE);
    }

    instance = ddi_get_instance(dip);

    /* Get soft state */
    if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
        AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
        goto error;
    }
    softs = ddi_get_soft_state(aac_softstatep, instance);
    attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;

    softs->instance = instance;
    softs->devinfo_p = dip;
    softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
    softs->addr_dma_attr.dma_attr_granular = 1;
    softs->card = AAC_UNKNOWN_CARD;
#ifdef DEBUG
    softs->debug_flags = aac_debug_flags;
#endif

    /* Check the card type */
    if (aac_check_card_type(softs) == AACERR) {
        AACDB_PRINT(softs, CE_WARN, "Card not supported");
        goto error;
    }
    /* We have found the right card and everything is OK */
    attach_state |= AAC_ATTACH_CARD_DETECTED;
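    /*
     * attach_state accumulates AAC_ATTACH_* bits as each setup step
     * succeeds; the error path at the bottom of aac_attach() walks these
     * bits to undo only the steps that actually completed.
     */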

    /*
     * Initialize FMA
     */
    softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
        DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
        DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
        DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);

    aac_fm_init(softs);

    /* Map PCI mem space */
    if (ddi_regs_map_setup(dip, 1,
        (caddr_t *)&softs->pci_mem_base_vaddr, 0,
        softs->map_size_min, &aac_acc_attr,
        &softs->pci_mem_handle) != DDI_SUCCESS)
        goto error;

    softs->map_size = softs->map_size_min;
    attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;

    AAC_DISABLE_INTR(softs);

    if (ddi_intr_hilevel(dip, 0)) {
        AACDB_PRINT(softs, CE_WARN,
            "High level interrupt is not supported!");
        goto error;
    }

    /* Init mutexes */
    if (ddi_get_iblock_cookie(dip, 0, &softs->iblock_cookie) !=
        DDI_SUCCESS) {
        AACDB_PRINT(softs, CE_WARN,
            "Can not get interrupt block cookie!");
        goto error;
    }
    mutex_init(&softs->q_comp_mutex, NULL,
        MUTEX_DRIVER, (void *)softs->iblock_cookie);
    cv_init(&softs->event, NULL, CV_DRIVER, NULL);
    mutex_init(&softs->aifq_mutex, NULL,
        MUTEX_DRIVER, (void *)softs->iblock_cookie);
    cv_init(&softs->aifv, NULL, CV_DRIVER, NULL);
    cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
    mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
        (void *)softs->iblock_cookie);
    attach_state |= AAC_ATTACH_KMUTEX_INITED;

    /*
     * Everything has been set up till now,
     * we will do some common attach.
     */
    if (aac_common_attach(softs) == AACERR)
        goto error;
    attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;

    /* Init the cmd queues */
    for (i = 0; i < AAC_CMDQ_NUM; i++)
        aac_cmd_initq(&softs->q_wait[i]);
    aac_cmd_initq(&softs->q_busy);
    aac_cmd_initq(&softs->q_comp);

    if (aac_hba_setup(softs) != AACOK)
        goto error;
    attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;

    /* Connect interrupt handlers */
    if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
        NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
        AACDB_PRINT(softs, CE_WARN,
            "Can not setup soft interrupt handler!");
        goto error;
    }
    attach_state |= AAC_ATTACH_SOFT_INTR_SETUP;

    if (ddi_add_intr(dip, 0, &softs->iblock_cookie,
        (ddi_idevice_cookie_t *)0,
        (softs->flags & AAC_FLAGS_NEW_COMM) ?
        aac_intr_new : aac_intr_old, (caddr_t)softs) != DDI_SUCCESS) {
        AACDB_PRINT(softs, CE_WARN, "Can not setup interrupt handler!");
        goto error;
    }
    attach_state |= AAC_ATTACH_HARD_INTR_SETUP;

    /* Create devctl/scsi nodes for cfgadm */
    if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
        INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
        AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
        goto error;
    }
    attach_state |= AAC_ATTACH_CREATE_DEVCTL;

    if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
        DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
        AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
        goto error;
    }
    attach_state |= AAC_ATTACH_CREATE_SCSI;

    /* Create aac node for app. to issue ioctls */
    if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
        DDI_PSEUDO, 0) != DDI_SUCCESS) {
        AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
        goto error;
    }

    aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
    softs->state = AAC_STATE_RUN;

    /* Create a thread for command timeout */
    softs->timeout_id = timeout(aac_daemon, (void *)softs,
        (60 * drv_usectohz(1000000)));

    /* Common attach is OK, so we are attached! */
    AAC_ENABLE_INTR(softs);
    ddi_report_dev(dip);
    AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
    return (DDI_SUCCESS);

error:
    if (attach_state & AAC_ATTACH_CREATE_SCSI)
        ddi_remove_minor_node(dip, "scsi");
    if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
        ddi_remove_minor_node(dip, "devctl");
    if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
        aac_common_detach(softs);
    if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
        (void) scsi_hba_detach(dip);
        scsi_hba_tran_free(AAC_DIP2TRAN(dip));
    }
    if (attach_state & AAC_ATTACH_HARD_INTR_SETUP)
        ddi_remove_intr(dip, 0, softs->iblock_cookie);
    if (attach_state & AAC_ATTACH_SOFT_INTR_SETUP)
        ddi_remove_softintr(softs->softint_id);
    if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
        mutex_destroy(&softs->q_comp_mutex);
        cv_destroy(&softs->event);
        mutex_destroy(&softs->aifq_mutex);
        cv_destroy(&softs->aifv);
        cv_destroy(&softs->drain_cv);
        mutex_destroy(&softs->io_lock);
    }
    if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
        ddi_regs_map_free(&softs->pci_mem_handle);
    aac_fm_fini(softs);
    if (attach_state & AAC_ATTACH_CARD_DETECTED)
        softs->card = AACERR;
    if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
        ddi_soft_state_free(aac_softstatep, instance);
    return (DDI_FAILURE);
}

static int
aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
    struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);

    DBCALLED(softs, 1);

    switch (cmd) {
    case DDI_DETACH:
        break;
    case DDI_SUSPEND:
        return (DDI_FAILURE);
    default:
        return (DDI_FAILURE);
    }

    mutex_enter(&softs->io_lock);
    AAC_DISABLE_INTR(softs);
    softs->state = AAC_STATE_STOPPED;

    mutex_exit(&softs->io_lock);
    (void) untimeout(softs->timeout_id);
    mutex_enter(&softs->io_lock);
    softs->timeout_id = 0;

    ddi_remove_minor_node(dip, "aac");
    ddi_remove_minor_node(dip, "scsi");
    ddi_remove_minor_node(dip, "devctl");

    mutex_exit(&softs->io_lock);
    ddi_remove_intr(dip, 0, softs->iblock_cookie);
    ddi_remove_softintr(softs->softint_id);

    aac_common_detach(softs);

    (void) scsi_hba_detach(dip);
    scsi_hba_tran_free(tran);

    mutex_destroy(&softs->q_comp_mutex);
    cv_destroy(&softs->event);
    mutex_destroy(&softs->aifq_mutex);
    cv_destroy(&softs->aifv);
    cv_destroy(&softs->drain_cv);
    mutex_destroy(&softs->io_lock);

    ddi_regs_map_free(&softs->pci_mem_handle);
    aac_fm_fini(softs);
    softs->hwif = AAC_HWIF_UNKNOWN;
    softs->card = AAC_UNKNOWN_CARD;
    ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));

    return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
    struct aac_softstate *softs = AAC_DIP2SOFTS(dip);

    DBCALLED(softs, 1);

    mutex_enter(&softs->io_lock);
    (void) aac_shutdown(softs);
    mutex_exit(&softs->io_lock);

    return (DDI_SUCCESS);
}
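/*
 * Note: aac_reset() above is the devo_reset entry in aac_dev_ops; the
 * framework invokes it at system shutdown/reboot, and it simply flushes the
 * controller cache through aac_shutdown() below while holding io_lock.
 */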

/*
 * Bring the controller down to a dormant state and detach all child devices.
 * This function is called before detach or system shutdown.
 * Note: we can assume that the q_wait on the controller is empty, as we
 * won't allow shutdown if any device is open.
 */
static int
aac_shutdown(struct aac_softstate *softs)
{
    ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
    struct aac_close_command *cc = (struct aac_close_command *) \
        &softs->sync_slot.fibp->data[0];
    int rval;

    ddi_put32(acc, &cc->Command, VM_CloseAll);
    ddi_put32(acc, &cc->ContainerId, 0xfffffffful);

    /* Flush all caches, set FW to write through mode */
    rval = aac_sync_fib(softs, ContainerCommand,
        AAC_FIB_SIZEOF(struct aac_close_command));

    AACDB_PRINT(softs, CE_NOTE,
        "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
    return (rval);
}

static uint_t
aac_softintr(caddr_t arg)
{
    struct aac_softstate *softs = (struct aac_softstate *)arg;

    if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
        aac_drain_comp_q(softs);
        return (DDI_INTR_CLAIMED);
    } else {
        return (DDI_INTR_UNCLAIMED);
    }
}

/*
 * Setup auto sense data for pkt
 */
static void
aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
    uchar_t add_code, uchar_t qual_code, uint64_t info)
{
    struct scsi_arq_status *arqstat;

    pkt->pkt_state |= STATE_GOT_STATUS | STATE_ARQ_DONE;

    arqstat = (struct scsi_arq_status *)(pkt->pkt_scbp);
    arqstat->sts_status.sts_chk = 1; /* CHECK CONDITION */
    arqstat->sts_rqpkt_reason = CMD_CMPLT;
    arqstat->sts_rqpkt_resid = 0;
    arqstat->sts_rqpkt_state =
        STATE_GOT_BUS |
        STATE_GOT_TARGET |
        STATE_SENT_CMD |
        STATE_XFERRED_DATA;
    arqstat->sts_rqpkt_statistics = 0;

    if (info <= 0xfffffffful) {
        arqstat->sts_sensedata.es_valid = 1;
        arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
        arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
        arqstat->sts_sensedata.es_key = key;
        arqstat->sts_sensedata.es_add_code = add_code;
        arqstat->sts_sensedata.es_qual_code = qual_code;

        arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
        arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
        arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF;
        arqstat->sts_sensedata.es_info_4 = info & 0xFF;
    } else { /* 64-bit LBA */
        struct scsi_descr_sense_hdr *dsp;
        struct scsi_information_sense_descr *isd;

        dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
        dsp->ds_class = CLASS_EXTENDED_SENSE;
        dsp->ds_code = CODE_FMT_DESCR_CURRENT;
        dsp->ds_key = key;
        dsp->ds_add_code = add_code;
        dsp->ds_qual_code = qual_code;
        dsp->ds_addl_sense_length =
            sizeof (struct scsi_information_sense_descr);

        isd = (struct scsi_information_sense_descr *)(dsp+1);
        isd->isd_descr_type = DESCR_INFORMATION;
        isd->isd_valid = 1;
        isd->isd_information[0] = (info >> 56) & 0xFF;
        isd->isd_information[1] = (info >> 48) & 0xFF;
        isd->isd_information[2] = (info >> 40) & 0xFF;
        isd->isd_information[3] = (info >> 32) & 0xFF;
        isd->isd_information[4] = (info >> 24) & 0xFF;
        isd->isd_information[5] = (info >> 16) & 0xFF;
        isd->isd_information[6] = (info >> 8) & 0xFF;
        isd->isd_information[7] = (info) & 0xFF;
    }
}

/*
 * Setup auto sense data for HARDWARE ERROR
 */
static void
aac_set_arq_data_hwerr(struct aac_cmd *acp)
{
    union scsi_cdb *cdbp;
    uint64_t err_blkno;

    cdbp = (union scsi_cdb *)acp->pkt->pkt_cdbp;
    err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
    aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
}
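/*
 * Note on sense formats: aac_set_arq_data() above builds fixed-format sense
 * when the error LBA fits in 32 bits and descriptor-format sense (header plus
 * an INFORMATION descriptor) for 64-bit LBAs, which is why AAC_ARQ64_LENGTH
 * and the larger softs->slen are needed when AAC_FLAGS_LBA_64BIT is set.
 */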

/*
 * Setup auto sense data for UNIT ATTENTION
 */
/*ARGSUSED*/
static void
aac_set_arq_data_reset(struct aac_softstate *softs, struct aac_cmd *acp)
{
    struct aac_container *dvp = acp->dvp;

    if (dvp->reset) {
        dvp->reset = 0;
        aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 0x29, 0x02, 0);
    }
}

/*
 * Send a command to the adapter in New Comm. interface
 */
static int
aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
{
    uint32_t index, device;

    index = PCI_MEM_GET32(softs, AAC_IQUE);
    if (index == 0xffffffffUL) {
        index = PCI_MEM_GET32(softs, AAC_IQUE);
        if (index == 0xffffffffUL)
            return (AACERR);
    }

    device = index;
    PCI_MEM_PUT32(softs, device,
        (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
    device += 4;
    PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
    device += 4;
    PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
    PCI_MEM_PUT32(softs, AAC_IQUE, index);
    return (AACOK);
}

static void
aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
{
    struct aac_container *dvp = acp->dvp;
    int q = AAC_CMDQ(acp);

    if (acp->slotp) { /* outstanding cmd */
        aac_release_slot(softs, acp->slotp);
        acp->slotp = NULL;
        if (dvp) {
            dvp->ncmds[q]--;
            if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
                dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
                aac_set_throttle(softs, dvp, q,
                    softs->total_slots);
        }
        softs->bus_ncmds[q]--;
        (void) aac_cmd_delete(&softs->q_busy, acp);
    } else { /* cmd in waiting queue */
        aac_cmd_delete(&softs->q_wait[q], acp);
    }

    if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
        mutex_enter(&softs->q_comp_mutex);
        aac_cmd_enqueue(&softs->q_comp, acp);
        mutex_exit(&softs->q_comp_mutex);
    } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
        cv_broadcast(&softs->event);
    }
}

static void
aac_handle_io(struct aac_softstate *softs, int index)
{
    struct aac_slot *slotp;
    struct aac_cmd *acp;
    uint32_t fast;

    fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
    index >>= 2;

    /* Make sure firmware reported index is valid */
    ASSERT(index >= 0 && index < softs->total_slots);
    slotp = &softs->io_slot[index];
    ASSERT(slotp->index == index);
    acp = slotp->acp;
    ASSERT(acp != NULL && acp->slotp == slotp);

    acp->flags |= AAC_CMD_CMPLT;
    (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);

    if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
        /*
         * For fast response IO, the firmware does not return any FIB
         * data, so we need to fill in the FIB status and state so
         * that FIB users can handle it correctly.
         */
        if (fast) {
            uint32_t state;

            state = ddi_get32(slotp->fib_acc_handle,
                &slotp->fibp->Header.XferState);
            /*
             * Update state for CPU not for device, no DMA sync
             * needed
             */
            ddi_put32(slotp->fib_acc_handle,
                &slotp->fibp->Header.XferState,
                state | AAC_FIBSTATE_DONEADAP);
            ddi_put32(slotp->fib_acc_handle,
                (uint32_t *)&slotp->fibp->data[0], ST_OK);
        }

        /* Handle completed ac */
        acp->ac_comp(softs, acp);
    } else {
        ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
        acp->flags |= AAC_CMD_ERR;
        if (acp->pkt) {
            acp->pkt->pkt_reason = CMD_TRAN_ERR;
            acp->pkt->pkt_statistics = 0;
        }
    }
    aac_end_io(softs, acp);
}

/*
 * Interrupt handler for New Comm. interface
 * The New Comm. interface uses a different mechanism for interrupts: there
 * are no explicit message queues, and the driver only needs to access the
 * mapped PCI mem space to find the completed FIB or AIF.
 */
static int
aac_process_intr_new(struct aac_softstate *softs)
{
    uint32_t index;

    index = AAC_OUTB_GET(softs);
    if (index == 0xfffffffful)
        index = AAC_OUTB_GET(softs);
    if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
        return (DDI_INTR_UNCLAIMED);
    }
    if (index != 0xfffffffful) {
        do {
            if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
                aac_handle_io(softs, index);
            } else if (index != 0xfffffffeul) {
                struct aac_fib *fibp;	/* FIB in AIF queue */
                uint16_t fib_size, fib_size0;

                /*
                 * 0xfffffffe means that the controller wants
                 * more work, ignore it for now. Otherwise,
                 * AIF received.
                 */
                index &= ~2;

                mutex_enter(&softs->aifq_mutex);
                /*
                 * Copy AIF from adapter to the empty AIF slot
                 */
                fibp = &softs->aifq[softs->aifq_idx].d;
                fib_size0 = PCI_MEM_GET16(softs, index + \
                    offsetof(struct aac_fib, Header.Size));
                fib_size = (fib_size0 > AAC_FIB_SIZE) ?
                    AAC_FIB_SIZE : fib_size0;
                PCI_MEM_REP_GET8(softs, index, fibp,
                    fib_size);

                if (aac_check_acc_handle(softs-> \
                    pci_mem_handle) == DDI_SUCCESS)
                    (void) aac_handle_aif(softs, fibp);
                else
                    ddi_fm_service_impact(softs->devinfo_p,
                        DDI_SERVICE_UNAFFECTED);
                mutex_exit(&softs->aifq_mutex);

                /*
                 * AIF memory is owned by the adapter, so let it
                 * know that we are done with it.
                 */
                AAC_OUTB_SET(softs, index);
                AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
            }

            index = AAC_OUTB_GET(softs);
        } while (index != 0xfffffffful);

        /*
         * Process waiting cmds before start new ones to
         * ensure first IOs are serviced first.
         */
        aac_start_waiting_io(softs);
        return (AAC_DB_COMMAND_READY);
    } else {
        return (0);
    }
}

static uint_t
aac_intr_new(caddr_t arg)
{
    struct aac_softstate *softs = (struct aac_softstate *)arg;
    uint_t rval;

    mutex_enter(&softs->io_lock);
    if (aac_process_intr_new(softs))
        rval = DDI_INTR_CLAIMED;
    else
        rval = DDI_INTR_UNCLAIMED;
    mutex_exit(&softs->io_lock);

    aac_drain_comp_q(softs);
    return (rval);
}
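/*
 * Completion flow common to both interrupt handlers: completed commands are
 * collected under io_lock and moved to q_comp (protected by q_comp_mutex) by
 * aac_end_io(); the scsi_pkt completion callbacks are then run by
 * aac_drain_comp_q(), either right after the hard interrupt handler drops
 * io_lock or from aac_softintr(), so target callbacks never run under
 * io_lock.
 */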

/*
 * Interrupt handler for the old interface
 * Explicit message queues are used to send FIBs to and get completed FIBs
 * from the adapter. Driver and adapter maintain the queues in a
 * producer/consumer manner. The driver has to query the queues to find the
 * completed FIB.
 */
static int
aac_process_intr_old(struct aac_softstate *softs)
{
    uint16_t status;

    status = AAC_STATUS_GET(softs);
    if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
        return (DDI_INTR_UNCLAIMED);
    }
    if (status & AAC_DB_RESPONSE_READY) {
        int slot_idx;

        /* ACK the intr */
        AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
        (void) AAC_STATUS_GET(softs);
        while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
            &slot_idx) == AACOK)
            aac_handle_io(softs, slot_idx);

        /*
         * Process waiting cmds before start new ones to
         * ensure first IOs are serviced first.
         */
        aac_start_waiting_io(softs);
        return (AAC_DB_RESPONSE_READY);
    } else if (status & AAC_DB_COMMAND_READY) {
        int aif_idx;

        AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
        (void) AAC_STATUS_GET(softs);
        if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
            AACOK) {
            ddi_acc_handle_t acc = softs->comm_space_acc_handle;
            struct aac_fib *fibp;	/* FIB in AIF queue */
            struct aac_fib *fibp0;	/* FIB in communication space */
            uint16_t fib_size, fib_size0;
            uint32_t fib_xfer_state;
            uint32_t addr, size;

            ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));

#define AAC_SYNC_AIF(softs, aif_idx, type) \
    { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
        offsetof(struct aac_comm_space, \
        adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
        (type)); }

            mutex_enter(&softs->aifq_mutex);
            /* Copy AIF from adapter to the empty AIF slot */
            fibp = &softs->aifq[softs->aifq_idx].d;
            AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
            fibp0 = &softs->comm_space->adapter_fibs[aif_idx];
            fib_size0 = ddi_get16(acc, &fibp0->Header.Size);
            fib_size = (fib_size0 > AAC_FIB_SIZE) ?
                AAC_FIB_SIZE : fib_size0;
            ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0,
                fib_size, DDI_DEV_AUTOINCR);

            (void) aac_handle_aif(softs, fibp);
            mutex_exit(&softs->aifq_mutex);

            /* Complete AIF back to adapter with good status */
            fib_xfer_state = LE_32(fibp->Header.XferState);
            if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
                ddi_put32(acc, &fibp0->Header.XferState,
                    fib_xfer_state | AAC_FIBSTATE_DONEHOST);
                ddi_put32(acc, (uint32_t *)&fibp0->data[0],
                    ST_OK);
                if (fib_size0 > AAC_FIB_SIZE)
                    ddi_put16(acc, &fibp0->Header.Size,
                        AAC_FIB_SIZE);
                AAC_SYNC_AIF(softs, aif_idx,
                    DDI_DMA_SYNC_FORDEV);
            }

            /* Put the AIF response on the response queue */
            addr = ddi_get32(acc,
                &softs->comm_space->adapter_fibs[aif_idx]. \
                Header.SenderFibAddress);
            size = (uint32_t)ddi_get16(acc,
                &softs->comm_space->adapter_fibs[aif_idx]. \
                Header.Size);
            ddi_put32(acc,
                &softs->comm_space->adapter_fibs[aif_idx]. \
                Header.ReceiverFibAddress, addr);
            if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
                addr, size) == AACERR)
                cmn_err(CE_NOTE, "!AIF ack failed");
        }
        return (AAC_DB_COMMAND_READY);
    } else if (status & AAC_DB_PRINTF_READY) {
        /* ACK the intr */
        AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
        (void) AAC_STATUS_GET(softs);
        (void) ddi_dma_sync(softs->comm_space_dma_handle,
            offsetof(struct aac_comm_space, adapter_print_buf),
            AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
        if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
            DDI_SUCCESS)
            cmn_err(CE_NOTE, "MSG From Adapter: %s",
                softs->comm_space->adapter_print_buf);
        else
            ddi_fm_service_impact(softs->devinfo_p,
                DDI_SERVICE_UNAFFECTED);
        AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
        return (AAC_DB_PRINTF_READY);
    } else if (status & AAC_DB_COMMAND_NOT_FULL) {
        /*
         * Without these two condition statements, the OS could hang
         * after a while, especially if there are a lot of AIF's to
         * handle, for instance if a drive is pulled from an array
         * under heavy load.
         */
        AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
        return (AAC_DB_COMMAND_NOT_FULL);
    } else if (status & AAC_DB_RESPONSE_NOT_FULL) {
        AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
        AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
        return (AAC_DB_RESPONSE_NOT_FULL);
    } else {
        return (0);
    }
}

static uint_t
aac_intr_old(caddr_t arg)
{
    struct aac_softstate *softs = (struct aac_softstate *)arg;
    int rval;

    mutex_enter(&softs->io_lock);
    if (aac_process_intr_old(softs))
        rval = DDI_INTR_CLAIMED;
    else
        rval = DDI_INTR_UNCLAIMED;
    mutex_exit(&softs->io_lock);

    aac_drain_comp_q(softs);
    return (rval);
}

/*
 * Set pkt_reason and OR in pkt_statistics flag
 */
static void
aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
    uchar_t reason, uint_t stat)
{
#ifndef __lock_lint
    _NOTE(ARGUNUSED(softs))
#endif
    AACDB_PRINT(softs, CE_NOTE, "acp=0x%p, reason=%x, stat=%x",
        (void *)acp, reason, stat);
    if (acp->pkt->pkt_reason == CMD_CMPLT)
        acp->pkt->pkt_reason = reason;
    acp->pkt->pkt_statistics |= stat;
}

/*
 * Handle a finished pkt of soft SCMD
 */
static void
aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
{
    ASSERT(acp->pkt);

    acp->flags |= AAC_CMD_CMPLT;

    acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
        STATE_SENT_CMD;
    if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
        acp->pkt->pkt_resid = 0;

    /* AAC_CMD_NO_INTR means no complete callback */
    if (!(acp->flags & AAC_CMD_NO_INTR)) {
        mutex_enter(&softs->q_comp_mutex);
        aac_cmd_enqueue(&softs->q_comp, acp);
        mutex_exit(&softs->q_comp_mutex);
        ddi_trigger_softintr(softs->softint_id);
    }
}

/*
 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
 */
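/*
 * These completion handlers are reached through acp->ac_comp, called from
 * aac_handle_io() after the slot FIB has been synced for the CPU; each one
 * decodes the response status from slotp->fibp->data.
 */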

/*
 * Handle completed logical device IO command
 */
/*ARGSUSED*/
static void
aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
    struct aac_slot *slotp = acp->slotp;
    struct aac_blockread_response *resp;
    uint32_t status;

    ASSERT(!(acp->flags & AAC_CMD_SYNC));
    ASSERT(!(acp->flags & AAC_CMD_NO_CB));

    /*
     * block_read/write have a similar response header; use the blockread
     * response for both.
     */
    resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
    status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
    if (status == ST_OK) {
        acp->pkt->pkt_resid = 0;
        acp->pkt->pkt_state |= STATE_XFERRED_DATA;
    } else {
        aac_set_arq_data_hwerr(acp);
    }
}

/*
 * Handle completed IOCTL command
 */
/*ARGSUSED*/
void
aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
    struct aac_slot *slotp = acp->slotp;

    /*
     * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
     * may wait on softs->event, so use cv_broadcast() instead
     * of cv_signal().
     */
    ASSERT(acp->flags & AAC_CMD_SYNC);
    ASSERT(acp->flags & AAC_CMD_NO_CB);

    /* Get the size of the response FIB from its FIB.Header.Size field */
    acp->fib_size = ddi_get16(slotp->fib_acc_handle,
        &slotp->fibp->Header.Size);

    ASSERT(acp->fib_size <= softs->aac_max_fib_size);
    ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
        (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
}

/*
 * Handle completed Flush command
 */
/*ARGSUSED*/
static void
aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
    struct aac_slot *slotp = acp->slotp;
    ddi_acc_handle_t acc = slotp->fib_acc_handle;
    struct aac_synchronize_reply *resp;
    uint32_t status;

    ASSERT(!(acp->flags & AAC_CMD_SYNC));

    resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
    status = ddi_get32(acc, &resp->Status);
    if (status != CT_OK)
        aac_set_arq_data_hwerr(acp);
}

/*
 * Access PCI space to see if the driver can support the card
 */
static int
aac_check_card_type(struct aac_softstate *softs)
{
    ddi_acc_handle_t pci_config_handle;
    int card_index;
    uint32_t pci_cmd;

    /* Map pci configuration space */
    if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
        DDI_SUCCESS) {
        AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
        return (AACERR);
    }

    softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
    softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
    softs->subvendid = pci_config_get16(pci_config_handle,
        PCI_CONF_SUBVENID);
    softs->subsysid = pci_config_get16(pci_config_handle,
        PCI_CONF_SUBSYSID);

    card_index = 0;
    while (!CARD_IS_UNKNOWN(card_index)) {
        if ((aac_cards[card_index].vendor == softs->vendid) &&
            (aac_cards[card_index].device == softs->devid) &&
            (aac_cards[card_index].subvendor == softs->subvendid) &&
            (aac_cards[card_index].subsys == softs->subsysid)) {
            break;
        }
        card_index++;
    }

    softs->card = card_index;
    softs->hwif = aac_cards[card_index].hwif;

    /*
     * Unknown aac card
     * do a generic match based on the VendorID and DeviceID to
     * support the new cards in the aac family
     */
    if (CARD_IS_UNKNOWN(card_index)) {
        if (softs->vendid != 0x9005) {
            AACDB_PRINT(softs, CE_WARN,
                "Unknown vendor 0x%x", softs->vendid);
            goto error;
        }
        switch (softs->devid) {
        case 0x285:
            softs->hwif = AAC_HWIF_I960RX;
            break;
        case 0x286:
            softs->hwif = AAC_HWIF_RKT;
            break;
        default:
            AACDB_PRINT(softs, CE_WARN,
                "Unknown device \"pci9005,%x\"", softs->devid);
"Unknown device \"pci9005,%x\"", softs->devid); 1621 goto error; 1622 } 1623 } 1624 1625 /* Set hardware dependent interface */ 1626 switch (softs->hwif) { 1627 case AAC_HWIF_I960RX: 1628 softs->aac_if = aac_rx_interface; 1629 softs->map_size_min = AAC_MAP_SIZE_MIN_RX; 1630 break; 1631 case AAC_HWIF_RKT: 1632 softs->aac_if = aac_rkt_interface; 1633 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT; 1634 break; 1635 default: 1636 AACDB_PRINT(softs, CE_WARN, 1637 "Unknown hardware interface %d", softs->hwif); 1638 goto error; 1639 } 1640 1641 /* Set card names */ 1642 (void *)strncpy(softs->vendor_name, aac_cards[card_index].vid, 1643 AAC_VENDOR_LEN); 1644 (void *)strncpy(softs->product_name, aac_cards[card_index].desc, 1645 AAC_PRODUCT_LEN); 1646 1647 /* Set up quirks */ 1648 softs->flags = aac_cards[card_index].quirks; 1649 1650 /* Force the busmaster enable bit on */ 1651 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 1652 if ((pci_cmd & PCI_COMM_ME) == 0) { 1653 pci_cmd |= PCI_COMM_ME; 1654 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd); 1655 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 1656 if ((pci_cmd & PCI_COMM_ME) == 0) { 1657 cmn_err(CE_CONT, "?Cannot enable busmaster bit"); 1658 goto error; 1659 } 1660 } 1661 1662 /* Set memory base to map */ 1663 softs->pci_mem_base_paddr = 0xfffffff0UL & \ 1664 pci_config_get32(pci_config_handle, PCI_CONF_BASE0); 1665 1666 pci_config_teardown(&pci_config_handle); 1667 1668 return (AACOK); /* card type detected */ 1669 error: 1670 pci_config_teardown(&pci_config_handle); 1671 return (AACERR); /* no matched card found */ 1672 } 1673 1674 /* 1675 * Check the firmware to determine the features to support and the FIB 1676 * parameters to use. 1677 */ 1678 static int 1679 aac_check_firmware(struct aac_softstate *softs) 1680 { 1681 uint32_t options; 1682 uint32_t atu_size; 1683 ddi_acc_handle_t pci_handle; 1684 char *pci_mbr; 1685 uint32_t max_fibs; 1686 uint32_t max_fib_size; 1687 uint32_t sg_tablesize; 1688 uint32_t max_sectors; 1689 uint32_t status; 1690 1691 /* Get supported options */ 1692 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0, 1693 &status)) != AACOK) { 1694 if (status != SRB_STATUS_INVALID_REQUEST) { 1695 cmn_err(CE_CONT, 1696 "?Fatal error: request adapter info error"); 1697 return (AACERR); 1698 } 1699 options = 0; 1700 atu_size = 0; 1701 } else { 1702 options = AAC_MAILBOX_GET(softs, 1); 1703 atu_size = AAC_MAILBOX_GET(softs, 2); 1704 } 1705 1706 if (softs->state & AAC_STATE_RESET) { 1707 if ((softs->support_opt == options) && 1708 (softs->atu_size == atu_size)) 1709 return (AACOK); 1710 1711 cmn_err(CE_WARN, 1712 "?Fatal error: firmware changed, system needs reboot"); 1713 return (AACERR); 1714 } 1715 1716 /* 1717 * The following critical settings are initialized only once during 1718 * driver attachment. 1719 */ 1720 softs->support_opt = options; 1721 softs->atu_size = atu_size; 1722 1723 /* Process supported options */ 1724 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 1725 (softs->flags & AAC_FLAGS_NO4GB) == 0) { 1726 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window"); 1727 softs->flags |= AAC_FLAGS_4GB_WINDOW; 1728 } else { 1729 /* 1730 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space 1731 * only. IO is handled by the DMA engine which does not suffer 1732 * from the ATU window programming workarounds necessary for 1733 * CPU copy operations. 
1734 */ 1735 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull; 1736 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull; 1737 } 1738 1739 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) { 1740 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address"); 1741 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 1742 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull; 1743 softs->flags |= AAC_FLAGS_SG_64BIT; 1744 } 1745 1746 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) { 1747 softs->flags |= AAC_FLAGS_ARRAY_64BIT; 1748 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size"); 1749 } 1750 1751 /* Read preferred settings */ 1752 max_fib_size = 0; 1753 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF, 1754 0, 0, 0, 0, NULL)) == AACOK) { 1755 options = AAC_MAILBOX_GET(softs, 1); 1756 max_fib_size = (options & 0xffff); 1757 max_sectors = (options >> 16) << 1; 1758 options = AAC_MAILBOX_GET(softs, 2); 1759 sg_tablesize = (options >> 16); 1760 options = AAC_MAILBOX_GET(softs, 3); 1761 max_fibs = (options & 0xffff); 1762 } 1763 1764 /* Enable new comm. and rawio at the same time */ 1765 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) && 1766 (max_fib_size != 0)) { 1767 if ((atu_size > softs->map_size) && 1768 (ddi_regs_map_setup(softs->devinfo_p, 1, 1769 (caddr_t *)&pci_mbr, 0, atu_size, &aac_acc_attr, 1770 &pci_handle) == DDI_SUCCESS)) { 1771 ddi_regs_map_free(&softs->pci_mem_handle); 1772 softs->pci_mem_handle = pci_handle; 1773 softs->pci_mem_base_vaddr = pci_mbr; 1774 softs->map_size = atu_size; 1775 } 1776 if (atu_size == softs->map_size) { 1777 softs->flags |= AAC_FLAGS_NEW_COMM; 1778 AACDB_PRINT(softs, CE_NOTE, 1779 "!Enable New Comm. interface"); 1780 } 1781 } 1782 1783 /* Set FIB parameters */ 1784 if (softs->flags & AAC_FLAGS_NEW_COMM) { 1785 softs->aac_max_fibs = max_fibs; 1786 softs->aac_max_fib_size = max_fib_size; 1787 softs->aac_max_sectors = max_sectors; 1788 softs->aac_sg_tablesize = sg_tablesize; 1789 1790 softs->flags |= AAC_FLAGS_RAW_IO; 1791 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO"); 1792 } else { 1793 softs->aac_max_fibs = 1794 (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512; 1795 softs->aac_max_fib_size = AAC_FIB_SIZE; 1796 softs->aac_max_sectors = 128; /* 64K */ 1797 if (softs->flags & AAC_FLAGS_17SG) 1798 softs->aac_sg_tablesize = 17; 1799 else if (softs->flags & AAC_FLAGS_34SG) 1800 softs->aac_sg_tablesize = 34; 1801 else if (softs->flags & AAC_FLAGS_SG_64BIT) 1802 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 1803 sizeof (struct aac_blockwrite64) + 1804 sizeof (struct aac_sg_entry64)) / 1805 sizeof (struct aac_sg_entry64); 1806 else 1807 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 1808 sizeof (struct aac_blockwrite) + 1809 sizeof (struct aac_sg_entry)) / 1810 sizeof (struct aac_sg_entry); 1811 } 1812 1813 if ((softs->flags & AAC_FLAGS_RAW_IO) && 1814 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) { 1815 softs->flags |= AAC_FLAGS_LBA_64BIT; 1816 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array"); 1817 } 1818 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize; 1819 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9; 1820 /* 1821 * 64K maximum segment size in scatter gather list is controlled by 1822 * the NEW_COMM bit in the adapter information. If not set, the card 1823 * can only accept a maximum of 64K. It is not recommended to permit 1824 * more than 128KB of total transfer size to the adapters because 1825 * performance is negatively impacted. 1826 * 1827 * For new comm, segment size equals max xfer size. 
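 * (Worked example of the arithmetic below, using only values already
 * set in this function: with aac_max_sectors = 128 the maximum
 * transfer is 128 << 9 = 65536 bytes, and dma_attr_count_max becomes
 * 65536 - 1 = 0xffff, i.e. no single scatter/gather segment may
 * exceed 64K.)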
For old comm, 1828 * we use 64K for both. 1829 */ 1830 softs->buf_dma_attr.dma_attr_count_max = 1831 softs->buf_dma_attr.dma_attr_maxxfer - 1; 1832 1833 /* Setup FIB operations for logical devices */ 1834 if (softs->flags & AAC_FLAGS_RAW_IO) 1835 softs->aac_cmd_fib = aac_cmd_fib_rawio; 1836 else if (softs->flags & AAC_FLAGS_SG_64BIT) 1837 softs->aac_cmd_fib = aac_cmd_fib_brw64; 1838 else 1839 softs->aac_cmd_fib = aac_cmd_fib_brw; 1840 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \ 1841 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32; 1842 1843 /* 64-bit LBA needs descriptor format sense data */ 1844 softs->slen = sizeof (struct scsi_arq_status); 1845 if ((softs->flags & AAC_FLAGS_LBA_64BIT) && 1846 softs->slen < AAC_ARQ64_LENGTH) 1847 softs->slen = AAC_ARQ64_LENGTH; 1848 1849 AACDB_PRINT(softs, CE_NOTE, 1850 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d", 1851 softs->aac_max_fibs, softs->aac_max_fib_size, 1852 softs->aac_max_sectors, softs->aac_sg_tablesize); 1853 1854 return (AACOK); 1855 } 1856 1857 static void 1858 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0, 1859 struct FsaRev *fsarev1) 1860 { 1861 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 1862 1863 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash); 1864 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type); 1865 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor); 1866 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major); 1867 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber); 1868 } 1869 1870 /* 1871 * The following function comes from Adaptec: 1872 * 1873 * Query adapter information and supplement adapter information 1874 */ 1875 static int 1876 aac_get_adapter_info(struct aac_softstate *softs, 1877 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr) 1878 { 1879 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 1880 struct aac_fib *fibp = softs->sync_slot.fibp; 1881 struct aac_adapter_info *ainfp; 1882 struct aac_supplement_adapter_info *sinfp; 1883 1884 ddi_put8(acc, &fibp->data[0], 0); 1885 if (aac_sync_fib(softs, RequestAdapterInfo, 1886 sizeof (struct aac_fib_header)) != AACOK) { 1887 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed"); 1888 return (AACERR); 1889 } 1890 ainfp = (struct aac_adapter_info *)fibp->data; 1891 if (ainfr) { 1892 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 1893 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase); 1894 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture); 1895 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant); 1896 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed); 1897 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem); 1898 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem); 1899 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem); 1900 aac_fsa_rev(softs, &ainfp->KernelRevision, 1901 &ainfr->KernelRevision); 1902 aac_fsa_rev(softs, &ainfp->MonitorRevision, 1903 &ainfr->MonitorRevision); 1904 aac_fsa_rev(softs, &ainfp->HardwareRevision, 1905 &ainfr->HardwareRevision); 1906 aac_fsa_rev(softs, &ainfp->BIOSRevision, 1907 &ainfr->BIOSRevision); 1908 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled); 1909 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask); 1910 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber); 1911 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform); 1912 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 1913 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant); 1914 } 1915 if (sinfr) { 1916 if (!(softs->support_opt & 1917 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) { 1918 
AACDB_PRINT(softs, CE_WARN, 1919 "SupplementAdapterInfo not supported"); 1920 return (AACERR); 1921 } 1922 ddi_put8(acc, &fibp->data[0], 0); 1923 if (aac_sync_fib(softs, RequestSupplementAdapterInfo, 1924 sizeof (struct aac_fib_header)) != AACOK) { 1925 AACDB_PRINT(softs, CE_WARN, 1926 "RequestSupplementAdapterInfo failed"); 1927 return (AACERR); 1928 } 1929 sinfp = (struct aac_supplement_adapter_info *)fibp->data; 1930 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1); 1931 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2); 1932 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize); 1933 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId); 1934 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts); 1935 AAC_GET_FIELD32(acc, sinfr, sinfp, Version); 1936 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits); 1937 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber); 1938 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3); 1939 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12); 1940 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts); 1941 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo, 1942 sizeof (struct vpd_info)); 1943 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision, 1944 &sinfr->FlashFirmwareRevision); 1945 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions); 1946 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision, 1947 &sinfr->FlashFirmwareBootRevision); 1948 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo, 1949 MFG_PCBA_SERIAL_NUMBER_WIDTH); 1950 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0], 1951 MFG_WWN_WIDTH); 1952 AAC_REP_GET_FIELD32(acc, sinfr, sinfp, ReservedGrowth[0], 2); 1953 } 1954 return (AACOK); 1955 } 1956 1957 /* 1958 * The following function comes from Adaptec: 1959 * 1960 * Routine to be called during initialization of communications with 1961 * the adapter to handle possible adapter configuration issues. When 1962 * the adapter first boots up, it examines attached drives, etc., and 1963 * potentially comes up with a new or revised configuration (relative to 1964 * what's stored in its NVRAM). Additionally it may discover problems 1965 * that make the current physical configuration unworkable (currently 1966 * applicable only to cluster configuration issues). 1967 * 1968 * If there are no configuration issues or the issues are considered 1969 * trivial by the adapter, it will set its configuration status to 1970 * "FSACT_CONTINUE" and execute the "commit configuration" action 1971 * automatically on its own. 1972 * 1973 * However, if there are non-trivial issues, the adapter will set its 1974 * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT" 1975 * and wait for some agent on the host to issue the "\ContainerCommand 1976 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the 1977 * adapter to commit the new/updated configuration and enable 1978 * uninhibited operation. The host agent should first issue the 1979 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB 1980 * command to obtain information about config issues detected by 1981 * the adapter. 1982 * 1983 * Normally the adapter's PC BIOS will execute on the host following 1984 * adapter poweron and reset and will be responsible for querying the 1985 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG 1986 * command if appropriate. 1987 * 1988 * However, with the introduction of IOP reset support, the adapter may 1989 * boot up without the benefit of the adapter's PC BIOS host agent.
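 *
 * In outline, the exchange implemented below is (illustrative sketch
 * only, using the same commands the code issues):
 *
 *	send VM_ContainerConfig / CT_GET_CONFIG_STATUS;
 *	action = cf_status_header.action;
 *	if (action <= CFACT_PAUSE)
 *		send VM_ContainerConfig / CT_COMMIT_CONFIG;
 *	else
 *		report AACMPE_CONFIG_STATUS and leave the decision
 *		to a host agent;
 *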
1990 * This routine is intended to take care of these issues in situations 1991 * where BIOS doesn't execute following adapter poweron or reset. The 1992 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so 1993 * there is no harm in doing this when it's already been done. 1994 */ 1995 static int 1996 aac_handle_adapter_config_issues(struct aac_softstate *softs) 1997 { 1998 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 1999 struct aac_fib *fibp = softs->sync_slot.fibp; 2000 struct aac_Container *cmd; 2001 struct aac_Container_resp *resp; 2002 struct aac_cf_status_header *cfg_sts_hdr; 2003 uint32_t resp_status; 2004 uint32_t ct_status; 2005 uint32_t cfg_stat_action; 2006 int rval; 2007 2008 /* Get adapter config status */ 2009 cmd = (struct aac_Container *)&fibp->data[0]; 2010 2011 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2012 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2013 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS); 2014 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE], 2015 sizeof (struct aac_cf_status_header)); 2016 rval = aac_sync_fib(softs, ContainerCommand, 2017 AAC_FIB_SIZEOF(struct aac_Container)); 2018 resp = (struct aac_Container_resp *)cmd; 2019 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data; 2020 2021 resp_status = ddi_get32(acc, &resp->Status); 2022 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2023 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) { 2024 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action); 2025 2026 /* Commit configuration if it's reasonable to do so. */ 2027 if (cfg_stat_action <= CFACT_PAUSE) { 2028 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2029 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2030 ddi_put32(acc, &cmd->CTCommand.command, 2031 CT_COMMIT_CONFIG); 2032 rval = aac_sync_fib(softs, ContainerCommand, 2033 AAC_FIB_SIZEOF(struct aac_Container)); 2034 2035 resp_status = ddi_get32(acc, &resp->Status); 2036 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2037 if ((rval == AACOK) && (resp_status == 0) && 2038 (ct_status == CT_OK)) 2039 /* Successful completion */ 2040 rval = AACMPE_OK; 2041 else 2042 /* Auto-commit aborted due to error(s). */ 2043 rval = AACMPE_COMMIT_CONFIG; 2044 } else { 2045 /* 2046 * Auto-commit aborted due to adapter indicating 2047 * configuration issue(s) too dangerous to auto-commit. 2048 */ 2049 rval = AACMPE_CONFIG_STATUS; 2050 } 2051 } else { 2052 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted"); 2053 rval = AACMPE_CONFIG_STATUS; 2054 } 2055 return (rval); 2056 } 2057 2058 /* 2059 * Hardware initialization and resource allocation 2060 */ 2061 static int 2062 aac_common_attach(struct aac_softstate *softs) 2063 { 2064 uint32_t status; 2065 int i; 2066 2067 DBCALLED(softs, 1); 2068 2069 /* 2070 * Do a little check here to make sure there aren't any outstanding 2071 * FIBs in the message queue. At this point there should not be and 2072 * if there are they are probably left over from another instance of 2073 * the driver like when the system crashes and the crash dump driver 2074 * gets loaded. 2075 */ 2076 while (AAC_OUTB_GET(softs) != 0xfffffffful) 2077 ; 2078 2079 /* 2080 * Wait the card to complete booting up before do anything that 2081 * attempts to communicate with it. 
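 *
 * The firmware status register read below distinguishes three cases:
 * AAC_SELF_TEST_FAILED and AAC_KERNEL_PANIC are fatal right away,
 * while any other state is polled until AAC_KERNEL_UP_AND_RUNNING is
 * reported or the millisecond countdown expires, in which case the
 * controller is declared not ready.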
2082 */ 2083 status = AAC_FWSTATUS_GET(softs); 2084 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC) 2085 goto error; 2086 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */ 2087 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i); 2088 if (i == 0) { 2089 cmn_err(CE_CONT, "?Fatal error: controller not ready"); 2090 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2091 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2092 goto error; 2093 } 2094 2095 /* Read and set card supported options and settings */ 2096 if (aac_check_firmware(softs) == AACERR) { 2097 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2098 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2099 goto error; 2100 } 2101 2102 /* Clear out all interrupts */ 2103 AAC_STATUS_CLR(softs, ~0); 2104 2105 /* Setup communication space with the card */ 2106 if (softs->comm_space_dma_handle == NULL) { 2107 if (aac_alloc_comm_space(softs) != AACOK) 2108 goto error; 2109 } 2110 if (aac_setup_comm_space(softs) != AACOK) { 2111 cmn_err(CE_CONT, "?Setup communication space failed"); 2112 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2113 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2114 goto error; 2115 } 2116 2117 #ifdef DEBUG 2118 if (aac_get_fw_debug_buffer(softs) != AACOK) 2119 cmn_err(CE_CONT, "?firmware UART trace not supported"); 2120 #endif 2121 2122 /* Allocate slots */ 2123 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) { 2124 cmn_err(CE_CONT, "?Fatal error: slots allocate failed"); 2125 goto error; 2126 } 2127 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots); 2128 2129 /* Allocate FIBs */ 2130 if (softs->total_fibs < softs->total_slots) { 2131 aac_alloc_fibs(softs); 2132 if (softs->total_fibs == 0) 2133 goto error; 2134 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated", 2135 softs->total_fibs); 2136 } 2137 2138 /* Get adapter names */ 2139 if (CARD_IS_UNKNOWN(softs->card)) { 2140 struct aac_supplement_adapter_info sinf; 2141 2142 if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) { 2143 cmn_err(CE_CONT, "?Query adapter information failed"); 2144 } else { 2145 char *p, *p0, *p1; 2146 2147 /* 2148 * Now find the controller name in supp_adapter_info-> 2149 * AdapterTypeText. Use the first word as the vendor 2150 * and the other words as the product name. 
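 *
 * For example (hypothetical string, for illustration only), an
 * AdapterTypeText of " Adaptec  2200S " would be split by the loop
 * below into vendor_name "Adaptec" and product_name "2200S", with
 * the leading, separating and trailing blanks stripped.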
2151 */ 2152 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = " 2153 "\"%s\"", sinf.AdapterTypeText); 2154 p = sinf.AdapterTypeText; 2155 p0 = p1 = NULL; 2156 /* Skip heading spaces */ 2157 while (*p && (*p == ' ' || *p == '\t')) 2158 p++; 2159 p0 = p; 2160 while (*p && (*p != ' ' && *p != '\t')) 2161 p++; 2162 /* Remove middle spaces */ 2163 while (*p && (*p == ' ' || *p == '\t')) 2164 *p++ = 0; 2165 p1 = p; 2166 /* Remove trailing spaces */ 2167 p = p1 + strlen(p1) - 1; 2168 while (p > p1 && (*p == ' ' || *p == '\t')) 2169 *p-- = 0; 2170 if (*p0 && *p1) { 2171 (void *)strncpy(softs->vendor_name, p0, 2172 AAC_VENDOR_LEN); 2173 (void *)strncpy(softs->product_name, p1, 2174 AAC_PRODUCT_LEN); 2175 } else { 2176 cmn_err(CE_WARN, 2177 "?adapter name mis-formatted\n"); 2178 if (*p0) 2179 (void *)strncpy(softs->product_name, 2180 p0, AAC_PRODUCT_LEN); 2181 } 2182 } 2183 } 2184 2185 cmn_err(CE_NOTE, 2186 "!aac driver %d.%02d.%02d-%d, found card: " \ 2187 "%s %s(pci0x%x.%x.%x.%x) at 0x%x", 2188 AAC_DRIVER_MAJOR_VERSION, 2189 AAC_DRIVER_MINOR_VERSION, 2190 AAC_DRIVER_BUGFIX_LEVEL, 2191 AAC_DRIVER_BUILD, 2192 softs->vendor_name, softs->product_name, 2193 softs->vendid, softs->devid, softs->subvendid, softs->subsysid, 2194 softs->pci_mem_base_paddr); 2195 2196 /* Perform acceptance of adapter-detected config changes if possible */ 2197 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) { 2198 cmn_err(CE_CONT, "?Handle adapter config issues failed"); 2199 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2200 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2201 goto error; 2202 } 2203 2204 /* Setup containers */ 2205 bzero(softs->containers, sizeof (struct aac_container) * AAC_MAX_LD); 2206 softs->container_count = 0; 2207 if (aac_probe_containers(softs) != AACOK) { 2208 cmn_err(CE_CONT, "?Fatal error: get container info error"); 2209 goto error; 2210 } 2211 2212 /* Check dma & acc handles allocated in attach */ 2213 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) { 2214 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2215 goto error; 2216 } 2217 2218 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 2219 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2220 goto error; 2221 } 2222 2223 for (i = 0; i < softs->total_slots; i++) { 2224 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) != 2225 DDI_SUCCESS) { 2226 ddi_fm_service_impact(softs->devinfo_p, 2227 DDI_SERVICE_LOST); 2228 goto error; 2229 } 2230 } 2231 2232 return (AACOK); 2233 2234 error: 2235 if (softs->state & AAC_STATE_RESET) 2236 return (AACERR); 2237 if (softs->total_fibs > 0) 2238 aac_destroy_fibs(softs); 2239 if (softs->total_slots > 0) 2240 aac_destroy_slots(softs); 2241 if (softs->comm_space_dma_handle) 2242 aac_free_comm_space(softs); 2243 return (AACERR); 2244 } 2245 2246 /* 2247 * Hardware shutdown and resource release 2248 */ 2249 static void 2250 aac_common_detach(struct aac_softstate *softs) 2251 { 2252 DBCALLED(softs, 1); 2253 2254 (void) aac_shutdown(softs); 2255 2256 aac_destroy_fibs(softs); 2257 aac_destroy_slots(softs); 2258 aac_free_comm_space(softs); 2259 } 2260 2261 /* 2262 * Send a synchronous command to the controller and wait for a result. 2263 * Indicate if the controller completed the command with an error status. 
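 *
 * Typical usage elsewhere in this file (see aac_check_firmware()),
 * shown here only as an illustration of the calling convention:
 *
 *	uint32_t status;
 *
 *	if (aac_sync_mbcommand(softs, AAC_MONKER_GETINFO,
 *	    0, 0, 0, 0, &status) != AACOK &&
 *	    status != SRB_STATUS_INVALID_REQUEST)
 *		return (AACERR);
 *
 * On success the remaining mailbox registers can then be read with
 * AAC_MAILBOX_GET().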
2264 */ 2265 int 2266 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd, 2267 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, 2268 uint32_t *statusp) 2269 { 2270 int timeout; 2271 uint32_t status; 2272 2273 if (statusp != NULL) 2274 *statusp = SRB_STATUS_SUCCESS; 2275 2276 /* Fill in mailbox */ 2277 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3); 2278 2279 /* Ensure the sync command doorbell flag is cleared */ 2280 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 2281 2282 /* Then set it to signal the adapter */ 2283 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND); 2284 2285 /* Spin waiting for the command to complete */ 2286 timeout = AAC_IMMEDIATE_TIMEOUT * 1000; 2287 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout); 2288 if (!timeout) { 2289 AACDB_PRINT(softs, CE_WARN, 2290 "Sync command timed out after %d seconds (0x%x)!", 2291 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs)); 2292 return (AACERR); 2293 } 2294 2295 /* Clear the completion flag */ 2296 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 2297 2298 /* Get the command status */ 2299 status = AAC_MAILBOX_GET(softs, 0); 2300 if (statusp != NULL) 2301 *statusp = status; 2302 if (status != SRB_STATUS_SUCCESS) { 2303 AACDB_PRINT(softs, CE_WARN, 2304 "Sync command fail: status = 0x%x", status); 2305 return (AACERR); 2306 } 2307 2308 return (AACOK); 2309 } 2310 2311 /* 2312 * Send a synchronous FIB to the adapter and wait for its completion 2313 */ 2314 static int 2315 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize) 2316 { 2317 struct aac_slot *slotp = &softs->sync_slot; 2318 ddi_dma_handle_t dma = slotp->fib_dma_handle; 2319 uint32_t status; 2320 int rval; 2321 2322 /* Sync fib only supports 512 bytes */ 2323 if (fibsize > AAC_FIB_SIZE) 2324 return (AACERR); 2325 2326 /* 2327 * Setup sync fib 2328 * Need not reinitialize FIB header if it's already been filled 2329 * by others like aac_cmd_fib_scsi as aac_cmd. 2330 */ 2331 if (slotp->acp == NULL) 2332 aac_cmd_fib_header(softs, slotp, cmd, fibsize); 2333 2334 (void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib), 2335 fibsize, DDI_DMA_SYNC_FORDEV); 2336 2337 /* Give the FIB to the controller, wait for a response. 
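 * The FIB's physical address is passed as the first mailbox argument
 * of AAC_MONKER_SYNCFIB; the adapter places its response in the same
 * FIB, which is why the buffer is synced back FORCPU once the
 * mailbox command returns.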
*/ 2338 rval = aac_sync_mbcommand(softs, AAC_MONKER_SYNCFIB, 2339 slotp->fib_phyaddr, 0, 0, 0, &status); 2340 if (rval == AACERR) { 2341 AACDB_PRINT(softs, CE_WARN, 2342 "Send sync fib to controller failed"); 2343 return (AACERR); 2344 } 2345 2346 (void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib), 2347 AAC_FIB_SIZE, DDI_DMA_SYNC_FORCPU); 2348 2349 if ((aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) || 2350 (aac_check_dma_handle(dma) != DDI_SUCCESS)) { 2351 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 2352 return (AACERR); 2353 } 2354 2355 return (AACOK); 2356 } 2357 2358 static void 2359 aac_cmd_initq(struct aac_cmd_queue *q) 2360 { 2361 q->q_head = NULL; 2362 q->q_tail = (struct aac_cmd *)&q->q_head; 2363 } 2364 2365 /* 2366 * Remove a cmd from the head of q 2367 */ 2368 static struct aac_cmd * 2369 aac_cmd_dequeue(struct aac_cmd_queue *q) 2370 { 2371 struct aac_cmd *acp; 2372 2373 _NOTE(ASSUMING_PROTECTED(*q)) 2374 2375 if ((acp = q->q_head) != NULL) { 2376 if ((q->q_head = acp->next) != NULL) 2377 acp->next = NULL; 2378 else 2379 q->q_tail = (struct aac_cmd *)&q->q_head; 2380 acp->prev = NULL; 2381 } 2382 return (acp); 2383 } 2384 2385 /* 2386 * Add a cmd to the tail of q 2387 */ 2388 static void 2389 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp) 2390 { 2391 ASSERT(acp->next == NULL); 2392 acp->prev = q->q_tail; 2393 q->q_tail->next = acp; 2394 q->q_tail = acp; 2395 } 2396 2397 /* 2398 * Remove the cmd ac from q 2399 */ 2400 static void 2401 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp) 2402 { 2403 if (acp->prev) { 2404 if ((acp->prev->next = acp->next) != NULL) { 2405 acp->next->prev = acp->prev; 2406 acp->next = NULL; 2407 } else { 2408 q->q_tail = acp->prev; 2409 } 2410 acp->prev = NULL; 2411 } 2412 /* ac is not in the queue */ 2413 } 2414 2415 /* 2416 * Atomically insert an entry into the nominated queue, returns 0 on success or 2417 * AACERR if the queue is full. 2418 * 2419 * Note: it would be more efficient to defer notifying the controller in 2420 * the case where we may be inserting several entries in rapid succession, 2421 * but implementing this usefully may be difficult (it would involve a 2422 * separate queue/notify interface). 
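 *
 * Index handling sketch (illustrative): with a queue of size N the
 * producer index is wrapped back to 0 once pi >= N, and the queue is
 * treated as full when (pi + 1) == ci, i.e. one slot is always left
 * unused so that a full queue (pi + 1 == ci) can be told apart from
 * an empty one (pi == ci).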
2423 */ 2424 static int 2425 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr, 2426 uint32_t fib_size) 2427 { 2428 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 2429 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 2430 uint32_t pi, ci; 2431 2432 DBCALLED(softs, 2); 2433 2434 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q); 2435 2436 /* Get the producer/consumer indices */ 2437 (void) ddi_dma_sync(dma, (uint8_t *)softs->qtablep->qt_qindex[queue] - \ 2438 (uint8_t *)softs->comm_space, sizeof (uint32_t) * 2, 2439 DDI_DMA_SYNC_FORCPU); 2440 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 2441 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 2442 return (AACERR); 2443 } 2444 2445 pi = ddi_get32(acc, 2446 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 2447 ci = ddi_get32(acc, 2448 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 2449 2450 /* 2451 * Wrap the queue first before we check the queue to see 2452 * if it is full 2453 */ 2454 if (pi >= aac_qinfo[queue].size) 2455 pi = 0; 2456 2457 /* XXX queue full */ 2458 if ((pi + 1) == ci) 2459 return (AACERR); 2460 2461 /* Fill in queue entry */ 2462 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size); 2463 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr); 2464 (void) ddi_dma_sync(dma, (uint8_t *)(softs->qentries[queue] + pi) - \ 2465 (uint8_t *)softs->comm_space, sizeof (struct aac_queue_entry), 2466 DDI_DMA_SYNC_FORDEV); 2467 2468 /* Update producer index */ 2469 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX], 2470 pi + 1); 2471 (void) ddi_dma_sync(dma, 2472 (uint8_t *)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \ 2473 (uint8_t *)softs->comm_space, sizeof (uint32_t), 2474 DDI_DMA_SYNC_FORDEV); 2475 2476 if (aac_qinfo[queue].notify != 0) 2477 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 2478 return (AACOK); 2479 } 2480 2481 /* 2482 * Atomically remove one entry from the nominated queue, returns 0 on 2483 * success or AACERR if the queue is empty. 
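 *
 * The consumer index is wrapped here by the host (the controller
 * does not wrap it), and when a dequeue takes the queue out of the
 * full state (ci == pi + 1 on entry) the adapter is notified so it
 * can continue posting entries.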
2484 */ 2485 static int 2486 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp) 2487 { 2488 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 2489 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 2490 uint32_t pi, ci; 2491 int unfull = 0; 2492 2493 DBCALLED(softs, 2); 2494 2495 ASSERT(idxp); 2496 2497 /* Get the producer/consumer indices */ 2498 (void) ddi_dma_sync(dma, (uint8_t *)softs->qtablep->qt_qindex[queue] - \ 2499 (uint8_t *)softs->comm_space, sizeof (uint32_t) * 2, 2500 DDI_DMA_SYNC_FORCPU); 2501 pi = ddi_get32(acc, 2502 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 2503 ci = ddi_get32(acc, 2504 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 2505 2506 /* Check for queue empty */ 2507 if (ci == pi) 2508 return (AACERR); 2509 2510 if (pi >= aac_qinfo[queue].size) 2511 pi = 0; 2512 2513 /* Check for queue full */ 2514 if (ci == pi + 1) 2515 unfull = 1; 2516 2517 /* 2518 * The controller does not wrap the queue, 2519 * so we have to do it by ourselves 2520 */ 2521 if (ci >= aac_qinfo[queue].size) 2522 ci = 0; 2523 2524 /* Fetch the entry */ 2525 (void) ddi_dma_sync(dma, (uint8_t *)(softs->qentries[queue] + pi) - \ 2526 (uint8_t *)softs->comm_space, sizeof (struct aac_queue_entry), 2527 DDI_DMA_SYNC_FORCPU); 2528 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 2529 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 2530 return (AACERR); 2531 } 2532 2533 switch (queue) { 2534 case AAC_HOST_NORM_RESP_Q: 2535 case AAC_HOST_HIGH_RESP_Q: 2536 *idxp = ddi_get32(acc, 2537 &(softs->qentries[queue] + ci)->aq_fib_addr); 2538 break; 2539 2540 case AAC_HOST_NORM_CMD_Q: 2541 case AAC_HOST_HIGH_CMD_Q: 2542 *idxp = ddi_get32(acc, 2543 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE; 2544 break; 2545 2546 default: 2547 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()"); 2548 return (AACERR); 2549 } 2550 2551 /* Update consumer index */ 2552 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX], 2553 ci + 1); 2554 (void) ddi_dma_sync(dma, 2555 (uint8_t *)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \ 2556 (uint8_t *)softs->comm_space, sizeof (uint32_t), 2557 DDI_DMA_SYNC_FORDEV); 2558 2559 if (unfull && aac_qinfo[queue].notify != 0) 2560 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 2561 return (AACOK); 2562 } 2563 2564 /* 2565 * Request information of the container cid 2566 */ 2567 static struct aac_mntinforesp * 2568 aac_get_container_info(struct aac_softstate *softs, int cid) 2569 { 2570 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2571 struct aac_fib *fibp = softs->sync_slot.fibp; 2572 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0]; 2573 struct aac_mntinforesp *mir; 2574 2575 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */ 2576 (softs->flags & AAC_FLAGS_LBA_64BIT) ? 
2577 VM_NameServe64 : VM_NameServe); 2578 ddi_put32(acc, &mi->MntType, FT_FILESYS); 2579 ddi_put32(acc, &mi->MntCount, cid); 2580 2581 if (aac_sync_fib(softs, ContainerCommand, 2582 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) { 2583 AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid); 2584 return (NULL); 2585 } 2586 2587 mir = (struct aac_mntinforesp *)&fibp->data[0]; 2588 if (ddi_get32(acc, &mir->Status) == ST_OK) 2589 return (mir); 2590 return (NULL); 2591 } 2592 2593 static int 2594 aac_get_container_count(struct aac_softstate *softs, int *count) 2595 { 2596 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2597 struct aac_mntinforesp *mir; 2598 2599 if ((mir = aac_get_container_info(softs, 0)) == NULL) 2600 return (AACERR); 2601 *count = ddi_get32(acc, &mir->MntRespCount); 2602 if (*count > AAC_MAX_LD) { 2603 AACDB_PRINT(softs, CE_CONT, 2604 "container count(%d) > AAC_MAX_LD", *count); 2605 return (AACERR); 2606 } 2607 return (AACOK); 2608 } 2609 2610 static int 2611 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid) 2612 { 2613 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2614 struct aac_Container *ct = (struct aac_Container *) \ 2615 &softs->sync_slot.fibp->data[0]; 2616 2617 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE); 2618 ddi_put32(acc, &ct->Command, VM_ContainerConfig); 2619 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID); 2620 ddi_put32(acc, &ct->CTCommand.param[0], cid); 2621 2622 if (aac_sync_fib(softs, ContainerCommand, 2623 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR) 2624 return (AACERR); 2625 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK) 2626 return (AACERR); 2627 2628 *uid = ddi_get32(acc, &ct->CTCommand.param[1]); 2629 return (AACOK); 2630 } 2631 2632 static int 2633 aac_probe_container(struct aac_softstate *softs, uint32_t cid) 2634 { 2635 struct aac_container *dvp = &softs->containers[cid]; 2636 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2637 struct aac_mntinforesp *mir; 2638 uint64_t size; 2639 uint32_t uid; 2640 2641 /* Get container basic info */ 2642 if ((mir = aac_get_container_info(softs, cid)) == NULL) 2643 return (AACERR); 2644 2645 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) { 2646 if (dvp->valid) { 2647 AACDB_PRINT(softs, CE_NOTE, 2648 ">>> Container %d deleted", cid); 2649 dvp->valid = 0; 2650 } 2651 } else { 2652 size = AAC_MIR_SIZE(softs, acc, mir); 2653 AACDB_PRINT(softs, CE_NOTE, "Container #%d found: " \ 2654 "size=0x%x.%08x, type=%d, name=%s", 2655 cid, 2656 ddi_get32(acc, &mir->MntObj.CapacityHigh), 2657 ddi_get32(acc, &mir->MntObj.Capacity), 2658 ddi_get32(acc, &mir->MntObj.VolType), 2659 mir->MntObj.FileSystemName); 2660 2661 /* Get container UID */ 2662 if (aac_get_container_uid(softs, cid, &uid) == AACERR) { 2663 AACDB_PRINT(softs, CE_CONT, 2664 "query container %d uid failed", cid); 2665 return (AACERR); 2666 } 2667 AACDB_PRINT(softs, CE_CONT, "uid=0x%08x", uid); 2668 2669 if (dvp->valid) { 2670 if (dvp->uid != uid) { 2671 AACDB_PRINT(softs, CE_WARN, 2672 ">>> Container %u uid changed to %d", 2673 cid, uid); 2674 dvp->uid = uid; 2675 } 2676 if (dvp->size != size) { 2677 AACDB_PRINT(softs, CE_NOTE, 2678 ">>> Container %u size changed to %"PRIu64, 2679 cid, size); 2680 dvp->size = size; 2681 } 2682 } else { /* Init new container */ 2683 AACDB_PRINT(softs, CE_NOTE, 2684 ">>> Container %d added", cid); 2685 dvp->valid = 1; 2686 2687 dvp->cid = cid; 2688 dvp->uid = uid; 2689 dvp->size = size; 2690 dvp->locked = 0; 2691 dvp->deleted = 0; 2692 } 2693 } 
2694 return (AACOK); 2695 } 2696 2697 /* 2698 * Do a rescan of all the possible containers and update the container list 2699 * with newly online/offline containers. 2700 */ 2701 static int 2702 aac_probe_containers(struct aac_softstate *softs) 2703 { 2704 int i, count, total; 2705 2706 /* Loop over possible containers */ 2707 count = softs->container_count; 2708 if (aac_get_container_count(softs, &count) == AACERR) 2709 return (AACERR); 2710 for (i = total = 0; i < count; i++) { 2711 if (aac_probe_container(softs, i) == AACOK) 2712 total++; 2713 } 2714 if (count < softs->container_count) { 2715 struct aac_container *dvp; 2716 2717 for (dvp = &softs->containers[count]; 2718 dvp < &softs->containers[softs->container_count]; dvp++) { 2719 if (dvp->valid == 0) 2720 continue; 2721 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted", 2722 dvp->cid); 2723 dvp->valid = 0; 2724 } 2725 } 2726 softs->container_count = count; 2727 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total); 2728 return (AACOK); 2729 } 2730 2731 static int 2732 aac_alloc_comm_space(struct aac_softstate *softs) 2733 { 2734 size_t rlen; 2735 ddi_dma_cookie_t cookie; 2736 uint_t cookien; 2737 2738 /* Allocate DMA for comm. space */ 2739 if (ddi_dma_alloc_handle( 2740 softs->devinfo_p, 2741 &softs->addr_dma_attr, 2742 DDI_DMA_SLEEP, 2743 NULL, 2744 &softs->comm_space_dma_handle) != DDI_SUCCESS) { 2745 AACDB_PRINT(softs, CE_WARN, 2746 "Cannot alloc dma handle for communication area"); 2747 goto error; 2748 } 2749 if (ddi_dma_mem_alloc( 2750 softs->comm_space_dma_handle, 2751 sizeof (struct aac_comm_space), 2752 &aac_acc_attr, 2753 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2754 DDI_DMA_SLEEP, 2755 NULL, 2756 (caddr_t *)&softs->comm_space, 2757 &rlen, 2758 &softs->comm_space_acc_handle) != DDI_SUCCESS) { 2759 AACDB_PRINT(softs, CE_WARN, 2760 "Cannot alloc mem for communication area"); 2761 goto error; 2762 } 2763 if (ddi_dma_addr_bind_handle( 2764 softs->comm_space_dma_handle, 2765 NULL, 2766 (caddr_t)softs->comm_space, 2767 sizeof (struct aac_comm_space), 2768 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2769 DDI_DMA_SLEEP, 2770 NULL, 2771 &cookie, 2772 &cookien) != DDI_DMA_MAPPED) { 2773 AACDB_PRINT(softs, CE_WARN, 2774 "DMA bind failed for communication area"); 2775 goto error; 2776 } 2777 softs->comm_space_phyaddr = cookie.dmac_address; 2778 2779 /* Setup sync FIB space */ 2780 softs->sync_slot.fibp = &softs->comm_space->sync_fib; 2781 softs->sync_slot.fib_phyaddr = softs->comm_space_phyaddr + \ 2782 offsetof(struct aac_comm_space, sync_fib); 2783 softs->sync_slot.fib_acc_handle = softs->comm_space_acc_handle; 2784 softs->sync_slot.fib_dma_handle = softs->comm_space_dma_handle; 2785 2786 return (AACOK); 2787 error: 2788 if (softs->comm_space_acc_handle) { 2789 ddi_dma_mem_free(&softs->comm_space_acc_handle); 2790 softs->comm_space_acc_handle = NULL; 2791 } 2792 if (softs->comm_space_dma_handle) { 2793 ddi_dma_free_handle(&softs->comm_space_dma_handle); 2794 softs->comm_space_dma_handle = NULL; 2795 } 2796 return (AACERR); 2797 } 2798 2799 static void 2800 aac_free_comm_space(struct aac_softstate *softs) 2801 { 2802 softs->sync_slot.fibp = NULL; 2803 softs->sync_slot.fib_phyaddr = NULL; 2804 softs->sync_slot.fib_acc_handle = NULL; 2805 softs->sync_slot.fib_dma_handle = NULL; 2806 2807 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle); 2808 ddi_dma_mem_free(&softs->comm_space_acc_handle); 2809 softs->comm_space_acc_handle = NULL; 2810 ddi_dma_free_handle(&softs->comm_space_dma_handle); 2811 softs->comm_space_dma_handle = 
NULL; 2812 softs->comm_space_phyaddr = NULL; 2813 } 2814 2815 /* 2816 * Initialize the data structures that are required for the communication 2817 * interface to operate 2818 */ 2819 static int 2820 aac_setup_comm_space(struct aac_softstate *softs) 2821 { 2822 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 2823 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 2824 uint32_t comm_space_phyaddr; 2825 struct aac_adapter_init *initp; 2826 int qoffset; 2827 2828 comm_space_phyaddr = softs->comm_space_phyaddr; 2829 2830 /* Setup adapter init struct */ 2831 initp = &softs->comm_space->init_data; 2832 bzero(initp, sizeof (struct aac_adapter_init)); 2833 2834 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION); 2835 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time()); 2836 2837 /* Setup new/old comm. specific data */ 2838 if (softs->flags & AAC_FLAGS_RAW_IO) { 2839 ddi_put32(acc, &initp->InitStructRevision, 2840 AAC_INIT_STRUCT_REVISION_4); 2841 ddi_put32(acc, &initp->InitFlags, 2842 (softs->flags & AAC_FLAGS_NEW_COMM) ? 2843 AAC_INIT_FLAGS_NEW_COMM_SUPPORTED : 0); 2844 /* Setup the preferred settings */ 2845 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs); 2846 ddi_put32(acc, &initp->MaxIoSize, 2847 (softs->aac_max_sectors << 9)); 2848 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size); 2849 } else { 2850 /* 2851 * Tells the adapter about the physical location of various 2852 * important shared data structures 2853 */ 2854 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress, 2855 comm_space_phyaddr + \ 2856 offsetof(struct aac_comm_space, adapter_fibs)); 2857 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0); 2858 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE); 2859 ddi_put32(acc, &initp->AdapterFibsSize, 2860 AAC_ADAPTER_FIBS * AAC_FIB_SIZE); 2861 ddi_put32(acc, &initp->PrintfBufferAddress, 2862 comm_space_phyaddr + \ 2863 offsetof(struct aac_comm_space, adapter_print_buf)); 2864 ddi_put32(acc, &initp->PrintfBufferSize, 2865 AAC_ADAPTER_PRINT_BUFSIZE); 2866 ddi_put32(acc, &initp->MiniPortRevision, 2867 AAC_INIT_STRUCT_MINIPORT_REVISION); 2868 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN); 2869 2870 qoffset = (comm_space_phyaddr + \ 2871 offsetof(struct aac_comm_space, qtable)) % \ 2872 AAC_QUEUE_ALIGN; 2873 if (qoffset) 2874 qoffset = AAC_QUEUE_ALIGN - qoffset; 2875 softs->qtablep = (struct aac_queue_table *) \ 2876 ((char *)&softs->comm_space->qtable + qoffset); 2877 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \ 2878 offsetof(struct aac_comm_space, qtable) + qoffset); 2879 2880 /* Init queue table */ 2881 ddi_put32(acc, &softs->qtablep-> \ 2882 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX], 2883 AAC_HOST_NORM_CMD_ENTRIES); 2884 ddi_put32(acc, &softs->qtablep-> \ 2885 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX], 2886 AAC_HOST_NORM_CMD_ENTRIES); 2887 ddi_put32(acc, &softs->qtablep-> \ 2888 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 2889 AAC_HOST_HIGH_CMD_ENTRIES); 2890 ddi_put32(acc, &softs->qtablep-> \ 2891 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 2892 AAC_HOST_HIGH_CMD_ENTRIES); 2893 ddi_put32(acc, &softs->qtablep-> \ 2894 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX], 2895 AAC_ADAP_NORM_CMD_ENTRIES); 2896 ddi_put32(acc, &softs->qtablep-> \ 2897 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX], 2898 AAC_ADAP_NORM_CMD_ENTRIES); 2899 ddi_put32(acc, &softs->qtablep-> \ 2900 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 2901 AAC_ADAP_HIGH_CMD_ENTRIES); 2902 
ddi_put32(acc, &softs->qtablep-> \ 2903 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 2904 AAC_ADAP_HIGH_CMD_ENTRIES); 2905 ddi_put32(acc, &softs->qtablep-> \ 2906 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX], 2907 AAC_HOST_NORM_RESP_ENTRIES); 2908 ddi_put32(acc, &softs->qtablep-> \ 2909 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX], 2910 AAC_HOST_NORM_RESP_ENTRIES); 2911 ddi_put32(acc, &softs->qtablep-> \ 2912 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 2913 AAC_HOST_HIGH_RESP_ENTRIES); 2914 ddi_put32(acc, &softs->qtablep-> \ 2915 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 2916 AAC_HOST_HIGH_RESP_ENTRIES); 2917 ddi_put32(acc, &softs->qtablep-> \ 2918 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX], 2919 AAC_ADAP_NORM_RESP_ENTRIES); 2920 ddi_put32(acc, &softs->qtablep-> \ 2921 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX], 2922 AAC_ADAP_NORM_RESP_ENTRIES); 2923 ddi_put32(acc, &softs->qtablep-> \ 2924 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 2925 AAC_ADAP_HIGH_RESP_ENTRIES); 2926 ddi_put32(acc, &softs->qtablep-> \ 2927 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 2928 AAC_ADAP_HIGH_RESP_ENTRIES); 2929 2930 /* Init queue entries */ 2931 softs->qentries[AAC_HOST_NORM_CMD_Q] = 2932 &softs->qtablep->qt_HostNormCmdQueue[0]; 2933 softs->qentries[AAC_HOST_HIGH_CMD_Q] = 2934 &softs->qtablep->qt_HostHighCmdQueue[0]; 2935 softs->qentries[AAC_ADAP_NORM_CMD_Q] = 2936 &softs->qtablep->qt_AdapNormCmdQueue[0]; 2937 softs->qentries[AAC_ADAP_HIGH_CMD_Q] = 2938 &softs->qtablep->qt_AdapHighCmdQueue[0]; 2939 softs->qentries[AAC_HOST_NORM_RESP_Q] = 2940 &softs->qtablep->qt_HostNormRespQueue[0]; 2941 softs->qentries[AAC_HOST_HIGH_RESP_Q] = 2942 &softs->qtablep->qt_HostHighRespQueue[0]; 2943 softs->qentries[AAC_ADAP_NORM_RESP_Q] = 2944 &softs->qtablep->qt_AdapNormRespQueue[0]; 2945 softs->qentries[AAC_ADAP_HIGH_RESP_Q] = 2946 &softs->qtablep->qt_AdapHighRespQueue[0]; 2947 } 2948 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV); 2949 2950 /* Send init structure to the card */ 2951 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT, 2952 comm_space_phyaddr + \ 2953 offsetof(struct aac_comm_space, init_data), 2954 0, 0, 0, NULL) == AACERR) { 2955 AACDB_PRINT(softs, CE_WARN, 2956 "Cannot send init structure to adapter"); 2957 return (AACERR); 2958 } 2959 2960 return (AACOK); 2961 } 2962 2963 static uchar_t * 2964 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf) 2965 { 2966 (void) memset(buf, ' ', AAC_VENDOR_LEN); 2967 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name)); 2968 return (buf + AAC_VENDOR_LEN); 2969 } 2970 2971 static uchar_t * 2972 aac_product_id(struct aac_softstate *softs, uchar_t *buf) 2973 { 2974 (void) memset(buf, ' ', AAC_PRODUCT_LEN); 2975 bcopy(softs->product_name, buf, strlen(softs->product_name)); 2976 return (buf + AAC_PRODUCT_LEN); 2977 } 2978 2979 /* 2980 * Construct unit serial number from container uid 2981 */ 2982 static uchar_t * 2983 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf) 2984 { 2985 int i, d; 2986 uint32_t uid = softs->containers[tgt].uid; 2987 2988 for (i = 7; i >= 0; i--) { 2989 d = uid & 0xf; 2990 buf[i] = d > 9 ? 
'A' + (d - 0xa) : '0' + d; 2991 uid >>= 4; 2992 } 2993 return (buf + 8); 2994 } 2995 2996 /* 2997 * SPC-3 7.5 INQUIRY command implementation 2998 */ 2999 static void 3000 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt, 3001 union scsi_cdb *cdbp, struct buf *bp) 3002 { 3003 int tgt = pkt->pkt_address.a_target; 3004 char *b_addr = NULL; 3005 uchar_t page = cdbp->cdb_opaque[2]; 3006 3007 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) { 3008 /* Command Support Data is not supported */ 3009 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0); 3010 return; 3011 } 3012 3013 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3014 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3015 bp_mapin(bp); 3016 b_addr = bp->b_un.b_addr; 3017 } 3018 3019 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) { 3020 uchar_t *vpdp = (uchar_t *)b_addr; 3021 uchar_t *idp, *sp; 3022 3023 /* SPC-3 8.4 Vital product data parameters */ 3024 switch (page) { 3025 case 0x00: 3026 /* Supported VPD pages */ 3027 if (vpdp == NULL) 3028 return; 3029 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3030 vpdp[AAC_VPD_PAGE_CODE] = 0x00; 3031 vpdp[AAC_VPD_PAGE_LENGTH] = 3; 3032 3033 vpdp[AAC_VPD_PAGE_DATA] = 0x00; 3034 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80; 3035 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83; 3036 3037 pkt->pkt_state |= STATE_XFERRED_DATA; 3038 break; 3039 3040 case 0x80: 3041 /* Unit serial number page */ 3042 if (vpdp == NULL) 3043 return; 3044 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3045 vpdp[AAC_VPD_PAGE_CODE] = 0x80; 3046 vpdp[AAC_VPD_PAGE_LENGTH] = 8; 3047 3048 sp = &vpdp[AAC_VPD_PAGE_DATA]; 3049 (void) aac_lun_serialno(softs, tgt, sp); 3050 3051 pkt->pkt_state |= STATE_XFERRED_DATA; 3052 break; 3053 3054 case 0x83: 3055 /* Device identification page */ 3056 if (vpdp == NULL) 3057 return; 3058 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3059 vpdp[AAC_VPD_PAGE_CODE] = 0x83; 3060 3061 idp = &vpdp[AAC_VPD_PAGE_DATA]; 3062 bzero(idp, AAC_VPD_ID_LENGTH); 3063 idp[AAC_VPD_ID_CODESET] = 0x02; 3064 idp[AAC_VPD_ID_TYPE] = 0x01; 3065 3066 /* 3067 * SPC-3 Table 111 - Identifier type 3068 * One recommanded method of constructing the remainder 3069 * of identifier field is to concatenate the product 3070 * identification field from the standard INQUIRY data 3071 * field and the product serial number field from the 3072 * unit serial number page. 3073 */ 3074 sp = &idp[AAC_VPD_ID_DATA]; 3075 sp = aac_vendor_id(softs, sp); 3076 sp = aac_product_id(softs, sp); 3077 sp = aac_lun_serialno(softs, tgt, sp); 3078 idp[AAC_VPD_ID_LENGTH] = sp - &idp[AAC_VPD_ID_DATA]; 3079 3080 vpdp[AAC_VPD_PAGE_LENGTH] = 3081 sp - &vpdp[AAC_VPD_PAGE_DATA]; 3082 pkt->pkt_state |= STATE_XFERRED_DATA; 3083 break; 3084 3085 default: 3086 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3087 0x24, 0x00, 0); 3088 break; 3089 } 3090 } else { 3091 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr; 3092 size_t len = sizeof (struct scsi_inquiry); 3093 3094 if (page != 0) { 3095 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3096 0x24, 0x00, 0); 3097 return; 3098 } 3099 if (inqp == NULL) 3100 return; 3101 3102 bzero(inqp, len); 3103 inqp->inq_len = AAC_ADDITIONAL_LEN; 3104 inqp->inq_ansi = AAC_ANSI_VER; 3105 inqp->inq_rdf = AAC_RESP_DATA_FORMAT; 3106 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid); 3107 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid); 3108 bcopy("V1.0", inqp->inq_revision, 4); 3109 inqp->inq_cmdque = 1; /* enable tagged-queuing */ 3110 /* 3111 * For "sd-max-xfer-size" property which may impact performance 3112 * when IO threads increase. 
3113 */ 3114 inqp->inq_wbus32 = 1; 3115 3116 pkt->pkt_state |= STATE_XFERRED_DATA; 3117 } 3118 } 3119 3120 /* 3121 * SPC-3 7.10 MODE SENSE command implementation 3122 */ 3123 static void 3124 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt, 3125 union scsi_cdb *cdbp, struct buf *bp, int capacity) 3126 { 3127 uchar_t pagecode; 3128 struct mode_format *page3p; 3129 struct mode_geometry *page4p; 3130 struct mode_header *headerp; 3131 unsigned int ncyl; 3132 3133 if (!(bp && bp->b_un.b_addr && bp->b_bcount)) 3134 return; 3135 3136 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3137 bp_mapin(bp); 3138 pkt->pkt_state |= STATE_XFERRED_DATA; 3139 pagecode = cdbp->cdb_un.sg.scsi[0]; 3140 headerp = (struct mode_header *)(bp->b_un.b_addr); 3141 headerp->bdesc_length = MODE_BLK_DESC_LENGTH; 3142 3143 switch (pagecode) { 3144 /* SBC-3 7.1.3.3 Format device page */ 3145 case SD_MODE_SENSE_PAGE3_CODE: 3146 page3p = (struct mode_format *)((caddr_t)headerp + 3147 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 3148 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE; 3149 page3p->mode_page.length = sizeof (struct mode_format); 3150 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE); 3151 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK); 3152 break; 3153 3154 /* SBC-3 7.1.3.8 Rigid disk device geometry page */ 3155 case SD_MODE_SENSE_PAGE4_CODE: 3156 page4p = (struct mode_geometry *)((caddr_t)headerp + 3157 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 3158 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE; 3159 page4p->mode_page.length = sizeof (struct mode_geometry); 3160 page4p->heads = AAC_NUMBER_OF_HEADS; 3161 page4p->rpm = BE_16(AAC_ROTATION_SPEED); 3162 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK); 3163 page4p->cyl_lb = ncyl & 0xff; 3164 page4p->cyl_mb = (ncyl >> 8) & 0xff; 3165 page4p->cyl_ub = (ncyl >> 16) & 0xff; 3166 break; 3167 3168 case MODEPAGE_CTRL_MODE: /* 64-bit LBA need large sense data */ 3169 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3170 struct mode_control_scsi3 *mctl; 3171 3172 mctl = (struct mode_control_scsi3 *)((caddr_t)headerp + 3173 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 3174 mctl->mode_page.code = MODEPAGE_CTRL_MODE; 3175 mctl->mode_page.length = 3176 sizeof (struct mode_control_scsi3) - 3177 sizeof (struct mode_page); 3178 mctl->d_sense = 1; 3179 } else { 3180 bzero(bp->b_un.b_addr, bp->b_bcount); 3181 } 3182 break; 3183 3184 default: 3185 bzero(bp->b_un.b_addr, bp->b_bcount); 3186 break; 3187 } 3188 } 3189 3190 /*ARGSUSED*/ 3191 static int 3192 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 3193 scsi_hba_tran_t *tran, struct scsi_device *sd) 3194 { 3195 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 3196 #if defined(DEBUG) || defined(__lock_lint) 3197 int ctl = ddi_get_instance(softs->devinfo_p); 3198 #endif 3199 int tgt = sd->sd_address.a_target; 3200 int lun = sd->sd_address.a_lun; 3201 struct aac_container *dvp; 3202 3203 DBCALLED(softs, 2); 3204 3205 if ((0 > tgt) || (tgt >= AAC_MAX_LD)) { 3206 AACDB_PRINT(softs, CE_NOTE, 3207 "aac_tran_tgt_init: c%t%dL%d out", ctl, tgt, lun); 3208 return (DDI_FAILURE); 3209 } 3210 3211 /* 3212 * Only support container that has been detected and valid 3213 */ 3214 mutex_enter(&softs->io_lock); 3215 dvp = &softs->containers[tgt]; 3216 if (dvp->valid && lun == 0) { 3217 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%t%dL%d ok", 3218 ctl, tgt, lun); 3219 mutex_exit(&softs->io_lock); 3220 return (DDI_SUCCESS); 3221 } else { 3222 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%t%dL%d", 3223 ctl, tgt, lun); 
3224 mutex_exit(&softs->io_lock); 3225 return (DDI_FAILURE); 3226 } 3227 } 3228 3229 /* 3230 * Check if the firmware is Up And Running. If it is in the Kernel Panic 3231 * state, (BlinkLED code + 1) is returned. 3232 * 0 -- firmware up and running 3233 * -1 -- firmware dead 3234 * >0 -- firmware kernel panic 3235 */ 3236 static int 3237 aac_check_adapter_health(struct aac_softstate *softs) 3238 { 3239 int rval; 3240 3241 rval = PCI_MEM_GET32(softs, AAC_OMR0); 3242 3243 if (rval & AAC_KERNEL_UP_AND_RUNNING) { 3244 rval = 0; 3245 } else if (rval & AAC_KERNEL_PANIC) { 3246 cmn_err(CE_WARN, "firmware panic"); 3247 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */ 3248 } else { 3249 cmn_err(CE_WARN, "firmware dead"); 3250 rval = -1; 3251 } 3252 return (rval); 3253 } 3254 3255 static void 3256 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp, 3257 uchar_t reason) 3258 { 3259 acp->flags |= AAC_CMD_ABORT; 3260 3261 if (acp->pkt) { 3262 /* 3263 * Each lun should generate a unit attention 3264 * condition when reset. 3265 * Phys. drives are treated as logical ones 3266 * during error recovery. 3267 */ 3268 if (softs->flags & AAC_STATE_RESET) 3269 aac_set_arq_data_reset(softs, acp); 3270 3271 switch (reason) { 3272 case CMD_TIMEOUT: 3273 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 3274 STAT_TIMEOUT | STAT_BUS_RESET); 3275 break; 3276 case CMD_RESET: 3277 /* aac support only RESET_ALL */ 3278 aac_set_pkt_reason(softs, acp, CMD_RESET, 3279 STAT_BUS_RESET); 3280 break; 3281 case CMD_ABORTED: 3282 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 3283 STAT_ABORTED); 3284 break; 3285 } 3286 } 3287 aac_end_io(softs, acp); 3288 } 3289 3290 /* 3291 * Abort all the pending commands of type iocmd or just the command pkt 3292 * corresponding to pkt 3293 */ 3294 static void 3295 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt, 3296 int reason) 3297 { 3298 struct aac_cmd *ac_arg, *acp; 3299 int i; 3300 3301 if (pkt == NULL) { 3302 ac_arg = NULL; 3303 } else { 3304 ac_arg = PKT2AC(pkt); 3305 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ? 3306 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC; 3307 } 3308 3309 /* 3310 * a) outstanding commands on the controller 3311 * Note: should abort outstanding commands only after one 3312 * IOP reset has been done. 3313 */ 3314 if (iocmd & AAC_IOCMD_OUTSTANDING) { 3315 struct aac_cmd *acp; 3316 3317 for (i = 0; i < AAC_MAX_LD; i++) { 3318 if (softs->containers[i].valid) 3319 softs->containers[i].reset = 1; 3320 } 3321 while ((acp = softs->q_busy.q_head) != NULL) 3322 aac_abort_iocmd(softs, acp, reason); 3323 } 3324 3325 /* b) commands in the waiting queues */ 3326 for (i = 0; i < AAC_CMDQ_NUM; i++) { 3327 if (iocmd & (1 << i)) { 3328 if (ac_arg) { 3329 aac_abort_iocmd(softs, ac_arg, reason); 3330 } else { 3331 while ((acp = softs->q_wait[i].q_head) != NULL) 3332 aac_abort_iocmd(softs, acp, reason); 3333 } 3334 } 3335 } 3336 } 3337 3338 /* 3339 * The draining thread is shared among quiesce threads. It terminates 3340 * when the adapter is quiesced or stopped by aac_stop_drain(). 3341 */ 3342 static void 3343 aac_check_drain(void *arg) 3344 { 3345 struct aac_softstate *softs = arg; 3346 3347 mutex_enter(&softs->io_lock); 3348 if (softs->ndrains) { 3349 /* 3350 * If both ASYNC and SYNC bus throttle are held, 3351 * wake up threads only when both are drained out. 
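 *
 * Concretely, a queue counts as drained once either its throttle has
 * been released (bus_throttle[q] > 0) or it has no outstanding
 * commands (bus_ncmds[q] == 0); waiters are woken only when both the
 * SYNC and ASYNC queues qualify, otherwise the check is re-armed for
 * another AAC_QUIESCE_TICK seconds.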
3352 */ 3353 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 || 3354 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) && 3355 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 || 3356 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0)) 3357 cv_broadcast(&softs->drain_cv); 3358 else 3359 softs->drain_timeid = timeout(aac_check_drain, softs, 3360 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 3361 } 3362 mutex_exit(&softs->io_lock); 3363 } 3364 3365 /* 3366 * If not draining the outstanding cmds, drain them. Otherwise, 3367 * only update ndrains. 3368 */ 3369 static void 3370 aac_start_drain(struct aac_softstate *softs) 3371 { 3372 if (softs->ndrains == 0) { 3373 softs->drain_timeid = timeout(aac_check_drain, softs, 3374 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 3375 } 3376 softs->ndrains++; 3377 } 3378 3379 /* 3380 * Stop the draining thread when no other threads use it any longer. 3381 * Side effect: io_lock may be released in the middle. 3382 */ 3383 static void 3384 aac_stop_drain(struct aac_softstate *softs) 3385 { 3386 softs->ndrains--; 3387 if (softs->ndrains == 0) { 3388 if (softs->drain_timeid != 0) { 3389 timeout_id_t tid = softs->drain_timeid; 3390 3391 softs->drain_timeid = 0; 3392 mutex_exit(&softs->io_lock); 3393 (void) untimeout(tid); 3394 mutex_enter(&softs->io_lock); 3395 } 3396 } 3397 } 3398 3399 /* 3400 * The following function comes from Adaptec: 3401 * 3402 * Once do an IOP reset, basically the driver have to re-initialize the card 3403 * as if up from a cold boot, and the driver is responsible for any IO that 3404 * is outstanding to the adapter at the time of the IOP RESET. And prepare 3405 * for IOP RESET by making the init code modular with the ability to call it 3406 * from multiple places. 3407 */ 3408 static int 3409 aac_reset_adapter(struct aac_softstate *softs) 3410 { 3411 int health; 3412 uint32_t status; 3413 int rval = AACERR; 3414 3415 DBCALLED(softs, 1); 3416 3417 ASSERT(softs->state & AAC_STATE_RESET); 3418 3419 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0); 3420 /* Disable interrupt */ 3421 AAC_DISABLE_INTR(softs); 3422 3423 health = aac_check_adapter_health(softs); 3424 if (health == -1) { 3425 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 3426 goto finish; 3427 } 3428 if (health == 0) /* flush drives if possible */ 3429 (void) aac_shutdown(softs); 3430 3431 /* Execute IOP reset */ 3432 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0, 3433 &status)) != AACOK) { 3434 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3435 struct aac_fib *fibp; 3436 struct aac_pause_command *pc; 3437 3438 if ((status & 0xf) == 0xf) { 3439 uint32_t wait_count; 3440 3441 /* 3442 * Sunrise Lake has dual cores and we must drag the 3443 * other core with us to reset simultaneously. There 3444 * are 2 bits in the Inbound Reset Control and Status 3445 * Register (offset 0x38) of the Sunrise Lake to reset 3446 * the chip without clearing out the PCI configuration 3447 * info (COMMAND & BARS). 
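 *
 * This path is taken above when the low nibble of the IOP_RESET
 * status is 0xf; the AAC_IRCSR_CORES_RST write below asserts the
 * reset for both cores at once, after which the MU must be left
 * untouched for several seconds (see the delay loop that follows).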
3448 */ 3449 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST); 3450 3451 /* 3452 * We need to wait for 5 seconds before accessing the MU 3453 * again 10000 * 100us = 1000,000us = 1000ms = 1s 3454 */ 3455 wait_count = 5 * 10000; 3456 while (wait_count) { 3457 drv_usecwait(100); /* delay 100 microseconds */ 3458 wait_count--; 3459 } 3460 } else { 3461 if (status == SRB_STATUS_INVALID_REQUEST) 3462 cmn_err(CE_WARN, "!IOP_RESET not supported"); 3463 else /* probably timeout */ 3464 cmn_err(CE_WARN, "!IOP_RESET failed"); 3465 3466 /* Unwind aac_shutdown() */ 3467 fibp = softs->sync_slot.fibp; 3468 pc = (struct aac_pause_command *)&fibp->data[0]; 3469 3470 bzero(pc, sizeof (*pc)); 3471 ddi_put32(acc, &pc->Command, VM_ContainerConfig); 3472 ddi_put32(acc, &pc->Type, CT_PAUSE_IO); 3473 ddi_put32(acc, &pc->Timeout, 1); 3474 ddi_put32(acc, &pc->Min, 1); 3475 ddi_put32(acc, &pc->NoRescan, 1); 3476 3477 (void) aac_sync_fib(softs, ContainerCommand, 3478 AAC_FIB_SIZEOF(struct aac_pause_command)); 3479 3480 ddi_fm_service_impact(softs->devinfo_p, 3481 DDI_SERVICE_LOST); 3482 goto finish; 3483 } 3484 } 3485 3486 /* 3487 * Re-read and renegotiate the FIB parameters, as one of the actions 3488 * that can result from an IOP reset is the running of a new firmware 3489 * image. 3490 */ 3491 if (aac_common_attach(softs) != AACOK) 3492 goto finish; 3493 3494 rval = AACOK; 3495 3496 finish: 3497 AAC_ENABLE_INTR(softs); 3498 return (rval); 3499 } 3500 3501 static void 3502 aac_set_throttle(struct aac_softstate *softs, struct aac_container *dvp, int q, 3503 int throttle) 3504 { 3505 /* 3506 * If the bus is draining/quiesced, no changes to the throttles 3507 * are allowed. All throttles should have been set to 0. 3508 */ 3509 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains) 3510 return; 3511 dvp->throttle[q] = throttle; 3512 } 3513 3514 static void 3515 aac_hold_bus(struct aac_softstate *softs, int iocmds) 3516 { 3517 int i, q; 3518 3519 /* Hold bus by holding every device on the bus */ 3520 for (q = 0; q < AAC_CMDQ_NUM; q++) { 3521 if (iocmds & (1 << q)) { 3522 softs->bus_throttle[q] = 0; 3523 for (i = 0; i < AAC_MAX_LD; i++) 3524 aac_set_throttle(softs, &softs->containers[i], 3525 q, 0); 3526 } 3527 } 3528 } 3529 3530 static void 3531 aac_unhold_bus(struct aac_softstate *softs, int iocmds) 3532 { 3533 int i, q; 3534 3535 for (q = 0; q < AAC_CMDQ_NUM; q++) { 3536 if (iocmds & (1 << q)) { 3537 /* 3538 * Should not unhold AAC_IOCMD_ASYNC bus, if it has been 3539 * quiesced or being drained by possibly some quiesce 3540 * threads. 3541 */ 3542 if (q == AAC_CMDQ_ASYNC && ((softs->state & 3543 AAC_STATE_QUIESCED) || softs->ndrains)) 3544 continue; 3545 softs->bus_throttle[q] = softs->total_slots; 3546 for (i = 0; i < AAC_MAX_LD; i++) 3547 aac_set_throttle(softs, &softs->containers[i], 3548 q, softs->total_slots); 3549 } 3550 } 3551 } 3552 3553 static int 3554 aac_do_reset(struct aac_softstate *softs) 3555 { 3556 int health; 3557 int rval; 3558 3559 softs->state |= AAC_STATE_RESET; 3560 health = aac_check_adapter_health(softs); 3561 3562 /* 3563 * Hold off new io commands and wait all outstanding io 3564 * commands to complete. 
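 *
 * The polling loop below drives the interrupt handler by hand (in
 * case interrupts are disabled) once every 100us, so a budget of
 * AAC_QUIESCE_TIMEOUT * 1000 * 10 iterations corresponds to roughly
 * AAC_QUIESCE_TIMEOUT seconds of draining before an IOP reset is
 * considered.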
3565 */ 3566 if (health == 0 && (softs->bus_ncmds[AAC_CMDQ_SYNC] || 3567 softs->bus_ncmds[AAC_CMDQ_ASYNC])) { 3568 /* 3569 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds 3570 * to complete the outstanding io commands 3571 */ 3572 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10; 3573 int (*intr_handler)(struct aac_softstate *); 3574 3575 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 3576 /* 3577 * Poll the adapter by ourselves in case interrupt is disabled 3578 * and to avoid releasing the io_lock. 3579 */ 3580 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 3581 aac_process_intr_new : aac_process_intr_old; 3582 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] || 3583 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) { 3584 drv_usecwait(100); 3585 (void) intr_handler(softs); 3586 timeout--; 3587 } 3588 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 3589 } 3590 3591 /* 3592 * If a longer waiting time still can't drain all outstanding io 3593 * commands, do IOP reset. 3594 */ 3595 if (softs->bus_ncmds[AAC_CMDQ_SYNC] || 3596 softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 3597 if ((rval = aac_reset_adapter(softs)) != AACOK) 3598 softs->state |= AAC_STATE_DEAD; 3599 } else { 3600 rval = AACOK; 3601 } 3602 3603 softs->state &= ~AAC_STATE_RESET; 3604 return (rval); 3605 } 3606 3607 static int 3608 aac_tran_reset(struct scsi_address *ap, int level) 3609 { 3610 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 3611 int rval; 3612 3613 DBCALLED(softs, 1); 3614 3615 if (level != RESET_ALL) { 3616 cmn_err(CE_NOTE, "!reset target/lun not supported"); 3617 return (0); 3618 } 3619 3620 mutex_enter(&softs->io_lock); 3621 rval = (aac_do_reset(softs) == AACOK) ? 1 : 0; 3622 if (rval == 1 && !ddi_in_panic()) { 3623 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC, 3624 NULL, CMD_RESET); 3625 aac_start_waiting_io(softs); 3626 } else { 3627 /* Abort IOCTL cmds when system panic or adapter dead */ 3628 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET); 3629 } 3630 mutex_exit(&softs->io_lock); 3631 3632 aac_drain_comp_q(softs); 3633 return (rval); 3634 } 3635 3636 static int 3637 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 3638 { 3639 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 3640 3641 DBCALLED(softs, 1); 3642 3643 mutex_enter(&softs->io_lock); 3644 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED); 3645 mutex_exit(&softs->io_lock); 3646 3647 aac_drain_comp_q(softs); 3648 return (1); 3649 } 3650 3651 void 3652 aac_free_dmamap(struct aac_cmd *acp) 3653 { 3654 /* Free dma mapping */ 3655 if (acp->flags & AAC_CMD_DMA_VALID) { 3656 ASSERT(acp->buf_dma_handle); 3657 (void) ddi_dma_unbind_handle(acp->buf_dma_handle); 3658 acp->flags &= ~AAC_CMD_DMA_VALID; 3659 } 3660 3661 if (acp->abp != NULL) { /* free non-aligned buf DMA */ 3662 ASSERT(acp->buf_dma_handle); 3663 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp) 3664 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr, 3665 (uint8_t *)acp->abp, acp->bp->b_bcount, 3666 DDI_DEV_AUTOINCR); 3667 ddi_dma_mem_free(&acp->abh); 3668 acp->abp = NULL; 3669 } 3670 3671 if (acp->buf_dma_handle) { 3672 ddi_dma_free_handle(&acp->buf_dma_handle); 3673 acp->buf_dma_handle = NULL; 3674 } 3675 } 3676 3677 static void 3678 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 3679 { 3680 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported", 3681 ((union scsi_cdb *)acp->pkt->pkt_cdbp)->scc_cmd); 3682 aac_free_dmamap(acp); 3683 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0); 
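	/*
	 * The sense data built above reports KEY_ILLEGAL_REQUEST with what
	 * corresponds to ASC/ASCQ 0x20/0x00 ("invalid command operation
	 * code") before the command is completed through the soft callback
	 * below.
	 */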
3684 aac_soft_callback(softs, acp); 3685 } 3686 3687 /* 3688 * Handle command to logical device 3689 */ 3690 static int 3691 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp) 3692 { 3693 struct aac_container *dvp; 3694 struct scsi_pkt *pkt; 3695 union scsi_cdb *cdbp; 3696 struct buf *bp; 3697 int rval; 3698 3699 dvp = acp->dvp; 3700 pkt = acp->pkt; 3701 cdbp = (union scsi_cdb *)pkt->pkt_cdbp; 3702 bp = acp->bp; 3703 3704 switch (cdbp->scc_cmd) { 3705 case SCMD_INQUIRY: /* inquiry */ 3706 aac_free_dmamap(acp); 3707 aac_inquiry(softs, pkt, cdbp, bp); 3708 aac_soft_callback(softs, acp); 3709 rval = TRAN_ACCEPT; 3710 break; 3711 3712 case SCMD_READ_CAPACITY: /* read capacity */ 3713 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3714 struct scsi_capacity cap; 3715 uint64_t last_lba; 3716 3717 /* check 64-bit LBA */ 3718 last_lba = dvp->size - 1; 3719 if (last_lba > 0xffffffffull) { 3720 cap.capacity = 0xfffffffful; 3721 } else { 3722 cap.capacity = BE_32(last_lba); 3723 } 3724 cap.lbasize = BE_32(AAC_SECTOR_SIZE); 3725 3726 aac_free_dmamap(acp); 3727 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 3728 bp_mapin(bp); 3729 bcopy(&cap, bp->b_un.b_addr, 8); 3730 pkt->pkt_state |= STATE_XFERRED_DATA; 3731 } 3732 aac_soft_callback(softs, acp); 3733 rval = TRAN_ACCEPT; 3734 break; 3735 3736 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */ 3737 /* Check if containers need 64-bit LBA support */ 3738 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) { 3739 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3740 struct scsi_capacity_16 cap16; 3741 int cap_len = sizeof (struct scsi_capacity_16); 3742 3743 bzero(&cap16, cap_len); 3744 cap16.sc_capacity = BE_64(dvp->size); 3745 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE); 3746 3747 aac_free_dmamap(acp); 3748 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3749 bp_mapin(bp); 3750 bcopy(&cap16, bp->b_un.b_addr, cap_len); 3751 pkt->pkt_state |= STATE_XFERRED_DATA; 3752 } 3753 aac_soft_callback(softs, acp); 3754 } else { 3755 aac_unknown_scmd(softs, acp); 3756 } 3757 rval = TRAN_ACCEPT; 3758 break; 3759 3760 case SCMD_READ_G4: /* read_16 */ 3761 case SCMD_WRITE_G4: /* write_16 */ 3762 if (softs->flags & AAC_FLAGS_RAW_IO) { 3763 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 3764 acp->blkno = ((uint64_t) \ 3765 GETG4ADDR(cdbp) << 32) | \ 3766 (uint32_t)GETG4ADDRTL(cdbp); 3767 goto do_io; 3768 } 3769 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported"); 3770 aac_unknown_scmd(softs, acp); 3771 rval = TRAN_ACCEPT; 3772 break; 3773 3774 case SCMD_READ: /* read_6 */ 3775 case SCMD_WRITE: /* write_6 */ 3776 acp->blkno = GETG0ADDR(cdbp); 3777 goto do_io; 3778 3779 case SCMD_READ_G1: /* read_10 */ 3780 case SCMD_WRITE_G1: /* write_10 */ 3781 acp->blkno = (uint32_t)GETG1ADDR(cdbp); 3782 do_io: 3783 if (acp->flags & AAC_CMD_DMA_VALID) { 3784 uint64_t cnt_size = dvp->size; 3785 3786 /* 3787 * If LBA > array size AND rawio, the 3788 * adapter may hang. So check it before 3789 * sending. 3790 * NOTE: (blkno + blkcnt) may overflow 3791 */ 3792 if ((acp->blkno < cnt_size) && 3793 ((acp->blkno + acp->bcount / 3794 AAC_BLK_SIZE) <= cnt_size)) { 3795 rval = aac_do_io(softs, acp); 3796 } else { 3797 /* 3798 * Request exceeds the capacity of disk, 3799 * set error block number to last LBA 3800 * + 1. 
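 *
 * For example (illustrative numbers only): with cnt_size = 1000
 * blocks, a request with blkno = 998 and bcount = 3 * AAC_BLK_SIZE
 * passes the first test (998 < 1000) but fails the second
 * (998 + 3 > 1000), so it is completed below with KEY_ILLEGAL_REQUEST,
 * ASC 0x21 (LBA out of range) and the information field set to
 * cnt_size.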
3801 */ 3802 aac_set_arq_data(pkt, 3803 KEY_ILLEGAL_REQUEST, 0x21, 3804 0x00, cnt_size); 3805 aac_soft_callback(softs, acp); 3806 rval = TRAN_ACCEPT; 3807 } 3808 } else if (acp->bcount == 0) { 3809 /* For 0 length IO, just return ok */ 3810 aac_soft_callback(softs, acp); 3811 rval = TRAN_ACCEPT; 3812 } else { 3813 rval = TRAN_BADPKT; 3814 } 3815 break; 3816 3817 case SCMD_MODE_SENSE: /* mode_sense_6 */ 3818 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */ 3819 int capacity; 3820 3821 aac_free_dmamap(acp); 3822 if (dvp->size > 0xffffffffull) 3823 capacity = 0xfffffffful; /* 64-bit LBA */ 3824 else 3825 capacity = dvp->size; 3826 aac_mode_sense(softs, pkt, cdbp, bp, capacity); 3827 aac_soft_callback(softs, acp); 3828 rval = TRAN_ACCEPT; 3829 break; 3830 } 3831 3832 case SCMD_TEST_UNIT_READY: 3833 case SCMD_REQUEST_SENSE: 3834 case SCMD_FORMAT: 3835 case SCMD_START_STOP: 3836 aac_free_dmamap(acp); 3837 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3838 if (acp->flags & AAC_CMD_BUF_READ) { 3839 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 3840 bp_mapin(bp); 3841 bzero(bp->b_un.b_addr, bp->b_bcount); 3842 } 3843 pkt->pkt_state |= STATE_XFERRED_DATA; 3844 } 3845 aac_soft_callback(softs, acp); 3846 rval = TRAN_ACCEPT; 3847 break; 3848 3849 case SCMD_SYNCHRONIZE_CACHE: 3850 acp->flags |= AAC_CMD_NTAG; 3851 acp->aac_cmd_fib = aac_cmd_fib_sync; 3852 acp->ac_comp = aac_synccache_complete; 3853 rval = aac_do_io(softs, acp); 3854 break; 3855 3856 case SCMD_DOORLOCK: 3857 aac_free_dmamap(acp); 3858 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0; 3859 aac_soft_callback(softs, acp); 3860 rval = TRAN_ACCEPT; 3861 break; 3862 3863 default: /* unknown command */ 3864 aac_unknown_scmd(softs, acp); 3865 rval = TRAN_ACCEPT; 3866 break; 3867 } 3868 3869 return (rval); 3870 } 3871 3872 static int 3873 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) 3874 { 3875 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 3876 struct aac_cmd *acp = PKT2AC(pkt); 3877 struct aac_container *dvp = acp->dvp; 3878 int rval; 3879 3880 DBCALLED(softs, 2); 3881 3882 /* 3883 * Reinitialize some fields of ac and pkt; the packet may 3884 * have been resubmitted 3885 */ 3886 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \ 3887 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID; 3888 acp->timeout = acp->pkt->pkt_time; 3889 if (pkt->pkt_flags & FLAG_NOINTR) 3890 acp->flags |= AAC_CMD_NO_INTR; 3891 pkt->pkt_reason = CMD_CMPLT; 3892 pkt->pkt_state = 0; 3893 pkt->pkt_statistics = 0; 3894 *pkt->pkt_scbp = 0; /* clear arq scsi_status */ 3895 3896 if (acp->flags & AAC_CMD_DMA_VALID) { 3897 pkt->pkt_resid = acp->bcount; 3898 /* Consistent packets need to be sync'ed first */ 3899 if ((acp->flags & AAC_CMD_CONSISTENT) && 3900 (acp->flags & AAC_CMD_BUF_WRITE)) 3901 if (aac_dma_sync_ac(acp) != AACOK) { 3902 ddi_fm_service_impact(softs->devinfo_p, 3903 DDI_SERVICE_UNAFFECTED); 3904 return (TRAN_BADPKT); 3905 } 3906 } else { 3907 pkt->pkt_resid = 0; 3908 } 3909 3910 mutex_enter(&softs->io_lock); 3911 AACDB_PRINT_SCMD(softs, acp); 3912 if (dvp->valid && ap->a_lun == 0 && !(softs->state & AAC_STATE_DEAD)) { 3913 rval = aac_tran_start_ld(softs, acp); 3914 } else { 3915 AACDB_PRINT(softs, CE_WARN, 3916 "Cannot send cmd to target t%dL%d: %s", 3917 ap->a_target, ap->a_lun, 3918 (softs->state & AAC_STATE_DEAD) ? 
3919 "adapter dead" : "target invalid"); 3920 rval = TRAN_FATAL_ERROR; 3921 } 3922 mutex_exit(&softs->io_lock); 3923 return (rval); 3924 } 3925 3926 static int 3927 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom) 3928 { 3929 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 3930 struct aac_container *dvp; 3931 int rval; 3932 3933 DBCALLED(softs, 2); 3934 3935 /* We don't allow inquiring about capabilities for other targets */ 3936 if (cap == NULL || whom == 0) { 3937 AACDB_PRINT(softs, CE_WARN, 3938 "GetCap> %s not supported: whom=%d", cap, whom); 3939 return (-1); 3940 } 3941 3942 mutex_enter(&softs->io_lock); 3943 dvp = &softs->containers[ap->a_target]; 3944 if (!dvp->valid || (ap->a_lun != 0)) { 3945 mutex_exit(&softs->io_lock); 3946 AACDB_PRINT(softs, CE_WARN, "Bad target t%dL%d to getcap", 3947 ap->a_target, ap->a_lun); 3948 return (-1); 3949 } 3950 3951 switch (scsi_hba_lookup_capstr(cap)) { 3952 case SCSI_CAP_ARQ: /* auto request sense */ 3953 rval = 1; 3954 break; 3955 case SCSI_CAP_UNTAGGED_QING: 3956 case SCSI_CAP_TAGGED_QING: 3957 rval = 1; 3958 break; 3959 case SCSI_CAP_DMA_MAX: 3960 rval = softs->buf_dma_attr.dma_attr_maxxfer; 3961 break; 3962 default: 3963 rval = -1; 3964 break; 3965 } 3966 mutex_exit(&softs->io_lock); 3967 3968 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d", 3969 cap, ap->a_target, ap->a_lun, rval); 3970 return (rval); 3971 } 3972 3973 /*ARGSUSED*/ 3974 static int 3975 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 3976 { 3977 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 3978 struct aac_container *dvp; 3979 int rval; 3980 3981 DBCALLED(softs, 2); 3982 3983 /* We don't allow inquiring about capabilities for other targets */ 3984 if (cap == NULL || whom == 0) { 3985 AACDB_PRINT(softs, CE_WARN, 3986 "SetCap> %s not supported: whom=%d", cap, whom); 3987 return (-1); 3988 } 3989 3990 mutex_enter(&softs->io_lock); 3991 dvp = &softs->containers[ap->a_target]; 3992 if (!dvp->valid || (ap->a_lun != 0)) { 3993 mutex_exit(&softs->io_lock); 3994 AACDB_PRINT(softs, CE_WARN, "Bad target t%dL%d to setcap", 3995 ap->a_target, ap->a_lun); 3996 return (-1); 3997 } 3998 3999 switch (scsi_hba_lookup_capstr(cap)) { 4000 case SCSI_CAP_ARQ: 4001 /* Force auto request sense */ 4002 rval = (value == 1) ? 1 : 0; 4003 break; 4004 case SCSI_CAP_UNTAGGED_QING: 4005 case SCSI_CAP_TAGGED_QING: 4006 rval = (value == 1) ? 1 : 0; 4007 break; 4008 default: 4009 rval = -1; 4010 break; 4011 } 4012 mutex_exit(&softs->io_lock); 4013 4014 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d", 4015 cap, ap->a_target, ap->a_lun, value, rval); 4016 return (rval); 4017 } 4018 4019 static void 4020 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 4021 { 4022 struct aac_cmd *acp = PKT2AC(pkt); 4023 4024 DBCALLED(NULL, 2); 4025 4026 if (acp->sgt) { 4027 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 4028 acp->left_cookien); 4029 } 4030 aac_free_dmamap(acp); 4031 ASSERT(acp->slotp == NULL); 4032 scsi_hba_pkt_free(ap, pkt); 4033 } 4034 4035 int 4036 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp, 4037 struct buf *bp, int flags, int (*cb)(), caddr_t arg) 4038 { 4039 int kf = (cb == SLEEP_FUNC) ? 
KM_SLEEP : KM_NOSLEEP; 4040 uint_t oldcookiec; 4041 int bioerr; 4042 int rval; 4043 4044 oldcookiec = acp->left_cookien; 4045 4046 /* Move window to build s/g map */ 4047 if (acp->total_nwin > 0) { 4048 if (++acp->cur_win < acp->total_nwin) { 4049 off_t off; 4050 size_t len; 4051 4052 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win, 4053 &off, &len, &acp->cookie, &acp->left_cookien); 4054 if (rval == DDI_SUCCESS) 4055 goto get_dma_cookies; 4056 AACDB_PRINT(softs, CE_WARN, 4057 "ddi_dma_getwin() fail %d", rval); 4058 return (NULL); 4059 } 4060 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer"); 4061 return (NULL); 4062 } 4063 4064 /* We need to transfer data, so we alloc DMA resources for this pkt */ 4065 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) { 4066 uint_t dma_flags = 0; 4067 struct aac_sge *sge; 4068 4069 /* 4070 * We will still use this pointer to fake some 4071 * information in tran_start 4072 */ 4073 acp->bp = bp; 4074 4075 /* Set dma flags */ 4076 if (BUF_IS_READ(bp)) { 4077 dma_flags |= DDI_DMA_READ; 4078 acp->flags |= AAC_CMD_BUF_READ; 4079 } else { 4080 dma_flags |= DDI_DMA_WRITE; 4081 acp->flags |= AAC_CMD_BUF_WRITE; 4082 } 4083 if (flags & PKT_CONSISTENT) 4084 dma_flags |= DDI_DMA_CONSISTENT; 4085 if (flags & PKT_DMA_PARTIAL) 4086 dma_flags |= DDI_DMA_PARTIAL; 4087 4088 /* Alloc buf dma handle */ 4089 if (!acp->buf_dma_handle) { 4090 rval = ddi_dma_alloc_handle(softs->devinfo_p, 4091 &softs->buf_dma_attr, cb, arg, 4092 &acp->buf_dma_handle); 4093 if (rval != DDI_SUCCESS) { 4094 AACDB_PRINT(softs, CE_WARN, 4095 "Can't allocate DMA handle, errno=%d", 4096 rval); 4097 goto error_out; 4098 } 4099 } 4100 4101 /* Bind buf */ 4102 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) { 4103 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle, 4104 bp, dma_flags, cb, arg, &acp->cookie, 4105 &acp->left_cookien); 4106 } else { 4107 size_t bufsz; 4108 4109 AACDB_PRINT_TRAN(softs, 4110 "non-aligned buffer: addr=0x%p, cnt=%lu", 4111 (void *)bp->b_un.b_addr, bp->b_bcount); 4112 if (bp->b_flags & (B_PAGEIO|B_PHYS)) 4113 bp_mapin(bp); 4114 4115 rval = ddi_dma_mem_alloc(acp->buf_dma_handle, 4116 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN), 4117 &aac_acc_attr, DDI_DMA_STREAMING, 4118 cb, arg, &acp->abp, &bufsz, &acp->abh); 4119 4120 if (rval != DDI_SUCCESS) { 4121 AACDB_PRINT(softs, CE_NOTE, 4122 "Cannot alloc DMA to non-aligned buf"); 4123 bioerr = 0; 4124 goto error_out; 4125 } 4126 4127 if (acp->flags & AAC_CMD_BUF_WRITE) 4128 ddi_rep_put8(acp->abh, 4129 (uint8_t *)bp->b_un.b_addr, 4130 (uint8_t *)acp->abp, bp->b_bcount, 4131 DDI_DEV_AUTOINCR); 4132 4133 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle, 4134 NULL, acp->abp, bufsz, dma_flags, cb, arg, 4135 &acp->cookie, &acp->left_cookien); 4136 } 4137 4138 switch (rval) { 4139 case DDI_DMA_PARTIAL_MAP: 4140 if (ddi_dma_numwin(acp->buf_dma_handle, 4141 &acp->total_nwin) == DDI_FAILURE) { 4142 AACDB_PRINT(softs, CE_WARN, 4143 "Cannot get number of DMA windows"); 4144 bioerr = 0; 4145 goto error_out; 4146 } 4147 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 4148 acp->left_cookien); 4149 acp->cur_win = 0; 4150 break; 4151 4152 case DDI_DMA_MAPPED: 4153 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 4154 acp->left_cookien); 4155 acp->cur_win = 0; 4156 acp->total_nwin = 1; 4157 break; 4158 4159 case DDI_DMA_NORESOURCES: 4160 bioerr = 0; 4161 AACDB_PRINT(softs, CE_WARN, 4162 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES"); 4163 goto error_out; 4164 case DDI_DMA_BADATTR: 4165 case DDI_DMA_NOMAPPING: 4166 bioerr =
EFAULT; 4167 AACDB_PRINT(softs, CE_WARN, 4168 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING"); 4169 goto error_out; 4170 case DDI_DMA_TOOBIG: 4171 bioerr = EINVAL; 4172 AACDB_PRINT(softs, CE_WARN, 4173 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)", 4174 bp->b_bcount); 4175 goto error_out; 4176 default: 4177 bioerr = EINVAL; 4178 AACDB_PRINT(softs, CE_WARN, 4179 "Cannot bind buf for DMA: %d", rval); 4180 goto error_out; 4181 } 4182 acp->flags |= AAC_CMD_DMA_VALID; 4183 4184 get_dma_cookies: 4185 ASSERT(acp->left_cookien > 0); 4186 if (acp->left_cookien > softs->aac_sg_tablesize) { 4187 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d", 4188 acp->left_cookien); 4189 bioerr = EINVAL; 4190 goto error_out; 4191 } 4192 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) { 4193 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 4194 oldcookiec); 4195 acp->sgt = NULL; 4196 } 4197 if (acp->sgt == NULL) { 4198 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \ 4199 acp->left_cookien, kf); 4200 if (acp->sgt == NULL) { 4201 AACDB_PRINT(softs, CE_WARN, 4202 "sgt kmem_alloc fail"); 4203 bioerr = ENOMEM; 4204 goto error_out; 4205 } 4206 } 4207 4208 sge = &acp->sgt[0]; 4209 sge->bcount = acp->cookie.dmac_size; 4210 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 4211 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 4212 acp->bcount = acp->cookie.dmac_size; 4213 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) { 4214 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie); 4215 sge->bcount = acp->cookie.dmac_size; 4216 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 4217 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 4218 acp->bcount += acp->cookie.dmac_size; 4219 } 4220 4221 /* 4222 * Note: The old DMA engine does not correctly handle the 4223 * dma_attr_maxxfer attribute, so we have to enforce 4224 * it ourselves. 4225 */ 4226 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) { 4227 AACDB_PRINT(softs, CE_NOTE, 4228 "large xfer size received %d\n", acp->bcount); 4229 bioerr = EINVAL; 4230 goto error_out; 4231 } 4232 4233 acp->total_xfer += acp->bcount; 4234 4235 if (acp->pkt) { 4236 /* Return remaining byte count */ 4237 acp->pkt->pkt_resid = bp->b_bcount - acp->total_xfer; 4238 4239 AACDB_PRINT_TRAN(softs, 4240 "bp=0x%p, xfered=%d/%d, resid=%d", 4241 (void *)bp->b_un.b_addr, (int)acp->total_xfer, 4242 (int)bp->b_bcount, (int)acp->pkt->pkt_resid); 4243 4244 ASSERT(acp->pkt->pkt_resid >= 0); 4245 } 4246 } 4247 return (AACOK); 4248 4249 error_out: 4250 bioerror(bp, bioerr); 4251 return (AACERR); 4252 } 4253 4254 static struct scsi_pkt * 4255 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 4256 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags, 4257 int (*callback)(), caddr_t arg) 4258 { 4259 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4260 struct aac_cmd *acp, *new_acp; 4261 4262 DBCALLED(softs, 2); 4263 4264 /* Allocate pkt */ 4265 if (pkt == NULL) { 4266 int slen; 4267 4268 /* Force auto request sense */ 4269 slen = (statuslen > softs->slen) ?
statuslen : softs->slen; 4270 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen, 4271 slen, tgtlen, sizeof (struct aac_cmd), callback, arg); 4272 if (pkt == NULL) { 4273 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed"); 4274 return (NULL); 4275 } 4276 acp = new_acp = PKT2AC(pkt); 4277 acp->pkt = pkt; 4278 acp->cmdlen = cmdlen; 4279 4280 acp->dvp = &softs->containers[ap->a_target]; 4281 acp->aac_cmd_fib = softs->aac_cmd_fib; 4282 acp->ac_comp = aac_ld_complete; 4283 } else { 4284 acp = PKT2AC(pkt); 4285 new_acp = NULL; 4286 } 4287 4288 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK) 4289 return (pkt); 4290 4291 if (new_acp) 4292 aac_tran_destroy_pkt(ap, pkt); 4293 return (NULL); 4294 } 4295 4296 /* 4297 * tran_sync_pkt(9E) - explicit DMA synchronization 4298 */ 4299 /*ARGSUSED*/ 4300 static void 4301 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 4302 { 4303 struct aac_cmd *acp = PKT2AC(pkt); 4304 4305 DBCALLED(NULL, 2); 4306 4307 if (aac_dma_sync_ac(acp) != AACOK) 4308 ddi_fm_service_impact( 4309 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p, 4310 DDI_SERVICE_UNAFFECTED); 4311 } 4312 4313 /* 4314 * tran_dmafree(9E) - deallocate DMA resources allocated for command 4315 */ 4316 /*ARGSUSED*/ 4317 static void 4318 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 4319 { 4320 struct aac_cmd *acp = PKT2AC(pkt); 4321 4322 DBCALLED(NULL, 2); 4323 4324 aac_free_dmamap(acp); 4325 } 4326 4327 static int 4328 aac_do_quiesce(struct aac_softstate *softs) 4329 { 4330 aac_hold_bus(softs, AAC_IOCMD_ASYNC); 4331 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 4332 aac_start_drain(softs); 4333 do { 4334 if (cv_wait_sig(&softs->drain_cv, 4335 &softs->io_lock) == 0) { 4336 /* Quiesce has been interrupted */ 4337 aac_stop_drain(softs); 4338 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 4339 aac_start_waiting_io(softs); 4340 return (AACERR); 4341 } 4342 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]); 4343 aac_stop_drain(softs); 4344 } 4345 4346 softs->state |= AAC_STATE_QUIESCED; 4347 return (AACOK); 4348 } 4349 4350 static int 4351 aac_tran_quiesce(dev_info_t *dip) 4352 { 4353 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 4354 int rval; 4355 4356 DBCALLED(softs, 1); 4357 4358 mutex_enter(&softs->io_lock); 4359 if (aac_do_quiesce(softs) == AACOK) 4360 rval = 0; 4361 else 4362 rval = 1; 4363 mutex_exit(&softs->io_lock); 4364 return (rval); 4365 } 4366 4367 static int 4368 aac_do_unquiesce(struct aac_softstate *softs) 4369 { 4370 softs->state &= ~AAC_STATE_QUIESCED; 4371 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 4372 4373 aac_start_waiting_io(softs); 4374 return (AACOK); 4375 } 4376 4377 static int 4378 aac_tran_unquiesce(dev_info_t *dip) 4379 { 4380 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 4381 int rval; 4382 4383 DBCALLED(softs, 1); 4384 4385 mutex_enter(&softs->io_lock); 4386 if (aac_do_unquiesce(softs) == AACOK) 4387 rval = 0; 4388 else 4389 rval = 1; 4390 mutex_exit(&softs->io_lock); 4391 return (rval); 4392 } 4393 4394 static int 4395 aac_hba_setup(struct aac_softstate *softs) 4396 { 4397 scsi_hba_tran_t *hba_tran; 4398 int rval; 4399 4400 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP); 4401 if (hba_tran == NULL) 4402 return (AACERR); 4403 hba_tran->tran_hba_private = softs; 4404 hba_tran->tran_tgt_init = aac_tran_tgt_init; 4405 hba_tran->tran_tgt_probe = scsi_hba_probe; 4406 hba_tran->tran_start = aac_tran_start; 4407 hba_tran->tran_getcap = aac_tran_getcap; 4408 hba_tran->tran_setcap = aac_tran_setcap; 4409 hba_tran->tran_init_pkt = 
aac_tran_init_pkt; 4410 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt; 4411 hba_tran->tran_reset = aac_tran_reset; 4412 hba_tran->tran_abort = aac_tran_abort; 4413 hba_tran->tran_sync_pkt = aac_tran_sync_pkt; 4414 hba_tran->tran_dmafree = aac_tran_dmafree; 4415 hba_tran->tran_quiesce = aac_tran_quiesce; 4416 hba_tran->tran_unquiesce = aac_tran_unquiesce; 4417 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr, 4418 hba_tran, 0); 4419 if (rval != DDI_SUCCESS) { 4420 scsi_hba_tran_free(hba_tran); 4421 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed"); 4422 return (AACERR); 4423 } 4424 4425 return (AACOK); 4426 } 4427 4428 /* 4429 * FIB setup operations 4430 */ 4431 4432 /* 4433 * Init FIB header 4434 */ 4435 static void 4436 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_slot *slotp, 4437 uint16_t cmd, uint16_t fib_size) 4438 { 4439 ddi_acc_handle_t acc = slotp->fib_acc_handle; 4440 struct aac_fib *fibp = slotp->fibp; 4441 uint32_t xfer_state; 4442 4443 xfer_state = 4444 AAC_FIBSTATE_HOSTOWNED | 4445 AAC_FIBSTATE_INITIALISED | 4446 AAC_FIBSTATE_EMPTY | 4447 AAC_FIBSTATE_FROMHOST | 4448 AAC_FIBSTATE_REXPECTED | 4449 AAC_FIBSTATE_NORM; 4450 if (slotp->acp && !(slotp->acp->flags & AAC_CMD_SYNC)) { 4451 xfer_state |= 4452 AAC_FIBSTATE_ASYNC | 4453 AAC_FIBSTATE_FAST_RESPONSE /* enable fast io */; 4454 ddi_put16(acc, &fibp->Header.SenderSize, 4455 softs->aac_max_fib_size); 4456 } else { 4457 ddi_put16(acc, &fibp->Header.SenderSize, AAC_FIB_SIZE); 4458 } 4459 4460 ddi_put32(acc, &fibp->Header.XferState, xfer_state); 4461 ddi_put16(acc, &fibp->Header.Command, cmd); 4462 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB); 4463 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */ 4464 ddi_put16(acc, &fibp->Header.Size, fib_size); 4465 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2)); 4466 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 4467 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */ 4468 } 4469 4470 /* 4471 * Init FIB for raw IO command 4472 */ 4473 static void 4474 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp) 4475 { 4476 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 4477 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0]; 4478 struct aac_sg_entryraw *sgp; 4479 struct aac_sge *sge; 4480 4481 /* Calculate FIB size */ 4482 acp->fib_size = sizeof (struct aac_fib_header) + \ 4483 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \ 4484 sizeof (struct aac_sg_entryraw); 4485 4486 aac_cmd_fib_header(softs, acp->slotp, RawIo, acp->fib_size); 4487 4488 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 
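	    /* Flags convention used in this raw IO FIB: 1 = read, 0 = write */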
1 : 0); 4489 ddi_put16(acc, &io->BpTotal, 0); 4490 ddi_put16(acc, &io->BpComplete, 0); 4491 4492 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno)); 4493 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno)); 4494 ddi_put16(acc, &io->ContainerId, 4495 ((struct aac_container *)acp->dvp)->cid); 4496 4497 /* Fill SG table */ 4498 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien); 4499 ddi_put32(acc, &io->ByteCount, acp->bcount); 4500 4501 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0]; 4502 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 4503 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 4504 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 4505 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 4506 sgp->Next = 0; 4507 sgp->Prev = 0; 4508 sgp->Flags = 0; 4509 } 4510 } 4511 4512 /* Init FIB for 64-bit block IO command */ 4513 static void 4514 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp) 4515 { 4516 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 4517 struct aac_blockread64 *br = (struct aac_blockread64 *) \ 4518 &acp->slotp->fibp->data[0]; 4519 struct aac_sg_entry64 *sgp; 4520 struct aac_sge *sge; 4521 4522 acp->fib_size = sizeof (struct aac_fib_header) + \ 4523 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \ 4524 sizeof (struct aac_sg_entry64); 4525 4526 aac_cmd_fib_header(softs, acp->slotp, ContainerCommand64, 4527 acp->fib_size); 4528 4529 /* 4530 * The definitions for aac_blockread64 and aac_blockwrite64 4531 * are the same. 4532 */ 4533 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 4534 ddi_put16(acc, &br->ContainerId, 4535 ((struct aac_container *)acp->dvp)->cid); 4536 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ? 
4537 VM_CtHostRead64 : VM_CtHostWrite64); 4538 ddi_put16(acc, &br->Pad, 0); 4539 ddi_put16(acc, &br->Flags, 0); 4540 4541 /* Fill SG table */ 4542 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien); 4543 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE); 4544 4545 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0]; 4546 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 4547 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 4548 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 4549 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 4550 } 4551 } 4552 4553 /* Init FIB for block IO command */ 4554 static void 4555 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp) 4556 { 4557 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 4558 struct aac_blockread *br = (struct aac_blockread *) \ 4559 &acp->slotp->fibp->data[0]; 4560 struct aac_sg_entry *sgp; 4561 struct aac_sge *sge = &acp->sgt[0]; 4562 4563 if (acp->flags & AAC_CMD_BUF_READ) { 4564 acp->fib_size = sizeof (struct aac_fib_header) + \ 4565 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \ 4566 sizeof (struct aac_sg_entry); 4567 4568 ddi_put32(acc, &br->Command, VM_CtBlockRead); 4569 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien); 4570 sgp = &br->SgMap.SgEntry[0]; 4571 } else { 4572 struct aac_blockwrite *bw = (struct aac_blockwrite *)br; 4573 4574 acp->fib_size = sizeof (struct aac_fib_header) + \ 4575 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \ 4576 sizeof (struct aac_sg_entry); 4577 4578 ddi_put32(acc, &bw->Command, VM_CtBlockWrite); 4579 ddi_put32(acc, &bw->Stable, CUNSTABLE); 4580 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien); 4581 sgp = &bw->SgMap.SgEntry[0]; 4582 } 4583 aac_cmd_fib_header(softs, acp->slotp, ContainerCommand, acp->fib_size); 4584 4585 /* 4586 * aac_blockread and aac_blockwrite have the similar 4587 * structure head, so use br for bw here 4588 */ 4589 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 4590 ddi_put32(acc, &br->ContainerId, 4591 ((struct aac_container *)acp->dvp)->cid); 4592 ddi_put32(acc, &br->ByteCount, acp->bcount); 4593 4594 /* Fill SG table */ 4595 for (sge = &acp->sgt[0]; 4596 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 4597 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 4598 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 4599 } 4600 } 4601 4602 /*ARGSUSED*/ 4603 void 4604 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp) 4605 { 4606 struct aac_slot *slotp = acp->slotp; 4607 struct aac_fib *fibp = slotp->fibp; 4608 ddi_acc_handle_t acc = slotp->fib_acc_handle; 4609 4610 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp, 4611 acp->fib_size, /* only copy data of needed length */ 4612 DDI_DEV_AUTOINCR); 4613 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 4614 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2); 4615 } 4616 4617 static void 4618 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp) 4619 { 4620 struct aac_slot *slotp = acp->slotp; 4621 ddi_acc_handle_t acc = slotp->fib_acc_handle; 4622 struct aac_synchronize_command *sync = 4623 (struct aac_synchronize_command *)&slotp->fibp->data[0]; 4624 4625 acp->fib_size = sizeof (struct aac_fib_header) + \ 4626 sizeof (struct aac_synchronize_command); 4627 4628 aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size); 4629 ddi_put32(acc, &sync->Command, VM_ContainerConfig); 4630 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE); 4631 
ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid); 4632 ddi_put32(acc, &sync->Count, 4633 sizeof (((struct aac_synchronize_reply *)0)->Data)); 4634 } 4635 4636 /* 4637 * Init FIB for pass-through SCMD 4638 */ 4639 static void 4640 aac_cmd_fib_srb(struct aac_cmd *acp) 4641 { 4642 struct aac_slot *slotp = acp->slotp; 4643 ddi_acc_handle_t acc = slotp->fib_acc_handle; 4644 struct aac_srb *srb = (struct aac_srb *)&slotp->fibp->data[0]; 4645 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0]; 4646 4647 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi); 4648 ddi_put32(acc, &srb->retry_limit, 0); 4649 ddi_put32(acc, &srb->cdb_size, acp->cmdlen); 4650 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */ 4651 4652 ddi_put32(acc, &srb->flags, srb0->flags); 4653 ddi_put32(acc, &srb->channel, srb0->channel); 4654 ddi_put32(acc, &srb->id, srb0->id); 4655 ddi_put32(acc, &srb->lun, srb0->lun); 4656 ddi_rep_put8(acc, srb0->cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR); 4657 } 4658 4659 static void 4660 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp) 4661 { 4662 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 4663 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 4664 struct aac_sg_entry *sgp; 4665 struct aac_sge *sge; 4666 4667 acp->fib_size = sizeof (struct aac_fib_header) + \ 4668 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 4669 acp->left_cookien * sizeof (struct aac_sg_entry); 4670 4671 /* Fill FIB and SRB headers, and copy cdb */ 4672 aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommand, acp->fib_size); 4673 aac_cmd_fib_srb(acp); 4674 4675 /* Fill SG table */ 4676 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 4677 ddi_put32(acc, &srb->count, acp->bcount); 4678 4679 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0]; 4680 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 4681 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 4682 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 4683 } 4684 } 4685 4686 static void 4687 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp) 4688 { 4689 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 4690 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 4691 struct aac_sg_entry64 *sgp; 4692 struct aac_sge *sge; 4693 4694 acp->fib_size = sizeof (struct aac_fib_header) + \ 4695 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 4696 acp->left_cookien * sizeof (struct aac_sg_entry64); 4697 4698 /* Fill FIB and SRB headers, and copy cdb */ 4699 aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommandU64, 4700 acp->fib_size); 4701 aac_cmd_fib_srb(acp); 4702 4703 /* Fill SG table */ 4704 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 4705 ddi_put32(acc, &srb->count, acp->bcount); 4706 4707 for (sge = &acp->sgt[0], 4708 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0]; 4709 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 4710 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 4711 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 4712 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 4713 } 4714 } 4715 4716 static int 4717 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 4718 { 4719 struct aac_slot *slotp; 4720 4721 if (slotp = aac_get_slot(softs)) { 4722 acp->slotp = slotp; 4723 slotp->acp = acp; 4724 acp->aac_cmd_fib(softs, acp); 4725 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, 4726 DDI_DMA_SYNC_FORDEV); 4727 return (AACOK); 4728 } 4729 return (AACERR); 4730 } 4731 4732 static 
int 4733 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp) 4734 { 4735 struct aac_container *dvp = acp->dvp; 4736 int q = AAC_CMDQ(acp); 4737 4738 if (dvp) { 4739 if (dvp->ncmds[q] < dvp->throttle[q]) { 4740 if (!(acp->flags & AAC_CMD_NTAG) || 4741 dvp->ncmds[q] == 0) { 4742 do_bind: 4743 return (aac_cmd_slot_bind(softs, acp)); 4744 } 4745 ASSERT(q == AAC_CMDQ_ASYNC); 4746 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC, 4747 AAC_THROTTLE_DRAIN); 4748 } 4749 } else { 4750 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) 4751 goto do_bind; 4752 } 4753 return (AACERR); 4754 } 4755 4756 static void 4757 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp) 4758 { 4759 struct aac_slot *slotp = acp->slotp; 4760 int q = AAC_CMDQ(acp); 4761 int rval; 4762 4763 /* Set ac and pkt */ 4764 if (acp->pkt) { /* ac from ioctl has no pkt */ 4765 acp->pkt->pkt_state |= 4766 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD; 4767 } 4768 if (acp->timeout) /* 0 indicates no timeout */ 4769 acp->timeout += aac_timebase + aac_tick; 4770 4771 if (acp->dvp) 4772 acp->dvp->ncmds[q]++; 4773 softs->bus_ncmds[q]++; 4774 aac_cmd_enqueue(&softs->q_busy, acp); 4775 4776 if (softs->flags & AAC_FLAGS_NEW_COMM) { 4777 rval = aac_send_command(softs, slotp); 4778 } else { 4779 /* 4780 * If the fib cannot be enqueued, the adapter is in an abnormal 4781 * state and no interrupt will be delivered to us. 4782 */ 4783 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q, 4784 slotp->fib_phyaddr, acp->fib_size); 4785 } 4786 4787 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) 4788 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 4789 4790 /* 4791 * NOTE: We send commands only when slots are available, so we should 4792 * never reach here. 4793 */ 4794 if (rval != AACOK) { 4795 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed"); 4796 if (acp->pkt) { 4797 acp->pkt->pkt_state &= ~STATE_SENT_CMD; 4798 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0); 4799 } 4800 aac_end_io(softs, acp); 4801 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB))) 4802 ddi_trigger_softintr(softs->softint_id); 4803 } 4804 } 4805 4806 static void 4807 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q) 4808 { 4809 struct aac_cmd *acp, *next_acp; 4810 4811 /* Serve as many waiting io's as possible */ 4812 for (acp = q->q_head; acp; acp = next_acp) { 4813 next_acp = acp->next; 4814 if (aac_bind_io(softs, acp) == AACOK) { 4815 aac_cmd_delete(q, acp); 4816 aac_start_io(softs, acp); 4817 } 4818 if (softs->free_io_slot_head == NULL) 4819 break; 4820 } 4821 } 4822 4823 static void 4824 aac_start_waiting_io(struct aac_softstate *softs) 4825 { 4826 /* 4827 * Sync FIB io is served before async FIB io so that io requests 4828 * sent by interactive userland commands are responded to as soon as possible.
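 *
 * Commands land on q_wait[AAC_CMDQ_SYNC] or q_wait[AAC_CMDQ_ASYNC]
 * according to AAC_CMDQ() (see aac_do_io() below); this routine simply
 * drains the sync wait queue first, then the async one, each via
 * aac_start_waitq().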
4829 */ 4830 if (softs->q_wait[AAC_CMDQ_SYNC].q_head) 4831 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]); 4832 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head) 4833 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]); 4834 } 4835 4836 static void 4837 aac_drain_comp_q(struct aac_softstate *softs) 4838 { 4839 struct aac_cmd *acp; 4840 struct scsi_pkt *pkt; 4841 4842 /*CONSTCOND*/ 4843 while (1) { 4844 mutex_enter(&softs->q_comp_mutex); 4845 acp = aac_cmd_dequeue(&softs->q_comp); 4846 mutex_exit(&softs->q_comp_mutex); 4847 if (acp != NULL) { 4848 ASSERT(acp->pkt != NULL); 4849 pkt = acp->pkt; 4850 4851 if (pkt->pkt_reason == CMD_CMPLT) { 4852 /* 4853 * Consistent packets need to be sync'ed first 4854 */ 4855 if ((acp->flags & AAC_CMD_CONSISTENT) && 4856 (acp->flags & AAC_CMD_BUF_READ)) { 4857 if (aac_dma_sync_ac(acp) != AACOK) { 4858 ddi_fm_service_impact( 4859 softs->devinfo_p, 4860 DDI_SERVICE_UNAFFECTED); 4861 pkt->pkt_reason = CMD_TRAN_ERR; 4862 pkt->pkt_statistics = 0; 4863 } 4864 } 4865 if ((aac_check_acc_handle(softs-> \ 4866 comm_space_acc_handle) != DDI_SUCCESS) || 4867 (aac_check_acc_handle(softs-> \ 4868 pci_mem_handle) != DDI_SUCCESS)) { 4869 ddi_fm_service_impact(softs->devinfo_p, 4870 DDI_SERVICE_UNAFFECTED); 4871 ddi_fm_acc_err_clear(softs-> \ 4872 pci_mem_handle, DDI_FME_VER0); 4873 pkt->pkt_reason = CMD_TRAN_ERR; 4874 pkt->pkt_statistics = 0; 4875 } 4876 if (aac_check_dma_handle(softs-> \ 4877 comm_space_dma_handle) != DDI_SUCCESS) { 4878 ddi_fm_service_impact(softs->devinfo_p, 4879 DDI_SERVICE_UNAFFECTED); 4880 pkt->pkt_reason = CMD_TRAN_ERR; 4881 pkt->pkt_statistics = 0; 4882 } 4883 } 4884 (*pkt->pkt_comp)(pkt); 4885 } else { 4886 break; 4887 } 4888 } 4889 } 4890 4891 static int 4892 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp) 4893 { 4894 size_t rlen; 4895 ddi_dma_cookie_t cookie; 4896 uint_t cookien; 4897 4898 /* Allocate FIB dma resource */ 4899 if (ddi_dma_alloc_handle( 4900 softs->devinfo_p, 4901 &softs->addr_dma_attr, 4902 DDI_DMA_SLEEP, 4903 NULL, 4904 &slotp->fib_dma_handle) != DDI_SUCCESS) { 4905 AACDB_PRINT(softs, CE_WARN, 4906 "Cannot alloc dma handle for slot fib area"); 4907 goto error; 4908 } 4909 if (ddi_dma_mem_alloc( 4910 slotp->fib_dma_handle, 4911 softs->aac_max_fib_size, 4912 &aac_acc_attr, 4913 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 4914 DDI_DMA_SLEEP, 4915 NULL, 4916 (caddr_t *)&slotp->fibp, 4917 &rlen, 4918 &slotp->fib_acc_handle) != DDI_SUCCESS) { 4919 AACDB_PRINT(softs, CE_WARN, 4920 "Cannot alloc mem for slot fib area"); 4921 goto error; 4922 } 4923 if (ddi_dma_addr_bind_handle( 4924 slotp->fib_dma_handle, 4925 NULL, 4926 (caddr_t)slotp->fibp, 4927 softs->aac_max_fib_size, 4928 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 4929 DDI_DMA_SLEEP, 4930 NULL, 4931 &cookie, 4932 &cookien) != DDI_DMA_MAPPED) { 4933 AACDB_PRINT(softs, CE_WARN, 4934 "dma bind failed for slot fib area"); 4935 goto error; 4936 } 4937 4938 /* Check dma handles allocated in fib attach */ 4939 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) { 4940 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 4941 goto error; 4942 } 4943 4944 /* Check acc handles allocated in fib attach */ 4945 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) { 4946 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 4947 goto error; 4948 } 4949 4950 slotp->fib_phyaddr = cookie.dmac_laddress; 4951 return (AACOK); 4952 4953 error: 4954 if (slotp->fib_acc_handle) { 4955 ddi_dma_mem_free(&slotp->fib_acc_handle); 4956 slotp->fib_acc_handle = NULL; 4957 } 
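	/*
	 * Error unwind: resources are released in the reverse order of
	 * allocation - the FIB memory (acc handle) above, then the DMA
	 * handle below.  On success both are kept and fib_phyaddr is
	 * taken from the bound cookie.
	 */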
4958 if (slotp->fib_dma_handle) { 4959 ddi_dma_free_handle(&slotp->fib_dma_handle); 4960 slotp->fib_dma_handle = NULL; 4961 } 4962 return (AACERR); 4963 } 4964 4965 static void 4966 aac_free_fib(struct aac_slot *slotp) 4967 { 4968 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle); 4969 ddi_dma_mem_free(&slotp->fib_acc_handle); 4970 slotp->fib_acc_handle = NULL; 4971 ddi_dma_free_handle(&slotp->fib_dma_handle); 4972 slotp->fib_dma_handle = NULL; 4973 slotp->fib_phyaddr = 0; 4974 } 4975 4976 static void 4977 aac_alloc_fibs(struct aac_softstate *softs) 4978 { 4979 int i; 4980 struct aac_slot *slotp; 4981 4982 for (i = 0; i < softs->total_slots && 4983 softs->total_fibs < softs->total_slots; i++) { 4984 slotp = &(softs->io_slot[i]); 4985 if (slotp->fib_phyaddr) 4986 continue; 4987 if (aac_alloc_fib(softs, slotp) != AACOK) 4988 break; 4989 4990 /* Insert the slot to the free slot list */ 4991 aac_release_slot(softs, slotp); 4992 softs->total_fibs++; 4993 } 4994 } 4995 4996 static void 4997 aac_destroy_fibs(struct aac_softstate *softs) 4998 { 4999 struct aac_slot *slotp; 5000 5001 while ((slotp = softs->free_io_slot_head) != NULL) { 5002 ASSERT(slotp->fib_phyaddr); 5003 softs->free_io_slot_head = slotp->next; 5004 aac_free_fib(slotp); 5005 ASSERT(slotp->index == (slotp - softs->io_slot)); 5006 softs->total_fibs--; 5007 } 5008 ASSERT(softs->total_fibs == 0); 5009 } 5010 5011 static int 5012 aac_create_slots(struct aac_softstate *softs) 5013 { 5014 int i; 5015 5016 softs->total_slots = softs->aac_max_fibs; 5017 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \ 5018 softs->total_slots, KM_SLEEP); 5019 if (softs->io_slot == NULL) { 5020 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot"); 5021 return (AACERR); 5022 } 5023 for (i = 0; i < softs->total_slots; i++) 5024 softs->io_slot[i].index = i; 5025 softs->free_io_slot_head = NULL; 5026 softs->total_fibs = 0; 5027 return (AACOK); 5028 } 5029 5030 static void 5031 aac_destroy_slots(struct aac_softstate *softs) 5032 { 5033 ASSERT(softs->free_io_slot_head == NULL); 5034 5035 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \ 5036 softs->total_slots); 5037 softs->io_slot = NULL; 5038 softs->total_slots = 0; 5039 } 5040 5041 struct aac_slot * 5042 aac_get_slot(struct aac_softstate *softs) 5043 { 5044 struct aac_slot *slotp; 5045 5046 if ((slotp = softs->free_io_slot_head) != NULL) { 5047 softs->free_io_slot_head = slotp->next; 5048 slotp->next = NULL; 5049 } 5050 return (slotp); 5051 } 5052 5053 static void 5054 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp) 5055 { 5056 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots)); 5057 ASSERT(slotp == &softs->io_slot[slotp->index]); 5058 5059 slotp->acp = NULL; 5060 slotp->next = softs->free_io_slot_head; 5061 softs->free_io_slot_head = slotp; 5062 } 5063 5064 int 5065 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp) 5066 { 5067 if (aac_bind_io(softs, acp) == AACOK) 5068 aac_start_io(softs, acp); 5069 else 5070 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp); 5071 5072 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) 5073 return (TRAN_ACCEPT); 5074 /* 5075 * Because sync FIB is always 512 bytes and used for critical 5076 * functions, async FIB is used for poll IO. 
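 *
 * The dispatch below can be summarized as:
 *
 *	AAC_CMD_NO_INTR set:	aac_do_poll_io() - spin on the interrupt
 *				handler until the command completes
 *	otherwise:		aac_do_sync_io() - cv_wait() on
 *				softs->event with io_lock held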
5077 */ 5078 if (acp->flags & AAC_CMD_NO_INTR) { 5079 if (aac_do_poll_io(softs, acp) == AACOK) 5080 return (TRAN_ACCEPT); 5081 } else { 5082 if (aac_do_sync_io(softs, acp) == AACOK) 5083 return (TRAN_ACCEPT); 5084 } 5085 return (TRAN_BADPKT); 5086 } 5087 5088 static int 5089 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp) 5090 { 5091 int (*intr_handler)(struct aac_softstate *); 5092 5093 /* 5094 * Interrupts are disabled, so we have to poll the adapter ourselves. 5095 */ 5096 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 5097 aac_process_intr_new : aac_process_intr_old; 5098 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) { 5099 int i = AAC_POLL_TIME * 1000; 5100 5101 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i); 5102 if (i == 0) 5103 aac_cmd_timeout(softs); 5104 } 5105 5106 ddi_trigger_softintr(softs->softint_id); 5107 5108 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR)) 5109 return (AACOK); 5110 return (AACERR); 5111 } 5112 5113 static int 5114 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp) 5115 { 5116 ASSERT(softs && acp); 5117 5118 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) 5119 cv_wait(&softs->event, &softs->io_lock); 5120 5121 if (acp->flags & AAC_CMD_CMPLT) 5122 return (AACOK); 5123 return (AACERR); 5124 } 5125 5126 static int 5127 aac_dma_sync_ac(struct aac_cmd *acp) 5128 { 5129 if (acp->buf_dma_handle) { 5130 if (acp->flags & AAC_CMD_BUF_WRITE) { 5131 if (acp->abp != NULL) 5132 ddi_rep_put8(acp->abh, 5133 (uint8_t *)acp->bp->b_un.b_addr, 5134 (uint8_t *)acp->abp, acp->bp->b_bcount, 5135 DDI_DEV_AUTOINCR); 5136 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 5137 DDI_DMA_SYNC_FORDEV); 5138 } else { 5139 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 5140 DDI_DMA_SYNC_FORCPU); 5141 if (aac_check_dma_handle(acp->buf_dma_handle) != 5142 DDI_SUCCESS) 5143 return (AACERR); 5144 if (acp->abp != NULL) 5145 ddi_rep_get8(acp->abh, 5146 (uint8_t *)acp->bp->b_un.b_addr, 5147 (uint8_t *)acp->abp, acp->bp->b_bcount, 5148 DDI_DEV_AUTOINCR); 5149 } 5150 } 5151 return (AACOK); 5152 } 5153 5154 /* 5155 * The following function comes from Adaptec: 5156 * 5157 * When the driver sees a particular event that means containers have changed, 5158 * it will rescan containers. However a change may not be complete until some 5159 * other event is received. For example, creating or deleting an array will 5160 * incur as many as six AifEnConfigChange events, which would generate six 5161 * container rescans. To reduce rescans, the driver sets a flag to wait for 5162 * another particular event; when that event comes in, it does the rescan.
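 *
 * As an illustrative walk-through of aac_handle_aif() below: an
 * AifEnDeleteContainer event sets devcfg_wait_on = AifEnConfigChange;
 * the rescan is then deferred until a later AifEnConfigChange event
 * arrives, at which point devcfg_needed is set and
 * aac_probe_containers() is called.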
5163 */ 5164 static int 5165 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp) 5166 { 5167 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 5168 uint16_t fib_command; 5169 struct aac_aif_command *aif; 5170 int en_type; 5171 int devcfg_needed; 5172 int current, next; 5173 5174 fib_command = LE_16(fibp->Header.Command); 5175 if (fib_command != AifRequest) { 5176 cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x", 5177 fib_command); 5178 return (AACERR); 5179 } 5180 5181 /* Update internal container state */ 5182 aif = (struct aac_aif_command *)&fibp->data[0]; 5183 5184 AACDB_PRINT_AIF(softs, aif); 5185 devcfg_needed = 0; 5186 en_type = LE_32((uint32_t)aif->data.EN.type); 5187 5188 switch (LE_32((uint32_t)aif->command)) { 5189 case AifCmdDriverNotify: { 5190 int cid = LE_32(aif->data.EN.data.ECC.container[0]); 5191 5192 switch (en_type) { 5193 case AifDenMorphComplete: 5194 case AifDenVolumeExtendComplete: 5195 if (softs->containers[cid].valid) 5196 softs->devcfg_wait_on = AifEnConfigChange; 5197 break; 5198 } 5199 if (softs->devcfg_wait_on == en_type) 5200 devcfg_needed = 1; 5201 break; 5202 } 5203 5204 case AifCmdEventNotify: 5205 switch (en_type) { 5206 case AifEnAddContainer: 5207 case AifEnDeleteContainer: 5208 softs->devcfg_wait_on = AifEnConfigChange; 5209 break; 5210 case AifEnContainerChange: 5211 if (!softs->devcfg_wait_on) 5212 softs->devcfg_wait_on = AifEnConfigChange; 5213 break; 5214 case AifEnContainerEvent: 5215 if (ddi_get32(acc, &aif-> \ 5216 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE) 5217 devcfg_needed = 1; 5218 break; 5219 } 5220 if (softs->devcfg_wait_on == en_type) 5221 devcfg_needed = 1; 5222 break; 5223 5224 case AifCmdJobProgress: 5225 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) { 5226 int pr_status; 5227 uint32_t pr_ftick, pr_ctick; 5228 5229 pr_status = LE_32((uint32_t)aif->data.PR[0].status); 5230 pr_ctick = LE_32(aif->data.PR[0].currentTick); 5231 pr_ftick = LE_32(aif->data.PR[0].finalTick); 5232 5233 if ((pr_ctick == pr_ftick) || 5234 (pr_status == AifJobStsSuccess)) 5235 softs->devcfg_wait_on = AifEnContainerChange; 5236 else if ((pr_ctick == 0) && 5237 (pr_status == AifJobStsRunning)) 5238 softs->devcfg_wait_on = AifEnContainerChange; 5239 } 5240 break; 5241 } 5242 5243 if (devcfg_needed) 5244 (void) aac_probe_containers(softs); 5245 5246 /* Modify AIF contexts */ 5247 current = softs->aifq_idx; 5248 next = (current + 1) % AAC_AIFQ_LENGTH; 5249 if (next == 0) { 5250 struct aac_fib_context *ctx; 5251 5252 softs->aifq_wrap = 1; 5253 for (ctx = softs->fibctx; ctx; ctx = ctx->next) { 5254 if (next == ctx->ctx_idx) { 5255 ctx->ctx_filled = 1; 5256 } else if (current == ctx->ctx_idx && ctx->ctx_filled) { 5257 ctx->ctx_idx = next; 5258 AACDB_PRINT(softs, CE_NOTE, 5259 "-- AIF queue(%x) overrun", ctx->unique); 5260 } 5261 } 5262 } 5263 softs->aifq_idx = next; 5264 5265 /* Wakeup applications */ 5266 cv_broadcast(&softs->aifv); 5267 return (AACOK); 5268 } 5269 5270 /* 5271 * Timeout recovery 5272 */ 5273 static void 5274 aac_cmd_timeout(struct aac_softstate *softs) 5275 { 5276 /* 5277 * Besides firmware in an unhealthy state, an overloaded 5278 * adapter may also incur pkt timeouts. 5279 * There is a chance for an adapter with a slower IOP to take 5280 * longer than 60 seconds to process commands, such as when it 5281 * is doing a build on a RAID-5 while being required to perform 5282 * IOs at the same time, so longer completion times should be 5283 * tolerated.
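 *
 * Recovery below therefore attempts aac_do_reset() first; if it
 * succeeds, only the outstanding commands are aborted with CMD_RESET
 * and waiting io is restarted, while a failed reset (dead adapter)
 * aborts everything with CMD_TIMEOUT.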
5284 */ 5285 if (aac_do_reset(softs) == AACOK) { 5286 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, 5287 CMD_RESET); 5288 aac_start_waiting_io(softs); 5289 } else { 5290 /* Abort all waiting cmds when adapter is dead */ 5291 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, 5292 CMD_TIMEOUT); 5293 } 5294 } 5295 5296 /* 5297 * The following function comes from Adaptec: 5298 * 5299 * Time sync. command added to synchronize time with firmware every 30 5300 * minutes (required for correct AIF timestamps etc.) 5301 */ 5302 static int 5303 aac_sync_tick(struct aac_softstate *softs) 5304 { 5305 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 5306 struct aac_fib *fibp = softs->sync_slot.fibp; 5307 5308 ddi_put32(acc, (uint32_t *)&fibp->data[0], ddi_get_time()); 5309 return (aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t))); 5310 } 5311 5312 static void 5313 aac_daemon(void *arg) 5314 { 5315 struct aac_softstate *softs = (struct aac_softstate *)arg; 5316 struct aac_cmd *acp; 5317 5318 DBCALLED(softs, 2); 5319 5320 mutex_enter(&softs->io_lock); 5321 /* Check slot for timeout pkts */ 5322 aac_timebase += aac_tick; 5323 for (acp = softs->q_busy.q_head; acp; acp = acp->next) { 5324 if (acp->timeout) { 5325 if (acp->timeout <= aac_timebase) { 5326 aac_cmd_timeout(softs); 5327 ddi_trigger_softintr(softs->softint_id); 5328 } 5329 break; 5330 } 5331 } 5332 5333 /* Time sync. with firmware every AAC_SYNC_TICK */ 5334 if (aac_sync_time <= aac_timebase) { 5335 aac_sync_time = aac_timebase; 5336 if (aac_sync_tick(softs) != AACOK) 5337 aac_sync_time += aac_tick << 1; /* retry shortly */ 5338 else 5339 aac_sync_time += AAC_SYNC_TICK; 5340 } 5341 5342 if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0)) 5343 softs->timeout_id = timeout(aac_daemon, (void *)softs, 5344 (aac_tick * drv_usectohz(1000000))); 5345 mutex_exit(&softs->io_lock); 5346 } 5347 5348 /* 5349 * Architecture dependent functions 5350 */ 5351 static int 5352 aac_rx_get_fwstatus(struct aac_softstate *softs) 5353 { 5354 return (PCI_MEM_GET32(softs, AAC_OMR0)); 5355 } 5356 5357 static int 5358 aac_rx_get_mailbox(struct aac_softstate *softs, int mb) 5359 { 5360 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4)); 5361 } 5362 5363 static void 5364 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 5365 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 5366 { 5367 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd); 5368 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0); 5369 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1); 5370 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2); 5371 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3); 5372 } 5373 5374 static int 5375 aac_rkt_get_fwstatus(struct aac_softstate *softs) 5376 { 5377 return (PCI_MEM_GET32(softs, AAC_OMR0)); 5378 } 5379 5380 static int 5381 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb) 5382 { 5383 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4)); 5384 } 5385 5386 static void 5387 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 5388 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 5389 { 5390 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd); 5391 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0); 5392 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1); 5393 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2); 5394 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3); 5395 } 5396 5397 /* 5398 * cb_ops functions 5399 */ 5400 static int 5401 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred) 5402 { 5403 struct aac_softstate *softs; 
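	/*
	 * Opens of the SCSA devctl/scsi minor nodes are handed off to
	 * scsi_hba_open() below; for the driver's own minor node the
	 * minor number is mapped back to an instance and the soft state
	 * is looked up to validate it.
	 */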
5404 int minor0, minor; 5405 int instance; 5406 5407 DBCALLED(NULL, 2); 5408 5409 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 5410 return (EINVAL); 5411 5412 minor0 = getminor(*devp); 5413 minor = AAC_SCSA_MINOR(minor0); 5414 5415 if (AAC_IS_SCSA_NODE(minor)) 5416 return (scsi_hba_open(devp, flag, otyp, cred)); 5417 5418 instance = MINOR2INST(minor0); 5419 if (instance >= AAC_MAX_ADAPTERS) 5420 return (ENXIO); 5421 5422 softs = ddi_get_soft_state(aac_softstatep, instance); 5423 if (softs == NULL) 5424 return (ENXIO); 5425 5426 return (0); 5427 } 5428 5429 /*ARGSUSED*/ 5430 static int 5431 aac_close(dev_t dev, int flag, int otyp, cred_t *cred) 5432 { 5433 int minor0, minor; 5434 int instance; 5435 5436 DBCALLED(NULL, 2); 5437 5438 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 5439 return (EINVAL); 5440 5441 minor0 = getminor(dev); 5442 minor = AAC_SCSA_MINOR(minor0); 5443 5444 if (AAC_IS_SCSA_NODE(minor)) 5445 return (scsi_hba_close(dev, flag, otyp, cred)); 5446 5447 instance = MINOR2INST(minor0); 5448 if (instance >= AAC_MAX_ADAPTERS) 5449 return (ENXIO); 5450 5451 return (0); 5452 } 5453 5454 static int 5455 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, 5456 int *rval_p) 5457 { 5458 struct aac_softstate *softs; 5459 int minor0, minor; 5460 int instance; 5461 5462 DBCALLED(NULL, 2); 5463 5464 if (drv_priv(cred_p) != 0) 5465 return (EPERM); 5466 5467 minor0 = getminor(dev); 5468 minor = AAC_SCSA_MINOR(minor0); 5469 5470 if (AAC_IS_SCSA_NODE(minor)) 5471 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p)); 5472 5473 instance = MINOR2INST(minor0); 5474 if (instance < AAC_MAX_ADAPTERS) { 5475 softs = ddi_get_soft_state(aac_softstatep, instance); 5476 return (aac_do_ioctl(softs, dev, cmd, arg, flag)); 5477 } 5478 return (ENXIO); 5479 } 5480 5481 /* 5482 * The IO fault service error handling callback function 5483 */ 5484 /*ARGSUSED*/ 5485 static int 5486 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5487 { 5488 /* 5489 * as the driver can always deal with an error in any dma or 5490 * access handle, we can just return the fme_status value. 5491 */ 5492 pci_ereport_post(dip, err, NULL); 5493 return (err->fme_status); 5494 } 5495 5496 /* 5497 * aac_fm_init - initialize fma capabilities and register with IO 5498 * fault services. 5499 */ 5500 static void 5501 aac_fm_init(struct aac_softstate *softs) 5502 { 5503 /* 5504 * Need to change iblock to priority for new MSI intr 5505 */ 5506 ddi_iblock_cookie_t fm_ibc; 5507 5508 /* Only register with IO Fault Services if we have some capability */ 5509 if (softs->fm_capabilities) { 5510 /* Adjust access and dma attributes for FMA */ 5511 aac_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 5512 softs->buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 5513 softs->addr_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 5514 5515 /* 5516 * Register capabilities with IO Fault Services. 5517 * fm_capabilities will be updated to indicate 5518 * capabilities actually supported (not requested.) 5519 */ 5520 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc); 5521 5522 /* 5523 * Initialize pci ereport capabilities if ereport 5524 * capable (should always be.) 5525 */ 5526 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 5527 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 5528 pci_ereport_setup(softs->devinfo_p); 5529 } 5530 5531 /* 5532 * Register error callback if error callback capable. 
5533 */ 5534 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 5535 ddi_fm_handler_register(softs->devinfo_p, 5536 aac_fm_error_cb, (void *) softs); 5537 } 5538 } else { 5539 /* Clear FMA if no capabilities */ 5540 aac_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5541 softs->buf_dma_attr.dma_attr_flags = 0; 5542 softs->addr_dma_attr.dma_attr_flags = 0; 5543 } 5544 } 5545 5546 /* 5547 * aac_fm_fini - Releases fma capabilities and un-registers with IO 5548 * fault services. 5549 */ 5550 static void 5551 aac_fm_fini(struct aac_softstate *softs) 5552 { 5553 /* Only unregister FMA capabilities if registered */ 5554 if (softs->fm_capabilities) { 5555 /* 5556 * Un-register error callback if error callback capable. 5557 */ 5558 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 5559 ddi_fm_handler_unregister(softs->devinfo_p); 5560 } 5561 5562 /* 5563 * Release any resources allocated by pci_ereport_setup() 5564 */ 5565 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 5566 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 5567 pci_ereport_teardown(softs->devinfo_p); 5568 } 5569 5570 /* Unregister from IO Fault Services */ 5571 ddi_fm_fini(softs->devinfo_p); 5572 } 5573 } 5574 5575 int 5576 aac_check_acc_handle(ddi_acc_handle_t handle) 5577 { 5578 ddi_fm_error_t de; 5579 5580 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5581 return (de.fme_status); 5582 } 5583 5584 int 5585 aac_check_dma_handle(ddi_dma_handle_t handle) 5586 { 5587 ddi_fm_error_t de; 5588 5589 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5590 return (de.fme_status); 5591 } 5592 5593 void 5594 aac_fm_ereport(struct aac_softstate *softs, char *detail) 5595 { 5596 uint64_t ena; 5597 char buf[FM_MAX_CLASS]; 5598 5599 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 5600 ena = fm_ena_generate(0, FM_ENA_FMT1); 5601 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) { 5602 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP, 5603 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 5604 } 5605 } 5606 5607 #ifdef DEBUG 5608 5609 /* -------------------------debug aid functions-------------------------- */ 5610 5611 #define AAC_FIB_CMD_KEY_STRINGS \ 5612 TestCommandResponse, "TestCommandResponse", \ 5613 TestAdapterCommand, "TestAdapterCommand", \ 5614 LastTestCommand, "LastTestCommand", \ 5615 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \ 5616 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \ 5617 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \ 5618 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \ 5619 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \ 5620 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \ 5621 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \ 5622 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \ 5623 InterfaceShutdown, "InterfaceShutdown", \ 5624 DmaCommandFib, "DmaCommandFib", \ 5625 StartProfile, "StartProfile", \ 5626 TermProfile, "TermProfile", \ 5627 SpeedTest, "SpeedTest", \ 5628 TakeABreakPt, "TakeABreakPt", \ 5629 RequestPerfData, "RequestPerfData", \ 5630 SetInterruptDefTimer, "SetInterruptDefTimer", \ 5631 SetInterruptDefCount, "SetInterruptDefCount", \ 5632 GetInterruptDefStatus, "GetInterruptDefStatus", \ 5633 LastCommCommand, "LastCommCommand", \ 5634 NuFileSystem, "NuFileSystem", \ 5635 UFS, "UFS", \ 5636 HostFileSystem, "HostFileSystem", \ 5637 LastFileSystemCommand, "LastFileSystemCommand", \ 5638 ContainerCommand, "ContainerCommand", \ 5639 ContainerCommand64, "ContainerCommand64", \ 5640 ClusterCommand, 
"ClusterCommand", \ 5641 ScsiPortCommand, "ScsiPortCommand", \ 5642 ScsiPortCommandU64, "ScsiPortCommandU64", \ 5643 AifRequest, "AifRequest", \ 5644 CheckRevision, "CheckRevision", \ 5645 FsaHostShutdown, "FsaHostShutdown", \ 5646 RequestAdapterInfo, "RequestAdapterInfo", \ 5647 IsAdapterPaused, "IsAdapterPaused", \ 5648 SendHostTime, "SendHostTime", \ 5649 LastMiscCommand, "LastMiscCommand" 5650 5651 #define AAC_CTVM_SUBCMD_KEY_STRINGS \ 5652 VM_Null, "VM_Null", \ 5653 VM_NameServe, "VM_NameServe", \ 5654 VM_ContainerConfig, "VM_ContainerConfig", \ 5655 VM_Ioctl, "VM_Ioctl", \ 5656 VM_FilesystemIoctl, "VM_FilesystemIoctl", \ 5657 VM_CloseAll, "VM_CloseAll", \ 5658 VM_CtBlockRead, "VM_CtBlockRead", \ 5659 VM_CtBlockWrite, "VM_CtBlockWrite", \ 5660 VM_SliceBlockRead, "VM_SliceBlockRead", \ 5661 VM_SliceBlockWrite, "VM_SliceBlockWrite", \ 5662 VM_DriveBlockRead, "VM_DriveBlockRead", \ 5663 VM_DriveBlockWrite, "VM_DriveBlockWrite", \ 5664 VM_EnclosureMgt, "VM_EnclosureMgt", \ 5665 VM_Unused, "VM_Unused", \ 5666 VM_CtBlockVerify, "VM_CtBlockVerify", \ 5667 VM_CtPerf, "VM_CtPerf", \ 5668 VM_CtBlockRead64, "VM_CtBlockRead64", \ 5669 VM_CtBlockWrite64, "VM_CtBlockWrite64", \ 5670 VM_CtBlockVerify64, "VM_CtBlockVerify64", \ 5671 VM_CtHostRead64, "VM_CtHostRead64", \ 5672 VM_CtHostWrite64, "VM_CtHostWrite64", \ 5673 VM_NameServe64, "VM_NameServe64" 5674 5675 #define AAC_CT_SUBCMD_KEY_STRINGS \ 5676 CT_Null, "CT_Null", \ 5677 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \ 5678 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \ 5679 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \ 5680 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \ 5681 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \ 5682 CT_WRITE_MBR, "CT_WRITE_MBR", \ 5683 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \ 5684 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \ 5685 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \ 5686 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \ 5687 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \ 5688 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \ 5689 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \ 5690 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \ 5691 CT_READ_MBR, "CT_READ_MBR", \ 5692 CT_READ_PARTITION, "CT_READ_PARTITION", \ 5693 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \ 5694 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \ 5695 CT_SLICE_SIZE, "CT_SLICE_SIZE", \ 5696 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \ 5697 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \ 5698 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \ 5699 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \ 5700 CT_UNMIRROR, "CT_UNMIRROR", \ 5701 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \ 5702 CT_GEN_MIRROR, "CT_GEN_MIRROR", \ 5703 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \ 5704 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \ 5705 CT_MOVE2, "CT_MOVE2", \ 5706 CT_SPLIT, "CT_SPLIT", \ 5707 CT_SPLIT2, "CT_SPLIT2", \ 5708 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \ 5709 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \ 5710 CT_RECONFIG, "CT_RECONFIG", \ 5711 CT_BREAK2, "CT_BREAK2", \ 5712 CT_BREAK, "CT_BREAK", \ 5713 CT_MERGE2, "CT_MERGE2", \ 5714 CT_MERGE, "CT_MERGE", \ 5715 CT_FORCE_ERROR, "CT_FORCE_ERROR", \ 5716 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \ 5717 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \ 5718 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \ 5719 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \ 5720 CT_VOLUME_ADD, "CT_VOLUME_ADD", \ 5721 CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \ 5722 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \ 5723 CT_COPY_STATUS, "CT_COPY_STATUS", \ 5724 CT_COPY, "CT_COPY", \ 
5725 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \ 5726 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \ 5727 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \ 5728 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \ 5729 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \ 5730 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \ 5731 CT_SET, "CT_SET", \ 5732 CT_GET, "CT_GET", \ 5733 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \ 5734 CT_GET_DELAY, "CT_GET_DELAY", \ 5735 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \ 5736 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \ 5737 CT_SCRUB, "CT_SCRUB", \ 5738 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \ 5739 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \ 5740 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \ 5741 CT_PAUSE_IO, "CT_PAUSE_IO", \ 5742 CT_RELEASE_IO, "CT_RELEASE_IO", \ 5743 CT_SCRUB2, "CT_SCRUB2", \ 5744 CT_MCHECK, "CT_MCHECK", \ 5745 CT_CORRUPT, "CT_CORRUPT", \ 5746 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \ 5747 CT_PROMOTE, "CT_PROMOTE", \ 5748 CT_SET_DEAD, "CT_SET_DEAD", \ 5749 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \ 5750 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \ 5751 CT_GET_PARAM, "CT_GET_PARAM", \ 5752 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \ 5753 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \ 5754 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \ 5755 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \ 5756 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \ 5757 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \ 5758 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \ 5759 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \ 5760 CT_STOP_DATA, "CT_STOP_DATA", \ 5761 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \ 5762 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \ 5763 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \ 5764 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \ 5765 CT_GET_TIME, "CT_GET_TIME", \ 5766 CT_READ_DATA, "CT_READ_DATA", \ 5767 CT_CTR, "CT_CTR", \ 5768 CT_CTL, "CT_CTL", \ 5769 CT_DRAINIO, "CT_DRAINIO", \ 5770 CT_RELEASEIO, "CT_RELEASEIO", \ 5771 CT_GET_NVRAM, "CT_GET_NVRAM", \ 5772 CT_GET_MEMORY, "CT_GET_MEMORY", \ 5773 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \ 5774 CT_ADD_LEVEL, "CT_ADD_LEVEL", \ 5775 CT_NV_ZERO, "CT_NV_ZERO", \ 5776 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \ 5777 CT_THROTTLE_ON, "CT_THROTTLE_ON", \ 5778 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \ 5779 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \ 5780 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \ 5781 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \ 5782 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \ 5783 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \ 5784 CT_MONITOR, "CT_MONITOR", \ 5785 CT_GEN_MORPH, "CT_GEN_MORPH", \ 5786 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \ 5787 CT_CACHE_SET, "CT_CACHE_SET", \ 5788 CT_CACHE_STAT, "CT_CACHE_STAT", \ 5789 CT_TRACE_START, "CT_TRACE_START", \ 5790 CT_TRACE_STOP, "CT_TRACE_STOP", \ 5791 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \ 5792 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \ 5793 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \ 5794 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \ 5795 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \ 5796 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \ 5797 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \ 5798 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \ 5799 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \ 5800 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \ 5801 CT_STOP_DUMPS, "CT_STOP_DUMPS", \ 5802 CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \ 5803 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \ 5804 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \ 5805 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \ 
5806 CT_READ_NAME, "CT_READ_NAME", \ 5807 CT_WRITE_NAME, "CT_WRITE_NAME", \ 5808 CT_TOSS_CACHE, "CT_TOSS_CACHE", \ 5809 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \ 5810 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \ 5811 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \ 5812 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \ 5813 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \ 5814 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \ 5815 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \ 5816 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \ 5817 CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \ 5818 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \ 5819 CT_FLUSH, "CT_FLUSH", \ 5820 CT_REBUILD, "CT_REBUILD", \ 5821 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \ 5822 CT_RESTART, "CT_RESTART", \ 5823 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \ 5824 CT_TRACE_FLAG, "CT_TRACE_FLAG", \ 5825 CT_RESTART_MORPH, "CT_RESTART_MORPH", \ 5826 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \ 5827 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \ 5828 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \ 5829 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \ 5830 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \ 5831 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \ 5832 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \ 5833 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \ 5834 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \ 5835 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \ 5836 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \ 5837 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \ 5838 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \ 5839 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \ 5840 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \ 5841 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \ 5842 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \ 5843 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \ 5844 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \ 5845 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \ 5846 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \ 5847 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \ 5848 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \ 5849 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \ 5850 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \ 5851 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \ 5852 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \ 5853 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \ 5854 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \ 5855 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \ 5856 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \ 5857 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \ 5858 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \ 5859 CT_IS_CONTAINER_MEATADATA_STANDARD, \ 5860 "CT_IS_CONTAINER_MEATADATA_STANDARD", \ 5861 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \ 5862 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \ 5863 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \ 5864 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \ 5865 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \ 5866 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \ 5867 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \ 5868 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \ 5869 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \ 5870 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \ 5871 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \ 5872 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \ 5873 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \ 5874 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \ 5875 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \ 5876 CT_GET_PARTITION_INFO_2, 
"CT_GET_PARTITION_INFO_2", \ 5877 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \ 5878 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \ 5879 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE" 5880 5881 #define AAC_CL_SUBCMD_KEY_STRINGS \ 5882 CL_NULL, "CL_NULL", \ 5883 DS_INIT, "DS_INIT", \ 5884 DS_RESCAN, "DS_RESCAN", \ 5885 DS_CREATE, "DS_CREATE", \ 5886 DS_DELETE, "DS_DELETE", \ 5887 DS_ADD_DISK, "DS_ADD_DISK", \ 5888 DS_REMOVE_DISK, "DS_REMOVE_DISK", \ 5889 DS_MOVE_DISK, "DS_MOVE_DISK", \ 5890 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \ 5891 DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \ 5892 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \ 5893 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \ 5894 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \ 5895 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \ 5896 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \ 5897 DS_GET_DRIVES, "DS_GET_DRIVES", \ 5898 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \ 5899 DS_ONLINE, "DS_ONLINE", \ 5900 DS_OFFLINE, "DS_OFFLINE", \ 5901 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \ 5902 DS_FSAPRINT, "DS_FSAPRINT", \ 5903 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \ 5904 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \ 5905 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \ 5906 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \ 5907 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \ 5908 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \ 5909 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \ 5910 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \ 5911 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \ 5912 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \ 5913 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \ 5914 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \ 5915 CC_GET_BUSINFO, "CC_GET_BUSINFO", \ 5916 CC_GET_PORTINFO, "CC_GET_PORTINFO", \ 5917 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \ 5918 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \ 5919 CQ_QUORUM_OP, "CQ_QUORUM_OP" 5920 5921 #define AAC_AIF_SUBCMD_KEY_STRINGS \ 5922 AifCmdEventNotify, "AifCmdEventNotify", \ 5923 AifCmdJobProgress, "AifCmdJobProgress", \ 5924 AifCmdAPIReport, "AifCmdAPIReport", \ 5925 AifCmdDriverNotify, "AifCmdDriverNotify", \ 5926 AifReqJobList, "AifReqJobList", \ 5927 AifReqJobsForCtr, "AifReqJobsForCtr", \ 5928 AifReqJobsForScsi, "AifReqJobsForScsi", \ 5929 AifReqJobReport, "AifReqJobReport", \ 5930 AifReqTerminateJob, "AifReqTerminateJob", \ 5931 AifReqSuspendJob, "AifReqSuspendJob", \ 5932 AifReqResumeJob, "AifReqResumeJob", \ 5933 AifReqSendAPIReport, "AifReqSendAPIReport", \ 5934 AifReqAPIJobStart, "AifReqAPIJobStart", \ 5935 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \ 5936 AifReqAPIJobFinish, "AifReqAPIJobFinish" 5937 5938 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \ 5939 Reserved_IOCTL, "Reserved_IOCTL", \ 5940 GetDeviceHandle, "GetDeviceHandle", \ 5941 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \ 5942 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \ 5943 RescanBus, "RescanBus", \ 5944 GetDeviceProbeInfo, "GetDeviceProbeInfo", \ 5945 GetDeviceCapacity, "GetDeviceCapacity", \ 5946 GetContainerProbeInfo, "GetContainerProbeInfo", \ 5947 GetRequestedMemorySize, "GetRequestedMemorySize", \ 5948 GetBusInfo, "GetBusInfo", \ 5949 GetVendorSpecific, "GetVendorSpecific", \ 5950 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \ 5951 EnhancedGetBusInfo, "EnhancedGetBusInfo", \ 5952 SetupExtendedCounters, "SetupExtendedCounters", \ 5953 GetPerformanceCounters, "GetPerformanceCounters", \ 5954 
ResetPerformanceCounters, "ResetPerformanceCounters", \ 5955 ReadModePage, "ReadModePage", \ 5956 WriteModePage, "WriteModePage", \ 5957 ReadDriveParameter, "ReadDriveParameter", \ 5958 WriteDriveParameter, "WriteDriveParameter", \ 5959 ResetAdapter, "ResetAdapter", \ 5960 ResetBus, "ResetBus", \ 5961 ResetBusDevice, "ResetBusDevice", \ 5962 ExecuteSrb, "ExecuteSrb", \ 5963 Create_IO_Task, "Create_IO_Task", \ 5964 Delete_IO_Task, "Delete_IO_Task", \ 5965 Get_IO_Task_Info, "Get_IO_Task_Info", \ 5966 Check_Task_Progress, "Check_Task_Progress", \ 5967 InjectError, "InjectError", \ 5968 GetDeviceDefectCounts, "GetDeviceDefectCounts", \ 5969 GetDeviceDefectInfo, "GetDeviceDefectInfo", \ 5970 GetDeviceStatus, "GetDeviceStatus", \ 5971 ClearDeviceStatus, "ClearDeviceStatus", \ 5972 DiskSpinControl, "DiskSpinControl", \ 5973 DiskSmartControl, "DiskSmartControl", \ 5974 WriteSame, "WriteSame", \ 5975 ReadWriteLong, "ReadWriteLong", \ 5976 FormatUnit, "FormatUnit", \ 5977 TargetDeviceControl, "TargetDeviceControl", \ 5978 TargetChannelControl, "TargetChannelControl", \ 5979 FlashNewCode, "FlashNewCode", \ 5980 DiskCheck, "DiskCheck", \ 5981 RequestSense, "RequestSense", \ 5982 DiskPERControl, "DiskPERControl", \ 5983 Read10, "Read10", \ 5984 Write10, "Write10" 5985 5986 #define AAC_AIFEN_KEY_STRINGS \ 5987 AifEnGeneric, "Generic", \ 5988 AifEnTaskComplete, "TaskComplete", \ 5989 AifEnConfigChange, "Config change", \ 5990 AifEnContainerChange, "Container change", \ 5991 AifEnDeviceFailure, "device failed", \ 5992 AifEnMirrorFailover, "Mirror failover", \ 5993 AifEnContainerEvent, "container event", \ 5994 AifEnFileSystemChange, "File system changed", \ 5995 AifEnConfigPause, "Container pause event", \ 5996 AifEnConfigResume, "Container resume event", \ 5997 AifEnFailoverChange, "Failover space assignment changed", \ 5998 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \ 5999 AifEnEnclosureManagement, "Enclosure management event", \ 6000 AifEnBatteryEvent, "battery event", \ 6001 AifEnAddContainer, "Add container", \ 6002 AifEnDeleteContainer, "Delete container", \ 6003 AifEnSMARTEvent, "SMART Event", \ 6004 AifEnBatteryNeedsRecond, "battery needs reconditioning", \ 6005 AifEnClusterEvent, "cluster event", \ 6006 AifEnDiskSetEvent, "disk set event occurred", \ 6007 AifDenMorphComplete, "morph operation completed", \ 6008 AifDenVolumeExtendComplete, "VolumeExtendComplete" 6009 6010 struct aac_key_strings { 6011 int key; 6012 char *message; 6013 }; 6014 6015 extern struct scsi_key_strings scsi_cmds[]; 6016 6017 static struct aac_key_strings aac_fib_cmds[] = { 6018 AAC_FIB_CMD_KEY_STRINGS, 6019 -1, NULL 6020 }; 6021 6022 static struct aac_key_strings aac_ctvm_subcmds[] = { 6023 AAC_CTVM_SUBCMD_KEY_STRINGS, 6024 -1, NULL 6025 }; 6026 6027 static struct aac_key_strings aac_ct_subcmds[] = { 6028 AAC_CT_SUBCMD_KEY_STRINGS, 6029 -1, NULL 6030 }; 6031 6032 static struct aac_key_strings aac_cl_subcmds[] = { 6033 AAC_CL_SUBCMD_KEY_STRINGS, 6034 -1, NULL 6035 }; 6036 6037 static struct aac_key_strings aac_aif_subcmds[] = { 6038 AAC_AIF_SUBCMD_KEY_STRINGS, 6039 -1, NULL 6040 }; 6041 6042 static struct aac_key_strings aac_ioctl_subcmds[] = { 6043 AAC_IOCTL_SUBCMD_KEY_STRINGS, 6044 -1, NULL 6045 }; 6046 6047 static struct aac_key_strings aac_aifens[] = { 6048 AAC_AIFEN_KEY_STRINGS, 6049 -1, NULL 6050 }; 6051 6052 /* 6053 * The following function comes from Adaptec: 6054 * 6055 * Get the firmware print buffer parameters from the firmware and, 6056 * if the command was successful, map in the address.
6057 */ 6058 static int 6059 aac_get_fw_debug_buffer(struct aac_softstate *softs) 6060 { 6061 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP, 6062 0, 0, 0, 0, NULL) == AACOK) { 6063 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1); 6064 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2); 6065 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3); 6066 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4); 6067 6068 if (mondrv_buf_size) { 6069 uint32_t offset = mondrv_buf_paddrl - \ 6070 softs->pci_mem_base_paddr; 6071 6072 /* 6073 * See if the address is already mapped in, and 6074 * if so set it up from the base address 6075 */ 6076 if ((mondrv_buf_paddrh == 0) && 6077 (offset + mondrv_buf_size < softs->map_size)) { 6078 mutex_enter(&aac_prt_mutex); 6079 softs->debug_buf_offset = offset; 6080 softs->debug_header_size = mondrv_hdr_size; 6081 softs->debug_buf_size = mondrv_buf_size; 6082 softs->debug_fw_flags = 0; 6083 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 6084 mutex_exit(&aac_prt_mutex); 6085 6086 return (AACOK); 6087 } 6088 } 6089 } 6090 return (AACERR); 6091 } 6092 6093 int 6094 aac_dbflag_on(struct aac_softstate *softs, int flag) 6095 { 6096 int debug_flags = softs ? softs->debug_flags : aac_debug_flags; 6097 6098 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \ 6099 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag)); 6100 } 6101 6102 static void 6103 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader) 6104 { 6105 if (noheader) { 6106 if (sl) { 6107 aac_fmt[0] = sl; 6108 cmn_err(lev, aac_fmt, aac_prt_buf); 6109 } else { 6110 cmn_err(lev, &aac_fmt[1], aac_prt_buf); 6111 } 6112 } else { 6113 if (sl) { 6114 aac_fmt_header[0] = sl; 6115 cmn_err(lev, aac_fmt_header, 6116 softs->vendor_name, softs->instance, 6117 aac_prt_buf); 6118 } else { 6119 cmn_err(lev, &aac_fmt_header[1], 6120 softs->vendor_name, softs->instance, 6121 aac_prt_buf); 6122 } 6123 } 6124 } 6125 6126 /* 6127 * The following function comes from Adaptec: 6128 * 6129 * Format and print out the data passed in to UART or console 6130 * as specified by debug flags. 6131 */ 6132 void 6133 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...) 6134 { 6135 va_list args; 6136 char sl; /* system log character */ 6137 6138 mutex_enter(&aac_prt_mutex); 6139 /* Set up parameters and call sprintf function to format the data */ 6140 if (strchr("^!?", fmt[0]) == NULL) { 6141 sl = 0; 6142 } else { 6143 sl = fmt[0]; 6144 fmt++; 6145 } 6146 va_start(args, fmt); 6147 (void) vsprintf(aac_prt_buf, fmt, args); 6148 va_end(args); 6149 6150 /* Make sure the softs structure has been passed in for this section */ 6151 if (softs) { 6152 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) && 6153 /* If we are set up for a Firmware print */ 6154 (softs->debug_buf_size)) { 6155 uint32_t count, i; 6156 6157 /* Make sure the string size is within boundaries */ 6158 count = strlen(aac_prt_buf); 6159 if (count > softs->debug_buf_size) 6160 count = (uint16_t)softs->debug_buf_size; 6161 6162 /* 6163 * Wait for no more than AAC_PRINT_TIMEOUT for the 6164 * previous message length to clear (the handshake). 6165 */ 6166 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) { 6167 if (!PCI_MEM_GET32(softs, 6168 softs->debug_buf_offset + \ 6169 AAC_FW_DBG_STRLEN_OFFSET)) 6170 break; 6171 6172 drv_usecwait(1000); 6173 } 6174 6175 /* 6176 * If the length is clear, copy over the message, the 6177 * flags, and the length. Make sure the length is the 6178 * last because that is the signal for the Firmware to 6179 * pick it up. 
6180 */ 6181 if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \ 6182 AAC_FW_DBG_STRLEN_OFFSET)) { 6183 PCI_MEM_REP_PUT8(softs, 6184 softs->debug_buf_offset + \ 6185 softs->debug_header_size, 6186 aac_prt_buf, count); 6187 PCI_MEM_PUT32(softs, 6188 softs->debug_buf_offset + \ 6189 AAC_FW_DBG_FLAGS_OFFSET, 6190 softs->debug_fw_flags); 6191 PCI_MEM_PUT32(softs, 6192 softs->debug_buf_offset + \ 6193 AAC_FW_DBG_STRLEN_OFFSET, count); 6194 } else { 6195 cmn_err(CE_WARN, "UART output fail"); 6196 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 6197 } 6198 } 6199 6200 /* 6201 * If the Kernel Debug Print flag is set, send it off 6202 * to the Kernel Debugger 6203 */ 6204 if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT) 6205 aac_cmn_err(softs, lev, sl, 6206 (softs->debug_flags & AACDB_FLAGS_NO_HEADERS)); 6207 } else { 6208 /* Driver not initialized yet, no firmware or header output */ 6209 if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT) 6210 aac_cmn_err(softs, lev, sl, 1); 6211 } 6212 mutex_exit(&aac_prt_mutex); 6213 } 6214 6215 /* 6216 * Translate command number to description string 6217 */ 6218 static char * 6219 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist) 6220 { 6221 int i; 6222 6223 for (i = 0; cmdlist[i].key != -1; i++) { 6224 if (cmd == cmdlist[i].key) 6225 return (cmdlist[i].message); 6226 } 6227 return (NULL); 6228 } 6229 6230 static void 6231 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 6232 { 6233 struct scsi_pkt *pkt = acp->pkt; 6234 struct scsi_address *ap = &pkt->pkt_address; 6235 int ctl = ddi_get_instance(softs->devinfo_p); 6236 int tgt = ap->a_target; 6237 int lun = ap->a_lun; 6238 union scsi_cdb *cdbp = (union scsi_cdb *)pkt->pkt_cdbp; 6239 uchar_t cmd = cdbp->scc_cmd; 6240 char *desc; 6241 6242 if ((desc = aac_cmd_name(cmd, 6243 (struct aac_key_strings *)scsi_cmds)) == NULL) { 6244 aac_printf(softs, CE_NOTE, 6245 "SCMD> Unknown(0x%2x) --> c%dt%dL%d", 6246 cmd, ctl, tgt, lun); 6247 return; 6248 } 6249 6250 switch (cmd) { 6251 case SCMD_READ: 6252 case SCMD_WRITE: 6253 aac_printf(softs, CE_NOTE, 6254 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d", 6255 desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp), 6256 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 6257 ctl, tgt, lun); 6258 break; 6259 case SCMD_READ_G1: 6260 case SCMD_WRITE_G1: 6261 aac_printf(softs, CE_NOTE, 6262 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d", 6263 desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp), 6264 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 6265 ctl, tgt, lun); 6266 break; 6267 case SCMD_READ_G4: 6268 case SCMD_WRITE_G4: 6269 aac_printf(softs, CE_NOTE, 6270 "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d", 6271 desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp), 6272 GETG4COUNT(cdbp), 6273 (acp->flags & AAC_CMD_NO_INTR) ? 
"poll" : "intr", 6274 ctl, tgt, lun); 6275 break; 6276 default: 6277 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d", 6278 desc, ctl, tgt, lun); 6279 } 6280 } 6281 6282 void 6283 aac_print_fib(struct aac_softstate *softs, struct aac_fib *fibp) 6284 { 6285 uint16_t fib_size; 6286 int32_t fib_cmd, sub_cmd; 6287 char *cmdstr, *subcmdstr; 6288 struct aac_Container *pContainer; 6289 6290 fib_cmd = LE_16(fibp->Header.Command); 6291 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds); 6292 sub_cmd = -1; 6293 subcmdstr = NULL; 6294 6295 switch (fib_cmd) { 6296 case ContainerCommand: 6297 pContainer = (struct aac_Container *)fibp->data; 6298 sub_cmd = LE_32(pContainer->Command); 6299 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds); 6300 if (subcmdstr == NULL) 6301 break; 6302 fib_cmd = sub_cmd; 6303 cmdstr = subcmdstr; 6304 sub_cmd = -1; 6305 subcmdstr = NULL; 6306 6307 switch (pContainer->Command) { 6308 case VM_ContainerConfig: 6309 sub_cmd = LE_32(pContainer->CTCommand.command); 6310 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds); 6311 if (subcmdstr == NULL) 6312 break; 6313 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)", 6314 subcmdstr, 6315 LE_32(pContainer->CTCommand.param[0]), 6316 LE_32(pContainer->CTCommand.param[1]), 6317 LE_32(pContainer->CTCommand.param[2])); 6318 return; 6319 case VM_Ioctl: 6320 sub_cmd = LE_32(((int32_t *)pContainer)[4]); 6321 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds); 6322 break; 6323 } 6324 break; 6325 6326 case ClusterCommand: 6327 sub_cmd = LE_32(((int32_t *)fibp->data)[0]); 6328 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds); 6329 break; 6330 6331 case AifRequest: 6332 sub_cmd = LE_32(((int32_t *)fibp->data)[0]); 6333 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds); 6334 break; 6335 6336 default: 6337 break; 6338 } 6339 6340 fib_size = LE_16(fibp->Header.Size); 6341 if (subcmdstr) 6342 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 6343 subcmdstr, fib_size); 6344 else if (cmdstr && sub_cmd == -1) 6345 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 6346 cmdstr, fib_size); 6347 else if (cmdstr) 6348 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d", 6349 cmdstr, sub_cmd, fib_size); 6350 else 6351 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d", 6352 fib_cmd, fib_size); 6353 } 6354 6355 static void 6356 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif) 6357 { 6358 int aif_command; 6359 uint32_t aif_seqnumber; 6360 int aif_en_type; 6361 char *str; 6362 6363 aif_command = LE_32(aif->command); 6364 aif_seqnumber = LE_32(aif->seqNumber); 6365 aif_en_type = LE_32(aif->data.EN.type); 6366 6367 switch (aif_command) { 6368 case AifCmdEventNotify: 6369 str = aac_cmd_name(aif_en_type, aac_aifens); 6370 if (str) 6371 aac_printf(softs, CE_NOTE, "AIF! %s", str); 6372 else 6373 aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)", 6374 aif_en_type); 6375 break; 6376 6377 case AifCmdJobProgress: 6378 switch (LE_32(aif->data.PR[0].status)) { 6379 case AifJobStsSuccess: 6380 str = "success"; break; 6381 case AifJobStsFinished: 6382 str = "finished"; break; 6383 case AifJobStsAborted: 6384 str = "aborted"; break; 6385 case AifJobStsFailed: 6386 str = "failed"; break; 6387 case AifJobStsSuspended: 6388 str = "suspended"; break; 6389 case AifJobStsRunning: 6390 str = "running"; break; 6391 default: 6392 str = "unknown"; break; 6393 } 6394 aac_printf(softs, CE_NOTE, 6395 "AIF! 
JobProgress (%d) - %s (%d, %d)", 6396 aif_seqnumber, str, 6397 LE_32(aif->data.PR[0].currentTick), 6398 LE_32(aif->data.PR[0].finalTick)); 6399 break; 6400 6401 case AifCmdAPIReport: 6402 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)", 6403 aif_seqnumber); 6404 break; 6405 6406 case AifCmdDriverNotify: 6407 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)", 6408 aif_seqnumber); 6409 break; 6410 6411 default: 6412 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)", 6413 aif_command, aif_seqnumber); 6414 break; 6415 } 6416 } 6417 6418 #endif /* DEBUG */ 6419
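/*
 * Illustrative sketch, not part of the original driver: the DEBUG helpers
 * above map numeric command codes to strings by scanning small { key,
 * message } tables terminated by a -1 key (struct aac_key_strings).  The
 * stand-alone user-space rendition below shows the same lookup with a
 * hypothetical table (demo_cmds, cmd_name and the 0x01/0x02 codes are made
 * up for the example; the driver's real tables are aac_fib_cmds, aac_aifens
 * and friends, searched by aac_cmd_name()).
 *
 *	#include <stdio.h>
 *
 *	struct key_strings {
 *		int key;
 *		char *message;
 *	};
 *
 *	static struct key_strings demo_cmds[] = {
 *		{ 0x01, "TestCommand" },
 *		{ 0x02, "ShutdownCommand" },
 *		{ -1, NULL }		sentinel that stops the scan
 *	};
 *
 *	static char *
 *	cmd_name(int cmd, struct key_strings *cmdlist)
 *	{
 *		int i;
 *
 *		for (i = 0; cmdlist[i].key != -1; i++) {
 *			if (cmd == cmdlist[i].key)
 *				return (cmdlist[i].message);
 *		}
 *		return (NULL);		caller prints Unknown(0x%x)
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		printf("0x02 -> %s\n", cmd_name(0x02, demo_cmds));
 *		return (0);
 *	}
 *
 * A linear scan is adequate here because the tables are short and the
 * lookup only runs on the DEBUG print path.
 */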