/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2005-08 Adaptec, Inc.
 * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ksynch.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#include "aac_regs.h"
#include "aac.h"

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

/*
 * For minor nodes created by the SCSA framework, minor numbers are
 * formed by left-shifting instance by INST_MINOR_SHIFT and ORing in a
 * number less than 64.
 *
 * To support cfgadm, we need to conform to the SCSA framework by creating
 * devctl/scsi and driver specific minor nodes in the SCSA format,
 * and calling scsi_hba_xxx() functions accordingly.
 */

#define AAC_MINOR		32
#define INST2AAC(x)		(((x) << INST_MINOR_SHIFT) | AAC_MINOR)
#define AAC_SCSA_MINOR(x)	((x) & TRAN_MINOR_MASK)
#define AAC_IS_SCSA_NODE(x)	((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)

#define SD2TRAN(sd)		((sd)->sd_address.a_hba_tran)
#define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
#define AAC_DIP2TRAN(dip)	((scsi_hba_tran_t *)ddi_get_driver_private(dip))
#define AAC_DIP2SOFTS(dip)	(AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
#define SD2AAC(sd)		(AAC_TRAN2SOFTS(SD2TRAN(sd)))
#define AAC_PD(t)		((t) - AAC_MAX_LD)
#define AAC_DEV(softs, t)	(((t) < AAC_MAX_LD) ? \
	&(softs)->containers[(t)].dev : \
	((t) < AAC_MAX_DEV(softs)) ? \
	&(softs)->nondasds[AAC_PD(t)].dev : NULL)
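/*
 * AAC_DEV() maps a target number to its device structure: targets below
 * AAC_MAX_LD index the logical containers[] array, targets from AAC_MAX_LD
 * up to AAC_MAX_DEV(softs) map to the nondasds[] array of physical
 * (non-DASD) devices via AAC_PD(), and anything out of range yields NULL.
 */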
#define AAC_DEVCFG_BEGIN(softs, tgt) \
	aac_devcfg((softs), (tgt), 1)
#define AAC_DEVCFG_END(softs, tgt) \
	aac_devcfg((softs), (tgt), 0)
#define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private)
#define AAC_BUSYWAIT(cond, timeout /* in milliseconds */) { \
	if (!(cond)) { \
		int count = (timeout) * 10; \
		while (count) { \
			drv_usecwait(100); \
			if (cond) \
				break; \
			count--; \
		} \
		(timeout) = (count + 9) / 10; \
	} \
}

#define AAC_SENSE_DATA_DESCR_LEN \
	(sizeof (struct scsi_descr_sense_hdr) + \
	sizeof (struct scsi_information_sense_descr))
#define AAC_ARQ64_LENGTH \
	(sizeof (struct scsi_arq_status) + \
	AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)

/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
#define AAC_GETGXADDR(cmdlen, cdbp) \
	((cmdlen == 6) ? GETG0ADDR(cdbp) : \
	(cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
	((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))

#define AAC_CDB_INQUIRY_CMDDT	0x02
#define AAC_CDB_INQUIRY_EVPD	0x01
#define AAC_VPD_PAGE_CODE	1
#define AAC_VPD_PAGE_LENGTH	3
#define AAC_VPD_PAGE_DATA	4
#define AAC_VPD_ID_CODESET	0
#define AAC_VPD_ID_TYPE		1
#define AAC_VPD_ID_LENGTH	3
#define AAC_VPD_ID_DATA		4

#define AAC_SCSI_RPTLUNS_HEAD_SIZE		0x08
#define AAC_SCSI_RPTLUNS_ADDR_SIZE		0x08
#define AAC_SCSI_RPTLUNS_ADDR_MASK		0xC0
/* 00b - peripheral device addressing method */
#define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL	0x00
/* 01b - flat space addressing method */
#define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE	0x40
/* 10b - logical unit addressing method */
#define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT	0x80

/* Return the size of FIB with data part type data_type */
#define AAC_FIB_SIZEOF(data_type) \
	(sizeof (struct aac_fib_header) + sizeof (data_type))
/* Return the container size defined in mir */
#define AAC_MIR_SIZE(softs, acc, mir) \
	(((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
	((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
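/*
 * With AAC_FLAGS_LBA_64BIT set, the container capacity is assembled from two
 * 32-bit fields, effectively Capacity | ((uint64_t)CapacityHigh << 32);
 * without that flag only the low 32 bits reported by the firmware are used.
 */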

/* The last entry of aac_cards[] is for unknown cards */
#define AAC_UNKNOWN_CARD \
	(sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
#define CARD_IS_UNKNOWN(i)	(i == AAC_UNKNOWN_CARD)
#define BUF_IS_READ(bp)		((bp)->b_flags & B_READ)
#define AAC_IS_Q_EMPTY(q)	((q)->q_head == NULL)
#define AAC_CMDQ(acp)		(!((acp)->flags & AAC_CMD_SYNC))

#define PCI_MEM_GET32(softs, off) \
	ddi_get32((softs)->pci_mem_handle, \
	(void *)((softs)->pci_mem_base_vaddr + (off)))
#define PCI_MEM_PUT32(softs, off, val) \
	ddi_put32((softs)->pci_mem_handle, \
	(void *)((softs)->pci_mem_base_vaddr + (off)), \
	(uint32_t)(val))
#define PCI_MEM_GET16(softs, off) \
	ddi_get16((softs)->pci_mem_handle, \
	(void *)((softs)->pci_mem_base_vaddr + (off)))
#define PCI_MEM_PUT16(softs, off, val) \
	ddi_put16((softs)->pci_mem_handle, \
	(void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
/* Write host data at valp to device mem[off] repeatedly count times */
#define PCI_MEM_REP_PUT8(softs, off, valp, count) \
	ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
	(uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
	count, DDI_DEV_AUTOINCR)
/* Read device data at mem[off] to host addr valp repeatedly count times */
#define PCI_MEM_REP_GET8(softs, off, valp, count) \
	ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
	(uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
	count, DDI_DEV_AUTOINCR)
#define AAC_GET_FIELD8(acc, d, s, field) \
	(d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
#define AAC_GET_FIELD32(acc, d, s, field) \
	(d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
#define AAC_GET_FIELD64(acc, d, s, field) \
	(d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
#define AAC_REP_GET_FIELD8(acc, d, s, field, r) \
	ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
	(uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
#define AAC_REP_GET_FIELD32(acc, d, s, field, r) \
	ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
	(uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)

#define AAC_ENABLE_INTR(softs) { \
	if (softs->flags & AAC_FLAGS_NEW_COMM) \
		PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
	else \
		PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
	softs->state |= AAC_STATE_INTR; \
}

#define AAC_DISABLE_INTR(softs) { \
	PCI_MEM_PUT32(softs, AAC_OIMR, ~0); \
	softs->state &= ~AAC_STATE_INTR; \
}
#define AAC_STATUS_CLR(softs, mask)	PCI_MEM_PUT32(softs, AAC_ODBR, mask)
#define AAC_STATUS_GET(softs)		PCI_MEM_GET32(softs, AAC_ODBR)
#define AAC_NOTIFY(softs, val)		PCI_MEM_PUT32(softs, AAC_IDBR, val)
#define AAC_OUTB_GET(softs)		PCI_MEM_GET32(softs, AAC_OQUE)
#define AAC_OUTB_SET(softs, val)	PCI_MEM_PUT32(softs, AAC_OQUE, val)
#define AAC_FWSTATUS_GET(softs) \
	((softs)->aac_if.aif_get_fwstatus(softs))
#define AAC_MAILBOX_GET(softs, mb) \
	((softs)->aac_if.aif_get_mailbox((softs), (mb)))
#define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
	((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
	(arg0), (arg1), (arg2), (arg3)))

#define AAC_MGT_SLOT_NUM	2
#define AAC_THROTTLE_DRAIN	-1

#define AAC_QUIESCE_TICK	1	/* 1 second */
#define AAC_QUIESCE_TIMEOUT	180	/* 180 seconds */
#define AAC_DEFAULT_TICK	10	/* 10 seconds */
#define AAC_SYNC_TICK		(30*60)	/* 30 minutes */

/* Poll time for aac_do_poll_io() */
#define AAC_POLL_TIME		60	/* 60 seconds */

/* IOP reset */
#define AAC_IOP_RESET_SUCCEED		0	/* IOP reset succeeded */
#define AAC_IOP_RESET_FAILED		-1	/* IOP reset failed */
#define AAC_IOP_RESET_ABNORMAL		-2	/* Reset operation abnormal */

/*
 * Hardware access functions
 */
static int aac_rx_get_fwstatus(struct aac_softstate *);
static int aac_rx_get_mailbox(struct aac_softstate *, int);
static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
    uint32_t, uint32_t, uint32_t);
static int aac_rkt_get_fwstatus(struct aac_softstate *);
static int aac_rkt_get_mailbox(struct aac_softstate *, int);
static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
    uint32_t, uint32_t, uint32_t);

/*
 * SCSA function prototypes
 */
static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
static int aac_quiesce(dev_info_t *);
static int aac_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);

/*
 * Interrupt handler functions
 */
static int aac_query_intrs(struct aac_softstate *, int);
static int aac_add_intrs(struct aac_softstate *);
static void aac_remove_intrs(struct aac_softstate *);
static int aac_enable_intrs(struct aac_softstate *);
static int aac_disable_intrs(struct aac_softstate *);
static uint_t aac_intr_old(caddr_t);
static uint_t aac_intr_new(caddr_t);
static uint_t aac_softintr(caddr_t);

/*
 * Internal functions in attach
 */
static int aac_check_card_type(struct aac_softstate *);
static int aac_check_firmware(struct aac_softstate *);
static int aac_common_attach(struct aac_softstate *);
static void aac_common_detach(struct aac_softstate *);
static int aac_probe_containers(struct aac_softstate *);
static int aac_alloc_comm_space(struct aac_softstate *);
static int aac_setup_comm_space(struct aac_softstate *);
static void aac_free_comm_space(struct aac_softstate *);
static int aac_hba_setup(struct aac_softstate *);

/*
 * Sync FIB operation functions
 */
int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
    uint32_t, uint32_t, uint32_t, uint32_t *);
static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);

/*
 * Command queue operation functions
 */
static void aac_cmd_initq(struct aac_cmd_queue *);
static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);

/*
 * FIB queue operation functions
 */
static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
static int aac_fib_dequeue(struct aac_softstate *, int, int *);

/*
 * Slot operation functions
 */
static int aac_create_slots(struct aac_softstate *);
static void aac_destroy_slots(struct aac_softstate *);
static void aac_alloc_fibs(struct aac_softstate *);
static void aac_destroy_fibs(struct aac_softstate *);
static struct aac_slot *aac_get_slot(struct aac_softstate *);
static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
static void aac_free_fib(struct aac_slot *);

/*
 * Internal functions
 */
static void aac_cmd_fib_header(struct aac_softstate *, struct aac_cmd *,
    uint16_t);
static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *);
static void aac_start_waiting_io(struct aac_softstate *);
static void aac_drain_comp_q(struct aac_softstate *);
int aac_do_io(struct aac_softstate *, struct aac_cmd *);
static int aac_sync_fib_slot_bind(struct aac_softstate *, struct aac_cmd *);
static void aac_sync_fib_slot_release(struct aac_softstate *, struct aac_cmd *);
static void aac_start_io(struct aac_softstate *, struct aac_cmd *);
static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
static int aac_send_command(struct aac_softstate *, struct aac_slot *);
static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *);
static int aac_dma_sync_ac(struct aac_cmd *);
static int aac_shutdown(struct aac_softstate *);
static int aac_reset_adapter(struct aac_softstate *);
static int aac_do_quiesce(struct aac_softstate *softs);
static int aac_do_unquiesce(struct aac_softstate *softs);
static void aac_unhold_bus(struct aac_softstate *, int);
static void aac_set_throttle(struct aac_softstate *, struct aac_device *,
    int, int);

/*
 * Adapter Initiated FIB handling function
 */
static int aac_handle_aif(struct aac_softstate *, struct aac_fib *);

/*
 * Timeout handling thread function
 */
static void aac_daemon(void *);

/*
 * IOCTL interface related functions
 */
static int aac_open(dev_t *, int, int, cred_t *);
static int aac_close(dev_t, int, int, cred_t *);
static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);

/*
 * FMA Prototypes
 */
static void aac_fm_init(struct aac_softstate *);
static void aac_fm_fini(struct aac_softstate *);
static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
int aac_check_acc_handle(ddi_acc_handle_t);
int aac_check_dma_handle(ddi_dma_handle_t);
void aac_fm_ereport(struct aac_softstate *, char *);

/*
 * Auto enumeration functions
 */
static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t);
static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *, dev_info_t **);
static int aac_dr_event(struct aac_softstate *, int, int, int);

#ifdef DEBUG
/*
 * UART debug output support
 */

#define AAC_PRINT_BUFFER_SIZE	512
#define AAC_PRINT_TIMEOUT	250	/* 1/4 sec. = 250 msec. */

#define AAC_FW_DBG_STRLEN_OFFSET	0x00
#define AAC_FW_DBG_FLAGS_OFFSET		0x04
#define AAC_FW_DBG_BLED_OFFSET		0x08

static int aac_get_fw_debug_buffer(struct aac_softstate *);
static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);

static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
static char aac_fmt[] = " %s";
static char aac_fmt_header[] = " %s.%d: %s";
static kmutex_t aac_prt_mutex;

/*
 * Debug flags to be put into the softstate flags field
 * when initialized
 */
uint32_t aac_debug_flags =
/*	AACDB_FLAGS_KERNEL_PRINT | */
/*	AACDB_FLAGS_FW_PRINT |	*/
/*	AACDB_FLAGS_MISC |	*/
/*	AACDB_FLAGS_FUNC1 |	*/
/*	AACDB_FLAGS_FUNC2 |	*/
/*	AACDB_FLAGS_SCMD |	*/
/*	AACDB_FLAGS_AIF |	*/
/*	AACDB_FLAGS_FIB |	*/
/*	AACDB_FLAGS_IOCTL |	*/
0;
uint32_t aac_debug_fib_flags =
/*	AACDB_FLAGS_FIB_RW |	*/
/*	AACDB_FLAGS_FIB_IOCTL |	*/
/*	AACDB_FLAGS_FIB_SRB |	*/
/*	AACDB_FLAGS_FIB_SYNC |	*/
/*	AACDB_FLAGS_FIB_HEADER | */
/*	AACDB_FLAGS_FIB_TIMEOUT | */
0;

#endif /* DEBUG */

static struct cb_ops aac_cb_ops = {
	aac_open,	/* open */
	aac_close,	/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	aac_ioctl,	/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	nochpoll,	/* poll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* streamtab */
	D_64BIT | D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,		/* cb_rev */
	nodev,		/* async I/O read entry point */
	nodev		/* async I/O write entry point */
};

static struct dev_ops aac_dev_ops = {
	DEVO_REV,
	0,
	aac_getinfo,
	nulldev,
	nulldev,
	aac_attach,
	aac_detach,
	aac_reset,
	&aac_cb_ops,
	NULL,
	NULL,
	aac_quiesce,
};

static struct modldrv aac_modldrv = {
	&mod_driverops,
	"AAC Driver " AAC_DRIVER_VERSION,
	&aac_dev_ops,
};

static struct modlinkage aac_modlinkage = {
	MODREV_1,
	&aac_modldrv,
	NULL
};

static struct aac_softstate *aac_softstatep;

/*
 * Supported card list
 * ordered by vendor id, subvendor id, subdevice id, and device id
 */
static struct aac_card_type aac_cards[] = {
	{0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Si"},
	{0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Si"},
	{0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
	    "Dell", "PERC 3/Di"},
	{0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Dell", "PERC 320/DC"},
	{0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},

	{0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
	{0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
	{0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},

	{0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
	{0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},

	{0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Adaptec", "2200S"},
	{0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Adaptec", "2120S"},
	{0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
	    "Adaptec", "2200S"},
	{0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
	{0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
	{0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
	{0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
	{0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
	    0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
	{0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
	    0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
	{0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
	{0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
	{0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
	{0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
	{0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
	{0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
	{0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
	{0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
	{0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
	{0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
	{0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
	{0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
	{0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
	{0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
	{0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9024RO"},
	{0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9014RO"},
	{0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9047MA"},
	{0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9087MA"},
	{0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "9085LI"},
	{0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "5085BR"},
	{0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
	    0, AAC_TYPE_SATA, "ICP", "9067MA"},
	{0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
	{0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
	{0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
	{0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
	{0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
	{0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},

	{0, 0, 0, 0, AAC_HWIF_UNKNOWN,
	    0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
};

/*
 * Hardware access functions for i960 based cards
 */
static struct aac_interface aac_rx_interface = {
	aac_rx_get_fwstatus,
	aac_rx_get_mailbox,
	aac_rx_set_mailbox
};

/*
 * Hardware access functions for Rocket based cards
 */
static struct aac_interface aac_rkt_interface = {
	aac_rkt_get_fwstatus,
	aac_rkt_get_mailbox,
	aac_rkt_set_mailbox
};

ddi_device_acc_attr_t aac_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

static struct {
	int size;
	int notify;
} aac_qinfo[] = {
	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
};

/*
 * Default aac dma attributes
 */
static ddi_dma_attr_t aac_dma_attr = {
	DMA_ATTR_V0,
	0,		/* lowest usable address */
	0xffffffffull,	/* high DMA address range */
	0xffffffffull,	/* DMA counter register */
	AAC_DMA_ALIGN,	/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xffffffffull,	/* max DMA xfer size */
	0xffffffffull,	/* segment boundary */
	1,		/* s/g list length */
	AAC_BLK_SIZE,	/* granularity of device */
	0		/* DMA transfer flags */
};

struct aac_drinfo {
	struct aac_softstate *softs;
	int tgt;
	int lun;
	int event;
};

static int aac_tick = AAC_DEFAULT_TICK;	/* tick for the internal timer */
static uint32_t aac_timebase = 0;	/* internal timer in seconds */
static uint32_t aac_sync_time = 0;	/* next time to sync. with firmware */

/*
 * Warlock directives
 *
 * Different variables with the same types have to be protected by the
 * same mutex; otherwise, warlock will complain with "variables don't
 * seem to be protected consistently". For example,
 * aac_softstate::{q_wait, q_comp} are of type aac_cmd_queue, and protected
 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to
 * declare them as protected explicitly at aac_cmd_dequeue().
 */
_NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
    scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
    mode_format mode_geometry mode_header aac_cmd))
_NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
    aac_sge))
_NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
    aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
    aac_sg_table aac_srb))
_NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
_NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
_NOTE(SCHEME_PROTECTS_DATA("unique to dr event", aac_drinfo))
_NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf))

int
_init(void)
{
	int rval = 0;

#ifdef DEBUG
	mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
#endif
	DBCALLED(NULL, 1);

	if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
	    sizeof (struct aac_softstate), 0)) != 0)
		goto error;

	if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
		ddi_soft_state_fini((void *)&aac_softstatep);
		goto error;
	}

	if ((rval = mod_install(&aac_modlinkage)) != 0) {
		ddi_soft_state_fini((void *)&aac_softstatep);
		scsi_hba_fini(&aac_modlinkage);
		goto error;
	}
	return (rval);

error:
	AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
#ifdef DEBUG
	mutex_destroy(&aac_prt_mutex);
#endif
	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	DBCALLED(NULL, 1);
	return (mod_info(&aac_modlinkage, modinfop));
}

/*
 * An HBA driver cannot be unloaded unless you reboot,
 * so this function will be of no use.
 */
int
_fini(void)
{
	int rval;

	DBCALLED(NULL, 1);

	if ((rval = mod_remove(&aac_modlinkage)) != 0)
		goto error;

	scsi_hba_fini(&aac_modlinkage);
	ddi_soft_state_fini((void *)&aac_softstatep);
#ifdef DEBUG
	mutex_destroy(&aac_prt_mutex);
#endif
	return (0);

error:
	AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
	return (rval);
}

static int
aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance, i;
	struct aac_softstate *softs = NULL;
	int attach_state = 0;
	char *data;

	DBCALLED(NULL, 1);

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_FAILURE);
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);

	/* Get soft state */
	if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
		goto error;
	}
	softs = ddi_get_soft_state(aac_softstatep, instance);
	attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;

	softs->instance = instance;
	softs->devinfo_p = dip;
	softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
	softs->addr_dma_attr.dma_attr_granular = 1;
	softs->acc_attr = aac_acc_attr;
	softs->reg_attr = aac_acc_attr;
	softs->card = AAC_UNKNOWN_CARD;
#ifdef DEBUG
	softs->debug_flags = aac_debug_flags;
	softs->debug_fib_flags = aac_debug_fib_flags;
#endif

	/* Initialize FMA */
	aac_fm_init(softs);

	/* Check the card type */
	if (aac_check_card_type(softs) == AACERR) {
		AACDB_PRINT(softs, CE_WARN, "Card not supported");
		goto error;
	}
	/* We have found the right card and everything is OK */
	attach_state |= AAC_ATTACH_CARD_DETECTED;

	/* Map PCI mem space */
	if (ddi_regs_map_setup(dip, 1,
	    (caddr_t *)&softs->pci_mem_base_vaddr, 0,
	    softs->map_size_min, &softs->reg_attr,
	    &softs->pci_mem_handle) != DDI_SUCCESS)
		goto error;

	softs->map_size = softs->map_size_min;
	attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;

	AAC_DISABLE_INTR(softs);

	/* Init mutexes and condvars */
	mutex_init(&softs->q_comp_mutex, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
	cv_init(&softs->event, NULL, CV_DRIVER, NULL);
	cv_init(&softs->sync_fib_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&softs->aifq_mutex, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
	cv_init(&softs->aifv, NULL, CV_DRIVER, NULL);
	cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(softs->intr_pri));
	attach_state |= AAC_ATTACH_KMUTEX_INITED;

	/* Init the cmd queues */
	for (i = 0; i < AAC_CMDQ_NUM; i++)
		aac_cmd_initq(&softs->q_wait[i]);
	aac_cmd_initq(&softs->q_busy);
	aac_cmd_initq(&softs->q_comp);

	/* Check for legacy device naming support */
	softs->legacy = 1;	/* default to use legacy name */
	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
	    "legacy-name-enable", &data) == DDI_SUCCESS)) {
		if (strcmp(data, "no") == 0) {
			AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
			softs->legacy = 0;
		}
		ddi_prop_free(data);
	}

	/*
	 * Everything has been set up so far; now do the common attach.
	 */
	mutex_enter(&softs->io_lock);
	if (aac_common_attach(softs) == AACERR) {
		mutex_exit(&softs->io_lock);
		goto error;
	}
	mutex_exit(&softs->io_lock);
	attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;

	/* Check for buf breakup support */
	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
	    "breakup-enable", &data) == DDI_SUCCESS)) {
		if (strcmp(data, "yes") == 0) {
			AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled");
			softs->flags |= AAC_FLAGS_BRKUP;
		}
		ddi_prop_free(data);
	}
	softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer;
	if (softs->flags & AAC_FLAGS_BRKUP) {
		softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "dma-max", softs->dma_max);
	}

	if (aac_hba_setup(softs) != AACOK)
		goto error;
	attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;

	/* Create devctl/scsi nodes for cfgadm */
	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
		goto error;
	}
	attach_state |= AAC_ATTACH_CREATE_DEVCTL;

	if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
	    DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
		goto error;
	}
	attach_state |= AAC_ATTACH_CREATE_SCSI;

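	/*
	 * The devctl/scsi minor nodes above use the SCSA minor number
	 * encoding expected by cfgadm; the "aac" node created below uses
	 * INST2AAC, i.e. the instance shifted by INST_MINOR_SHIFT with
	 * AAC_MINOR ORed in, so that ioctl opens can be distinguished from
	 * the SCSA nodes (see AAC_IS_SCSA_NODE at the top of this file).
	 */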
	/* Create aac node for applications to issue ioctls */
	if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
		goto error;
	}

	/* Create a taskq for dealing with dr events */
	if ((softs->taskq = ddi_taskq_create(dip, "aac_dr_taskq", 1,
	    TASKQ_DEFAULTPRI, 0)) == NULL) {
		AACDB_PRINT(softs, CE_WARN, "ddi_taskq_create failed");
		goto error;
	}

	aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
	softs->state |= AAC_STATE_RUN;

	/* Set up a timeout for command timeout handling */
	softs->timeout_id = timeout(aac_daemon, (void *)softs,
	    (60 * drv_usectohz(1000000)));

	/* Common attach is OK, so we are attached! */
	ddi_report_dev(dip);
	AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
	return (DDI_SUCCESS);

error:
	if (softs && softs->taskq)
		ddi_taskq_destroy(softs->taskq);
	if (attach_state & AAC_ATTACH_CREATE_SCSI)
		ddi_remove_minor_node(dip, "scsi");
	if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
		ddi_remove_minor_node(dip, "devctl");
	if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
		aac_common_detach(softs);
	if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
		(void) scsi_hba_detach(dip);
		scsi_hba_tran_free(AAC_DIP2TRAN(dip));
	}
	if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
		mutex_destroy(&softs->q_comp_mutex);
		cv_destroy(&softs->event);
		cv_destroy(&softs->sync_fib_cv);
		mutex_destroy(&softs->aifq_mutex);
		cv_destroy(&softs->aifv);
		cv_destroy(&softs->drain_cv);
		mutex_destroy(&softs->io_lock);
	}
	if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
		ddi_regs_map_free(&softs->pci_mem_handle);
	aac_fm_fini(softs);
	if (attach_state & AAC_ATTACH_CARD_DETECTED)
		softs->card = AACERR;
	if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
		ddi_soft_state_free(aac_softstatep, instance);
	return (DDI_FAILURE);
}

static int
aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);

	DBCALLED(softs, 1);

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_FAILURE);
	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&softs->io_lock);
	AAC_DISABLE_INTR(softs);
	softs->state = AAC_STATE_STOPPED;

	mutex_exit(&softs->io_lock);
	(void) untimeout(softs->timeout_id);
	mutex_enter(&softs->io_lock);
	softs->timeout_id = 0;

	ddi_taskq_destroy(softs->taskq);

	ddi_remove_minor_node(dip, "aac");
	ddi_remove_minor_node(dip, "scsi");
	ddi_remove_minor_node(dip, "devctl");

	mutex_exit(&softs->io_lock);

	aac_common_detach(softs);

	mutex_enter(&softs->io_lock);
	(void) scsi_hba_detach(dip);
	scsi_hba_tran_free(tran);
	mutex_exit(&softs->io_lock);

	mutex_destroy(&softs->q_comp_mutex);
	cv_destroy(&softs->event);
	cv_destroy(&softs->sync_fib_cv);
	mutex_destroy(&softs->aifq_mutex);
	cv_destroy(&softs->aifv);
	cv_destroy(&softs->drain_cv);
	mutex_destroy(&softs->io_lock);

	ddi_regs_map_free(&softs->pci_mem_handle);
	aac_fm_fini(softs);
	softs->hwif = AAC_HWIF_UNKNOWN;
	softs->card = AAC_UNKNOWN_CARD;
	ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}

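/*
 * The reset entry below is invoked at system reset/shutdown time; it simply
 * disables interrupts and flushes the controller cache through
 * aac_shutdown().
 */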
/*ARGSUSED*/
static int
aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);

	DBCALLED(softs, 1);

	mutex_enter(&softs->io_lock);
	AAC_DISABLE_INTR(softs);
	(void) aac_shutdown(softs);
	mutex_exit(&softs->io_lock);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
aac_quiesce(dev_info_t *dip)
{
	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);

	if (softs == NULL)
		return (DDI_FAILURE);

	AAC_DISABLE_INTR(softs);

	return (DDI_SUCCESS);
}

/* ARGSUSED */
static int
aac_getinfo(dev_info_t *self, ddi_info_cmd_t infocmd, void *arg,
    void **result)
{
	int error = DDI_SUCCESS;

	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)(MINOR2INST(getminor((dev_t)arg)));
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 * This function is called before detach or system shutdown.
 * Note: we can assume that the q_wait on the controller is empty, as we
 * won't allow shutdown if any device is open.
 */
static int
aac_shutdown(struct aac_softstate *softs)
{
	ddi_acc_handle_t acc;
	struct aac_close_command *cc;
	int rval;

	(void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
	acc = softs->sync_ac.slotp->fib_acc_handle;

	cc = (struct aac_close_command *)&softs->sync_ac.slotp->fibp->data[0];

	ddi_put32(acc, &cc->Command, VM_CloseAll);
	ddi_put32(acc, &cc->ContainerId, 0xfffffffful);

	/* Flush all caches, set FW to write through mode */
	rval = aac_sync_fib(softs, ContainerCommand,
	    AAC_FIB_SIZEOF(struct aac_close_command));
	aac_sync_fib_slot_release(softs, &softs->sync_ac);

	AACDB_PRINT(softs, CE_NOTE,
	    "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
	return (rval);
}

static uint_t
aac_softintr(caddr_t arg)
{
	struct aac_softstate *softs = (void *)arg;

	if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
		aac_drain_comp_q(softs);
	}
	return (DDI_INTR_CLAIMED);
}

/*
 * Setup auto sense data for pkt
 */
static void
aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
    uchar_t add_code, uchar_t qual_code, uint64_t info)
{
	struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp);

	*pkt->pkt_scbp = STATUS_CHECK;	/* CHECK CONDITION */
	pkt->pkt_state |= STATE_ARQ_DONE;

	*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
	arqstat->sts_rqpkt_reason = CMD_CMPLT;
	arqstat->sts_rqpkt_resid = 0;
	arqstat->sts_rqpkt_state =
	    STATE_GOT_BUS |
	    STATE_GOT_TARGET |
	    STATE_SENT_CMD |
	    STATE_XFERRED_DATA;
	arqstat->sts_rqpkt_statistics = 0;

	if (info <= 0xfffffffful) {
		arqstat->sts_sensedata.es_valid = 1;
		arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
		arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
		arqstat->sts_sensedata.es_key = key;
		arqstat->sts_sensedata.es_add_code = add_code;
		arqstat->sts_sensedata.es_qual_code = qual_code;

		arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
		arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
		arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF;
		arqstat->sts_sensedata.es_info_4 = info & 0xFF;
	} else {	/* 64-bit LBA */
		struct scsi_descr_sense_hdr *dsp;
		struct scsi_information_sense_descr *isd;

		dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
		dsp->ds_class = CLASS_EXTENDED_SENSE;
		dsp->ds_code = CODE_FMT_DESCR_CURRENT;
		dsp->ds_key = key;
		dsp->ds_add_code = add_code;
		dsp->ds_qual_code = qual_code;
		dsp->ds_addl_sense_length =
		    sizeof (struct scsi_information_sense_descr);

		isd = (struct scsi_information_sense_descr *)(dsp+1);
		isd->isd_descr_type = DESCR_INFORMATION;
		isd->isd_valid = 1;
		isd->isd_information[0] = (info >> 56) & 0xFF;
		isd->isd_information[1] = (info >> 48) & 0xFF;
		isd->isd_information[2] = (info >> 40) & 0xFF;
		isd->isd_information[3] = (info >> 32) & 0xFF;
		isd->isd_information[4] = (info >> 24) & 0xFF;
		isd->isd_information[5] = (info >> 16) & 0xFF;
		isd->isd_information[6] = (info >> 8) & 0xFF;
		isd->isd_information[7] = (info) & 0xFF;
	}
}

/*
 * Setup auto sense data for HARDWARE ERROR
 */
static void
aac_set_arq_data_hwerr(struct aac_cmd *acp)
{
	union scsi_cdb *cdbp;
	uint64_t err_blkno;

	cdbp = (void *)acp->pkt->pkt_cdbp;
	err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
	aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
}

/*
 * Send a command to the adapter via the New Comm. interface
 */
static int
aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
{
	uint32_t index, device;

	index = PCI_MEM_GET32(softs, AAC_IQUE);
	if (index == 0xffffffffUL) {
		index = PCI_MEM_GET32(softs, AAC_IQUE);
		if (index == 0xffffffffUL)
			return (AACERR);
	}

	device = index;
	PCI_MEM_PUT32(softs, device,
	    (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
	device += 4;
	PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
	device += 4;
	PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
	PCI_MEM_PUT32(softs, AAC_IQUE, index);
	return (AACOK);
}

static void
aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
{
	struct aac_device *dvp = acp->dvp;
	int q = AAC_CMDQ(acp);

	if (acp->slotp) {	/* outstanding cmd */
		if (!(acp->flags & AAC_CMD_IN_SYNC_SLOT)) {
			aac_release_slot(softs, acp->slotp);
			acp->slotp = NULL;
		}
		if (dvp) {
			dvp->ncmds[q]--;
			if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
			    dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
				aac_set_throttle(softs, dvp, q,
				    softs->total_slots);
			/*
			 * Setup auto sense data for UNIT ATTENTION
			 * Each LUN should generate a unit attention
			 * condition when reset.
			 * Phys. drives are treated as logical ones
			 * during error recovery.
			 */
			if (dvp->type == AAC_DEV_LD) {
				struct aac_container *ctp =
				    (struct aac_container *)dvp;
				if (ctp->reset == 0)
					goto noreset;

				AACDB_PRINT(softs, CE_NOTE,
				    "Unit attention: reset");
				ctp->reset = 0;
				aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION,
				    0x29, 0x02, 0);
			}
		}
noreset:
		softs->bus_ncmds[q]--;
		aac_cmd_delete(&softs->q_busy, acp);
	} else {	/* cmd in waiting queue */
		aac_cmd_delete(&softs->q_wait[q], acp);
	}

	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
		mutex_enter(&softs->q_comp_mutex);
		aac_cmd_enqueue(&softs->q_comp, acp);
		mutex_exit(&softs->q_comp_mutex);
	} else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
		cv_broadcast(&softs->event);
	}
}

static void
aac_handle_io(struct aac_softstate *softs, int index)
{
	struct aac_slot *slotp;
	struct aac_cmd *acp;
	uint32_t fast;

	fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
	index >>= 2;

	/* Make sure the firmware reported index is valid */
	ASSERT(index >= 0 && index < softs->total_slots);
	slotp = &softs->io_slot[index];
	ASSERT(slotp->index == index);
	acp = slotp->acp;

	if (acp == NULL || acp->slotp != slotp) {
		cmn_err(CE_WARN,
		    "Firmware error: invalid slot index received from FW");
		return;
	}

	acp->flags |= AAC_CMD_CMPLT;
	(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);

	if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
		/*
		 * For fast response IO, the firmware does not return any FIB
		 * data, so we need to fill in the FIB status and state so that
		 * FIB users can handle it correctly.
		 */
		if (fast) {
			uint32_t state;

			state = ddi_get32(slotp->fib_acc_handle,
			    &slotp->fibp->Header.XferState);
			/*
			 * Update state for CPU not for device, no DMA sync
			 * needed
			 */
			ddi_put32(slotp->fib_acc_handle,
			    &slotp->fibp->Header.XferState,
			    state | AAC_FIBSTATE_DONEADAP);
			ddi_put32(slotp->fib_acc_handle,
			    (void *)&slotp->fibp->data[0], ST_OK);
		}

		/* Handle completed ac */
		acp->ac_comp(softs, acp);
	} else {
		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
		acp->flags |= AAC_CMD_ERR;
		if (acp->pkt) {
			acp->pkt->pkt_reason = CMD_TRAN_ERR;
			acp->pkt->pkt_statistics = 0;
		}
	}
	aac_end_io(softs, acp);
}

/*
 * Interrupt handler for the New Comm. interface
 * The New Comm. interface uses a different interrupt mechanism: there are no
 * explicit message queues, and the driver only needs to access the mapped PCI
 * memory space to find completed FIBs or AIFs.
 */
static int
aac_process_intr_new(struct aac_softstate *softs)
{
	uint32_t index;

	index = AAC_OUTB_GET(softs);
	if (index == 0xfffffffful)
		index = AAC_OUTB_GET(softs);
	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
		return (0);
	}
	if (index != 0xfffffffful) {
		do {
			if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
				aac_handle_io(softs, index);
			} else if (index != 0xfffffffeul) {
				struct aac_fib *fibp;	/* FIB in AIF queue */
				uint16_t fib_size, fib_size0;

				/*
				 * 0xfffffffe means that the controller wants
				 * more work, ignore it for now. Otherwise,
				 * AIF received.
				 */
				index &= ~2;

				mutex_enter(&softs->aifq_mutex);
				/*
				 * Copy AIF from adapter to the empty AIF slot
				 */
				fibp = &softs->aifq[softs->aifq_idx].d;
				fib_size0 = PCI_MEM_GET16(softs, index + \
				    offsetof(struct aac_fib, Header.Size));
				fib_size = (fib_size0 > AAC_FIB_SIZE) ?
				    AAC_FIB_SIZE : fib_size0;
				PCI_MEM_REP_GET8(softs, index, fibp,
				    fib_size);

				if (aac_check_acc_handle(softs-> \
				    pci_mem_handle) == DDI_SUCCESS)
					(void) aac_handle_aif(softs, fibp);
				else
					ddi_fm_service_impact(softs->devinfo_p,
					    DDI_SERVICE_UNAFFECTED);
				mutex_exit(&softs->aifq_mutex);

				/*
				 * AIF memory is owned by the adapter, so let it
				 * know that we are done with it.
				 */
				AAC_OUTB_SET(softs, index);
				AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
			}

			index = AAC_OUTB_GET(softs);
		} while (index != 0xfffffffful);

		/*
		 * Process waiting cmds before starting new ones to
		 * ensure first IOs are serviced first.
		 */
		aac_start_waiting_io(softs);
		return (AAC_DB_COMMAND_READY);
	} else {
		return (0);
	}
}

static uint_t
aac_intr_new(caddr_t arg)
{
	struct aac_softstate *softs = (void *)arg;
	uint_t rval;

	mutex_enter(&softs->io_lock);
	if (aac_process_intr_new(softs))
		rval = DDI_INTR_CLAIMED;
	else
		rval = DDI_INTR_UNCLAIMED;
	mutex_exit(&softs->io_lock);

	aac_drain_comp_q(softs);
	return (rval);
}

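/*
 * In the New Comm. path above, the outbound queue register returns
 * 0xffffffff when it is empty and 0xfffffffe when the adapter simply wants
 * more work; an index with AAC_SENDERADDR_MASK_AIF set refers to an AIF
 * rather than a completed I/O slot.
 */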
/*
 * Interrupt handler for the old interface
 * Explicit message queues are used to send FIBs to, and get completed FIBs
 * from, the adapter. The driver and adapter maintain the queues in a
 * producer/consumer manner, and the driver has to query the queues to find
 * completed FIBs.
 */
static int
aac_process_intr_old(struct aac_softstate *softs)
{
	uint16_t status;

	status = AAC_STATUS_GET(softs);
	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
		return (DDI_INTR_UNCLAIMED);
	}
	if (status & AAC_DB_RESPONSE_READY) {
		int slot_idx;

		/* ACK the intr */
		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
		(void) AAC_STATUS_GET(softs);
		while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
		    &slot_idx) == AACOK)
			aac_handle_io(softs, slot_idx);

		/*
		 * Process waiting cmds before starting new ones to
		 * ensure first IOs are serviced first.
		 */
		aac_start_waiting_io(softs);
		return (AAC_DB_RESPONSE_READY);
	} else if (status & AAC_DB_COMMAND_READY) {
		int aif_idx;

		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
		(void) AAC_STATUS_GET(softs);
		if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
		    AACOK) {
			ddi_acc_handle_t acc = softs->comm_space_acc_handle;
			struct aac_fib *fibp;	/* FIB in AIF queue */
			struct aac_fib *fibp0;	/* FIB in communication space */
			uint16_t fib_size, fib_size0;
			uint32_t fib_xfer_state;
			uint32_t addr, size;

			ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));

#define AAC_SYNC_AIF(softs, aif_idx, type) \
	{ (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
	    offsetof(struct aac_comm_space, \
	    adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
	    (type)); }

			mutex_enter(&softs->aifq_mutex);
			/* Copy AIF from adapter to the empty AIF slot */
			fibp = &softs->aifq[softs->aifq_idx].d;
			AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
			fibp0 = &softs->comm_space->adapter_fibs[aif_idx];
			fib_size0 = ddi_get16(acc, &fibp0->Header.Size);
			fib_size = (fib_size0 > AAC_FIB_SIZE) ?
			    AAC_FIB_SIZE : fib_size0;
			ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0,
			    fib_size, DDI_DEV_AUTOINCR);

			(void) aac_handle_aif(softs, fibp);
			mutex_exit(&softs->aifq_mutex);

			/* Complete AIF back to adapter with good status */
			fib_xfer_state = LE_32(fibp->Header.XferState);
			if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
				ddi_put32(acc, &fibp0->Header.XferState,
				    fib_xfer_state | AAC_FIBSTATE_DONEHOST);
				ddi_put32(acc, (void *)&fibp0->data[0], ST_OK);
				if (fib_size0 > AAC_FIB_SIZE)
					ddi_put16(acc, &fibp0->Header.Size,
					    AAC_FIB_SIZE);
				AAC_SYNC_AIF(softs, aif_idx,
				    DDI_DMA_SYNC_FORDEV);
			}

			/* Put the AIF response on the response queue */
			addr = ddi_get32(acc,
			    &softs->comm_space->adapter_fibs[aif_idx]. \
			    Header.SenderFibAddress);
			size = (uint32_t)ddi_get16(acc,
			    &softs->comm_space->adapter_fibs[aif_idx]. \
			    Header.Size);
			ddi_put32(acc,
			    &softs->comm_space->adapter_fibs[aif_idx]. \
			    Header.ReceiverFibAddress, addr);
			if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
			    addr, size) == AACERR)
				cmn_err(CE_NOTE, "!AIF ack failed");
		}
		return (AAC_DB_COMMAND_READY);
	} else if (status & AAC_DB_PRINTF_READY) {
		/* ACK the intr */
		AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
		(void) AAC_STATUS_GET(softs);
		(void) ddi_dma_sync(softs->comm_space_dma_handle,
		    offsetof(struct aac_comm_space, adapter_print_buf),
		    AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
		if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
		    DDI_SUCCESS)
			cmn_err(CE_NOTE, "MSG From Adapter: %s",
			    softs->comm_space->adapter_print_buf);
		else
			ddi_fm_service_impact(softs->devinfo_p,
			    DDI_SERVICE_UNAFFECTED);
		AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
		return (AAC_DB_PRINTF_READY);
	} else if (status & AAC_DB_COMMAND_NOT_FULL) {
		/*
		 * Without these two condition statements, the OS could hang
		 * after a while, especially if there are a lot of AIF's to
		 * handle, for instance if a drive is pulled from an array
		 * under heavy load.
		 */
		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
		return (AAC_DB_COMMAND_NOT_FULL);
	} else if (status & AAC_DB_RESPONSE_NOT_FULL) {
		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
		return (AAC_DB_RESPONSE_NOT_FULL);
	} else {
		return (0);
	}
}

static uint_t
aac_intr_old(caddr_t arg)
{
	struct aac_softstate *softs = (void *)arg;
	int rval;

	mutex_enter(&softs->io_lock);
	if (aac_process_intr_old(softs))
		rval = DDI_INTR_CLAIMED;
	else
		rval = DDI_INTR_UNCLAIMED;
	mutex_exit(&softs->io_lock);

	aac_drain_comp_q(softs);
	return (rval);
}

/*
 * Query FIXED or MSI interrupts
 */
static int
aac_query_intrs(struct aac_softstate *softs, int intr_type)
{
	dev_info_t *dip = softs->devinfo_p;
	int avail, actual, count;
	int i, flag, ret;

	AACDB_PRINT(softs, CE_NOTE,
	    "aac_query_intrs:interrupt type 0x%x", intr_type);

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		AACDB_PRINT(softs, CE_WARN,
		    "ddi_intr_get_nintrs() failed, ret %d count %d",
		    ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		AACDB_PRINT(softs, CE_WARN,
		    "ddi_intr_get_navail() failed, ret %d avail %d",
		    ret, avail);
		return (DDI_FAILURE);
	}

	AACDB_PRINT(softs, CE_NOTE,
	    "ddi_intr_get_nintrs() returned %d, ddi_intr_get_navail() "
	    "returned %d", count, avail);

	/* Allocate an array of interrupt handles */
	softs->intr_size = count * sizeof (ddi_intr_handle_t);
	softs->htable = kmem_alloc(softs->intr_size, KM_SLEEP);

	if (intr_type == DDI_INTR_TYPE_MSI) {
		count = 1;	/* only one vector needed for now */
		flag = DDI_INTR_ALLOC_STRICT;
	} else {	/* must be DDI_INTR_TYPE_FIXED */
		flag = DDI_INTR_ALLOC_NORMAL;
	}

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		AACDB_PRINT(softs, CE_WARN,
		    "ddi_intr_alloc() failed, ret = %d", ret);
		actual = 0;
		goto error;
	}

	if (actual < count) {
		AACDB_PRINT(softs, CE_NOTE,
		    "Requested: %d, Received: %d", count, actual);
		goto error;
	}

	softs->intr_cnt = actual;

	/* Get priority for first msi, assume remaining are all the same */
	if ((ret = ddi_intr_get_pri(softs->htable[0],
	    &softs->intr_pri)) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN,
		    "ddi_intr_get_pri() failed, ret = %d", ret);
		goto error;
	}

	/* Test for high level mutex */
	if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) {
		AACDB_PRINT(softs, CE_WARN,
		    "aac_query_intrs: Hi level interrupt not supported");
		goto error;
	}

	return (DDI_SUCCESS);

error:
	/* Free already allocated intr */
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(softs->htable[i]);

	kmem_free(softs->htable, softs->intr_size);
	return (DDI_FAILURE);
}


/*
 * Register FIXED or MSI interrupts, and enable them
 */
static int
aac_add_intrs(struct aac_softstate *softs)
{
	int i, ret;
	int actual;
	ddi_intr_handler_t *aac_intr;

	actual = softs->intr_cnt;
	aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
	    aac_intr_new : aac_intr_old);

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(softs->htable[i],
		    aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "ddi_intr_add_handler() failed ret = %d", ret);

			/* Free already allocated intr */
			for (i = 0; i < actual; i++)
				(void) ddi_intr_free(softs->htable[i]);

			kmem_free(softs->htable, softs->intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(softs->htable[i]);

		kmem_free(softs->htable, softs->intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Unregister FIXED or MSI interrupts
 */
static void
aac_remove_intrs(struct aac_softstate *softs)
{
	int i;

	/* Disable all interrupts */
	(void) aac_disable_intrs(softs);
	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < softs->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(softs->htable[i]);
		(void) ddi_intr_free(softs->htable[i]);
	}

	kmem_free(softs->htable, softs->intr_size);
}

static int
aac_enable_intrs(struct aac_softstate *softs)
{
	int rval = AACOK;

	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* for MSI block enable */
		if (ddi_intr_block_enable(softs->htable, softs->intr_cnt) !=
		    DDI_SUCCESS)
			rval = AACERR;
	} else {
		int i;

		/* Call ddi_intr_enable() for legacy/MSI non block enable */
		for (i = 0; i < softs->intr_cnt; i++) {
			if (ddi_intr_enable(softs->htable[i]) != DDI_SUCCESS)
				rval = AACERR;
		}
	}
	return (rval);
}

static int
aac_disable_intrs(struct aac_softstate *softs)
{
	int rval = AACOK;

	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		if (ddi_intr_block_disable(softs->htable, softs->intr_cnt) !=
		    DDI_SUCCESS)
DDI_SUCCESS) 1757 rval = AACERR; 1758 } else { 1759 int i; 1760 1761 for (i = 0; i < softs->intr_cnt; i++) { 1762 if (ddi_intr_disable(softs->htable[i]) != DDI_SUCCESS) 1763 rval = AACERR; 1764 } 1765 } 1766 return (rval); 1767 } 1768 1769 /* 1770 * Set pkt_reason and OR in pkt_statistics flag 1771 */ 1772 static void 1773 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp, 1774 uchar_t reason, uint_t stat) 1775 { 1776 #ifndef __lock_lint 1777 _NOTE(ARGUNUSED(softs)) 1778 #endif 1779 if (acp->pkt->pkt_reason == CMD_CMPLT) 1780 acp->pkt->pkt_reason = reason; 1781 acp->pkt->pkt_statistics |= stat; 1782 } 1783 1784 /* 1785 * Handle a finished pkt of soft SCMD 1786 */ 1787 static void 1788 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp) 1789 { 1790 ASSERT(acp->pkt); 1791 1792 acp->flags |= AAC_CMD_CMPLT; 1793 1794 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \ 1795 STATE_SENT_CMD | STATE_GOT_STATUS; 1796 if (acp->pkt->pkt_state & STATE_XFERRED_DATA) 1797 acp->pkt->pkt_resid = 0; 1798 1799 /* AAC_CMD_NO_INTR means no complete callback */ 1800 if (!(acp->flags & AAC_CMD_NO_INTR)) { 1801 mutex_enter(&softs->q_comp_mutex); 1802 aac_cmd_enqueue(&softs->q_comp, acp); 1803 mutex_exit(&softs->q_comp_mutex); 1804 ddi_trigger_softintr(softs->softint_id); 1805 } 1806 } 1807 1808 /* 1809 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old() 1810 */ 1811 1812 /* 1813 * Handle completed logical device IO command 1814 */ 1815 /*ARGSUSED*/ 1816 static void 1817 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1818 { 1819 struct aac_slot *slotp = acp->slotp; 1820 struct aac_blockread_response *resp; 1821 uint32_t status; 1822 1823 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1824 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1825 1826 acp->pkt->pkt_state |= STATE_GOT_STATUS; 1827 1828 /* 1829 * block_read/write has a similar response header, use blockread 1830 * response for both. 1831 */ 1832 resp = (struct aac_blockread_response *)&slotp->fibp->data[0]; 1833 status = ddi_get32(slotp->fib_acc_handle, &resp->Status); 1834 if (status == ST_OK) { 1835 acp->pkt->pkt_resid = 0; 1836 acp->pkt->pkt_state |= STATE_XFERRED_DATA; 1837 } else { 1838 aac_set_arq_data_hwerr(acp); 1839 } 1840 } 1841 1842 /* 1843 * Handle completed phys. 
device IO command 1844 */ 1845 static void 1846 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1847 { 1848 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 1849 struct aac_fib *fibp = acp->slotp->fibp; 1850 struct scsi_pkt *pkt = acp->pkt; 1851 struct aac_srb_reply *resp; 1852 uint32_t resp_status; 1853 1854 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1855 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1856 1857 resp = (struct aac_srb_reply *)&fibp->data[0]; 1858 resp_status = ddi_get32(acc, &resp->status); 1859 1860 /* First check FIB status */ 1861 if (resp_status == ST_OK) { 1862 uint32_t scsi_status; 1863 uint32_t srb_status; 1864 uint32_t data_xfer_length; 1865 1866 scsi_status = ddi_get32(acc, &resp->scsi_status); 1867 srb_status = ddi_get32(acc, &resp->srb_status); 1868 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length); 1869 1870 *pkt->pkt_scbp = (uint8_t)scsi_status; 1871 pkt->pkt_state |= STATE_GOT_STATUS; 1872 if (scsi_status == STATUS_GOOD) { 1873 uchar_t cmd = ((union scsi_cdb *)(void *) 1874 (pkt->pkt_cdbp))->scc_cmd; 1875 1876 /* Next check SRB status */ 1877 switch (srb_status & 0x3f) { 1878 case SRB_STATUS_DATA_OVERRUN: 1879 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \ 1880 "scmd=%d, xfer=%d, buflen=%d", 1881 (uint32_t)cmd, data_xfer_length, 1882 acp->bcount); 1883 1884 switch (cmd) { 1885 case SCMD_READ: 1886 case SCMD_WRITE: 1887 case SCMD_READ_G1: 1888 case SCMD_WRITE_G1: 1889 case SCMD_READ_G4: 1890 case SCMD_WRITE_G4: 1891 case SCMD_READ_G5: 1892 case SCMD_WRITE_G5: 1893 aac_set_pkt_reason(softs, acp, 1894 CMD_DATA_OVR, 0); 1895 break; 1896 } 1897 /*FALLTHRU*/ 1898 case SRB_STATUS_ERROR_RECOVERY: 1899 case SRB_STATUS_PENDING: 1900 case SRB_STATUS_SUCCESS: 1901 /* 1902 * pkt_resid should only be calculated if the 1903 * status is ERROR_RECOVERY/PENDING/SUCCESS/ 1904 * OVERRUN/UNDERRUN 1905 */ 1906 if (data_xfer_length) { 1907 pkt->pkt_state |= STATE_XFERRED_DATA; 1908 pkt->pkt_resid = acp->bcount - \ 1909 data_xfer_length; 1910 ASSERT(pkt->pkt_resid >= 0); 1911 } 1912 break; 1913 case SRB_STATUS_ABORTED: 1914 AACDB_PRINT(softs, CE_NOTE, 1915 "SRB_STATUS_ABORTED, xfer=%d, resid=%d", 1916 data_xfer_length, pkt->pkt_resid); 1917 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 1918 STAT_ABORTED); 1919 break; 1920 case SRB_STATUS_ABORT_FAILED: 1921 AACDB_PRINT(softs, CE_NOTE, 1922 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \ 1923 "resid=%d", data_xfer_length, 1924 pkt->pkt_resid); 1925 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL, 1926 0); 1927 break; 1928 case SRB_STATUS_PARITY_ERROR: 1929 AACDB_PRINT(softs, CE_NOTE, 1930 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \ 1931 "resid=%d", data_xfer_length, 1932 pkt->pkt_resid); 1933 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0); 1934 break; 1935 case SRB_STATUS_NO_DEVICE: 1936 case SRB_STATUS_INVALID_PATH_ID: 1937 case SRB_STATUS_INVALID_TARGET_ID: 1938 case SRB_STATUS_INVALID_LUN: 1939 case SRB_STATUS_SELECTION_TIMEOUT: 1940 #ifdef DEBUG 1941 if (AAC_DEV_IS_VALID(acp->dvp)) { 1942 AACDB_PRINT(softs, CE_NOTE, 1943 "SRB_STATUS_NO_DEVICE(%d), " \ 1944 "xfer=%d, resid=%d ", 1945 srb_status & 0x3f, 1946 data_xfer_length, pkt->pkt_resid); 1947 } 1948 #endif 1949 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0); 1950 break; 1951 case SRB_STATUS_COMMAND_TIMEOUT: 1952 case SRB_STATUS_TIMEOUT: 1953 AACDB_PRINT(softs, CE_NOTE, 1954 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \ 1955 "resid=%d", data_xfer_length, 1956 pkt->pkt_resid); 1957 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 1958 STAT_TIMEOUT); 1959 break; 1960 case 
SRB_STATUS_BUS_RESET: 1961 AACDB_PRINT(softs, CE_NOTE, 1962 "SRB_STATUS_BUS_RESET, xfer=%d, " \ 1963 "resid=%d", data_xfer_length, 1964 pkt->pkt_resid); 1965 aac_set_pkt_reason(softs, acp, CMD_RESET, 1966 STAT_BUS_RESET); 1967 break; 1968 default: 1969 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \ 1970 "xfer=%d, resid=%d", srb_status & 0x3f, 1971 data_xfer_length, pkt->pkt_resid); 1972 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1973 break; 1974 } 1975 } else if (scsi_status == STATUS_CHECK) { 1976 /* CHECK CONDITION */ 1977 struct scsi_arq_status *arqstat = 1978 (void *)(pkt->pkt_scbp); 1979 uint32_t sense_data_size; 1980 1981 pkt->pkt_state |= STATE_ARQ_DONE; 1982 1983 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1984 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1985 arqstat->sts_rqpkt_resid = 0; 1986 arqstat->sts_rqpkt_state = 1987 STATE_GOT_BUS | 1988 STATE_GOT_TARGET | 1989 STATE_SENT_CMD | 1990 STATE_XFERRED_DATA; 1991 arqstat->sts_rqpkt_statistics = 0; 1992 1993 sense_data_size = ddi_get32(acc, 1994 &resp->sense_data_size); 1995 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE); 1996 AACDB_PRINT(softs, CE_NOTE, 1997 "CHECK CONDITION: sense len=%d, xfer len=%d", 1998 sense_data_size, data_xfer_length); 1999 2000 if (sense_data_size > SENSE_LENGTH) 2001 sense_data_size = SENSE_LENGTH; 2002 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata, 2003 (uint8_t *)resp->sense_data, sense_data_size, 2004 DDI_DEV_AUTOINCR); 2005 } else { 2006 AACDB_PRINT(softs, CE_WARN, "invaild scsi status: " \ 2007 "scsi_status=%d, srb_status=%d", 2008 scsi_status, srb_status); 2009 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 2010 } 2011 } else { 2012 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d", 2013 resp_status); 2014 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 2015 } 2016 } 2017 2018 /* 2019 * Handle completed IOCTL command 2020 */ 2021 /*ARGSUSED*/ 2022 void 2023 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2024 { 2025 struct aac_slot *slotp = acp->slotp; 2026 2027 /* 2028 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb() 2029 * may wait on softs->event, so use cv_broadcast() instead 2030 * of cv_signal(). 
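 * The completed response FIB is copied back into acp->fibp below,
 * sized from its header, so the waiting thread can consume it.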
2031 */ 2032 ASSERT(acp->flags & AAC_CMD_SYNC); 2033 ASSERT(acp->flags & AAC_CMD_NO_CB); 2034 2035 /* Get the size of the response FIB from its FIB.Header.Size field */ 2036 acp->fib_size = ddi_get16(slotp->fib_acc_handle, 2037 &slotp->fibp->Header.Size); 2038 2039 ASSERT(acp->fib_size <= softs->aac_max_fib_size); 2040 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp, 2041 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR); 2042 } 2043 2044 /* 2045 * Handle completed sync fib command 2046 */ 2047 /*ARGSUSED*/ 2048 void 2049 aac_sync_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2050 { 2051 } 2052 2053 /* 2054 * Handle completed Flush command 2055 */ 2056 /*ARGSUSED*/ 2057 static void 2058 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2059 { 2060 struct aac_slot *slotp = acp->slotp; 2061 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2062 struct aac_synchronize_reply *resp; 2063 uint32_t status; 2064 2065 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2066 2067 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2068 2069 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0]; 2070 status = ddi_get32(acc, &resp->Status); 2071 if (status != CT_OK) 2072 aac_set_arq_data_hwerr(acp); 2073 } 2074 2075 /*ARGSUSED*/ 2076 static void 2077 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2078 { 2079 struct aac_slot *slotp = acp->slotp; 2080 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2081 struct aac_Container_resp *resp; 2082 uint32_t status; 2083 2084 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2085 2086 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2087 2088 resp = (struct aac_Container_resp *)&slotp->fibp->data[0]; 2089 status = ddi_get32(acc, &resp->Status); 2090 if (status != 0) { 2091 AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit"); 2092 aac_set_arq_data_hwerr(acp); 2093 } 2094 } 2095 2096 /* 2097 * Access PCI space to see if the driver can support the card 2098 */ 2099 static int 2100 aac_check_card_type(struct aac_softstate *softs) 2101 { 2102 ddi_acc_handle_t pci_config_handle; 2103 int card_index; 2104 uint32_t pci_cmd; 2105 2106 /* Map pci configuration space */ 2107 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) != 2108 DDI_SUCCESS) { 2109 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space"); 2110 return (AACERR); 2111 } 2112 2113 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID); 2114 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID); 2115 softs->subvendid = pci_config_get16(pci_config_handle, 2116 PCI_CONF_SUBVENID); 2117 softs->subsysid = pci_config_get16(pci_config_handle, 2118 PCI_CONF_SUBSYSID); 2119 2120 card_index = 0; 2121 while (!CARD_IS_UNKNOWN(card_index)) { 2122 if ((aac_cards[card_index].vendor == softs->vendid) && 2123 (aac_cards[card_index].device == softs->devid) && 2124 (aac_cards[card_index].subvendor == softs->subvendid) && 2125 (aac_cards[card_index].subsys == softs->subsysid)) { 2126 break; 2127 } 2128 card_index++; 2129 } 2130 2131 softs->card = card_index; 2132 softs->hwif = aac_cards[card_index].hwif; 2133 2134 /* 2135 * Unknown aac card 2136 * do a generic match based on the VendorID and DeviceID to 2137 * support the new cards in the aac family 2138 */ 2139 if (CARD_IS_UNKNOWN(card_index)) { 2140 if (softs->vendid != 0x9005) { 2141 AACDB_PRINT(softs, CE_WARN, 2142 "Unknown vendor 0x%x", softs->vendid); 2143 goto error; 2144 } 2145 switch (softs->devid) { 2146 case 0x285: 2147 softs->hwif = AAC_HWIF_I960RX; 2148 break; 2149 case 0x286: 2150 
softs->hwif = AAC_HWIF_RKT; 2151 break; 2152 default: 2153 AACDB_PRINT(softs, CE_WARN, 2154 "Unknown device \"pci9005,%x\"", softs->devid); 2155 goto error; 2156 } 2157 } 2158 2159 /* Set hardware dependent interface */ 2160 switch (softs->hwif) { 2161 case AAC_HWIF_I960RX: 2162 softs->aac_if = aac_rx_interface; 2163 softs->map_size_min = AAC_MAP_SIZE_MIN_RX; 2164 break; 2165 case AAC_HWIF_RKT: 2166 softs->aac_if = aac_rkt_interface; 2167 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT; 2168 break; 2169 default: 2170 AACDB_PRINT(softs, CE_WARN, 2171 "Unknown hardware interface %d", softs->hwif); 2172 goto error; 2173 } 2174 2175 /* Set card names */ 2176 (void *)strncpy(softs->vendor_name, aac_cards[card_index].vid, 2177 AAC_VENDOR_LEN); 2178 (void *)strncpy(softs->product_name, aac_cards[card_index].desc, 2179 AAC_PRODUCT_LEN); 2180 2181 /* Set up quirks */ 2182 softs->flags = aac_cards[card_index].quirks; 2183 2184 /* Force the busmaster enable bit on */ 2185 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2186 if ((pci_cmd & PCI_COMM_ME) == 0) { 2187 pci_cmd |= PCI_COMM_ME; 2188 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd); 2189 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2190 if ((pci_cmd & PCI_COMM_ME) == 0) { 2191 cmn_err(CE_CONT, "?Cannot enable busmaster bit"); 2192 goto error; 2193 } 2194 } 2195 2196 /* Set memory base to map */ 2197 softs->pci_mem_base_paddr = 0xfffffff0UL & \ 2198 pci_config_get32(pci_config_handle, PCI_CONF_BASE0); 2199 2200 pci_config_teardown(&pci_config_handle); 2201 2202 return (AACOK); /* card type detected */ 2203 error: 2204 pci_config_teardown(&pci_config_handle); 2205 return (AACERR); /* no matched card found */ 2206 } 2207 2208 /* 2209 * Do the usual interrupt handler setup stuff. 2210 */ 2211 static int 2212 aac_register_intrs(struct aac_softstate *softs) 2213 { 2214 dev_info_t *dip; 2215 int intr_types; 2216 2217 ASSERT(softs->devinfo_p); 2218 dip = softs->devinfo_p; 2219 2220 /* Get the type of device intrrupts */ 2221 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) { 2222 AACDB_PRINT(softs, CE_WARN, 2223 "ddi_intr_get_supported_types() failed"); 2224 return (AACERR); 2225 } 2226 AACDB_PRINT(softs, CE_NOTE, 2227 "ddi_intr_get_supported_types() ret: 0x%x", intr_types); 2228 2229 /* Query interrupt, and alloc/init all needed struct */ 2230 if (intr_types & DDI_INTR_TYPE_MSI) { 2231 if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI) 2232 != DDI_SUCCESS) { 2233 AACDB_PRINT(softs, CE_WARN, 2234 "MSI interrupt query failed"); 2235 return (AACERR); 2236 } 2237 softs->intr_type = DDI_INTR_TYPE_MSI; 2238 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 2239 if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED) 2240 != DDI_SUCCESS) { 2241 AACDB_PRINT(softs, CE_WARN, 2242 "FIXED interrupt query failed"); 2243 return (AACERR); 2244 } 2245 softs->intr_type = DDI_INTR_TYPE_FIXED; 2246 } else { 2247 AACDB_PRINT(softs, CE_WARN, 2248 "Device cannot suppport both FIXED and MSI interrupts"); 2249 return (AACERR); 2250 } 2251 2252 /* Connect interrupt handlers */ 2253 if (aac_add_intrs(softs) != DDI_SUCCESS) { 2254 AACDB_PRINT(softs, CE_WARN, 2255 "Interrupt registration failed, intr type: %s", 2256 softs->intr_type == DDI_INTR_TYPE_MSI ? 
"MSI" : "FIXED"); 2257 return (AACERR); 2258 } 2259 (void) aac_enable_intrs(softs); 2260 2261 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id, 2262 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) { 2263 AACDB_PRINT(softs, CE_WARN, 2264 "Can not setup soft interrupt handler!"); 2265 aac_remove_intrs(softs); 2266 return (AACERR); 2267 } 2268 2269 return (AACOK); 2270 } 2271 2272 static void 2273 aac_unregister_intrs(struct aac_softstate *softs) 2274 { 2275 aac_remove_intrs(softs); 2276 ddi_remove_softintr(softs->softint_id); 2277 } 2278 2279 /* 2280 * Check the firmware to determine the features to support and the FIB 2281 * parameters to use. 2282 */ 2283 static int 2284 aac_check_firmware(struct aac_softstate *softs) 2285 { 2286 uint32_t options; 2287 uint32_t atu_size; 2288 ddi_acc_handle_t pci_handle; 2289 uint8_t *data; 2290 uint32_t max_fibs; 2291 uint32_t max_fib_size; 2292 uint32_t sg_tablesize; 2293 uint32_t max_sectors; 2294 uint32_t status; 2295 2296 /* Get supported options */ 2297 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0, 2298 &status)) != AACOK) { 2299 if (status != SRB_STATUS_INVALID_REQUEST) { 2300 cmn_err(CE_CONT, 2301 "?Fatal error: request adapter info error"); 2302 return (AACERR); 2303 } 2304 options = 0; 2305 atu_size = 0; 2306 } else { 2307 options = AAC_MAILBOX_GET(softs, 1); 2308 atu_size = AAC_MAILBOX_GET(softs, 2); 2309 } 2310 2311 if (softs->state & AAC_STATE_RESET) { 2312 if ((softs->support_opt == options) && 2313 (softs->atu_size == atu_size)) 2314 return (AACOK); 2315 2316 cmn_err(CE_WARN, 2317 "?Fatal error: firmware changed, system needs reboot"); 2318 return (AACERR); 2319 } 2320 2321 /* 2322 * The following critical settings are initialized only once during 2323 * driver attachment. 2324 */ 2325 softs->support_opt = options; 2326 softs->atu_size = atu_size; 2327 2328 /* Process supported options */ 2329 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 2330 (softs->flags & AAC_FLAGS_NO4GB) == 0) { 2331 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window"); 2332 softs->flags |= AAC_FLAGS_4GB_WINDOW; 2333 } else { 2334 /* 2335 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space 2336 * only. IO is handled by the DMA engine which does not suffer 2337 * from the ATU window programming workarounds necessary for 2338 * CPU copy operations. 
2339 */ 2340 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull; 2341 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull; 2342 } 2343 2344 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) { 2345 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address"); 2346 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 2347 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull; 2348 softs->flags |= AAC_FLAGS_SG_64BIT; 2349 } 2350 2351 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) { 2352 softs->flags |= AAC_FLAGS_ARRAY_64BIT; 2353 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size"); 2354 } 2355 2356 if (options & AAC_SUPPORTED_NONDASD) { 2357 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0, 2358 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) { 2359 if (strcmp((char *)data, "yes") == 0) { 2360 AACDB_PRINT(softs, CE_NOTE, 2361 "!Enable Non-DASD access"); 2362 softs->flags |= AAC_FLAGS_NONDASD; 2363 } 2364 ddi_prop_free(data); 2365 } 2366 } 2367 2368 /* Read preferred settings */ 2369 max_fib_size = 0; 2370 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF, 2371 0, 0, 0, 0, NULL)) == AACOK) { 2372 options = AAC_MAILBOX_GET(softs, 1); 2373 max_fib_size = (options & 0xffff); 2374 max_sectors = (options >> 16) << 1; 2375 options = AAC_MAILBOX_GET(softs, 2); 2376 sg_tablesize = (options >> 16); 2377 options = AAC_MAILBOX_GET(softs, 3); 2378 max_fibs = (options & 0xffff); 2379 } 2380 2381 /* Enable new comm. and rawio at the same time */ 2382 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) && 2383 (max_fib_size != 0)) { 2384 /* read out and save PCI MBR */ 2385 if ((atu_size > softs->map_size) && 2386 (ddi_regs_map_setup(softs->devinfo_p, 1, 2387 (caddr_t *)&data, 0, atu_size, &softs->reg_attr, 2388 &pci_handle) == DDI_SUCCESS)) { 2389 ddi_regs_map_free(&softs->pci_mem_handle); 2390 softs->pci_mem_handle = pci_handle; 2391 softs->pci_mem_base_vaddr = data; 2392 softs->map_size = atu_size; 2393 } 2394 if (atu_size == softs->map_size) { 2395 softs->flags |= AAC_FLAGS_NEW_COMM; 2396 AACDB_PRINT(softs, CE_NOTE, 2397 "!Enable New Comm. interface"); 2398 } 2399 } 2400 2401 /* Set FIB parameters */ 2402 if (softs->flags & AAC_FLAGS_NEW_COMM) { 2403 softs->aac_max_fibs = max_fibs; 2404 softs->aac_max_fib_size = max_fib_size; 2405 softs->aac_max_sectors = max_sectors; 2406 softs->aac_sg_tablesize = sg_tablesize; 2407 2408 softs->flags |= AAC_FLAGS_RAW_IO; 2409 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO"); 2410 } else { 2411 softs->aac_max_fibs = 2412 (softs->flags & AAC_FLAGS_256FIBS) ? 
256 : 512; 2413 softs->aac_max_fib_size = AAC_FIB_SIZE; 2414 softs->aac_max_sectors = 128; /* 64K */ 2415 if (softs->flags & AAC_FLAGS_17SG) 2416 softs->aac_sg_tablesize = 17; 2417 else if (softs->flags & AAC_FLAGS_34SG) 2418 softs->aac_sg_tablesize = 34; 2419 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2420 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2421 sizeof (struct aac_blockwrite64) + 2422 sizeof (struct aac_sg_entry64)) / 2423 sizeof (struct aac_sg_entry64); 2424 else 2425 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2426 sizeof (struct aac_blockwrite) + 2427 sizeof (struct aac_sg_entry)) / 2428 sizeof (struct aac_sg_entry); 2429 } 2430 2431 if ((softs->flags & AAC_FLAGS_RAW_IO) && 2432 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) { 2433 softs->flags |= AAC_FLAGS_LBA_64BIT; 2434 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array"); 2435 } 2436 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize; 2437 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9; 2438 /* 2439 * 64K maximum segment size in scatter gather list is controlled by 2440 * the NEW_COMM bit in the adapter information. If not set, the card 2441 * can only accept a maximum of 64K. It is not recommended to permit 2442 * more than 128KB of total transfer size to the adapters because 2443 * performance is negatively impacted. 2444 * 2445 * For new comm, segment size equals max xfer size. For old comm, 2446 * we use 64K for both. 2447 */ 2448 softs->buf_dma_attr.dma_attr_count_max = 2449 softs->buf_dma_attr.dma_attr_maxxfer - 1; 2450 2451 /* Setup FIB operations */ 2452 if (softs->flags & AAC_FLAGS_RAW_IO) 2453 softs->aac_cmd_fib = aac_cmd_fib_rawio; 2454 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2455 softs->aac_cmd_fib = aac_cmd_fib_brw64; 2456 else 2457 softs->aac_cmd_fib = aac_cmd_fib_brw; 2458 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? 
\ 2459 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32; 2460 2461 /* 64-bit LBA needs descriptor format sense data */ 2462 softs->slen = sizeof (struct scsi_arq_status); 2463 if ((softs->flags & AAC_FLAGS_LBA_64BIT) && 2464 softs->slen < AAC_ARQ64_LENGTH) 2465 softs->slen = AAC_ARQ64_LENGTH; 2466 2467 AACDB_PRINT(softs, CE_NOTE, 2468 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d", 2469 softs->aac_max_fibs, softs->aac_max_fib_size, 2470 softs->aac_max_sectors, softs->aac_sg_tablesize); 2471 2472 return (AACOK); 2473 } 2474 2475 static void 2476 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0, 2477 struct FsaRev *fsarev1) 2478 { 2479 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 2480 2481 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash); 2482 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type); 2483 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor); 2484 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major); 2485 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber); 2486 } 2487 2488 /* 2489 * The following function comes from Adaptec: 2490 * 2491 * Query adapter information and supplement adapter information 2492 */ 2493 static int 2494 aac_get_adapter_info(struct aac_softstate *softs, 2495 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr) 2496 { 2497 struct aac_cmd *acp = &softs->sync_ac; 2498 ddi_acc_handle_t acc; 2499 struct aac_fib *fibp; 2500 struct aac_adapter_info *ainfp; 2501 struct aac_supplement_adapter_info *sinfp; 2502 int rval; 2503 2504 (void) aac_sync_fib_slot_bind(softs, acp); 2505 acc = acp->slotp->fib_acc_handle; 2506 fibp = acp->slotp->fibp; 2507 2508 ddi_put8(acc, &fibp->data[0], 0); 2509 if (aac_sync_fib(softs, RequestAdapterInfo, 2510 sizeof (struct aac_fib_header)) != AACOK) { 2511 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed"); 2512 rval = AACERR; 2513 goto finish; 2514 } 2515 ainfp = (struct aac_adapter_info *)fibp->data; 2516 if (ainfr) { 2517 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2518 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase); 2519 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture); 2520 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant); 2521 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed); 2522 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem); 2523 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem); 2524 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem); 2525 aac_fsa_rev(softs, &ainfp->KernelRevision, 2526 &ainfr->KernelRevision); 2527 aac_fsa_rev(softs, &ainfp->MonitorRevision, 2528 &ainfr->MonitorRevision); 2529 aac_fsa_rev(softs, &ainfp->HardwareRevision, 2530 &ainfr->HardwareRevision); 2531 aac_fsa_rev(softs, &ainfp->BIOSRevision, 2532 &ainfr->BIOSRevision); 2533 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled); 2534 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask); 2535 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber); 2536 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform); 2537 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2538 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant); 2539 } 2540 if (sinfr) { 2541 if (!(softs->support_opt & 2542 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) { 2543 AACDB_PRINT(softs, CE_WARN, 2544 "SupplementAdapterInfo not supported"); 2545 rval = AACERR; 2546 goto finish; 2547 } 2548 ddi_put8(acc, &fibp->data[0], 0); 2549 if (aac_sync_fib(softs, RequestSupplementAdapterInfo, 2550 sizeof (struct aac_fib_header)) != AACOK) { 2551 AACDB_PRINT(softs, CE_WARN, 2552 "RequestSupplementAdapterInfo failed"); 2553 rval = 
AACERR; 2554 goto finish; 2555 } 2556 sinfp = (struct aac_supplement_adapter_info *)fibp->data; 2557 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1); 2558 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2); 2559 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize); 2560 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId); 2561 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts); 2562 AAC_GET_FIELD32(acc, sinfr, sinfp, Version); 2563 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits); 2564 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber); 2565 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3); 2566 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12); 2567 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts); 2568 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo, 2569 sizeof (struct vpd_info)); 2570 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision, 2571 &sinfr->FlashFirmwareRevision); 2572 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions); 2573 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision, 2574 &sinfr->FlashFirmwareBootRevision); 2575 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo, 2576 MFG_PCBA_SERIAL_NUMBER_WIDTH); 2577 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0], 2578 MFG_WWN_WIDTH); 2579 AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2); 2580 AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag); 2581 if (sinfr->ExpansionFlag == 1) { 2582 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3); 2583 AAC_GET_FIELD32(acc, sinfr, sinfp, 2584 SupportedPerformanceMode); 2585 AAC_REP_GET_FIELD32(acc, sinfr, sinfp, 2586 ReservedGrowth[0], 80); 2587 } 2588 } 2589 rval = AACOK; 2590 finish: 2591 aac_sync_fib_slot_release(softs, acp); 2592 return (rval); 2593 } 2594 2595 static int 2596 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max, 2597 uint32_t *tgt_max) 2598 { 2599 struct aac_cmd *acp = &softs->sync_ac; 2600 ddi_acc_handle_t acc; 2601 struct aac_fib *fibp; 2602 struct aac_ctcfg *c_cmd; 2603 struct aac_ctcfg_resp *c_resp; 2604 uint32_t scsi_method_id; 2605 struct aac_bus_info *cmd; 2606 struct aac_bus_info_response *resp; 2607 int rval; 2608 2609 (void) aac_sync_fib_slot_bind(softs, acp); 2610 acc = acp->slotp->fib_acc_handle; 2611 fibp = acp->slotp->fibp; 2612 2613 /* Detect MethodId */ 2614 c_cmd = (struct aac_ctcfg *)&fibp->data[0]; 2615 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig); 2616 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD); 2617 ddi_put32(acc, &c_cmd->param, 0); 2618 rval = aac_sync_fib(softs, ContainerCommand, 2619 AAC_FIB_SIZEOF(struct aac_ctcfg)); 2620 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0]; 2621 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) { 2622 AACDB_PRINT(softs, CE_WARN, 2623 "VM_ContainerConfig command fail"); 2624 rval = AACERR; 2625 goto finish; 2626 } 2627 scsi_method_id = ddi_get32(acc, &c_resp->param); 2628 2629 /* Detect phys. bus count and max. target id first */ 2630 cmd = (struct aac_bus_info *)&fibp->data[0]; 2631 ddi_put32(acc, &cmd->Command, VM_Ioctl); 2632 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */ 2633 ddi_put32(acc, &cmd->MethodId, scsi_method_id); 2634 ddi_put32(acc, &cmd->ObjectId, 0); 2635 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo); 2636 /* 2637 * For VM_Ioctl, the firmware uses the Header.Size filled from the 2638 * driver as the size to be returned. Therefore the driver has to use 2639 * sizeof (struct aac_bus_info_response) because it is greater than 2640 * sizeof (struct aac_bus_info). 
 */
	rval = aac_sync_fib(softs, ContainerCommand,
	    AAC_FIB_SIZEOF(struct aac_bus_info_response));
	resp = (struct aac_bus_info_response *)cmd;

	/* Scan all coordinates with INQUIRY */
	if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) {
		AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail");
		rval = AACERR;
		goto finish;
	}
	*bus_max = ddi_get32(acc, &resp->BusCount);
	*tgt_max = ddi_get32(acc, &resp->TargetsPerBus);

finish:
	aac_sync_fib_slot_release(softs, acp);
	return (rval);
}

/*
 * The following function comes from Adaptec:
 *
 * Routine to be called during initialization of communications with
 * the adapter to handle possible adapter configuration issues. When
 * the adapter first boots up, it examines attached drives, etc., and
 * potentially comes up with a new or revised configuration (relative to
 * what's stored in its NVRAM). Additionally it may discover problems
 * that make the current physical configuration unworkable (currently
 * applicable only to cluster configuration issues).
 *
 * If there are no configuration issues, or the issues are considered
 * trivial by the adapter, it will set its configuration status to
 * "FSACT_CONTINUE" and execute the "commit configuration" action
 * automatically on its own.
 *
 * However, if there are non-trivial issues, the adapter will set its
 * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT"
 * and wait for some agent on the host to issue the "\ContainerCommand
 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
 * adapter to commit the new/updated configuration and enable
 * uninhibited operation. The host agent should first issue the
 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
 * command to obtain information about config issues detected by
 * the adapter.
 *
 * Normally the adapter's PC BIOS will execute on the host following
 * adapter power-on and reset, and will be responsible for querying the
 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
 * command if appropriate.
 *
 * However, with the introduction of IOP reset support, the adapter may
 * boot up without the benefit of the adapter's PC BIOS host agent.
 * This routine is intended to take care of these issues in situations
 * where BIOS doesn't execute following adapter power-on or reset. The
 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
 * there is no harm in doing this when it's already been done.
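 *
 * Returns AACMPE_OK when the configuration has been committed (or no
 * commit was necessary), AACMPE_COMMIT_CONFIG when the commit attempt
 * failed, and AACMPE_CONFIG_STATUS when the reported issues are too
 * risky to auto-commit or the config status query itself failed.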
2697 */ 2698 static int 2699 aac_handle_adapter_config_issues(struct aac_softstate *softs) 2700 { 2701 struct aac_cmd *acp = &softs->sync_ac; 2702 ddi_acc_handle_t acc; 2703 struct aac_fib *fibp; 2704 struct aac_Container *cmd; 2705 struct aac_Container_resp *resp; 2706 struct aac_cf_status_header *cfg_sts_hdr; 2707 uint32_t resp_status; 2708 uint32_t ct_status; 2709 uint32_t cfg_stat_action; 2710 int rval; 2711 2712 (void) aac_sync_fib_slot_bind(softs, acp); 2713 acc = acp->slotp->fib_acc_handle; 2714 fibp = acp->slotp->fibp; 2715 2716 /* Get adapter config status */ 2717 cmd = (struct aac_Container *)&fibp->data[0]; 2718 2719 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2720 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2721 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS); 2722 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE], 2723 sizeof (struct aac_cf_status_header)); 2724 rval = aac_sync_fib(softs, ContainerCommand, 2725 AAC_FIB_SIZEOF(struct aac_Container)); 2726 resp = (struct aac_Container_resp *)cmd; 2727 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data; 2728 2729 resp_status = ddi_get32(acc, &resp->Status); 2730 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2731 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) { 2732 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action); 2733 2734 /* Commit configuration if it's reasonable to do so. */ 2735 if (cfg_stat_action <= CFACT_PAUSE) { 2736 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2737 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2738 ddi_put32(acc, &cmd->CTCommand.command, 2739 CT_COMMIT_CONFIG); 2740 rval = aac_sync_fib(softs, ContainerCommand, 2741 AAC_FIB_SIZEOF(struct aac_Container)); 2742 2743 resp_status = ddi_get32(acc, &resp->Status); 2744 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2745 if ((rval == AACOK) && (resp_status == 0) && 2746 (ct_status == CT_OK)) 2747 /* Successful completion */ 2748 rval = AACMPE_OK; 2749 else 2750 /* Auto-commit aborted due to error(s). */ 2751 rval = AACMPE_COMMIT_CONFIG; 2752 } else { 2753 /* 2754 * Auto-commit aborted due to adapter indicating 2755 * configuration issue(s) too dangerous to auto-commit. 2756 */ 2757 rval = AACMPE_CONFIG_STATUS; 2758 } 2759 } else { 2760 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted"); 2761 rval = AACMPE_CONFIG_STATUS; 2762 } 2763 2764 aac_sync_fib_slot_release(softs, acp); 2765 return (rval); 2766 } 2767 2768 /* 2769 * Hardware initialization and resource allocation 2770 */ 2771 static int 2772 aac_common_attach(struct aac_softstate *softs) 2773 { 2774 uint32_t status; 2775 int i; 2776 2777 DBCALLED(softs, 1); 2778 2779 /* 2780 * Do a little check here to make sure there aren't any outstanding 2781 * FIBs in the message queue. At this point there should not be and 2782 * if there are they are probably left over from another instance of 2783 * the driver like when the system crashes and the crash dump driver 2784 * gets loaded. 2785 */ 2786 while (AAC_OUTB_GET(softs) != 0xfffffffful) 2787 ; 2788 2789 /* 2790 * Wait the card to complete booting up before do anything that 2791 * attempts to communicate with it. 
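 * The firmware status register is polled for AAC_KERNEL_UP_AND_RUNNING,
 * giving up after AAC_FWUP_TIMEOUT seconds.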
2792 */ 2793 status = AAC_FWSTATUS_GET(softs); 2794 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC) 2795 goto error; 2796 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */ 2797 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i); 2798 if (i == 0) { 2799 cmn_err(CE_CONT, "?Fatal error: controller not ready"); 2800 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2801 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2802 goto error; 2803 } 2804 2805 /* Read and set card supported options and settings */ 2806 if (aac_check_firmware(softs) == AACERR) { 2807 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2808 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2809 goto error; 2810 } 2811 2812 /* Add interrupt handlers */ 2813 if (aac_register_intrs(softs) == AACERR) { 2814 cmn_err(CE_CONT, 2815 "?Fatal error: interrupts register failed"); 2816 goto error; 2817 } 2818 2819 /* Setup communication space with the card */ 2820 if (softs->comm_space_dma_handle == NULL) { 2821 if (aac_alloc_comm_space(softs) != AACOK) 2822 goto error; 2823 } 2824 if (aac_setup_comm_space(softs) != AACOK) { 2825 cmn_err(CE_CONT, "?Setup communication space failed"); 2826 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2827 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2828 goto error; 2829 } 2830 2831 #ifdef DEBUG 2832 if (aac_get_fw_debug_buffer(softs) != AACOK) 2833 cmn_err(CE_CONT, "?firmware UART trace not supported"); 2834 #endif 2835 2836 /* Allocate slots */ 2837 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) { 2838 cmn_err(CE_CONT, "?Fatal error: slots allocate failed"); 2839 goto error; 2840 } 2841 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots); 2842 2843 /* Allocate FIBs */ 2844 if (softs->total_fibs < softs->total_slots) { 2845 aac_alloc_fibs(softs); 2846 if (softs->total_fibs == 0) 2847 goto error; 2848 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated", 2849 softs->total_fibs); 2850 } 2851 2852 AAC_STATUS_CLR(softs, ~0); /* Clear out all interrupts */ 2853 AAC_ENABLE_INTR(softs); /* Enable the interrupts we can handle */ 2854 2855 /* Get adapter names */ 2856 if (CARD_IS_UNKNOWN(softs->card)) { 2857 struct aac_supplement_adapter_info sinf; 2858 2859 if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) { 2860 cmn_err(CE_CONT, "?Query adapter information failed"); 2861 } else { 2862 softs->feature_bits = sinf.FeatureBits; 2863 softs->support_opt2 = sinf.SupportedOptions2; 2864 2865 char *p, *p0, *p1; 2866 2867 /* 2868 * Now find the controller name in supp_adapter_info-> 2869 * AdapterTypeText. Use the first word as the vendor 2870 * and the other words as the product name. 
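 * Leading whitespace, the separator between the two parts, and any
 * trailing whitespace are stripped along the way.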
2871 */ 2872 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = " 2873 "\"%s\"", sinf.AdapterTypeText); 2874 p = sinf.AdapterTypeText; 2875 p0 = p1 = NULL; 2876 /* Skip heading spaces */ 2877 while (*p && (*p == ' ' || *p == '\t')) 2878 p++; 2879 p0 = p; 2880 while (*p && (*p != ' ' && *p != '\t')) 2881 p++; 2882 /* Remove middle spaces */ 2883 while (*p && (*p == ' ' || *p == '\t')) 2884 *p++ = 0; 2885 p1 = p; 2886 /* Remove trailing spaces */ 2887 p = p1 + strlen(p1) - 1; 2888 while (p > p1 && (*p == ' ' || *p == '\t')) 2889 *p-- = 0; 2890 if (*p0 && *p1) { 2891 (void *)strncpy(softs->vendor_name, p0, 2892 AAC_VENDOR_LEN); 2893 (void *)strncpy(softs->product_name, p1, 2894 AAC_PRODUCT_LEN); 2895 } else { 2896 cmn_err(CE_WARN, 2897 "?adapter name mis-formatted\n"); 2898 if (*p0) 2899 (void *)strncpy(softs->product_name, 2900 p0, AAC_PRODUCT_LEN); 2901 } 2902 } 2903 } 2904 2905 cmn_err(CE_NOTE, 2906 "!aac driver %d.%02d.%02d-%d, found card: " \ 2907 "%s %s(pci0x%x.%x.%x.%x) at 0x%x", 2908 AAC_DRIVER_MAJOR_VERSION, 2909 AAC_DRIVER_MINOR_VERSION, 2910 AAC_DRIVER_BUGFIX_LEVEL, 2911 AAC_DRIVER_BUILD, 2912 softs->vendor_name, softs->product_name, 2913 softs->vendid, softs->devid, softs->subvendid, softs->subsysid, 2914 softs->pci_mem_base_paddr); 2915 2916 /* Perform acceptance of adapter-detected config changes if possible */ 2917 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) { 2918 cmn_err(CE_CONT, "?Handle adapter config issues failed"); 2919 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2920 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2921 goto error; 2922 } 2923 2924 /* Setup containers (logical devices) */ 2925 if (aac_probe_containers(softs) != AACOK) { 2926 cmn_err(CE_CONT, "?Fatal error: get container info error"); 2927 goto error; 2928 } 2929 2930 /* Setup phys. 
devices */ 2931 if (softs->flags & AAC_FLAGS_NONDASD) { 2932 uint32_t bus_max, tgt_max; 2933 uint32_t bus, tgt; 2934 int index; 2935 2936 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) { 2937 cmn_err(CE_CONT, "?Fatal error: get bus info error"); 2938 goto error; 2939 } 2940 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d", 2941 bus_max, tgt_max); 2942 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) { 2943 if (softs->state & AAC_STATE_RESET) { 2944 cmn_err(CE_WARN, 2945 "?Fatal error: bus map changed"); 2946 goto error; 2947 } 2948 softs->bus_max = bus_max; 2949 softs->tgt_max = tgt_max; 2950 if (softs->nondasds) { 2951 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2952 sizeof (struct aac_nondasd)); 2953 } 2954 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \ 2955 sizeof (struct aac_nondasd), KM_SLEEP); 2956 2957 index = 0; 2958 for (bus = 0; bus < softs->bus_max; bus++) { 2959 for (tgt = 0; tgt < softs->tgt_max; tgt++) { 2960 struct aac_nondasd *dvp = 2961 &softs->nondasds[index++]; 2962 dvp->dev.type = AAC_DEV_PD; 2963 dvp->bus = bus; 2964 dvp->tid = tgt; 2965 } 2966 } 2967 } 2968 } 2969 2970 /* Check dma & acc handles allocated in attach */ 2971 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) { 2972 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2973 goto error; 2974 } 2975 2976 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 2977 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2978 goto error; 2979 } 2980 2981 for (i = 0; i < softs->total_slots; i++) { 2982 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) != 2983 DDI_SUCCESS) { 2984 ddi_fm_service_impact(softs->devinfo_p, 2985 DDI_SERVICE_LOST); 2986 goto error; 2987 } 2988 } 2989 2990 return (AACOK); 2991 error: 2992 if (softs->state & AAC_STATE_RESET) 2993 return (AACERR); 2994 if (softs->nondasds) { 2995 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2996 sizeof (struct aac_nondasd)); 2997 softs->nondasds = NULL; 2998 } 2999 if (softs->total_fibs > 0) 3000 aac_destroy_fibs(softs); 3001 if (softs->total_slots > 0) 3002 aac_destroy_slots(softs); 3003 if (softs->comm_space_dma_handle) 3004 aac_free_comm_space(softs); 3005 return (AACERR); 3006 } 3007 3008 /* 3009 * Hardware shutdown and resource release 3010 */ 3011 static void 3012 aac_common_detach(struct aac_softstate *softs) 3013 { 3014 DBCALLED(softs, 1); 3015 3016 aac_unregister_intrs(softs); 3017 3018 mutex_enter(&softs->io_lock); 3019 (void) aac_shutdown(softs); 3020 3021 if (softs->nondasds) { 3022 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 3023 sizeof (struct aac_nondasd)); 3024 softs->nondasds = NULL; 3025 } 3026 aac_destroy_fibs(softs); 3027 aac_destroy_slots(softs); 3028 aac_free_comm_space(softs); 3029 mutex_exit(&softs->io_lock); 3030 } 3031 3032 /* 3033 * Send a synchronous command to the controller and wait for a result. 3034 * Indicate if the controller completed the command with an error status. 
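 *
 * The four arguments are written to the adapter mailbox registers, the
 * AAC_DB_SYNC_COMMAND doorbell is rung, and the doorbell status is then
 * polled for up to AAC_IMMEDIATE_TIMEOUT seconds.  The command status
 * is read back from mailbox register 0 on completion.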
3035 */ 3036 int 3037 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd, 3038 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, 3039 uint32_t *statusp) 3040 { 3041 int timeout; 3042 uint32_t status; 3043 3044 if (statusp != NULL) 3045 *statusp = SRB_STATUS_SUCCESS; 3046 3047 /* Fill in mailbox */ 3048 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3); 3049 3050 /* Ensure the sync command doorbell flag is cleared */ 3051 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 3052 3053 /* Then set it to signal the adapter */ 3054 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND); 3055 3056 /* Spin waiting for the command to complete */ 3057 timeout = AAC_IMMEDIATE_TIMEOUT * 1000; 3058 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout); 3059 if (!timeout) { 3060 AACDB_PRINT(softs, CE_WARN, 3061 "Sync command timed out after %d seconds (0x%x)!", 3062 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs)); 3063 return (AACERR); 3064 } 3065 3066 /* Clear the completion flag */ 3067 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 3068 3069 /* Get the command status */ 3070 status = AAC_MAILBOX_GET(softs, 0); 3071 if (statusp != NULL) 3072 *statusp = status; 3073 if (status != SRB_STATUS_SUCCESS) { 3074 AACDB_PRINT(softs, CE_WARN, 3075 "Sync command fail: status = 0x%x", status); 3076 return (AACERR); 3077 } 3078 3079 return (AACOK); 3080 } 3081 3082 /* 3083 * Send a synchronous FIB to the adapter and wait for its completion 3084 */ 3085 static int 3086 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize) 3087 { 3088 struct aac_cmd *acp = &softs->sync_ac; 3089 3090 acp->flags = AAC_CMD_SYNC | AAC_CMD_IN_SYNC_SLOT; 3091 if (softs->state & AAC_STATE_INTR) 3092 acp->flags |= AAC_CMD_NO_CB; 3093 else 3094 acp->flags |= AAC_CMD_NO_INTR; 3095 3096 acp->ac_comp = aac_sync_complete; 3097 acp->timeout = AAC_SYNC_TIMEOUT; 3098 acp->fib_size = fibsize; 3099 3100 /* 3101 * Only need to setup sync fib header, caller should have init 3102 * fib data 3103 */ 3104 aac_cmd_fib_header(softs, acp, cmd); 3105 3106 (void) ddi_dma_sync(acp->slotp->fib_dma_handle, 0, fibsize, 3107 DDI_DMA_SYNC_FORDEV); 3108 3109 aac_start_io(softs, acp); 3110 3111 if (softs->state & AAC_STATE_INTR) 3112 return (aac_do_sync_io(softs, acp)); 3113 else 3114 return (aac_do_poll_io(softs, acp)); 3115 } 3116 3117 static void 3118 aac_cmd_initq(struct aac_cmd_queue *q) 3119 { 3120 q->q_head = NULL; 3121 q->q_tail = (struct aac_cmd *)&q->q_head; 3122 } 3123 3124 /* 3125 * Remove a cmd from the head of q 3126 */ 3127 static struct aac_cmd * 3128 aac_cmd_dequeue(struct aac_cmd_queue *q) 3129 { 3130 struct aac_cmd *acp; 3131 3132 _NOTE(ASSUMING_PROTECTED(*q)) 3133 3134 if ((acp = q->q_head) != NULL) { 3135 if ((q->q_head = acp->next) != NULL) 3136 acp->next = NULL; 3137 else 3138 q->q_tail = (struct aac_cmd *)&q->q_head; 3139 acp->prev = NULL; 3140 } 3141 return (acp); 3142 } 3143 3144 /* 3145 * Add a cmd to the tail of q 3146 */ 3147 static void 3148 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp) 3149 { 3150 ASSERT(acp->next == NULL); 3151 acp->prev = q->q_tail; 3152 q->q_tail->next = acp; 3153 q->q_tail = acp; 3154 } 3155 3156 /* 3157 * Remove the cmd ac from q 3158 */ 3159 static void 3160 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp) 3161 { 3162 if (acp->prev) { 3163 if ((acp->prev->next = acp->next) != NULL) { 3164 acp->next->prev = acp->prev; 3165 acp->next = NULL; 3166 } else { 3167 q->q_tail = acp->prev; 3168 } 3169 acp->prev = NULL; 3170 } 3171 /* ac is not in the queue */ 3172 } 
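
/*
 * Note on the aac_cmd queues above: each queue keeps head and tail
 * pointers, with q_tail of an empty queue pointing back at its q_head,
 * so aac_cmd_enqueue() needs no special case for an empty queue.  These
 * routines do no locking themselves; callers hold the appropriate lock
 * (e.g. q_comp_mutex when manipulating q_comp).
 */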
3173 3174 /* 3175 * Atomically insert an entry into the nominated queue, returns 0 on success or 3176 * AACERR if the queue is full. 3177 * 3178 * Note: it would be more efficient to defer notifying the controller in 3179 * the case where we may be inserting several entries in rapid succession, 3180 * but implementing this usefully may be difficult (it would involve a 3181 * separate queue/notify interface). 3182 */ 3183 static int 3184 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr, 3185 uint32_t fib_size) 3186 { 3187 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3188 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3189 uint32_t pi, ci; 3190 3191 DBCALLED(softs, 2); 3192 3193 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q); 3194 3195 /* Get the producer/consumer indices */ 3196 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3197 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3198 DDI_DMA_SYNC_FORCPU); 3199 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3200 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3201 return (AACERR); 3202 } 3203 3204 pi = ddi_get32(acc, 3205 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3206 ci = ddi_get32(acc, 3207 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3208 3209 /* 3210 * Wrap the queue first before we check the queue to see 3211 * if it is full 3212 */ 3213 if (pi >= aac_qinfo[queue].size) 3214 pi = 0; 3215 3216 /* XXX queue full */ 3217 if ((pi + 1) == ci) 3218 return (AACERR); 3219 3220 /* Fill in queue entry */ 3221 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size); 3222 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr); 3223 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3224 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3225 DDI_DMA_SYNC_FORDEV); 3226 3227 /* Update producer index */ 3228 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX], 3229 pi + 1); 3230 (void) ddi_dma_sync(dma, 3231 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \ 3232 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3233 DDI_DMA_SYNC_FORDEV); 3234 3235 if (aac_qinfo[queue].notify != 0) 3236 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3237 return (AACOK); 3238 } 3239 3240 /* 3241 * Atomically remove one entry from the nominated queue, returns 0 on 3242 * success or AACERR if the queue is empty. 
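 *
 * For response queues *idxp is set to the FIB address stored in the
 * queue entry; for host command queues it is converted to a FIB index
 * by dividing by AAC_FIB_SIZE.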
3243 */ 3244 static int 3245 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp) 3246 { 3247 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3248 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3249 uint32_t pi, ci; 3250 int unfull = 0; 3251 3252 DBCALLED(softs, 2); 3253 3254 ASSERT(idxp); 3255 3256 /* Get the producer/consumer indices */ 3257 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3258 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3259 DDI_DMA_SYNC_FORCPU); 3260 pi = ddi_get32(acc, 3261 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3262 ci = ddi_get32(acc, 3263 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3264 3265 /* Check for queue empty */ 3266 if (ci == pi) 3267 return (AACERR); 3268 3269 if (pi >= aac_qinfo[queue].size) 3270 pi = 0; 3271 3272 /* Check for queue full */ 3273 if (ci == pi + 1) 3274 unfull = 1; 3275 3276 /* 3277 * The controller does not wrap the queue, 3278 * so we have to do it by ourselves 3279 */ 3280 if (ci >= aac_qinfo[queue].size) 3281 ci = 0; 3282 3283 /* Fetch the entry */ 3284 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3285 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3286 DDI_DMA_SYNC_FORCPU); 3287 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3288 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3289 return (AACERR); 3290 } 3291 3292 switch (queue) { 3293 case AAC_HOST_NORM_RESP_Q: 3294 case AAC_HOST_HIGH_RESP_Q: 3295 *idxp = ddi_get32(acc, 3296 &(softs->qentries[queue] + ci)->aq_fib_addr); 3297 break; 3298 3299 case AAC_HOST_NORM_CMD_Q: 3300 case AAC_HOST_HIGH_CMD_Q: 3301 *idxp = ddi_get32(acc, 3302 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE; 3303 break; 3304 3305 default: 3306 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()"); 3307 return (AACERR); 3308 } 3309 3310 /* Update consumer index */ 3311 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX], 3312 ci + 1); 3313 (void) ddi_dma_sync(dma, 3314 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \ 3315 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3316 DDI_DMA_SYNC_FORDEV); 3317 3318 if (unfull && aac_qinfo[queue].notify != 0) 3319 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3320 return (AACOK); 3321 } 3322 3323 static struct aac_mntinforesp * 3324 aac_get_mntinfo(struct aac_softstate *softs, int cid) 3325 { 3326 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3327 struct aac_fib *fibp = softs->sync_ac.slotp->fibp; 3328 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0]; 3329 struct aac_mntinforesp *mir; 3330 3331 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */ 3332 (softs->flags & AAC_FLAGS_LBA_64BIT) ? 
3333 VM_NameServe64 : VM_NameServe); 3334 ddi_put32(acc, &mi->MntType, FT_FILESYS); 3335 ddi_put32(acc, &mi->MntCount, cid); 3336 3337 if (aac_sync_fib(softs, ContainerCommand, 3338 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) { 3339 AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid); 3340 return (NULL); 3341 } 3342 3343 mir = (struct aac_mntinforesp *)&fibp->data[0]; 3344 if (ddi_get32(acc, &mir->Status) == ST_OK) 3345 return (mir); 3346 return (NULL); 3347 } 3348 3349 static int 3350 aac_get_container_count(struct aac_softstate *softs, int *count) 3351 { 3352 ddi_acc_handle_t acc; 3353 struct aac_mntinforesp *mir; 3354 int rval; 3355 3356 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 3357 acc = softs->sync_ac.slotp->fib_acc_handle; 3358 3359 if ((mir = aac_get_mntinfo(softs, 0)) == NULL) { 3360 rval = AACERR; 3361 goto finish; 3362 } 3363 *count = ddi_get32(acc, &mir->MntRespCount); 3364 if (*count > AAC_MAX_LD) { 3365 AACDB_PRINT(softs, CE_CONT, 3366 "container count(%d) > AAC_MAX_LD", *count); 3367 rval = AACERR; 3368 goto finish; 3369 } 3370 rval = AACOK; 3371 3372 finish: 3373 aac_sync_fib_slot_release(softs, &softs->sync_ac); 3374 return (rval); 3375 } 3376 3377 static int 3378 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid) 3379 { 3380 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3381 struct aac_Container *ct = (struct aac_Container *) \ 3382 &softs->sync_ac.slotp->fibp->data[0]; 3383 3384 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE); 3385 ddi_put32(acc, &ct->Command, VM_ContainerConfig); 3386 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID); 3387 ddi_put32(acc, &ct->CTCommand.param[0], cid); 3388 3389 if (aac_sync_fib(softs, ContainerCommand, 3390 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR) 3391 return (AACERR); 3392 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK) 3393 return (AACERR); 3394 3395 *uid = ddi_get32(acc, &ct->CTCommand.param[1]); 3396 return (AACOK); 3397 } 3398 3399 /* 3400 * Request information of the container cid 3401 */ 3402 static struct aac_mntinforesp * 3403 aac_get_container_info(struct aac_softstate *softs, int cid) 3404 { 3405 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3406 struct aac_mntinforesp *mir; 3407 int rval_uid; 3408 uint32_t uid; 3409 3410 /* Get container UID first so that it will not overwrite mntinfo */ 3411 rval_uid = aac_get_container_uid(softs, cid, &uid); 3412 3413 /* Get container basic info */ 3414 if ((mir = aac_get_mntinfo(softs, cid)) == NULL) { 3415 AACDB_PRINT(softs, CE_CONT, 3416 "query container %d info failed", cid); 3417 return (NULL); 3418 } 3419 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) 3420 return (mir); 3421 if (rval_uid != AACOK) { 3422 AACDB_PRINT(softs, CE_CONT, 3423 "query container %d uid failed", cid); 3424 return (NULL); 3425 } 3426 3427 ddi_put32(acc, &mir->Status, uid); 3428 return (mir); 3429 } 3430 3431 static int 3432 aac_probe_container(struct aac_softstate *softs, uint32_t cid) 3433 { 3434 struct aac_container *dvp = &softs->containers[cid]; 3435 ddi_acc_handle_t acc; 3436 struct aac_mntinforesp *mir; 3437 uint64_t size; 3438 uint32_t uid; 3439 int rval; 3440 3441 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 3442 acc = softs->sync_ac.slotp->fib_acc_handle; 3443 3444 /* Get container basic info */ 3445 if ((mir = aac_get_container_info(softs, cid)) == NULL) { 3446 rval = AACERR; 3447 goto finish; 3448 } 3449 3450 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) { 3451 if 
(AAC_DEV_IS_VALID(&dvp->dev)) { 3452 AACDB_PRINT(softs, CE_NOTE, 3453 ">>> Container %d deleted", cid); 3454 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3455 (void) aac_dr_event(softs, dvp->cid, -1, 3456 AAC_EVT_OFFLINE); 3457 } 3458 } else { 3459 size = AAC_MIR_SIZE(softs, acc, mir); 3460 uid = ddi_get32(acc, &mir->Status); 3461 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3462 if (dvp->uid != uid) { 3463 AACDB_PRINT(softs, CE_WARN, 3464 ">>> Container %u uid changed to %d", 3465 cid, uid); 3466 dvp->uid = uid; 3467 } 3468 if (dvp->size != size) { 3469 AACDB_PRINT(softs, CE_NOTE, 3470 ">>> Container %u size changed to %"PRIu64, 3471 cid, size); 3472 dvp->size = size; 3473 } 3474 } else { /* Init new container */ 3475 AACDB_PRINT(softs, CE_NOTE, 3476 ">>> Container %d added: " \ 3477 "size=0x%x.%08x, type=%d, name=%s", 3478 cid, 3479 ddi_get32(acc, &mir->MntObj.CapacityHigh), 3480 ddi_get32(acc, &mir->MntObj.Capacity), 3481 ddi_get32(acc, &mir->MntObj.VolType), 3482 mir->MntObj.FileSystemName); 3483 dvp->dev.flags |= AAC_DFLAG_VALID; 3484 dvp->dev.type = AAC_DEV_LD; 3485 3486 dvp->cid = cid; 3487 dvp->uid = uid; 3488 dvp->size = size; 3489 dvp->locked = 0; 3490 dvp->deleted = 0; 3491 (void) aac_dr_event(softs, dvp->cid, -1, 3492 AAC_EVT_ONLINE); 3493 } 3494 } 3495 rval = AACOK; 3496 3497 finish: 3498 aac_sync_fib_slot_release(softs, &softs->sync_ac); 3499 return (rval); 3500 } 3501 3502 /* 3503 * Do a rescan of all the possible containers and update the container list 3504 * with newly online/offline containers, and prepare for autoconfiguration. 3505 */ 3506 static int 3507 aac_probe_containers(struct aac_softstate *softs) 3508 { 3509 int i, count, total; 3510 3511 /* Loop over possible containers */ 3512 count = softs->container_count; 3513 if (aac_get_container_count(softs, &count) == AACERR) 3514 return (AACERR); 3515 for (i = total = 0; i < count; i++) { 3516 if (aac_probe_container(softs, i) == AACOK) 3517 total++; 3518 } 3519 if (count < softs->container_count) { 3520 struct aac_container *dvp; 3521 3522 for (dvp = &softs->containers[count]; 3523 dvp < &softs->containers[softs->container_count]; dvp++) { 3524 if (!AAC_DEV_IS_VALID(&dvp->dev)) 3525 continue; 3526 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted", 3527 dvp->cid); 3528 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3529 (void) aac_dr_event(softs, dvp->cid, -1, 3530 AAC_EVT_OFFLINE); 3531 } 3532 } 3533 softs->container_count = count; 3534 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total); 3535 return (AACOK); 3536 } 3537 3538 static int 3539 aac_alloc_comm_space(struct aac_softstate *softs) 3540 { 3541 size_t rlen; 3542 ddi_dma_cookie_t cookie; 3543 uint_t cookien; 3544 3545 /* Allocate DMA for comm. 
space */ 3546 if (ddi_dma_alloc_handle( 3547 softs->devinfo_p, 3548 &softs->addr_dma_attr, 3549 DDI_DMA_SLEEP, 3550 NULL, 3551 &softs->comm_space_dma_handle) != DDI_SUCCESS) { 3552 AACDB_PRINT(softs, CE_WARN, 3553 "Cannot alloc dma handle for communication area"); 3554 goto error; 3555 } 3556 if (ddi_dma_mem_alloc( 3557 softs->comm_space_dma_handle, 3558 sizeof (struct aac_comm_space), 3559 &softs->acc_attr, 3560 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3561 DDI_DMA_SLEEP, 3562 NULL, 3563 (caddr_t *)&softs->comm_space, 3564 &rlen, 3565 &softs->comm_space_acc_handle) != DDI_SUCCESS) { 3566 AACDB_PRINT(softs, CE_WARN, 3567 "Cannot alloc mem for communication area"); 3568 goto error; 3569 } 3570 if (ddi_dma_addr_bind_handle( 3571 softs->comm_space_dma_handle, 3572 NULL, 3573 (caddr_t)softs->comm_space, 3574 sizeof (struct aac_comm_space), 3575 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3576 DDI_DMA_SLEEP, 3577 NULL, 3578 &cookie, 3579 &cookien) != DDI_DMA_MAPPED) { 3580 AACDB_PRINT(softs, CE_WARN, 3581 "DMA bind failed for communication area"); 3582 goto error; 3583 } 3584 softs->comm_space_phyaddr = cookie.dmac_address; 3585 3586 return (AACOK); 3587 error: 3588 if (softs->comm_space_acc_handle) { 3589 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3590 softs->comm_space_acc_handle = NULL; 3591 } 3592 if (softs->comm_space_dma_handle) { 3593 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3594 softs->comm_space_dma_handle = NULL; 3595 } 3596 return (AACERR); 3597 } 3598 3599 static void 3600 aac_free_comm_space(struct aac_softstate *softs) 3601 { 3602 3603 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle); 3604 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3605 softs->comm_space_acc_handle = NULL; 3606 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3607 softs->comm_space_dma_handle = NULL; 3608 softs->comm_space_phyaddr = NULL; 3609 } 3610 3611 /* 3612 * Initialize the data structures that are required for the communication 3613 * interface to operate 3614 */ 3615 static int 3616 aac_setup_comm_space(struct aac_softstate *softs) 3617 { 3618 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3619 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3620 uint32_t comm_space_phyaddr; 3621 struct aac_adapter_init *initp; 3622 int qoffset; 3623 3624 comm_space_phyaddr = softs->comm_space_phyaddr; 3625 3626 /* Setup adapter init struct */ 3627 initp = &softs->comm_space->init_data; 3628 bzero(initp, sizeof (struct aac_adapter_init)); 3629 3630 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION); 3631 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time()); 3632 3633 /* Setup new/old comm. 
specific data */ 3634 if (softs->flags & AAC_FLAGS_RAW_IO) { 3635 uint32_t init_flags = 0; 3636 3637 if (softs->flags & AAC_FLAGS_NEW_COMM) 3638 init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED; 3639 /* AAC_SUPPORTED_POWER_MANAGEMENT */ 3640 init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM; 3641 init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME; 3642 3643 ddi_put32(acc, &initp->InitStructRevision, 3644 AAC_INIT_STRUCT_REVISION_4); 3645 ddi_put32(acc, &initp->InitFlags, init_flags); 3646 /* Setup the preferred settings */ 3647 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs); 3648 ddi_put32(acc, &initp->MaxIoSize, 3649 (softs->aac_max_sectors << 9)); 3650 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size); 3651 } else { 3652 /* 3653 * Tells the adapter about the physical location of various 3654 * important shared data structures 3655 */ 3656 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress, 3657 comm_space_phyaddr + \ 3658 offsetof(struct aac_comm_space, adapter_fibs)); 3659 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0); 3660 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE); 3661 ddi_put32(acc, &initp->AdapterFibsSize, 3662 AAC_ADAPTER_FIBS * AAC_FIB_SIZE); 3663 ddi_put32(acc, &initp->PrintfBufferAddress, 3664 comm_space_phyaddr + \ 3665 offsetof(struct aac_comm_space, adapter_print_buf)); 3666 ddi_put32(acc, &initp->PrintfBufferSize, 3667 AAC_ADAPTER_PRINT_BUFSIZE); 3668 ddi_put32(acc, &initp->MiniPortRevision, 3669 AAC_INIT_STRUCT_MINIPORT_REVISION); 3670 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN); 3671 3672 qoffset = (comm_space_phyaddr + \ 3673 offsetof(struct aac_comm_space, qtable)) % \ 3674 AAC_QUEUE_ALIGN; 3675 if (qoffset) 3676 qoffset = AAC_QUEUE_ALIGN - qoffset; 3677 softs->qtablep = (struct aac_queue_table *) \ 3678 ((char *)&softs->comm_space->qtable + qoffset); 3679 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \ 3680 offsetof(struct aac_comm_space, qtable) + qoffset); 3681 3682 /* Init queue table */ 3683 ddi_put32(acc, &softs->qtablep-> \ 3684 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3685 AAC_HOST_NORM_CMD_ENTRIES); 3686 ddi_put32(acc, &softs->qtablep-> \ 3687 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3688 AAC_HOST_NORM_CMD_ENTRIES); 3689 ddi_put32(acc, &softs->qtablep-> \ 3690 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3691 AAC_HOST_HIGH_CMD_ENTRIES); 3692 ddi_put32(acc, &softs->qtablep-> \ 3693 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3694 AAC_HOST_HIGH_CMD_ENTRIES); 3695 ddi_put32(acc, &softs->qtablep-> \ 3696 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3697 AAC_ADAP_NORM_CMD_ENTRIES); 3698 ddi_put32(acc, &softs->qtablep-> \ 3699 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3700 AAC_ADAP_NORM_CMD_ENTRIES); 3701 ddi_put32(acc, &softs->qtablep-> \ 3702 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3703 AAC_ADAP_HIGH_CMD_ENTRIES); 3704 ddi_put32(acc, &softs->qtablep-> \ 3705 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3706 AAC_ADAP_HIGH_CMD_ENTRIES); 3707 ddi_put32(acc, &softs->qtablep-> \ 3708 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3709 AAC_HOST_NORM_RESP_ENTRIES); 3710 ddi_put32(acc, &softs->qtablep-> \ 3711 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3712 AAC_HOST_NORM_RESP_ENTRIES); 3713 ddi_put32(acc, &softs->qtablep-> \ 3714 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3715 AAC_HOST_HIGH_RESP_ENTRIES); 3716 ddi_put32(acc, &softs->qtablep-> \ 3717 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3718 
AAC_HOST_HIGH_RESP_ENTRIES); 3719 ddi_put32(acc, &softs->qtablep-> \ 3720 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3721 AAC_ADAP_NORM_RESP_ENTRIES); 3722 ddi_put32(acc, &softs->qtablep-> \ 3723 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3724 AAC_ADAP_NORM_RESP_ENTRIES); 3725 ddi_put32(acc, &softs->qtablep-> \ 3726 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3727 AAC_ADAP_HIGH_RESP_ENTRIES); 3728 ddi_put32(acc, &softs->qtablep-> \ 3729 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3730 AAC_ADAP_HIGH_RESP_ENTRIES); 3731 3732 /* Init queue entries */ 3733 softs->qentries[AAC_HOST_NORM_CMD_Q] = 3734 &softs->qtablep->qt_HostNormCmdQueue[0]; 3735 softs->qentries[AAC_HOST_HIGH_CMD_Q] = 3736 &softs->qtablep->qt_HostHighCmdQueue[0]; 3737 softs->qentries[AAC_ADAP_NORM_CMD_Q] = 3738 &softs->qtablep->qt_AdapNormCmdQueue[0]; 3739 softs->qentries[AAC_ADAP_HIGH_CMD_Q] = 3740 &softs->qtablep->qt_AdapHighCmdQueue[0]; 3741 softs->qentries[AAC_HOST_NORM_RESP_Q] = 3742 &softs->qtablep->qt_HostNormRespQueue[0]; 3743 softs->qentries[AAC_HOST_HIGH_RESP_Q] = 3744 &softs->qtablep->qt_HostHighRespQueue[0]; 3745 softs->qentries[AAC_ADAP_NORM_RESP_Q] = 3746 &softs->qtablep->qt_AdapNormRespQueue[0]; 3747 softs->qentries[AAC_ADAP_HIGH_RESP_Q] = 3748 &softs->qtablep->qt_AdapHighRespQueue[0]; 3749 } 3750 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV); 3751 3752 /* Send init structure to the card */ 3753 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT, 3754 comm_space_phyaddr + \ 3755 offsetof(struct aac_comm_space, init_data), 3756 0, 0, 0, NULL) == AACERR) { 3757 AACDB_PRINT(softs, CE_WARN, 3758 "Cannot send init structure to adapter"); 3759 return (AACERR); 3760 } 3761 3762 return (AACOK); 3763 } 3764 3765 static uchar_t * 3766 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf) 3767 { 3768 (void) memset(buf, ' ', AAC_VENDOR_LEN); 3769 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name)); 3770 return (buf + AAC_VENDOR_LEN); 3771 } 3772 3773 static uchar_t * 3774 aac_product_id(struct aac_softstate *softs, uchar_t *buf) 3775 { 3776 (void) memset(buf, ' ', AAC_PRODUCT_LEN); 3777 bcopy(softs->product_name, buf, strlen(softs->product_name)); 3778 return (buf + AAC_PRODUCT_LEN); 3779 } 3780 3781 /* 3782 * Construct unit serial number from container uid 3783 */ 3784 static uchar_t * 3785 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf) 3786 { 3787 int i, d; 3788 uint32_t uid; 3789 3790 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD); 3791 3792 uid = softs->containers[tgt].uid; 3793 for (i = 7; i >= 0; i--) { 3794 d = uid & 0xf; 3795 buf[i] = d > 9 ? 
'A' + (d - 0xa) : '0' + d; 3796 uid >>= 4; 3797 } 3798 return (buf + 8); 3799 } 3800 3801 /* 3802 * SPC-3 7.5 INQUIRY command implementation 3803 */ 3804 static void 3805 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt, 3806 union scsi_cdb *cdbp, struct buf *bp) 3807 { 3808 int tgt = pkt->pkt_address.a_target; 3809 char *b_addr = NULL; 3810 uchar_t page = cdbp->cdb_opaque[2]; 3811 3812 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) { 3813 /* Command Support Data is not supported */ 3814 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0); 3815 return; 3816 } 3817 3818 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3819 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3820 bp_mapin(bp); 3821 b_addr = bp->b_un.b_addr; 3822 } 3823 3824 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) { 3825 uchar_t *vpdp = (uchar_t *)b_addr; 3826 uchar_t *idp, *sp; 3827 3828 /* SPC-3 8.4 Vital product data parameters */ 3829 switch (page) { 3830 case 0x00: 3831 /* Supported VPD pages */ 3832 if (vpdp == NULL || 3833 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3)) 3834 return; 3835 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3836 vpdp[AAC_VPD_PAGE_CODE] = 0x00; 3837 vpdp[AAC_VPD_PAGE_LENGTH] = 3; 3838 3839 vpdp[AAC_VPD_PAGE_DATA] = 0x00; 3840 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80; 3841 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83; 3842 3843 pkt->pkt_state |= STATE_XFERRED_DATA; 3844 break; 3845 3846 case 0x80: 3847 /* Unit serial number page */ 3848 if (vpdp == NULL || 3849 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8)) 3850 return; 3851 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3852 vpdp[AAC_VPD_PAGE_CODE] = 0x80; 3853 vpdp[AAC_VPD_PAGE_LENGTH] = 8; 3854 3855 sp = &vpdp[AAC_VPD_PAGE_DATA]; 3856 (void) aac_lun_serialno(softs, tgt, sp); 3857 3858 pkt->pkt_state |= STATE_XFERRED_DATA; 3859 break; 3860 3861 case 0x83: 3862 /* Device identification page */ 3863 if (vpdp == NULL || 3864 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32)) 3865 return; 3866 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3867 vpdp[AAC_VPD_PAGE_CODE] = 0x83; 3868 3869 idp = &vpdp[AAC_VPD_PAGE_DATA]; 3870 bzero(idp, AAC_VPD_ID_LENGTH); 3871 idp[AAC_VPD_ID_CODESET] = 0x02; 3872 idp[AAC_VPD_ID_TYPE] = 0x01; 3873 3874 /* 3875 * SPC-3 Table 111 - Identifier type 3876 * One recommanded method of constructing the remainder 3877 * of identifier field is to concatenate the product 3878 * identification field from the standard INQUIRY data 3879 * field and the product serial number field from the 3880 * unit serial number page. 
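 * That is what is done here: the identifier is the T10 vendor id
 * and product id (both space padded, as in the standard INQUIRY
 * data returned below) followed by the container uid rendered by
 * aac_lun_serialno() as eight uppercase hex digits, most
 * significant nibble first, i.e. the same text that
 * snprintf(buf, 9, "%08X", uid) would produce (minus the NUL) and
 * the same digits returned in the unit serial number page above.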
3881 */ 3882 sp = &idp[AAC_VPD_ID_DATA]; 3883 sp = aac_vendor_id(softs, sp); 3884 sp = aac_product_id(softs, sp); 3885 sp = aac_lun_serialno(softs, tgt, sp); 3886 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \ 3887 (uintptr_t)&idp[AAC_VPD_ID_DATA]; 3888 3889 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \ 3890 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA]; 3891 pkt->pkt_state |= STATE_XFERRED_DATA; 3892 break; 3893 3894 default: 3895 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3896 0x24, 0x00, 0); 3897 break; 3898 } 3899 } else { 3900 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr; 3901 size_t len = sizeof (struct scsi_inquiry); 3902 3903 if (page != 0) { 3904 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3905 0x24, 0x00, 0); 3906 return; 3907 } 3908 if (inqp == NULL || bp->b_bcount < len) 3909 return; 3910 3911 bzero(inqp, len); 3912 inqp->inq_len = AAC_ADDITIONAL_LEN; 3913 inqp->inq_ansi = AAC_ANSI_VER; 3914 inqp->inq_rdf = AAC_RESP_DATA_FORMAT; 3915 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid); 3916 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid); 3917 bcopy("V1.0", inqp->inq_revision, 4); 3918 inqp->inq_cmdque = 1; /* enable tagged-queuing */ 3919 /* 3920 * For "sd-max-xfer-size" property which may impact performance 3921 * when IO threads increase. 3922 */ 3923 inqp->inq_wbus32 = 1; 3924 3925 pkt->pkt_state |= STATE_XFERRED_DATA; 3926 } 3927 } 3928 3929 /* 3930 * SPC-3 7.10 MODE SENSE command implementation 3931 */ 3932 static void 3933 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt, 3934 union scsi_cdb *cdbp, struct buf *bp, int capacity) 3935 { 3936 uchar_t pagecode; 3937 struct mode_header *headerp; 3938 struct mode_header_g1 *g1_headerp; 3939 unsigned int ncyl; 3940 caddr_t sense_data; 3941 caddr_t next_page; 3942 size_t sdata_size; 3943 size_t pages_size; 3944 int unsupport_page = 0; 3945 3946 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE || 3947 cdbp->scc_cmd == SCMD_MODE_SENSE_G1); 3948 3949 if (!(bp && bp->b_un.b_addr && bp->b_bcount)) 3950 return; 3951 3952 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3953 bp_mapin(bp); 3954 pkt->pkt_state |= STATE_XFERRED_DATA; 3955 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F; 3956 3957 /* calculate the size of needed buffer */ 3958 if (cdbp->scc_cmd == SCMD_MODE_SENSE) 3959 sdata_size = MODE_HEADER_LENGTH; 3960 else /* must be SCMD_MODE_SENSE_G1 */ 3961 sdata_size = MODE_HEADER_LENGTH_G1; 3962 3963 pages_size = 0; 3964 switch (pagecode) { 3965 case SD_MODE_SENSE_PAGE3_CODE: 3966 pages_size += sizeof (struct mode_format); 3967 break; 3968 3969 case SD_MODE_SENSE_PAGE4_CODE: 3970 pages_size += sizeof (struct mode_geometry); 3971 break; 3972 3973 case MODEPAGE_CTRL_MODE: 3974 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3975 pages_size += sizeof (struct mode_control_scsi3); 3976 } else { 3977 unsupport_page = 1; 3978 } 3979 break; 3980 3981 case MODEPAGE_ALLPAGES: 3982 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3983 pages_size += sizeof (struct mode_format) + 3984 sizeof (struct mode_geometry) + 3985 sizeof (struct mode_control_scsi3); 3986 } else { 3987 pages_size += sizeof (struct mode_format) + 3988 sizeof (struct mode_geometry); 3989 } 3990 break; 3991 3992 default: 3993 /* unsupported pages */ 3994 unsupport_page = 1; 3995 } 3996 3997 /* allocate buffer to fill the send data */ 3998 sdata_size += pages_size; 3999 sense_data = kmem_zalloc(sdata_size, KM_SLEEP); 4000 4001 if (cdbp->scc_cmd == SCMD_MODE_SENSE) { 4002 headerp = (struct mode_header *)sense_data; 4003 headerp->length = MODE_HEADER_LENGTH + pages_size - 4004 sizeof 
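	/*
	 * The mode data length reported back excludes the length field
	 * itself, hence the subtraction of sizeof (headerp->length) here
	 * and of sizeof (g1_headerp->length) for the 10-byte CDB case
	 * below.
	 */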
(headerp->length); 4005 headerp->bdesc_length = 0; 4006 next_page = sense_data + sizeof (struct mode_header); 4007 } else { 4008 g1_headerp = (void *)sense_data; 4009 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size - 4010 sizeof (g1_headerp->length)); 4011 g1_headerp->bdesc_length = 0; 4012 next_page = sense_data + sizeof (struct mode_header_g1); 4013 } 4014 4015 if (unsupport_page) 4016 goto finish; 4017 4018 if (pagecode == SD_MODE_SENSE_PAGE3_CODE || 4019 pagecode == MODEPAGE_ALLPAGES) { 4020 /* SBC-3 7.1.3.3 Format device page */ 4021 struct mode_format *page3p; 4022 4023 page3p = (void *)next_page; 4024 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE; 4025 page3p->mode_page.length = sizeof (struct mode_format); 4026 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE); 4027 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK); 4028 4029 next_page += sizeof (struct mode_format); 4030 } 4031 4032 if (pagecode == SD_MODE_SENSE_PAGE4_CODE || 4033 pagecode == MODEPAGE_ALLPAGES) { 4034 /* SBC-3 7.1.3.8 Rigid disk device geometry page */ 4035 struct mode_geometry *page4p; 4036 4037 page4p = (void *)next_page; 4038 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE; 4039 page4p->mode_page.length = sizeof (struct mode_geometry); 4040 page4p->heads = AAC_NUMBER_OF_HEADS; 4041 page4p->rpm = BE_16(AAC_ROTATION_SPEED); 4042 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK); 4043 page4p->cyl_lb = ncyl & 0xff; 4044 page4p->cyl_mb = (ncyl >> 8) & 0xff; 4045 page4p->cyl_ub = (ncyl >> 16) & 0xff; 4046 4047 next_page += sizeof (struct mode_geometry); 4048 } 4049 4050 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) && 4051 softs->flags & AAC_FLAGS_LBA_64BIT) { 4052 /* 64-bit LBA need large sense data */ 4053 struct mode_control_scsi3 *mctl; 4054 4055 mctl = (void *)next_page; 4056 mctl->mode_page.code = MODEPAGE_CTRL_MODE; 4057 mctl->mode_page.length = 4058 sizeof (struct mode_control_scsi3) - 4059 sizeof (struct mode_page); 4060 mctl->d_sense = 1; 4061 } 4062 4063 finish: 4064 /* copyout the valid data. */ 4065 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount)); 4066 kmem_free(sense_data, sdata_size); 4067 } 4068 4069 static int 4070 aac_name_node(dev_info_t *dip, char *name, int len) 4071 { 4072 int tgt, lun; 4073 4074 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4075 DDI_PROP_DONTPASS, "target", -1); 4076 if (tgt == -1) 4077 return (DDI_FAILURE); 4078 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4079 DDI_PROP_DONTPASS, "lun", -1); 4080 if (lun == -1) 4081 return (DDI_FAILURE); 4082 4083 (void) snprintf(name, len, "%x,%x", tgt, lun); 4084 return (DDI_SUCCESS); 4085 } 4086 4087 /*ARGSUSED*/ 4088 static int 4089 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 4090 scsi_hba_tran_t *tran, struct scsi_device *sd) 4091 { 4092 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 4093 #if defined(DEBUG) || defined(__lock_lint) 4094 int ctl = ddi_get_instance(softs->devinfo_p); 4095 #endif 4096 uint16_t tgt = sd->sd_address.a_target; 4097 uint8_t lun = sd->sd_address.a_lun; 4098 struct aac_device *dvp; 4099 4100 DBCALLED(softs, 2); 4101 4102 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 4103 /* 4104 * If no persistent node exist, we don't allow .conf node 4105 * to be created. 
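 * The exception is a .conf node that matches a child we have
 * already enumerated (aac_find_child() below): its properties are
 * merged into the existing devinfo node via ndi_merge_node(), and
 * only when that merge is not possible is the .conf node itself
 * allowed to be created.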
4106 */ 4107 if (aac_find_child(softs, tgt, lun) != NULL) { 4108 if (ndi_merge_node(tgt_dip, aac_name_node) != 4109 DDI_SUCCESS) 4110 /* Create this .conf node */ 4111 return (DDI_SUCCESS); 4112 } 4113 return (DDI_FAILURE); 4114 } 4115 4116 /* 4117 * Only support container/phys. device that has been 4118 * detected and valid 4119 */ 4120 mutex_enter(&softs->io_lock); 4121 if (tgt >= AAC_MAX_DEV(softs)) { 4122 AACDB_PRINT_TRAN(softs, 4123 "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun); 4124 mutex_exit(&softs->io_lock); 4125 return (DDI_FAILURE); 4126 } 4127 4128 if (tgt < AAC_MAX_LD) { 4129 dvp = (struct aac_device *)&softs->containers[tgt]; 4130 if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) { 4131 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d", 4132 ctl, tgt, lun); 4133 mutex_exit(&softs->io_lock); 4134 return (DDI_FAILURE); 4135 } 4136 /* 4137 * Save the tgt_dip for the given target if one doesn't exist 4138 * already. Dip's for non-existance tgt's will be cleared in 4139 * tgt_free. 4140 */ 4141 if (softs->containers[tgt].dev.dip == NULL && 4142 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) 4143 softs->containers[tgt].dev.dip = tgt_dip; 4144 } else { 4145 dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)]; 4146 } 4147 4148 if (softs->flags & AAC_FLAGS_BRKUP) { 4149 if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip, 4150 "buf_break", 1) != DDI_PROP_SUCCESS) { 4151 cmn_err(CE_CONT, "unable to create " 4152 "property for t%dL%d (buf_break)", tgt, lun); 4153 } 4154 } 4155 4156 AACDB_PRINT(softs, CE_NOTE, 4157 "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun, 4158 (dvp->type == AAC_DEV_PD) ? "pd" : "ld"); 4159 mutex_exit(&softs->io_lock); 4160 return (DDI_SUCCESS); 4161 } 4162 4163 static void 4164 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 4165 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 4166 { 4167 #ifndef __lock_lint 4168 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran)) 4169 #endif 4170 4171 struct aac_softstate *softs = SD2AAC(sd); 4172 int tgt = sd->sd_address.a_target; 4173 4174 mutex_enter(&softs->io_lock); 4175 if (tgt < AAC_MAX_LD) { 4176 if (softs->containers[tgt].dev.dip == tgt_dip) 4177 softs->containers[tgt].dev.dip = NULL; 4178 } else { 4179 softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID; 4180 } 4181 mutex_exit(&softs->io_lock); 4182 } 4183 4184 /* 4185 * Check if the firmware is Up And Running. If it is in the Kernel Panic 4186 * state, (BlinkLED code + 1) is returned. 
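 * The state is decoded from the adapter status register
 * (AAC_OMR0) read below: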
4187 * 0 -- firmware up and running 4188 * -1 -- firmware dead 4189 * >0 -- firmware kernel panic 4190 */ 4191 static int 4192 aac_check_adapter_health(struct aac_softstate *softs) 4193 { 4194 int rval; 4195 4196 rval = PCI_MEM_GET32(softs, AAC_OMR0); 4197 4198 if (rval & AAC_KERNEL_UP_AND_RUNNING) { 4199 rval = 0; 4200 } else if (rval & AAC_KERNEL_PANIC) { 4201 cmn_err(CE_WARN, "firmware panic"); 4202 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */ 4203 } else { 4204 cmn_err(CE_WARN, "firmware dead"); 4205 rval = -1; 4206 } 4207 return (rval); 4208 } 4209 4210 static void 4211 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp, 4212 uchar_t reason) 4213 { 4214 acp->flags |= AAC_CMD_ABORT; 4215 4216 if (acp->pkt) { 4217 if (acp->slotp) { /* outstanding cmd */ 4218 acp->pkt->pkt_state |= STATE_GOT_STATUS; 4219 } 4220 4221 switch (reason) { 4222 case CMD_TIMEOUT: 4223 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p", 4224 acp); 4225 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 4226 STAT_TIMEOUT | STAT_BUS_RESET); 4227 break; 4228 case CMD_RESET: 4229 /* aac support only RESET_ALL */ 4230 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp); 4231 aac_set_pkt_reason(softs, acp, CMD_RESET, 4232 STAT_BUS_RESET); 4233 break; 4234 case CMD_ABORTED: 4235 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p", 4236 acp); 4237 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 4238 STAT_ABORTED); 4239 break; 4240 } 4241 } 4242 aac_end_io(softs, acp); 4243 } 4244 4245 /* 4246 * Abort all the pending commands of type iocmd or just the command pkt 4247 * corresponding to pkt 4248 */ 4249 static void 4250 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt, 4251 int reason) 4252 { 4253 struct aac_cmd *ac_arg, *acp; 4254 int i; 4255 4256 if (pkt == NULL) { 4257 ac_arg = NULL; 4258 } else { 4259 ac_arg = PKT2AC(pkt); 4260 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ? 4261 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC; 4262 } 4263 4264 /* 4265 * a) outstanding commands on the controller 4266 * Note: should abort outstanding commands only after one 4267 * IOP reset has been done. 4268 */ 4269 if (iocmd & AAC_IOCMD_OUTSTANDING) { 4270 struct aac_cmd *acp; 4271 4272 for (i = 0; i < AAC_MAX_LD; i++) { 4273 if (AAC_DEV_IS_VALID(&softs->containers[i].dev)) 4274 softs->containers[i].reset = 1; 4275 } 4276 while ((acp = softs->q_busy.q_head) != NULL) 4277 aac_abort_iocmd(softs, acp, reason); 4278 } 4279 4280 /* b) commands in the waiting queues */ 4281 for (i = 0; i < AAC_CMDQ_NUM; i++) { 4282 if (iocmd & (1 << i)) { 4283 if (ac_arg) { 4284 aac_abort_iocmd(softs, ac_arg, reason); 4285 } else { 4286 while ((acp = softs->q_wait[i].q_head) != NULL) 4287 aac_abort_iocmd(softs, acp, reason); 4288 } 4289 } 4290 } 4291 } 4292 4293 /* 4294 * The draining thread is shared among quiesce threads. It terminates 4295 * when the adapter is quiesced or stopped by aac_stop_drain(). 4296 */ 4297 static void 4298 aac_check_drain(void *arg) 4299 { 4300 struct aac_softstate *softs = arg; 4301 4302 mutex_enter(&softs->io_lock); 4303 if (softs->ndrains) { 4304 softs->drain_timeid = 0; 4305 /* 4306 * If both ASYNC and SYNC bus throttle are held, 4307 * wake up threads only when both are drained out. 
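 * A throttle is "held" when aac_hold_bus() has forced it to 0,
 * and a held queue counts as drained once its bus_ncmds[] count
 * drops to 0; if either held queue still has commands outstanding
 * the timer is simply re-armed for another AAC_QUIESCE_TICK.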
4308 */ 4309 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 || 4310 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) && 4311 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 || 4312 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0)) 4313 cv_broadcast(&softs->drain_cv); 4314 else 4315 softs->drain_timeid = timeout(aac_check_drain, softs, 4316 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4317 } 4318 mutex_exit(&softs->io_lock); 4319 } 4320 4321 /* 4322 * If not draining the outstanding cmds, drain them. Otherwise, 4323 * only update ndrains. 4324 */ 4325 static void 4326 aac_start_drain(struct aac_softstate *softs) 4327 { 4328 if (softs->ndrains == 0) { 4329 ASSERT(softs->drain_timeid == 0); 4330 softs->drain_timeid = timeout(aac_check_drain, softs, 4331 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4332 } 4333 softs->ndrains++; 4334 } 4335 4336 /* 4337 * Stop the draining thread when no other threads use it any longer. 4338 * Side effect: io_lock may be released in the middle. 4339 */ 4340 static void 4341 aac_stop_drain(struct aac_softstate *softs) 4342 { 4343 softs->ndrains--; 4344 if (softs->ndrains == 0) { 4345 if (softs->drain_timeid != 0) { 4346 timeout_id_t tid = softs->drain_timeid; 4347 4348 softs->drain_timeid = 0; 4349 mutex_exit(&softs->io_lock); 4350 (void) untimeout(tid); 4351 mutex_enter(&softs->io_lock); 4352 } 4353 } 4354 } 4355 4356 /* 4357 * The following function comes from Adaptec: 4358 * 4359 * Once do an IOP reset, basically the driver have to re-initialize the card 4360 * as if up from a cold boot, and the driver is responsible for any IO that 4361 * is outstanding to the adapter at the time of the IOP RESET. And prepare 4362 * for IOP RESET by making the init code modular with the ability to call it 4363 * from multiple places. 4364 */ 4365 static int 4366 aac_reset_adapter(struct aac_softstate *softs) 4367 { 4368 int health; 4369 uint32_t status; 4370 int rval = AAC_IOP_RESET_FAILED; 4371 4372 DBCALLED(softs, 1); 4373 4374 ASSERT(softs->state & AAC_STATE_RESET); 4375 4376 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0); 4377 /* Disable interrupt */ 4378 AAC_DISABLE_INTR(softs); 4379 4380 health = aac_check_adapter_health(softs); 4381 if (health == -1) { 4382 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 4383 goto finish; 4384 } 4385 if (health == 0) /* flush drives if possible */ 4386 (void) aac_shutdown(softs); 4387 4388 /* Execute IOP reset */ 4389 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0, 4390 &status)) != AACOK) { 4391 ddi_acc_handle_t acc; 4392 struct aac_fib *fibp; 4393 struct aac_pause_command *pc; 4394 4395 if ((status & 0xf) == 0xf) { 4396 uint32_t wait_count; 4397 4398 /* 4399 * Sunrise Lake has dual cores and we must drag the 4400 * other core with us to reset simultaneously. There 4401 * are 2 bits in the Inbound Reset Control and Status 4402 * Register (offset 0x38) of the Sunrise Lake to reset 4403 * the chip without clearing out the PCI configuration 4404 * info (COMMAND & BARS). 
4405 */ 4406 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST); 4407 4408 /* 4409 * We need to wait for 5 seconds before accessing the MU 4410 * again 10000 * 100us = 1000,000us = 1000ms = 1s 4411 */ 4412 wait_count = 5 * 10000; 4413 while (wait_count) { 4414 drv_usecwait(100); /* delay 100 microseconds */ 4415 wait_count--; 4416 } 4417 } else { 4418 if (status == SRB_STATUS_INVALID_REQUEST) 4419 cmn_err(CE_WARN, "!IOP_RESET not supported"); 4420 else /* probably timeout */ 4421 cmn_err(CE_WARN, "!IOP_RESET failed"); 4422 4423 /* Unwind aac_shutdown() */ 4424 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 4425 acc = softs->sync_ac.slotp->fib_acc_handle; 4426 4427 fibp = softs->sync_ac.slotp->fibp; 4428 pc = (struct aac_pause_command *)&fibp->data[0]; 4429 4430 bzero(pc, sizeof (*pc)); 4431 ddi_put32(acc, &pc->Command, VM_ContainerConfig); 4432 ddi_put32(acc, &pc->Type, CT_PAUSE_IO); 4433 ddi_put32(acc, &pc->Timeout, 1); 4434 ddi_put32(acc, &pc->Min, 1); 4435 ddi_put32(acc, &pc->NoRescan, 1); 4436 4437 (void) aac_sync_fib(softs, ContainerCommand, 4438 AAC_FIB_SIZEOF(struct aac_pause_command)); 4439 aac_sync_fib_slot_release(softs, &softs->sync_ac); 4440 4441 if (aac_check_adapter_health(softs) != 0) 4442 ddi_fm_service_impact(softs->devinfo_p, 4443 DDI_SERVICE_LOST); 4444 else 4445 /* 4446 * IOP reset not supported or IOP not reseted 4447 */ 4448 rval = AAC_IOP_RESET_ABNORMAL; 4449 goto finish; 4450 } 4451 } 4452 4453 /* 4454 * Re-read and renegotiate the FIB parameters, as one of the actions 4455 * that can result from an IOP reset is the running of a new firmware 4456 * image. 4457 */ 4458 if (aac_common_attach(softs) != AACOK) 4459 goto finish; 4460 4461 rval = AAC_IOP_RESET_SUCCEED; 4462 4463 finish: 4464 AAC_ENABLE_INTR(softs); 4465 return (rval); 4466 } 4467 4468 static void 4469 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q, 4470 int throttle) 4471 { 4472 /* 4473 * If the bus is draining/quiesced, no changes to the throttles 4474 * are allowed. All throttles should have been set to 0. 4475 */ 4476 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains) 4477 return; 4478 dvp->throttle[q] = throttle; 4479 } 4480 4481 static void 4482 aac_hold_bus(struct aac_softstate *softs, int iocmds) 4483 { 4484 int i, q; 4485 4486 /* Hold bus by holding every device on the bus */ 4487 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4488 if (iocmds & (1 << q)) { 4489 softs->bus_throttle[q] = 0; 4490 for (i = 0; i < AAC_MAX_LD; i++) 4491 aac_set_throttle(softs, 4492 &softs->containers[i].dev, q, 0); 4493 for (i = 0; i < AAC_MAX_PD(softs); i++) 4494 aac_set_throttle(softs, 4495 &softs->nondasds[i].dev, q, 0); 4496 } 4497 } 4498 } 4499 4500 static void 4501 aac_unhold_bus(struct aac_softstate *softs, int iocmds) 4502 { 4503 int i, q, max_throttle; 4504 4505 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4506 if (iocmds & (1 << q)) { 4507 /* 4508 * Should not unhold AAC_IOCMD_ASYNC bus, if it has been 4509 * quiesced or being drained by possibly some quiesce 4510 * threads. 
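 * The ASYNC throttle is restored by the quiesce code itself,
 * i.e. by aac_do_unquiesce() or an interrupted aac_do_quiesce().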
4511 */ 4512 if (q == AAC_CMDQ_ASYNC && ((softs->state & 4513 AAC_STATE_QUIESCED) || softs->ndrains)) 4514 continue; 4515 if (q == AAC_CMDQ_ASYNC) 4516 max_throttle = softs->total_slots - 4517 AAC_MGT_SLOT_NUM; 4518 else 4519 max_throttle = softs->total_slots - 1; 4520 softs->bus_throttle[q] = max_throttle; 4521 for (i = 0; i < AAC_MAX_LD; i++) 4522 aac_set_throttle(softs, 4523 &softs->containers[i].dev, 4524 q, max_throttle); 4525 for (i = 0; i < AAC_MAX_PD(softs); i++) 4526 aac_set_throttle(softs, &softs->nondasds[i].dev, 4527 q, max_throttle); 4528 } 4529 } 4530 } 4531 4532 static int 4533 aac_do_reset(struct aac_softstate *softs) 4534 { 4535 int health; 4536 int rval; 4537 4538 softs->state |= AAC_STATE_RESET; 4539 health = aac_check_adapter_health(softs); 4540 4541 /* 4542 * Hold off new io commands and wait all outstanding io 4543 * commands to complete. 4544 */ 4545 if (health == 0) { 4546 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC]; 4547 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC]; 4548 4549 if (sync_cmds == 0 && async_cmds == 0) { 4550 rval = AAC_IOP_RESET_SUCCEED; 4551 goto finish; 4552 } 4553 /* 4554 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds 4555 * to complete the outstanding io commands 4556 */ 4557 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10; 4558 int (*intr_handler)(struct aac_softstate *); 4559 4560 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4561 /* 4562 * Poll the adapter by ourselves in case interrupt is disabled 4563 * and to avoid releasing the io_lock. 4564 */ 4565 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 4566 aac_process_intr_new : aac_process_intr_old; 4567 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] || 4568 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) { 4569 drv_usecwait(100); 4570 (void) intr_handler(softs); 4571 timeout--; 4572 } 4573 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4574 4575 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 && 4576 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) { 4577 /* Cmds drained out */ 4578 rval = AAC_IOP_RESET_SUCCEED; 4579 goto finish; 4580 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds || 4581 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) { 4582 /* Cmds not drained out, adapter overloaded */ 4583 rval = AAC_IOP_RESET_ABNORMAL; 4584 goto finish; 4585 } 4586 } 4587 4588 /* 4589 * If a longer waiting time still can't drain any outstanding io 4590 * commands, do IOP reset. 
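 * Should that reset fail, aac_do_reset() marks the adapter
 * AAC_STATE_DEAD below and aac_tran_start() will then refuse new
 * commands with TRAN_FATAL_ERROR.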
4591 */ 4592 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED) 4593 softs->state |= AAC_STATE_DEAD; 4594 4595 finish: 4596 softs->state &= ~AAC_STATE_RESET; 4597 return (rval); 4598 } 4599 4600 static int 4601 aac_tran_reset(struct scsi_address *ap, int level) 4602 { 4603 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4604 int rval; 4605 4606 DBCALLED(softs, 1); 4607 4608 if (level != RESET_ALL) { 4609 cmn_err(CE_NOTE, "!reset target/lun not supported"); 4610 return (0); 4611 } 4612 4613 mutex_enter(&softs->io_lock); 4614 switch (rval = aac_do_reset(softs)) { 4615 case AAC_IOP_RESET_SUCCEED: 4616 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC, 4617 NULL, CMD_RESET); 4618 aac_start_waiting_io(softs); 4619 break; 4620 case AAC_IOP_RESET_FAILED: 4621 /* Abort IOCTL cmds when adapter is dead */ 4622 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET); 4623 break; 4624 case AAC_IOP_RESET_ABNORMAL: 4625 aac_start_waiting_io(softs); 4626 } 4627 mutex_exit(&softs->io_lock); 4628 4629 aac_drain_comp_q(softs); 4630 return (rval == 0); 4631 } 4632 4633 static int 4634 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 4635 { 4636 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4637 4638 DBCALLED(softs, 1); 4639 4640 mutex_enter(&softs->io_lock); 4641 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED); 4642 mutex_exit(&softs->io_lock); 4643 4644 aac_drain_comp_q(softs); 4645 return (1); 4646 } 4647 4648 void 4649 aac_free_dmamap(struct aac_cmd *acp) 4650 { 4651 /* Free dma mapping */ 4652 if (acp->flags & AAC_CMD_DMA_VALID) { 4653 ASSERT(acp->buf_dma_handle); 4654 (void) ddi_dma_unbind_handle(acp->buf_dma_handle); 4655 acp->flags &= ~AAC_CMD_DMA_VALID; 4656 } 4657 4658 if (acp->abp != NULL) { /* free non-aligned buf DMA */ 4659 ASSERT(acp->buf_dma_handle); 4660 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp) 4661 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr, 4662 (uint8_t *)acp->abp, acp->bp->b_bcount, 4663 DDI_DEV_AUTOINCR); 4664 ddi_dma_mem_free(&acp->abh); 4665 acp->abp = NULL; 4666 } 4667 4668 if (acp->buf_dma_handle) { 4669 ddi_dma_free_handle(&acp->buf_dma_handle); 4670 acp->buf_dma_handle = NULL; 4671 } 4672 } 4673 4674 static void 4675 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 4676 { 4677 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported", 4678 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd); 4679 aac_free_dmamap(acp); 4680 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0); 4681 aac_soft_callback(softs, acp); 4682 } 4683 4684 /* 4685 * Handle command to logical device 4686 */ 4687 static int 4688 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp) 4689 { 4690 struct aac_container *dvp; 4691 struct scsi_pkt *pkt; 4692 union scsi_cdb *cdbp; 4693 struct buf *bp; 4694 int rval; 4695 4696 dvp = (struct aac_container *)acp->dvp; 4697 pkt = acp->pkt; 4698 cdbp = (void *)pkt->pkt_cdbp; 4699 bp = acp->bp; 4700 4701 switch (cdbp->scc_cmd) { 4702 case SCMD_INQUIRY: /* inquiry */ 4703 aac_free_dmamap(acp); 4704 aac_inquiry(softs, pkt, cdbp, bp); 4705 aac_soft_callback(softs, acp); 4706 rval = TRAN_ACCEPT; 4707 break; 4708 4709 case SCMD_READ_CAPACITY: /* read capacity */ 4710 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4711 struct scsi_capacity cap; 4712 uint64_t last_lba; 4713 4714 /* check 64-bit LBA */ 4715 last_lba = dvp->size - 1; 4716 if (last_lba > 0xffffffffull) { 4717 cap.capacity = 0xfffffffful; 4718 } else { 4719 cap.capacity = BE_32(last_lba); 
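			/*
			 * Returning 0xffffffff in the READ CAPACITY(10) data
			 * is the standard cue for the initiator to retry with
			 * READ CAPACITY(16), handled below under
			 * SCMD_SVC_ACTION_IN_G4.
			 */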
4720 } 4721 cap.lbasize = BE_32(AAC_SECTOR_SIZE); 4722 4723 aac_free_dmamap(acp); 4724 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4725 bp_mapin(bp); 4726 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8)); 4727 pkt->pkt_state |= STATE_XFERRED_DATA; 4728 } 4729 aac_soft_callback(softs, acp); 4730 rval = TRAN_ACCEPT; 4731 break; 4732 4733 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */ 4734 /* Check if containers need 64-bit LBA support */ 4735 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) { 4736 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4737 struct scsi_capacity_16 cap16; 4738 int cap_len = sizeof (struct scsi_capacity_16); 4739 4740 bzero(&cap16, cap_len); 4741 cap16.sc_capacity = BE_64(dvp->size - 1); 4742 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE); 4743 4744 aac_free_dmamap(acp); 4745 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4746 bp_mapin(bp); 4747 bcopy(&cap16, bp->b_un.b_addr, 4748 min(bp->b_bcount, cap_len)); 4749 pkt->pkt_state |= STATE_XFERRED_DATA; 4750 } 4751 aac_soft_callback(softs, acp); 4752 } else { 4753 aac_unknown_scmd(softs, acp); 4754 } 4755 rval = TRAN_ACCEPT; 4756 break; 4757 4758 case SCMD_READ_G4: /* read_16 */ 4759 case SCMD_WRITE_G4: /* write_16 */ 4760 if (softs->flags & AAC_FLAGS_RAW_IO) { 4761 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 4762 acp->blkno = ((uint64_t) \ 4763 GETG4ADDR(cdbp) << 32) | \ 4764 (uint32_t)GETG4ADDRTL(cdbp); 4765 goto do_io; 4766 } 4767 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported"); 4768 aac_unknown_scmd(softs, acp); 4769 rval = TRAN_ACCEPT; 4770 break; 4771 4772 case SCMD_READ: /* read_6 */ 4773 case SCMD_WRITE: /* write_6 */ 4774 acp->blkno = GETG0ADDR(cdbp); 4775 goto do_io; 4776 4777 case SCMD_READ_G5: /* read_12 */ 4778 case SCMD_WRITE_G5: /* write_12 */ 4779 acp->blkno = GETG5ADDR(cdbp); 4780 goto do_io; 4781 4782 case SCMD_READ_G1: /* read_10 */ 4783 case SCMD_WRITE_G1: /* write_10 */ 4784 acp->blkno = (uint32_t)GETG1ADDR(cdbp); 4785 do_io: 4786 if (acp->flags & AAC_CMD_DMA_VALID) { 4787 uint64_t cnt_size = dvp->size; 4788 4789 /* 4790 * If LBA > array size AND rawio, the 4791 * adapter may hang. So check it before 4792 * sending. 4793 * NOTE: (blkno + blkcnt) may overflow 4794 */ 4795 if ((acp->blkno < cnt_size) && 4796 ((acp->blkno + acp->bcount / 4797 AAC_BLK_SIZE) <= cnt_size)) { 4798 rval = aac_do_io(softs, acp); 4799 } else { 4800 /* 4801 * Request exceeds the capacity of disk, 4802 * set error block number to last LBA 4803 * + 1. 
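 * The sense built below is ILLEGAL REQUEST with ASC/ASCQ
 * 0x21/0x00 (logical block address out of range) and cnt_size as
 * the error block number.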
4804 */ 4805 aac_set_arq_data(pkt, 4806 KEY_ILLEGAL_REQUEST, 0x21, 4807 0x00, cnt_size); 4808 aac_soft_callback(softs, acp); 4809 rval = TRAN_ACCEPT; 4810 } 4811 } else if (acp->bcount == 0) { 4812 /* For 0 length IO, just return ok */ 4813 aac_soft_callback(softs, acp); 4814 rval = TRAN_ACCEPT; 4815 } else { 4816 rval = TRAN_BADPKT; 4817 } 4818 break; 4819 4820 case SCMD_MODE_SENSE: /* mode_sense_6 */ 4821 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */ 4822 int capacity; 4823 4824 aac_free_dmamap(acp); 4825 if (dvp->size > 0xffffffffull) 4826 capacity = 0xfffffffful; /* 64-bit LBA */ 4827 else 4828 capacity = dvp->size; 4829 aac_mode_sense(softs, pkt, cdbp, bp, capacity); 4830 aac_soft_callback(softs, acp); 4831 rval = TRAN_ACCEPT; 4832 break; 4833 } 4834 4835 case SCMD_START_STOP: 4836 if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) { 4837 acp->aac_cmd_fib = aac_cmd_fib_startstop; 4838 acp->ac_comp = aac_startstop_complete; 4839 rval = aac_do_io(softs, acp); 4840 break; 4841 } 4842 /* FALLTHRU */ 4843 case SCMD_TEST_UNIT_READY: 4844 case SCMD_REQUEST_SENSE: 4845 case SCMD_FORMAT: 4846 aac_free_dmamap(acp); 4847 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4848 if (acp->flags & AAC_CMD_BUF_READ) { 4849 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4850 bp_mapin(bp); 4851 bzero(bp->b_un.b_addr, bp->b_bcount); 4852 } 4853 pkt->pkt_state |= STATE_XFERRED_DATA; 4854 } 4855 aac_soft_callback(softs, acp); 4856 rval = TRAN_ACCEPT; 4857 break; 4858 4859 case SCMD_SYNCHRONIZE_CACHE: 4860 acp->flags |= AAC_CMD_NTAG; 4861 acp->aac_cmd_fib = aac_cmd_fib_sync; 4862 acp->ac_comp = aac_synccache_complete; 4863 rval = aac_do_io(softs, acp); 4864 break; 4865 4866 case SCMD_DOORLOCK: 4867 aac_free_dmamap(acp); 4868 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0; 4869 aac_soft_callback(softs, acp); 4870 rval = TRAN_ACCEPT; 4871 break; 4872 4873 default: /* unknown command */ 4874 aac_unknown_scmd(softs, acp); 4875 rval = TRAN_ACCEPT; 4876 break; 4877 } 4878 4879 return (rval); 4880 } 4881 4882 static int 4883 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) 4884 { 4885 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4886 struct aac_cmd *acp = PKT2AC(pkt); 4887 struct aac_device *dvp = acp->dvp; 4888 int rval; 4889 4890 DBCALLED(softs, 2); 4891 4892 /* 4893 * Reinitialize some fields of ac and pkt; the packet may 4894 * have been resubmitted 4895 */ 4896 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \ 4897 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID; 4898 acp->timeout = acp->pkt->pkt_time; 4899 if (pkt->pkt_flags & FLAG_NOINTR) 4900 acp->flags |= AAC_CMD_NO_INTR; 4901 #ifdef DEBUG 4902 acp->fib_flags = AACDB_FLAGS_FIB_SCMD; 4903 #endif 4904 pkt->pkt_reason = CMD_CMPLT; 4905 pkt->pkt_state = 0; 4906 pkt->pkt_statistics = 0; 4907 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */ 4908 4909 if (acp->flags & AAC_CMD_DMA_VALID) { 4910 pkt->pkt_resid = acp->bcount; 4911 /* Consistent packets need to be sync'ed first */ 4912 if ((acp->flags & AAC_CMD_CONSISTENT) && 4913 (acp->flags & AAC_CMD_BUF_WRITE)) 4914 if (aac_dma_sync_ac(acp) != AACOK) { 4915 ddi_fm_service_impact(softs->devinfo_p, 4916 DDI_SERVICE_UNAFFECTED); 4917 return (TRAN_BADPKT); 4918 } 4919 } else { 4920 pkt->pkt_resid = 0; 4921 } 4922 4923 mutex_enter(&softs->io_lock); 4924 AACDB_PRINT_SCMD(softs, acp); 4925 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) && 4926 !(softs->state & AAC_STATE_DEAD)) { 4927 if (dvp->type == AAC_DEV_LD) { 4928 if (ap->a_lun == 0) 4929 rval = 
aac_tran_start_ld(softs, acp); 4930 else 4931 goto error; 4932 } else { 4933 rval = aac_do_io(softs, acp); 4934 } 4935 } else { 4936 error: 4937 #ifdef DEBUG 4938 if (!(softs->state & AAC_STATE_DEAD)) { 4939 AACDB_PRINT_TRAN(softs, 4940 "Cannot send cmd to target t%dL%d: %s", 4941 ap->a_target, ap->a_lun, 4942 "target invalid"); 4943 } else { 4944 AACDB_PRINT(softs, CE_WARN, 4945 "Cannot send cmd to target t%dL%d: %s", 4946 ap->a_target, ap->a_lun, 4947 "adapter dead"); 4948 } 4949 #endif 4950 rval = TRAN_FATAL_ERROR; 4951 } 4952 mutex_exit(&softs->io_lock); 4953 return (rval); 4954 } 4955 4956 static int 4957 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom) 4958 { 4959 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4960 struct aac_device *dvp; 4961 int rval; 4962 4963 DBCALLED(softs, 2); 4964 4965 /* We don't allow inquiring about capabilities for other targets */ 4966 if (cap == NULL || whom == 0) { 4967 AACDB_PRINT(softs, CE_WARN, 4968 "GetCap> %s not supported: whom=%d", cap, whom); 4969 return (-1); 4970 } 4971 4972 mutex_enter(&softs->io_lock); 4973 dvp = AAC_DEV(softs, ap->a_target); 4974 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 4975 mutex_exit(&softs->io_lock); 4976 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap", 4977 ap->a_target, ap->a_lun); 4978 return (-1); 4979 } 4980 4981 switch (scsi_hba_lookup_capstr(cap)) { 4982 case SCSI_CAP_ARQ: /* auto request sense */ 4983 rval = 1; 4984 break; 4985 case SCSI_CAP_UNTAGGED_QING: 4986 case SCSI_CAP_TAGGED_QING: 4987 rval = 1; 4988 break; 4989 case SCSI_CAP_DMA_MAX: 4990 rval = softs->dma_max; 4991 break; 4992 default: 4993 rval = -1; 4994 break; 4995 } 4996 mutex_exit(&softs->io_lock); 4997 4998 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d", 4999 cap, ap->a_target, ap->a_lun, rval); 5000 return (rval); 5001 } 5002 5003 /*ARGSUSED*/ 5004 static int 5005 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 5006 { 5007 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5008 struct aac_device *dvp; 5009 int rval; 5010 5011 DBCALLED(softs, 2); 5012 5013 /* We don't allow inquiring about capabilities for other targets */ 5014 if (cap == NULL || whom == 0) { 5015 AACDB_PRINT(softs, CE_WARN, 5016 "SetCap> %s not supported: whom=%d", cap, whom); 5017 return (-1); 5018 } 5019 5020 mutex_enter(&softs->io_lock); 5021 dvp = AAC_DEV(softs, ap->a_target); 5022 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 5023 mutex_exit(&softs->io_lock); 5024 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap", 5025 ap->a_target, ap->a_lun); 5026 return (-1); 5027 } 5028 5029 switch (scsi_hba_lookup_capstr(cap)) { 5030 case SCSI_CAP_ARQ: 5031 /* Force auto request sense */ 5032 rval = (value == 1) ? 1 : 0; 5033 break; 5034 case SCSI_CAP_UNTAGGED_QING: 5035 case SCSI_CAP_TAGGED_QING: 5036 rval = (value == 1) ? 
1 : 0; 5037 break; 5038 default: 5039 rval = -1; 5040 break; 5041 } 5042 mutex_exit(&softs->io_lock); 5043 5044 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d", 5045 cap, ap->a_target, ap->a_lun, value, rval); 5046 return (rval); 5047 } 5048 5049 static void 5050 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5051 { 5052 struct aac_cmd *acp = PKT2AC(pkt); 5053 5054 DBCALLED(NULL, 2); 5055 5056 if (acp->sgt) { 5057 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 5058 acp->left_cookien); 5059 } 5060 aac_free_dmamap(acp); 5061 ASSERT(acp->slotp == NULL); 5062 scsi_hba_pkt_free(ap, pkt); 5063 } 5064 5065 int 5066 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp, 5067 struct buf *bp, int flags, int (*cb)(), caddr_t arg) 5068 { 5069 int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP; 5070 uint_t oldcookiec; 5071 int bioerr; 5072 int rval; 5073 5074 oldcookiec = acp->left_cookien; 5075 5076 /* Move window to build s/g map */ 5077 if (acp->total_nwin > 0) { 5078 if (++acp->cur_win < acp->total_nwin) { 5079 off_t off; 5080 size_t len; 5081 5082 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win, 5083 &off, &len, &acp->cookie, &acp->left_cookien); 5084 if (rval == DDI_SUCCESS) 5085 goto get_dma_cookies; 5086 AACDB_PRINT(softs, CE_WARN, 5087 "ddi_dma_getwin() fail %d", rval); 5088 return (AACERR); 5089 } 5090 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer"); 5091 return (AACERR); 5092 } 5093 5094 /* We need to transfer data, so we alloc DMA resources for this pkt */ 5095 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) { 5096 uint_t dma_flags = 0; 5097 struct aac_sge *sge; 5098 5099 /* 5100 * We will still use this point to fake some 5101 * infomation in tran_start 5102 */ 5103 acp->bp = bp; 5104 5105 /* Set dma flags */ 5106 if (BUF_IS_READ(bp)) { 5107 dma_flags |= DDI_DMA_READ; 5108 acp->flags |= AAC_CMD_BUF_READ; 5109 } else { 5110 dma_flags |= DDI_DMA_WRITE; 5111 acp->flags |= AAC_CMD_BUF_WRITE; 5112 } 5113 if (flags & PKT_CONSISTENT) 5114 dma_flags |= DDI_DMA_CONSISTENT; 5115 if (flags & PKT_DMA_PARTIAL) 5116 dma_flags |= DDI_DMA_PARTIAL; 5117 5118 /* Alloc buf dma handle */ 5119 if (!acp->buf_dma_handle) { 5120 rval = ddi_dma_alloc_handle(softs->devinfo_p, 5121 &softs->buf_dma_attr, cb, arg, 5122 &acp->buf_dma_handle); 5123 if (rval != DDI_SUCCESS) { 5124 AACDB_PRINT(softs, CE_WARN, 5125 "Can't allocate DMA handle, errno=%d", 5126 rval); 5127 goto error_out; 5128 } 5129 } 5130 5131 /* Bind buf */ 5132 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) { 5133 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle, 5134 bp, dma_flags, cb, arg, &acp->cookie, 5135 &acp->left_cookien); 5136 } else { 5137 size_t bufsz; 5138 5139 AACDB_PRINT_TRAN(softs, 5140 "non-aligned buffer: addr=0x%p, cnt=%lu", 5141 (void *)bp->b_un.b_addr, bp->b_bcount); 5142 if (bp->b_flags & (B_PAGEIO|B_PHYS)) 5143 bp_mapin(bp); 5144 5145 rval = ddi_dma_mem_alloc(acp->buf_dma_handle, 5146 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN), 5147 &softs->acc_attr, DDI_DMA_STREAMING, 5148 cb, arg, &acp->abp, &bufsz, &acp->abh); 5149 5150 if (rval != DDI_SUCCESS) { 5151 AACDB_PRINT(softs, CE_NOTE, 5152 "Cannot alloc DMA to non-aligned buf"); 5153 bioerr = 0; 5154 goto error_out; 5155 } 5156 5157 if (acp->flags & AAC_CMD_BUF_WRITE) 5158 ddi_rep_put8(acp->abh, 5159 (uint8_t *)bp->b_un.b_addr, 5160 (uint8_t *)acp->abp, bp->b_bcount, 5161 DDI_DEV_AUTOINCR); 5162 5163 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle, 5164 NULL, acp->abp, bufsz, dma_flags, cb, 
arg, 5165 &acp->cookie, &acp->left_cookien); 5166 } 5167 5168 switch (rval) { 5169 case DDI_DMA_PARTIAL_MAP: 5170 if (ddi_dma_numwin(acp->buf_dma_handle, 5171 &acp->total_nwin) == DDI_FAILURE) { 5172 AACDB_PRINT(softs, CE_WARN, 5173 "Cannot get number of DMA windows"); 5174 bioerr = 0; 5175 goto error_out; 5176 } 5177 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5178 acp->left_cookien); 5179 acp->cur_win = 0; 5180 break; 5181 5182 case DDI_DMA_MAPPED: 5183 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5184 acp->left_cookien); 5185 acp->cur_win = 0; 5186 acp->total_nwin = 1; 5187 break; 5188 5189 case DDI_DMA_NORESOURCES: 5190 bioerr = 0; 5191 AACDB_PRINT(softs, CE_WARN, 5192 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES"); 5193 goto error_out; 5194 case DDI_DMA_BADATTR: 5195 case DDI_DMA_NOMAPPING: 5196 bioerr = EFAULT; 5197 AACDB_PRINT(softs, CE_WARN, 5198 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING"); 5199 goto error_out; 5200 case DDI_DMA_TOOBIG: 5201 bioerr = EINVAL; 5202 AACDB_PRINT(softs, CE_WARN, 5203 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)", 5204 bp->b_bcount); 5205 goto error_out; 5206 default: 5207 bioerr = EINVAL; 5208 AACDB_PRINT(softs, CE_WARN, 5209 "Cannot bind buf for DMA: %d", rval); 5210 goto error_out; 5211 } 5212 acp->flags |= AAC_CMD_DMA_VALID; 5213 5214 get_dma_cookies: 5215 ASSERT(acp->left_cookien > 0); 5216 if (acp->left_cookien > softs->aac_sg_tablesize) { 5217 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d", 5218 acp->left_cookien); 5219 bioerr = EINVAL; 5220 goto error_out; 5221 } 5222 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) { 5223 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 5224 oldcookiec); 5225 acp->sgt = NULL; 5226 } 5227 if (acp->sgt == NULL) { 5228 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \ 5229 acp->left_cookien, kf); 5230 if (acp->sgt == NULL) { 5231 AACDB_PRINT(softs, CE_WARN, 5232 "sgt kmem_alloc fail"); 5233 bioerr = ENOMEM; 5234 goto error_out; 5235 } 5236 } 5237 5238 sge = &acp->sgt[0]; 5239 sge->bcount = acp->cookie.dmac_size; 5240 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5241 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5242 acp->bcount = acp->cookie.dmac_size; 5243 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) { 5244 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie); 5245 sge->bcount = acp->cookie.dmac_size; 5246 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5247 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5248 acp->bcount += acp->cookie.dmac_size; 5249 } 5250 5251 /* 5252 * Note: The old DMA engine do not correctly handle 5253 * dma_attr_maxxfer attribute. So we have to ensure 5254 * it by ourself. 5255 */ 5256 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) { 5257 AACDB_PRINT(softs, CE_NOTE, 5258 "large xfer size received %d\n", acp->bcount); 5259 bioerr = EINVAL; 5260 goto error_out; 5261 } 5262 5263 acp->total_xfer += acp->bcount; 5264 5265 if (acp->pkt) { 5266 /* Return remaining byte count */ 5267 if (acp->total_xfer <= bp->b_bcount) { 5268 acp->pkt->pkt_resid = bp->b_bcount - \ 5269 acp->total_xfer; 5270 } else { 5271 /* 5272 * Allocated DMA size is greater than the buf 5273 * size of bp. This is caused by devices like 5274 * tape. we have extra bytes allocated, but 5275 * the packet residual has to stay correct. 
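 * (Typically the extra bytes come from rounding the bounce buffer
 * up to AAC_DMA_ALIGN in the ddi_dma_mem_alloc() call above.)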
5276 */ 5277 acp->pkt->pkt_resid = 0; 5278 } 5279 AACDB_PRINT_TRAN(softs, 5280 "bp=0x%p, xfered=%d/%d, resid=%d", 5281 (void *)bp->b_un.b_addr, (int)acp->total_xfer, 5282 (int)bp->b_bcount, (int)acp->pkt->pkt_resid); 5283 } 5284 } 5285 return (AACOK); 5286 5287 error_out: 5288 bioerror(bp, bioerr); 5289 return (AACERR); 5290 } 5291 5292 static struct scsi_pkt * 5293 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 5294 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags, 5295 int (*callback)(), caddr_t arg) 5296 { 5297 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5298 struct aac_cmd *acp, *new_acp; 5299 5300 DBCALLED(softs, 2); 5301 5302 /* Allocate pkt */ 5303 if (pkt == NULL) { 5304 int slen; 5305 5306 /* Force auto request sense */ 5307 slen = (statuslen > softs->slen) ? statuslen : softs->slen; 5308 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen, 5309 slen, tgtlen, sizeof (struct aac_cmd), callback, arg); 5310 if (pkt == NULL) { 5311 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed"); 5312 return (NULL); 5313 } 5314 acp = new_acp = PKT2AC(pkt); 5315 acp->pkt = pkt; 5316 acp->cmdlen = cmdlen; 5317 5318 if (ap->a_target < AAC_MAX_LD) { 5319 acp->dvp = &softs->containers[ap->a_target].dev; 5320 acp->aac_cmd_fib = softs->aac_cmd_fib; 5321 acp->ac_comp = aac_ld_complete; 5322 } else { 5323 _NOTE(ASSUMING_PROTECTED(softs->nondasds)) 5324 5325 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev; 5326 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi; 5327 acp->ac_comp = aac_pd_complete; 5328 } 5329 } else { 5330 acp = PKT2AC(pkt); 5331 new_acp = NULL; 5332 } 5333 5334 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK) 5335 return (pkt); 5336 5337 if (new_acp) 5338 aac_tran_destroy_pkt(ap, pkt); 5339 return (NULL); 5340 } 5341 5342 /* 5343 * tran_sync_pkt(9E) - explicit DMA synchronization 5344 */ 5345 /*ARGSUSED*/ 5346 static void 5347 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5348 { 5349 struct aac_cmd *acp = PKT2AC(pkt); 5350 5351 DBCALLED(NULL, 2); 5352 5353 if (aac_dma_sync_ac(acp) != AACOK) 5354 ddi_fm_service_impact( 5355 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p, 5356 DDI_SERVICE_UNAFFECTED); 5357 } 5358 5359 /* 5360 * tran_dmafree(9E) - deallocate DMA resources allocated for command 5361 */ 5362 /*ARGSUSED*/ 5363 static void 5364 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 5365 { 5366 struct aac_cmd *acp = PKT2AC(pkt); 5367 5368 DBCALLED(NULL, 2); 5369 5370 aac_free_dmamap(acp); 5371 } 5372 5373 static int 5374 aac_do_quiesce(struct aac_softstate *softs) 5375 { 5376 aac_hold_bus(softs, AAC_IOCMD_ASYNC); 5377 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 5378 aac_start_drain(softs); 5379 do { 5380 if (cv_wait_sig(&softs->drain_cv, 5381 &softs->io_lock) == 0) { 5382 /* Quiesce has been interrupted */ 5383 aac_stop_drain(softs); 5384 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5385 aac_start_waiting_io(softs); 5386 return (AACERR); 5387 } 5388 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]); 5389 aac_stop_drain(softs); 5390 } 5391 5392 softs->state |= AAC_STATE_QUIESCED; 5393 return (AACOK); 5394 } 5395 5396 static int 5397 aac_tran_quiesce(dev_info_t *dip) 5398 { 5399 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5400 int rval; 5401 5402 DBCALLED(softs, 1); 5403 5404 mutex_enter(&softs->io_lock); 5405 if (aac_do_quiesce(softs) == AACOK) 5406 rval = 0; 5407 else 5408 rval = 1; 5409 mutex_exit(&softs->io_lock); 5410 return (rval); 5411 } 5412 5413 static int 5414 aac_do_unquiesce(struct 
aac_softstate *softs) 5415 { 5416 softs->state &= ~AAC_STATE_QUIESCED; 5417 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5418 5419 aac_start_waiting_io(softs); 5420 return (AACOK); 5421 } 5422 5423 static int 5424 aac_tran_unquiesce(dev_info_t *dip) 5425 { 5426 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5427 int rval; 5428 5429 DBCALLED(softs, 1); 5430 5431 mutex_enter(&softs->io_lock); 5432 if (aac_do_unquiesce(softs) == AACOK) 5433 rval = 0; 5434 else 5435 rval = 1; 5436 mutex_exit(&softs->io_lock); 5437 return (rval); 5438 } 5439 5440 static int 5441 aac_hba_setup(struct aac_softstate *softs) 5442 { 5443 scsi_hba_tran_t *hba_tran; 5444 int rval; 5445 5446 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP); 5447 if (hba_tran == NULL) 5448 return (AACERR); 5449 hba_tran->tran_hba_private = softs; 5450 hba_tran->tran_tgt_init = aac_tran_tgt_init; 5451 hba_tran->tran_tgt_free = aac_tran_tgt_free; 5452 hba_tran->tran_tgt_probe = scsi_hba_probe; 5453 hba_tran->tran_start = aac_tran_start; 5454 hba_tran->tran_getcap = aac_tran_getcap; 5455 hba_tran->tran_setcap = aac_tran_setcap; 5456 hba_tran->tran_init_pkt = aac_tran_init_pkt; 5457 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt; 5458 hba_tran->tran_reset = aac_tran_reset; 5459 hba_tran->tran_abort = aac_tran_abort; 5460 hba_tran->tran_sync_pkt = aac_tran_sync_pkt; 5461 hba_tran->tran_dmafree = aac_tran_dmafree; 5462 hba_tran->tran_quiesce = aac_tran_quiesce; 5463 hba_tran->tran_unquiesce = aac_tran_unquiesce; 5464 hba_tran->tran_bus_config = aac_tran_bus_config; 5465 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr, 5466 hba_tran, 0); 5467 if (rval != DDI_SUCCESS) { 5468 scsi_hba_tran_free(hba_tran); 5469 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed"); 5470 return (AACERR); 5471 } 5472 5473 softs->hba_tran = hba_tran; 5474 return (AACOK); 5475 } 5476 5477 /* 5478 * FIB setup operations 5479 */ 5480 5481 /* 5482 * Init FIB header 5483 */ 5484 static void 5485 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_cmd *acp, 5486 uint16_t cmd) 5487 { 5488 struct aac_slot *slotp = acp->slotp; 5489 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5490 struct aac_fib *fibp = slotp->fibp; 5491 uint32_t xfer_state; 5492 5493 xfer_state = 5494 AAC_FIBSTATE_HOSTOWNED | 5495 AAC_FIBSTATE_INITIALISED | 5496 AAC_FIBSTATE_EMPTY | 5497 AAC_FIBSTATE_FROMHOST | 5498 AAC_FIBSTATE_REXPECTED | 5499 AAC_FIBSTATE_NORM; 5500 5501 if (!(acp->flags & AAC_CMD_SYNC)) { 5502 xfer_state |= 5503 AAC_FIBSTATE_ASYNC | 5504 AAC_FIBSTATE_FAST_RESPONSE; /* enable fast io */ 5505 } 5506 5507 ddi_put32(acc, &fibp->Header.XferState, xfer_state); 5508 ddi_put16(acc, &fibp->Header.Command, cmd); 5509 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB); 5510 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */ 5511 ddi_put16(acc, &fibp->Header.Size, acp->fib_size); 5512 ddi_put16(acc, &fibp->Header.SenderSize, softs->aac_max_fib_size); 5513 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2)); 5514 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5515 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */ 5516 } 5517 5518 /* 5519 * Init FIB for raw IO command 5520 */ 5521 static void 5522 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp) 5523 { 5524 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5525 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0]; 5526 struct aac_sg_entryraw *sgp; 5527 struct aac_sge *sge; 5528 5529 /* Calculate FIB size 
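 * (header plus one aac_raw_io, which appears to already embed the
 * first raw SG entry, hence only left_cookien - 1 additional
 * aac_sg_entryraw elements; the brw64 and brw variants below use
 * the same formula with their own SG entry types)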
*/ 5530 acp->fib_size = sizeof (struct aac_fib_header) + \ 5531 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \ 5532 sizeof (struct aac_sg_entryraw); 5533 5534 aac_cmd_fib_header(softs, acp, RawIo); 5535 5536 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0); 5537 ddi_put16(acc, &io->BpTotal, 0); 5538 ddi_put16(acc, &io->BpComplete, 0); 5539 5540 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno)); 5541 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno)); 5542 ddi_put16(acc, &io->ContainerId, 5543 ((struct aac_container *)acp->dvp)->cid); 5544 5545 /* Fill SG table */ 5546 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien); 5547 ddi_put32(acc, &io->ByteCount, acp->bcount); 5548 5549 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0]; 5550 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5551 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5552 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5553 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5554 sgp->Next = 0; 5555 sgp->Prev = 0; 5556 sgp->Flags = 0; 5557 } 5558 } 5559 5560 /* Init FIB for 64-bit block IO command */ 5561 static void 5562 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp) 5563 { 5564 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5565 struct aac_blockread64 *br = (struct aac_blockread64 *) \ 5566 &acp->slotp->fibp->data[0]; 5567 struct aac_sg_entry64 *sgp; 5568 struct aac_sge *sge; 5569 5570 acp->fib_size = sizeof (struct aac_fib_header) + \ 5571 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \ 5572 sizeof (struct aac_sg_entry64); 5573 5574 aac_cmd_fib_header(softs, acp, ContainerCommand64); 5575 5576 /* 5577 * The definitions for aac_blockread64 and aac_blockwrite64 5578 * are the same. 5579 */ 5580 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5581 ddi_put16(acc, &br->ContainerId, 5582 ((struct aac_container *)acp->dvp)->cid); 5583 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ? 
5584 VM_CtHostRead64 : VM_CtHostWrite64); 5585 ddi_put16(acc, &br->Pad, 0); 5586 ddi_put16(acc, &br->Flags, 0); 5587 5588 /* Fill SG table */ 5589 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien); 5590 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE); 5591 5592 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0]; 5593 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5594 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5595 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5596 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5597 } 5598 } 5599 5600 /* Init FIB for block IO command */ 5601 static void 5602 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp) 5603 { 5604 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5605 struct aac_blockread *br = (struct aac_blockread *) \ 5606 &acp->slotp->fibp->data[0]; 5607 struct aac_sg_entry *sgp; 5608 struct aac_sge *sge = &acp->sgt[0]; 5609 5610 if (acp->flags & AAC_CMD_BUF_READ) { 5611 acp->fib_size = sizeof (struct aac_fib_header) + \ 5612 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \ 5613 sizeof (struct aac_sg_entry); 5614 5615 ddi_put32(acc, &br->Command, VM_CtBlockRead); 5616 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien); 5617 sgp = &br->SgMap.SgEntry[0]; 5618 } else { 5619 struct aac_blockwrite *bw = (struct aac_blockwrite *)br; 5620 5621 acp->fib_size = sizeof (struct aac_fib_header) + \ 5622 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \ 5623 sizeof (struct aac_sg_entry); 5624 5625 ddi_put32(acc, &bw->Command, VM_CtBlockWrite); 5626 ddi_put32(acc, &bw->Stable, CUNSTABLE); 5627 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien); 5628 sgp = &bw->SgMap.SgEntry[0]; 5629 } 5630 aac_cmd_fib_header(softs, acp, ContainerCommand); 5631 5632 /* 5633 * aac_blockread and aac_blockwrite have the similar 5634 * structure head, so use br for bw here 5635 */ 5636 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5637 ddi_put32(acc, &br->ContainerId, 5638 ((struct aac_container *)acp->dvp)->cid); 5639 ddi_put32(acc, &br->ByteCount, acp->bcount); 5640 5641 /* Fill SG table */ 5642 for (sge = &acp->sgt[0]; 5643 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5644 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5645 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5646 } 5647 } 5648 5649 /*ARGSUSED*/ 5650 void 5651 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp) 5652 { 5653 struct aac_slot *slotp = acp->slotp; 5654 struct aac_fib *fibp = slotp->fibp; 5655 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5656 5657 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp, 5658 acp->fib_size, /* only copy data of needed length */ 5659 DDI_DEV_AUTOINCR); 5660 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5661 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2); 5662 } 5663 5664 static void 5665 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp) 5666 { 5667 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5668 struct aac_synchronize_command *sync = 5669 (struct aac_synchronize_command *)&acp->slotp->fibp->data[0]; 5670 5671 acp->fib_size = AAC_FIB_SIZEOF(struct aac_synchronize_command); 5672 5673 aac_cmd_fib_header(softs, acp, ContainerCommand); 5674 ddi_put32(acc, &sync->Command, VM_ContainerConfig); 5675 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE); 5676 ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid); 5677 ddi_put32(acc, &sync->Count, 5678 sizeof 
(((struct aac_synchronize_reply *)0)->Data)); 5679 } 5680 5681 /* 5682 * Start/Stop unit (Power Management) 5683 */ 5684 static void 5685 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp) 5686 { 5687 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5688 struct aac_Container *cmd = 5689 (struct aac_Container *)&acp->slotp->fibp->data[0]; 5690 union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp; 5691 5692 acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container); 5693 5694 aac_cmd_fib_header(softs, acp, ContainerCommand); 5695 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 5696 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 5697 ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT); 5698 ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \ 5699 AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT); 5700 ddi_put32(acc, &cmd->CTCommand.param[1], 5701 ((struct aac_container *)acp->dvp)->cid); 5702 ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1); 5703 } 5704 5705 /* 5706 * Init FIB for pass-through SCMD 5707 */ 5708 static void 5709 aac_cmd_fib_srb(struct aac_cmd *acp) 5710 { 5711 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5712 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5713 uint8_t *cdb; 5714 5715 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi); 5716 ddi_put32(acc, &srb->retry_limit, 0); 5717 ddi_put32(acc, &srb->cdb_size, acp->cmdlen); 5718 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */ 5719 if (acp->fibp == NULL) { 5720 if (acp->flags & AAC_CMD_BUF_READ) 5721 ddi_put32(acc, &srb->flags, SRB_DataIn); 5722 else if (acp->flags & AAC_CMD_BUF_WRITE) 5723 ddi_put32(acc, &srb->flags, SRB_DataOut); 5724 ddi_put32(acc, &srb->channel, 5725 ((struct aac_nondasd *)acp->dvp)->bus); 5726 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid); 5727 ddi_put32(acc, &srb->lun, 0); 5728 cdb = acp->pkt->pkt_cdbp; 5729 } else { 5730 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0]; 5731 5732 ddi_put32(acc, &srb->flags, srb0->flags); 5733 ddi_put32(acc, &srb->channel, srb0->channel); 5734 ddi_put32(acc, &srb->id, srb0->id); 5735 ddi_put32(acc, &srb->lun, srb0->lun); 5736 cdb = srb0->cdb; 5737 } 5738 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR); 5739 } 5740 5741 static void 5742 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp) 5743 { 5744 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5745 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5746 struct aac_sg_entry *sgp; 5747 struct aac_sge *sge; 5748 5749 acp->fib_size = sizeof (struct aac_fib_header) + \ 5750 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5751 acp->left_cookien * sizeof (struct aac_sg_entry); 5752 5753 /* Fill FIB and SRB headers, and copy cdb */ 5754 aac_cmd_fib_header(softs, acp, ScsiPortCommand); 5755 aac_cmd_fib_srb(acp); 5756 5757 /* Fill SG table */ 5758 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5759 ddi_put32(acc, &srb->count, acp->bcount); 5760 5761 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0]; 5762 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5763 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5764 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5765 } 5766 } 5767 5768 static void 5769 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp) 5770 { 5771 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5772 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5773 struct aac_sg_entry64 
*sgp; 5774 struct aac_sge *sge; 5775 5776 acp->fib_size = sizeof (struct aac_fib_header) + \ 5777 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5778 acp->left_cookien * sizeof (struct aac_sg_entry64); 5779 5780 /* Fill FIB and SRB headers, and copy cdb */ 5781 aac_cmd_fib_header(softs, acp, ScsiPortCommandU64); 5782 aac_cmd_fib_srb(acp); 5783 5784 /* Fill SG table */ 5785 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5786 ddi_put32(acc, &srb->count, acp->bcount); 5787 5788 for (sge = &acp->sgt[0], 5789 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0]; 5790 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5791 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5792 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5793 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5794 } 5795 } 5796 5797 static int 5798 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 5799 { 5800 struct aac_slot *slotp; 5801 5802 if (slotp = aac_get_slot(softs)) { 5803 acp->slotp = slotp; 5804 slotp->acp = acp; 5805 acp->aac_cmd_fib(softs, acp); 5806 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, 5807 DDI_DMA_SYNC_FORDEV); 5808 return (AACOK); 5809 } 5810 return (AACERR); 5811 } 5812 5813 static int 5814 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp) 5815 { 5816 struct aac_device *dvp = acp->dvp; 5817 int q = AAC_CMDQ(acp); 5818 5819 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) { 5820 if (dvp) { 5821 if (dvp->ncmds[q] < dvp->throttle[q]) { 5822 if (!(acp->flags & AAC_CMD_NTAG) || 5823 dvp->ncmds[q] == 0) { 5824 return (aac_cmd_slot_bind(softs, acp)); 5825 } 5826 ASSERT(q == AAC_CMDQ_ASYNC); 5827 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC, 5828 AAC_THROTTLE_DRAIN); 5829 } 5830 } else { 5831 return (aac_cmd_slot_bind(softs, acp)); 5832 } 5833 } 5834 return (AACERR); 5835 } 5836 5837 static int 5838 aac_sync_fib_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 5839 { 5840 struct aac_slot *slotp; 5841 5842 while (softs->sync_ac.slotp) 5843 cv_wait(&softs->sync_fib_cv, &softs->io_lock); 5844 5845 if (slotp = aac_get_slot(softs)) { 5846 ASSERT(acp->slotp == NULL); 5847 5848 acp->slotp = slotp; 5849 slotp->acp = acp; 5850 return (AACOK); 5851 } 5852 return (AACERR); 5853 } 5854 5855 static void 5856 aac_sync_fib_slot_release(struct aac_softstate *softs, struct aac_cmd *acp) 5857 { 5858 ASSERT(acp->slotp); 5859 5860 aac_release_slot(softs, acp->slotp); 5861 acp->slotp->acp = NULL; 5862 acp->slotp = NULL; 5863 5864 cv_signal(&softs->sync_fib_cv); 5865 } 5866 5867 static void 5868 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp) 5869 { 5870 struct aac_slot *slotp = acp->slotp; 5871 int q = AAC_CMDQ(acp); 5872 int rval; 5873 5874 /* Set ac and pkt */ 5875 if (acp->pkt) { /* ac from ioctl has no pkt */ 5876 acp->pkt->pkt_state |= 5877 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD; 5878 } 5879 if (acp->timeout) /* 0 indicates no timeout */ 5880 acp->timeout += aac_timebase + aac_tick; 5881 5882 if (acp->dvp) 5883 acp->dvp->ncmds[q]++; 5884 softs->bus_ncmds[q]++; 5885 aac_cmd_enqueue(&softs->q_busy, acp); 5886 5887 AACDB_PRINT_FIB(softs, slotp); 5888 5889 if (softs->flags & AAC_FLAGS_NEW_COMM) { 5890 rval = aac_send_command(softs, slotp); 5891 } else { 5892 /* 5893 * If fib can not be enqueued, the adapter is in an abnormal 5894 * state, there will be no interrupt to us. 
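* If that happens the call below returns non-AACOK and the command is
* failed with CMD_INCOMPLETE rather than left waiting for a completion
* interrupt that will never arrive.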
5895 */ 5896 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q, 5897 slotp->fib_phyaddr, acp->fib_size); 5898 } 5899 5900 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) 5901 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 5902 5903 /* 5904 * NOTE: We send command only when slots availabe, so should never 5905 * reach here. 5906 */ 5907 if (rval != AACOK) { 5908 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed"); 5909 if (acp->pkt) { 5910 acp->pkt->pkt_state &= ~STATE_SENT_CMD; 5911 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0); 5912 } 5913 aac_end_io(softs, acp); 5914 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB))) 5915 ddi_trigger_softintr(softs->softint_id); 5916 } 5917 } 5918 5919 static void 5920 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q) 5921 { 5922 struct aac_cmd *acp, *next_acp; 5923 5924 /* Serve as many waiting io's as possible */ 5925 for (acp = q->q_head; acp; acp = next_acp) { 5926 next_acp = acp->next; 5927 if (aac_bind_io(softs, acp) == AACOK) { 5928 aac_cmd_delete(q, acp); 5929 aac_start_io(softs, acp); 5930 } 5931 if (softs->free_io_slot_head == NULL) 5932 break; 5933 } 5934 } 5935 5936 static void 5937 aac_start_waiting_io(struct aac_softstate *softs) 5938 { 5939 /* 5940 * Sync FIB io is served before async FIB io so that io requests 5941 * sent by interactive userland commands get responded asap. 5942 */ 5943 if (softs->q_wait[AAC_CMDQ_SYNC].q_head) 5944 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]); 5945 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head) 5946 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]); 5947 } 5948 5949 static void 5950 aac_drain_comp_q(struct aac_softstate *softs) 5951 { 5952 struct aac_cmd *acp; 5953 struct scsi_pkt *pkt; 5954 5955 /*CONSTCOND*/ 5956 while (1) { 5957 mutex_enter(&softs->q_comp_mutex); 5958 acp = aac_cmd_dequeue(&softs->q_comp); 5959 mutex_exit(&softs->q_comp_mutex); 5960 if (acp != NULL) { 5961 ASSERT(acp->pkt != NULL); 5962 pkt = acp->pkt; 5963 5964 if (pkt->pkt_reason == CMD_CMPLT) { 5965 /* 5966 * Consistent packets need to be sync'ed first 5967 */ 5968 if ((acp->flags & AAC_CMD_CONSISTENT) && 5969 (acp->flags & AAC_CMD_BUF_READ)) { 5970 if (aac_dma_sync_ac(acp) != AACOK) { 5971 ddi_fm_service_impact( 5972 softs->devinfo_p, 5973 DDI_SERVICE_UNAFFECTED); 5974 pkt->pkt_reason = CMD_TRAN_ERR; 5975 pkt->pkt_statistics = 0; 5976 } 5977 } 5978 if ((aac_check_acc_handle(softs-> \ 5979 comm_space_acc_handle) != DDI_SUCCESS) || 5980 (aac_check_acc_handle(softs-> \ 5981 pci_mem_handle) != DDI_SUCCESS)) { 5982 ddi_fm_service_impact(softs->devinfo_p, 5983 DDI_SERVICE_UNAFFECTED); 5984 ddi_fm_acc_err_clear(softs-> \ 5985 pci_mem_handle, DDI_FME_VER0); 5986 pkt->pkt_reason = CMD_TRAN_ERR; 5987 pkt->pkt_statistics = 0; 5988 } 5989 if (aac_check_dma_handle(softs-> \ 5990 comm_space_dma_handle) != DDI_SUCCESS) { 5991 ddi_fm_service_impact(softs->devinfo_p, 5992 DDI_SERVICE_UNAFFECTED); 5993 pkt->pkt_reason = CMD_TRAN_ERR; 5994 pkt->pkt_statistics = 0; 5995 } 5996 } 5997 scsi_hba_pkt_comp(pkt); 5998 } else { 5999 break; 6000 } 6001 } 6002 } 6003 6004 static int 6005 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp) 6006 { 6007 size_t rlen; 6008 ddi_dma_cookie_t cookie; 6009 uint_t cookien; 6010 6011 /* Allocate FIB dma resource */ 6012 if (ddi_dma_alloc_handle( 6013 softs->devinfo_p, 6014 &softs->addr_dma_attr, 6015 DDI_DMA_SLEEP, 6016 NULL, 6017 &slotp->fib_dma_handle) != DDI_SUCCESS) { 6018 AACDB_PRINT(softs, CE_WARN, 6019 "Cannot alloc dma handle for 
slot fib area"); 6020 goto error; 6021 } 6022 if (ddi_dma_mem_alloc( 6023 slotp->fib_dma_handle, 6024 softs->aac_max_fib_size, 6025 &softs->acc_attr, 6026 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 6027 DDI_DMA_SLEEP, 6028 NULL, 6029 (caddr_t *)&slotp->fibp, 6030 &rlen, 6031 &slotp->fib_acc_handle) != DDI_SUCCESS) { 6032 AACDB_PRINT(softs, CE_WARN, 6033 "Cannot alloc mem for slot fib area"); 6034 goto error; 6035 } 6036 if (ddi_dma_addr_bind_handle( 6037 slotp->fib_dma_handle, 6038 NULL, 6039 (caddr_t)slotp->fibp, 6040 softs->aac_max_fib_size, 6041 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 6042 DDI_DMA_SLEEP, 6043 NULL, 6044 &cookie, 6045 &cookien) != DDI_DMA_MAPPED) { 6046 AACDB_PRINT(softs, CE_WARN, 6047 "dma bind failed for slot fib area"); 6048 goto error; 6049 } 6050 6051 /* Check dma handles allocated in fib attach */ 6052 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) { 6053 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 6054 goto error; 6055 } 6056 6057 /* Check acc handles allocated in fib attach */ 6058 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) { 6059 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 6060 goto error; 6061 } 6062 6063 slotp->fib_phyaddr = cookie.dmac_laddress; 6064 return (AACOK); 6065 6066 error: 6067 if (slotp->fib_acc_handle) { 6068 ddi_dma_mem_free(&slotp->fib_acc_handle); 6069 slotp->fib_acc_handle = NULL; 6070 } 6071 if (slotp->fib_dma_handle) { 6072 ddi_dma_free_handle(&slotp->fib_dma_handle); 6073 slotp->fib_dma_handle = NULL; 6074 } 6075 return (AACERR); 6076 } 6077 6078 static void 6079 aac_free_fib(struct aac_slot *slotp) 6080 { 6081 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle); 6082 ddi_dma_mem_free(&slotp->fib_acc_handle); 6083 slotp->fib_acc_handle = NULL; 6084 ddi_dma_free_handle(&slotp->fib_dma_handle); 6085 slotp->fib_dma_handle = NULL; 6086 slotp->fib_phyaddr = 0; 6087 } 6088 6089 static void 6090 aac_alloc_fibs(struct aac_softstate *softs) 6091 { 6092 int i; 6093 struct aac_slot *slotp; 6094 6095 for (i = 0; i < softs->total_slots && 6096 softs->total_fibs < softs->total_slots; i++) { 6097 slotp = &(softs->io_slot[i]); 6098 if (slotp->fib_phyaddr) 6099 continue; 6100 if (aac_alloc_fib(softs, slotp) != AACOK) 6101 break; 6102 6103 /* Insert the slot to the free slot list */ 6104 aac_release_slot(softs, slotp); 6105 softs->total_fibs++; 6106 } 6107 } 6108 6109 static void 6110 aac_destroy_fibs(struct aac_softstate *softs) 6111 { 6112 struct aac_slot *slotp; 6113 6114 while ((slotp = softs->free_io_slot_head) != NULL) { 6115 ASSERT(slotp->fib_phyaddr); 6116 softs->free_io_slot_head = slotp->next; 6117 aac_free_fib(slotp); 6118 ASSERT(slotp->index == (slotp - softs->io_slot)); 6119 softs->total_fibs--; 6120 } 6121 ASSERT(softs->total_fibs == 0); 6122 } 6123 6124 static int 6125 aac_create_slots(struct aac_softstate *softs) 6126 { 6127 int i; 6128 6129 softs->total_slots = softs->aac_max_fibs; 6130 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \ 6131 softs->total_slots, KM_SLEEP); 6132 if (softs->io_slot == NULL) { 6133 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot"); 6134 return (AACERR); 6135 } 6136 for (i = 0; i < softs->total_slots; i++) 6137 softs->io_slot[i].index = i; 6138 softs->free_io_slot_head = NULL; 6139 softs->total_fibs = 0; 6140 return (AACOK); 6141 } 6142 6143 static void 6144 aac_destroy_slots(struct aac_softstate *softs) 6145 { 6146 ASSERT(softs->free_io_slot_head == NULL); 6147 6148 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \ 6149 softs->total_slots); 
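/* the size freed above must match the kmem_zalloc() done in aac_create_slots() */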
6150 softs->io_slot = NULL; 6151 softs->total_slots = 0; 6152 } 6153 6154 struct aac_slot * 6155 aac_get_slot(struct aac_softstate *softs) 6156 { 6157 struct aac_slot *slotp; 6158 6159 if ((slotp = softs->free_io_slot_head) != NULL) { 6160 softs->free_io_slot_head = slotp->next; 6161 slotp->next = NULL; 6162 } 6163 return (slotp); 6164 } 6165 6166 static void 6167 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp) 6168 { 6169 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots)); 6170 ASSERT(slotp == &softs->io_slot[slotp->index]); 6171 6172 slotp->acp = NULL; 6173 slotp->next = softs->free_io_slot_head; 6174 softs->free_io_slot_head = slotp; 6175 } 6176 6177 int 6178 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp) 6179 { 6180 if (aac_bind_io(softs, acp) == AACOK) 6181 aac_start_io(softs, acp); 6182 else 6183 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp); 6184 6185 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) 6186 return (TRAN_ACCEPT); 6187 /* 6188 * Because sync FIB is always 512 bytes and used for critical 6189 * functions, async FIB is used for poll IO. 6190 */ 6191 if (acp->flags & AAC_CMD_NO_INTR) { 6192 if (aac_do_poll_io(softs, acp) == AACOK) 6193 return (TRAN_ACCEPT); 6194 } else { 6195 if (aac_do_sync_io(softs, acp) == AACOK) 6196 return (TRAN_ACCEPT); 6197 } 6198 return (TRAN_BADPKT); 6199 } 6200 6201 static int 6202 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp) 6203 { 6204 int (*intr_handler)(struct aac_softstate *); 6205 6206 /* 6207 * Interrupt is disabled, we have to poll the adapter by ourselves. 6208 */ 6209 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 6210 aac_process_intr_new : aac_process_intr_old; 6211 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) { 6212 int i = AAC_POLL_TIME * 1000; 6213 6214 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i); 6215 if (i == 0) 6216 aac_cmd_timeout(softs, acp); 6217 } 6218 6219 ddi_trigger_softintr(softs->softint_id); 6220 6221 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR)) 6222 return (AACOK); 6223 return (AACERR); 6224 } 6225 6226 static int 6227 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp) 6228 { 6229 ASSERT(softs && acp); 6230 6231 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) 6232 cv_wait(&softs->event, &softs->io_lock); 6233 6234 if (acp->flags & AAC_CMD_CMPLT) 6235 return (AACOK); 6236 return (AACERR); 6237 } 6238 6239 static int 6240 aac_dma_sync_ac(struct aac_cmd *acp) 6241 { 6242 if (acp->buf_dma_handle) { 6243 if (acp->flags & AAC_CMD_BUF_WRITE) { 6244 if (acp->abp != NULL) 6245 ddi_rep_put8(acp->abh, 6246 (uint8_t *)acp->bp->b_un.b_addr, 6247 (uint8_t *)acp->abp, acp->bp->b_bcount, 6248 DDI_DEV_AUTOINCR); 6249 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6250 DDI_DMA_SYNC_FORDEV); 6251 } else { 6252 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6253 DDI_DMA_SYNC_FORCPU); 6254 if (aac_check_dma_handle(acp->buf_dma_handle) != 6255 DDI_SUCCESS) 6256 return (AACERR); 6257 if (acp->abp != NULL) 6258 ddi_rep_get8(acp->abh, 6259 (uint8_t *)acp->bp->b_un.b_addr, 6260 (uint8_t *)acp->abp, acp->bp->b_bcount, 6261 DDI_DEV_AUTOINCR); 6262 } 6263 } 6264 return (AACOK); 6265 } 6266 6267 /* 6268 * The following function comes from Adaptec: 6269 * 6270 * When driver sees a particular event that means containers are changed, it 6271 * will rescan containers. However a change may not be complete until some 6272 * other event is received. 
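(In this driver the pending event type is recorded in softs->devcfg_wait_on,
and the rescan is done through aac_probe_containers() once that event arrives.)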
For example, creating or deleting an array will 6273 * incur as many as six AifEnConfigChange events which would generate six 6274 * container rescans. To diminish rescans, driver set a flag to wait for 6275 * another particular event. When sees that events come in, it will do rescan. 6276 */ 6277 static int 6278 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp) 6279 { 6280 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 6281 uint16_t fib_command; 6282 struct aac_aif_command *aif; 6283 int en_type; 6284 int devcfg_needed; 6285 int current, next; 6286 6287 fib_command = LE_16(fibp->Header.Command); 6288 if (fib_command != AifRequest) { 6289 cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x", 6290 fib_command); 6291 return (AACERR); 6292 } 6293 6294 /* Update internal container state */ 6295 aif = (struct aac_aif_command *)&fibp->data[0]; 6296 6297 AACDB_PRINT_AIF(softs, aif); 6298 devcfg_needed = 0; 6299 en_type = LE_32((uint32_t)aif->data.EN.type); 6300 6301 switch (LE_32((uint32_t)aif->command)) { 6302 case AifCmdDriverNotify: { 6303 int cid = LE_32(aif->data.EN.data.ECC.container[0]); 6304 6305 switch (en_type) { 6306 case AifDenMorphComplete: 6307 case AifDenVolumeExtendComplete: 6308 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev)) 6309 softs->devcfg_wait_on = AifEnConfigChange; 6310 break; 6311 } 6312 if (softs->devcfg_wait_on == en_type) 6313 devcfg_needed = 1; 6314 break; 6315 } 6316 6317 case AifCmdEventNotify: 6318 switch (en_type) { 6319 case AifEnAddContainer: 6320 case AifEnDeleteContainer: 6321 softs->devcfg_wait_on = AifEnConfigChange; 6322 break; 6323 case AifEnContainerChange: 6324 if (!softs->devcfg_wait_on) 6325 softs->devcfg_wait_on = AifEnConfigChange; 6326 break; 6327 case AifEnContainerEvent: 6328 if (ddi_get32(acc, &aif-> \ 6329 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE) 6330 devcfg_needed = 1; 6331 break; 6332 } 6333 if (softs->devcfg_wait_on == en_type) 6334 devcfg_needed = 1; 6335 break; 6336 6337 case AifCmdJobProgress: 6338 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) { 6339 int pr_status; 6340 uint32_t pr_ftick, pr_ctick; 6341 6342 pr_status = LE_32((uint32_t)aif->data.PR[0].status); 6343 pr_ctick = LE_32(aif->data.PR[0].currentTick); 6344 pr_ftick = LE_32(aif->data.PR[0].finalTick); 6345 6346 if ((pr_ctick == pr_ftick) || 6347 (pr_status == AifJobStsSuccess)) 6348 softs->devcfg_wait_on = AifEnContainerChange; 6349 else if ((pr_ctick == 0) && 6350 (pr_status == AifJobStsRunning)) 6351 softs->devcfg_wait_on = AifEnContainerChange; 6352 } 6353 break; 6354 } 6355 6356 mutex_exit(&softs->aifq_mutex); 6357 if (devcfg_needed) { 6358 softs->devcfg_wait_on = 0; 6359 (void) aac_probe_containers(softs); 6360 } 6361 mutex_enter(&softs->aifq_mutex); 6362 6363 /* Modify AIF contexts */ 6364 current = softs->aifq_idx; 6365 next = (current + 1) % AAC_AIFQ_LENGTH; 6366 if (next == 0) { 6367 struct aac_fib_context *ctx; 6368 6369 softs->aifq_wrap = 1; 6370 for (ctx = softs->fibctx; ctx; ctx = ctx->next) { 6371 if (next == ctx->ctx_idx) { 6372 ctx->ctx_filled = 1; 6373 } else if (current == ctx->ctx_idx && ctx->ctx_filled) { 6374 ctx->ctx_idx = next; 6375 AACDB_PRINT(softs, CE_NOTE, 6376 "-- AIF queue(%x) overrun", ctx->unique); 6377 } 6378 } 6379 } 6380 softs->aifq_idx = next; 6381 6382 /* Wakeup applications */ 6383 cv_broadcast(&softs->aifv); 6384 return (AACOK); 6385 } 6386 6387 /* 6388 * Timeout recovery 6389 */ 6390 /*ARGSUSED*/ 6391 static void 6392 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp) 
6393 { 6394 #ifdef DEBUG 6395 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT; 6396 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp); 6397 AACDB_PRINT_FIB(softs, acp->slotp); 6398 #endif 6399 6400 /* 6401 * Besides firmware in an unhealthy state, an overloaded 6402 * adapter may also cause packet timeouts. 6403 * An adapter with a slower IOP can take longer than 6404 * 60 seconds to process commands, for example when it is 6405 * doing a build on a RAID-5 volume while servicing IOs, 6406 * so longer completion times should be 6407 * tolerated. 6408 */ 6409 switch (aac_do_reset(softs)) { 6410 case AAC_IOP_RESET_SUCCEED: 6411 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET); 6412 aac_start_waiting_io(softs); 6413 break; 6414 case AAC_IOP_RESET_FAILED: 6415 /* Abort all waiting cmds when adapter is dead */ 6416 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT); 6417 break; 6418 case AAC_IOP_RESET_ABNORMAL: 6419 aac_start_waiting_io(softs); 6420 } 6421 } 6422 6423 /* 6424 * The following function comes from Adaptec: 6425 * 6426 * Time sync. command added to synchronize time with firmware every 30 6427 * minutes (required for correct AIF timestamps etc.) 6428 */ 6429 static int 6430 aac_sync_tick(struct aac_softstate *softs) 6431 { 6432 ddi_acc_handle_t acc; 6433 int rval; 6434 6435 /* Time sync. with firmware every AAC_SYNC_TICK */ 6436 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 6437 acc = softs->sync_ac.slotp->fib_acc_handle; 6438 6439 ddi_put32(acc, (void *)&softs->sync_ac.slotp->fibp->data[0], 6440 ddi_get_time()); 6441 rval = aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t)); 6442 aac_sync_fib_slot_release(softs, &softs->sync_ac); 6443 return (rval); 6444 } 6445 6446 static void 6447 aac_daemon(void *arg) 6448 { 6449 struct aac_softstate *softs = (struct aac_softstate *)arg; 6450 struct aac_cmd *acp; 6451 6452 DBCALLED(softs, 2); 6453 6454 mutex_enter(&softs->io_lock); 6455 /* Check for timed out pkts */ 6456 aac_timebase += aac_tick; 6457 for (acp = softs->q_busy.q_head; acp; acp = acp->next) { 6458 if (acp->timeout) { 6459 if (acp->timeout <= aac_timebase) { 6460 aac_cmd_timeout(softs, acp); 6461 ddi_trigger_softintr(softs->softint_id); 6462 } 6463 break; 6464 } 6465 } 6466 6467 /* Time sync.
with firmware every AAC_SYNC_TICK */ 6468 if (aac_sync_time <= aac_timebase) { 6469 aac_sync_time = aac_timebase; 6470 if (aac_sync_tick(softs) != AACOK) 6471 aac_sync_time += aac_tick << 1; /* retry shortly */ 6472 else 6473 aac_sync_time += AAC_SYNC_TICK; 6474 } 6475 6476 if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0)) 6477 softs->timeout_id = timeout(aac_daemon, (void *)softs, 6478 (aac_tick * drv_usectohz(1000000))); 6479 mutex_exit(&softs->io_lock); 6480 } 6481 6482 /* 6483 * Architecture dependent functions 6484 */ 6485 static int 6486 aac_rx_get_fwstatus(struct aac_softstate *softs) 6487 { 6488 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6489 } 6490 6491 static int 6492 aac_rx_get_mailbox(struct aac_softstate *softs, int mb) 6493 { 6494 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4)); 6495 } 6496 6497 static void 6498 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6499 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6500 { 6501 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd); 6502 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0); 6503 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1); 6504 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2); 6505 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3); 6506 } 6507 6508 static int 6509 aac_rkt_get_fwstatus(struct aac_softstate *softs) 6510 { 6511 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6512 } 6513 6514 static int 6515 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb) 6516 { 6517 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4)); 6518 } 6519 6520 static void 6521 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6522 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6523 { 6524 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd); 6525 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0); 6526 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1); 6527 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2); 6528 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3); 6529 } 6530 6531 /* 6532 * cb_ops functions 6533 */ 6534 static int 6535 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred) 6536 { 6537 struct aac_softstate *softs; 6538 int minor0, minor; 6539 int instance; 6540 6541 DBCALLED(NULL, 2); 6542 6543 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6544 return (EINVAL); 6545 6546 minor0 = getminor(*devp); 6547 minor = AAC_SCSA_MINOR(minor0); 6548 6549 if (AAC_IS_SCSA_NODE(minor)) 6550 return (scsi_hba_open(devp, flag, otyp, cred)); 6551 6552 instance = MINOR2INST(minor0); 6553 if (instance >= AAC_MAX_ADAPTERS) 6554 return (ENXIO); 6555 6556 softs = ddi_get_soft_state(aac_softstatep, instance); 6557 if (softs == NULL) 6558 return (ENXIO); 6559 6560 return (0); 6561 } 6562 6563 /*ARGSUSED*/ 6564 static int 6565 aac_close(dev_t dev, int flag, int otyp, cred_t *cred) 6566 { 6567 int minor0, minor; 6568 int instance; 6569 6570 DBCALLED(NULL, 2); 6571 6572 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6573 return (EINVAL); 6574 6575 minor0 = getminor(dev); 6576 minor = AAC_SCSA_MINOR(minor0); 6577 6578 if (AAC_IS_SCSA_NODE(minor)) 6579 return (scsi_hba_close(dev, flag, otyp, cred)); 6580 6581 instance = MINOR2INST(minor0); 6582 if (instance >= AAC_MAX_ADAPTERS) 6583 return (ENXIO); 6584 6585 return (0); 6586 } 6587 6588 static int 6589 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, 6590 int *rval_p) 6591 { 6592 struct aac_softstate *softs; 6593 int minor0, minor; 6594 int instance; 6595 6596 DBCALLED(NULL, 2); 6597 6598 if (drv_priv(cred_p) != 0) 6599 return (EPERM); 6600 6601 minor0 = 
getminor(dev); 6602 minor = AAC_SCSA_MINOR(minor0); 6603 6604 if (AAC_IS_SCSA_NODE(minor)) 6605 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p)); 6606 6607 instance = MINOR2INST(minor0); 6608 if (instance < AAC_MAX_ADAPTERS) { 6609 softs = ddi_get_soft_state(aac_softstatep, instance); 6610 return (aac_do_ioctl(softs, dev, cmd, arg, flag)); 6611 } 6612 return (ENXIO); 6613 } 6614 6615 /* 6616 * The IO fault service error handling callback function 6617 */ 6618 /*ARGSUSED*/ 6619 static int 6620 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6621 { 6622 /* 6623 * as the driver can always deal with an error in any dma or 6624 * access handle, we can just return the fme_status value. 6625 */ 6626 pci_ereport_post(dip, err, NULL); 6627 return (err->fme_status); 6628 } 6629 6630 /* 6631 * aac_fm_init - initialize fma capabilities and register with IO 6632 * fault services. 6633 */ 6634 static void 6635 aac_fm_init(struct aac_softstate *softs) 6636 { 6637 /* 6638 * Need to change iblock to priority for new MSI intr 6639 */ 6640 ddi_iblock_cookie_t fm_ibc; 6641 6642 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p, 6643 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 6644 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 6645 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 6646 6647 /* Only register with IO Fault Services if we have some capability */ 6648 if (softs->fm_capabilities) { 6649 /* Adjust access and dma attributes for FMA */ 6650 softs->reg_attr.devacc_attr_access = DDI_FLAGERR_ACC; 6651 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6652 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6653 6654 /* 6655 * Register capabilities with IO Fault Services. 6656 * fm_capabilities will be updated to indicate 6657 * capabilities actually supported (not requested.) 6658 */ 6659 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc); 6660 6661 /* 6662 * Initialize pci ereport capabilities if ereport 6663 * capable (should always be.) 6664 */ 6665 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6666 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6667 pci_ereport_setup(softs->devinfo_p); 6668 } 6669 6670 /* 6671 * Register error callback if error callback capable. 6672 */ 6673 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6674 ddi_fm_handler_register(softs->devinfo_p, 6675 aac_fm_error_cb, (void *) softs); 6676 } 6677 } 6678 } 6679 6680 /* 6681 * aac_fm_fini - Releases fma capabilities and un-registers with IO 6682 * fault services. 6683 */ 6684 static void 6685 aac_fm_fini(struct aac_softstate *softs) 6686 { 6687 /* Only unregister FMA capabilities if registered */ 6688 if (softs->fm_capabilities) { 6689 /* 6690 * Un-register error callback if error callback capable. 
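* This undoes the ddi_fm_handler_register() call made in aac_fm_init().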
6691 */ 6692 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6693 ddi_fm_handler_unregister(softs->devinfo_p); 6694 } 6695 6696 /* 6697 * Release any resources allocated by pci_ereport_setup() 6698 */ 6699 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6700 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6701 pci_ereport_teardown(softs->devinfo_p); 6702 } 6703 6704 /* Unregister from IO Fault Services */ 6705 ddi_fm_fini(softs->devinfo_p); 6706 6707 /* Adjust access and dma attributes for FMA */ 6708 softs->reg_attr.devacc_attr_access = DDI_DEFAULT_ACC; 6709 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 6710 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 6711 } 6712 } 6713 6714 int 6715 aac_check_acc_handle(ddi_acc_handle_t handle) 6716 { 6717 ddi_fm_error_t de; 6718 6719 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 6720 return (de.fme_status); 6721 } 6722 6723 int 6724 aac_check_dma_handle(ddi_dma_handle_t handle) 6725 { 6726 ddi_fm_error_t de; 6727 6728 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 6729 return (de.fme_status); 6730 } 6731 6732 void 6733 aac_fm_ereport(struct aac_softstate *softs, char *detail) 6734 { 6735 uint64_t ena; 6736 char buf[FM_MAX_CLASS]; 6737 6738 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6739 ena = fm_ena_generate(0, FM_ENA_FMT1); 6740 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) { 6741 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP, 6742 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 6743 } 6744 } 6745 6746 /* 6747 * Autoconfiguration support 6748 */ 6749 static int 6750 aac_parse_devname(char *devnm, int *tgt, int *lun) 6751 { 6752 char devbuf[SCSI_MAXNAMELEN]; 6753 char *addr; 6754 char *p, *tp, *lp; 6755 long num; 6756 6757 /* Parse dev name and address */ 6758 (void) strcpy(devbuf, devnm); 6759 addr = ""; 6760 for (p = devbuf; *p != '\0'; p++) { 6761 if (*p == '@') { 6762 addr = p + 1; 6763 *p = '\0'; 6764 } else if (*p == ':') { 6765 *p = '\0'; 6766 break; 6767 } 6768 } 6769 6770 /* Parse taget and lun */ 6771 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 6772 if (*p == ',') { 6773 lp = p + 1; 6774 *p = '\0'; 6775 break; 6776 } 6777 } 6778 if (tgt && tp) { 6779 if (ddi_strtol(tp, NULL, 0x10, &num)) 6780 return (AACERR); 6781 *tgt = (int)num; 6782 } 6783 if (lun && lp) { 6784 if (ddi_strtol(lp, NULL, 0x10, &num)) 6785 return (AACERR); 6786 *lun = (int)num; 6787 } 6788 return (AACOK); 6789 } 6790 6791 static dev_info_t * 6792 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun) 6793 { 6794 dev_info_t *child = NULL; 6795 char addr[SCSI_MAXNAMELEN]; 6796 char tmp[MAXNAMELEN]; 6797 6798 if (tgt < AAC_MAX_LD) { 6799 if (lun == 0) { 6800 struct aac_device *dvp = &softs->containers[tgt].dev; 6801 6802 child = dvp->dip; 6803 } 6804 } else { 6805 (void) sprintf(addr, "%x,%x", tgt, lun); 6806 for (child = ddi_get_child(softs->devinfo_p); 6807 child; child = ddi_get_next_sibling(child)) { 6808 /* We don't care about non-persistent node */ 6809 if (ndi_dev_is_persistent_node(child) == 0) 6810 continue; 6811 6812 if (aac_name_node(child, tmp, MAXNAMELEN) != 6813 DDI_SUCCESS) 6814 continue; 6815 if (strcmp(addr, tmp) == 0) 6816 break; 6817 } 6818 } 6819 return (child); 6820 } 6821 6822 static int 6823 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd, 6824 dev_info_t **dipp) 6825 { 6826 char *nodename = NULL; 6827 char **compatible = NULL; 6828 int ncompatible = 0; 6829 char *childname; 6830 dev_info_t *ldip = NULL; 6831 int tgt = 
sd->sd_address.a_target; 6832 int lun = sd->sd_address.a_lun; 6833 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 6834 int rval; 6835 6836 DBCALLED(softs, 2); 6837 6838 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 6839 NULL, &nodename, &compatible, &ncompatible); 6840 if (nodename == NULL) { 6841 AACDB_PRINT(softs, CE_WARN, 6842 "found no comptible driver for t%dL%d", tgt, lun); 6843 rval = NDI_FAILURE; 6844 goto finish; 6845 } 6846 childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename; 6847 6848 /* Create dev node */ 6849 rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID, 6850 &ldip); 6851 if (rval == NDI_SUCCESS) { 6852 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) 6853 != DDI_PROP_SUCCESS) { 6854 AACDB_PRINT(softs, CE_WARN, "unable to create " 6855 "property for t%dL%d (target)", tgt, lun); 6856 rval = NDI_FAILURE; 6857 goto finish; 6858 } 6859 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) 6860 != DDI_PROP_SUCCESS) { 6861 AACDB_PRINT(softs, CE_WARN, "unable to create " 6862 "property for t%dL%d (lun)", tgt, lun); 6863 rval = NDI_FAILURE; 6864 goto finish; 6865 } 6866 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 6867 "compatible", compatible, ncompatible) 6868 != DDI_PROP_SUCCESS) { 6869 AACDB_PRINT(softs, CE_WARN, "unable to create " 6870 "property for t%dL%d (compatible)", tgt, lun); 6871 rval = NDI_FAILURE; 6872 goto finish; 6873 } 6874 6875 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 6876 if (rval != NDI_SUCCESS) { 6877 AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d", 6878 tgt, lun); 6879 ndi_prop_remove_all(ldip); 6880 (void) ndi_devi_free(ldip); 6881 } 6882 } 6883 finish: 6884 if (dipp) 6885 *dipp = ldip; 6886 6887 scsi_hba_nodename_compatible_free(nodename, compatible); 6888 return (rval); 6889 } 6890 6891 /*ARGSUSED*/ 6892 static int 6893 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd) 6894 { 6895 int tgt = sd->sd_address.a_target; 6896 int lun = sd->sd_address.a_lun; 6897 6898 DBCALLED(softs, 2); 6899 6900 if (tgt < AAC_MAX_LD) { 6901 int rval; 6902 6903 if (lun == 0) { 6904 mutex_enter(&softs->io_lock); 6905 rval = aac_probe_container(softs, tgt); 6906 mutex_exit(&softs->io_lock); 6907 if (rval == AACOK) { 6908 if (scsi_hba_probe(sd, NULL) == 6909 SCSIPROBE_EXISTS) 6910 return (NDI_SUCCESS); 6911 } 6912 } 6913 return (NDI_FAILURE); 6914 } else { 6915 int dtype; 6916 6917 if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS) 6918 return (NDI_FAILURE); 6919 6920 dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 6921 6922 AACDB_PRINT(softs, CE_NOTE, 6923 "Phys. 
device found: tgt %d dtype %d: %s", 6924 tgt, dtype, sd->sd_inq->inq_vid); 6925 6926 /* Only non-DASD exposed */ 6927 if (dtype != DTYPE_RODIRECT /* CDROM */ && 6928 dtype != DTYPE_SEQUENTIAL /* TAPE */ && 6929 dtype != DTYPE_ESI /* SES */) 6930 return (NDI_FAILURE); 6931 6932 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt); 6933 mutex_enter(&softs->io_lock); 6934 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID; 6935 mutex_exit(&softs->io_lock); 6936 return (NDI_SUCCESS); 6937 } 6938 } 6939 6940 static int 6941 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun, 6942 dev_info_t **ldip) 6943 { 6944 struct scsi_device sd; 6945 dev_info_t *child; 6946 int rval; 6947 6948 DBCALLED(softs, 2); 6949 6950 if ((child = aac_find_child(softs, tgt, lun)) != NULL) { 6951 if (ldip) 6952 *ldip = child; 6953 return (NDI_SUCCESS); 6954 } 6955 6956 bzero(&sd, sizeof (struct scsi_device)); 6957 sd.sd_address.a_hba_tran = softs->hba_tran; 6958 sd.sd_address.a_target = (uint16_t)tgt; 6959 sd.sd_address.a_lun = (uint8_t)lun; 6960 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS) 6961 rval = aac_config_child(softs, &sd, ldip); 6962 /* scsi_unprobe is blank now. Free buffer manually */ 6963 if (sd.sd_inq) { 6964 kmem_free(sd.sd_inq, SUN_INQSIZE); 6965 sd.sd_inq = (struct scsi_inquiry *)NULL; 6966 } 6967 return (rval); 6968 } 6969 6970 static int 6971 aac_config_tgt(struct aac_softstate *softs, int tgt) 6972 { 6973 struct scsi_address ap; 6974 struct buf *bp = NULL; 6975 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE; 6976 int list_len = 0; 6977 int lun_total = 0; 6978 dev_info_t *ldip; 6979 int i; 6980 6981 ap.a_hba_tran = softs->hba_tran; 6982 ap.a_target = (uint16_t)tgt; 6983 ap.a_lun = 0; 6984 6985 for (i = 0; i < 2; i++) { 6986 struct scsi_pkt *pkt; 6987 uchar_t *cdb; 6988 uchar_t *p; 6989 uint32_t data; 6990 6991 if (bp == NULL) { 6992 if ((bp = scsi_alloc_consistent_buf(&ap, NULL, 6993 buf_len, B_READ, NULL_FUNC, NULL)) == NULL) 6994 return (AACERR); 6995 } 6996 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5, 6997 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, 6998 NULL, NULL)) == NULL) { 6999 scsi_free_consistent_buf(bp); 7000 return (AACERR); 7001 } 7002 cdb = pkt->pkt_cdbp; 7003 bzero(cdb, CDB_GROUP5); 7004 cdb[0] = SCMD_REPORT_LUNS; 7005 7006 /* Convert buffer len from local to LE_32 */ 7007 data = buf_len; 7008 for (p = &cdb[9]; p > &cdb[5]; p--) { 7009 *p = data & 0xff; 7010 data >>= 8; 7011 } 7012 7013 if (scsi_poll(pkt) < 0 || 7014 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) { 7015 scsi_destroy_pkt(pkt); 7016 break; 7017 } 7018 7019 /* Convert list_len from LE_32 to local */ 7020 for (p = (uchar_t *)bp->b_un.b_addr; 7021 p < (uchar_t *)bp->b_un.b_addr + 4; p++) { 7022 data <<= 8; 7023 data |= *p; 7024 } 7025 list_len = data; 7026 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) { 7027 scsi_free_consistent_buf(bp); 7028 bp = NULL; 7029 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE; 7030 } 7031 scsi_destroy_pkt(pkt); 7032 } 7033 if (i >= 2) { 7034 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr + 7035 AAC_SCSI_RPTLUNS_HEAD_SIZE); 7036 7037 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) { 7038 uint16_t lun; 7039 7040 /* Determine report luns addressing type */ 7041 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) { 7042 /* 7043 * Vendors in the field have been found to be 7044 * concatenating bus/target/lun to equal the 7045 * complete lun value instead of switching to 7046 * flat space addressing 7047 */ 7048 case 
AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL: 7049 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT: 7050 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE: 7051 lun = ((buf[0] & 0x3f) << 8) | buf[1]; 7052 if (lun > UINT8_MAX) { 7053 AACDB_PRINT(softs, CE_WARN, 7054 "abnormal lun number: %d", lun); 7055 break; 7056 } 7057 if (aac_config_lun(softs, tgt, lun, &ldip) == 7058 NDI_SUCCESS) 7059 lun_total++; 7060 break; 7061 } 7062 7063 buf += AAC_SCSI_RPTLUNS_ADDR_SIZE; 7064 } 7065 } else { 7066 /* The target may not support SCMD_REPORT_LUNS. */ 7067 if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS) 7068 lun_total++; 7069 } 7070 scsi_free_consistent_buf(bp); 7071 return (lun_total); 7072 } 7073 7074 static void 7075 aac_devcfg(struct aac_softstate *softs, int tgt, int en) 7076 { 7077 struct aac_device *dvp; 7078 7079 mutex_enter(&softs->io_lock); 7080 dvp = AAC_DEV(softs, tgt); 7081 if (en) 7082 dvp->flags |= AAC_DFLAG_CONFIGURING; 7083 else 7084 dvp->flags &= ~AAC_DFLAG_CONFIGURING; 7085 mutex_exit(&softs->io_lock); 7086 } 7087 7088 static int 7089 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op, 7090 void *arg, dev_info_t **childp) 7091 { 7092 struct aac_softstate *softs; 7093 int circ = 0; 7094 int rval; 7095 7096 if ((softs = ddi_get_soft_state(aac_softstatep, 7097 ddi_get_instance(parent))) == NULL) 7098 return (NDI_FAILURE); 7099 7100 /* Commands for bus config should be blocked while the bus is quiesced */ 7101 mutex_enter(&softs->io_lock); 7102 if (softs->state & AAC_STATE_QUIESCED) { 7103 AACDB_PRINT(softs, CE_NOTE, 7104 "bus_config aborted because bus is quiesced"); 7105 mutex_exit(&softs->io_lock); 7106 return (NDI_FAILURE); 7107 } 7108 mutex_exit(&softs->io_lock); 7109 7110 DBCALLED(softs, 1); 7111 7112 /* Hold the nexus across the bus_config */ 7113 ndi_devi_enter(parent, &circ); 7114 switch (op) { 7115 case BUS_CONFIG_ONE: { 7116 int tgt, lun; 7117 7118 if (aac_parse_devname(arg, &tgt, &lun) != AACOK) { 7119 rval = NDI_FAILURE; 7120 break; 7121 } 7122 7123 AAC_DEVCFG_BEGIN(softs, tgt); 7124 rval = aac_config_lun(softs, tgt, lun, childp); 7125 AAC_DEVCFG_END(softs, tgt); 7126 break; 7127 } 7128 7129 case BUS_CONFIG_DRIVER: 7130 case BUS_CONFIG_ALL: { 7131 uint32_t bus, tgt; 7132 int index, total; 7133 7134 for (tgt = 0; tgt < AAC_MAX_LD; tgt++) { 7135 AAC_DEVCFG_BEGIN(softs, tgt); 7136 (void) aac_config_lun(softs, tgt, 0, NULL); 7137 AAC_DEVCFG_END(softs, tgt); 7138 } 7139 7140 /* Config the non-DASD devices connected to the card */ 7141 total = 0; 7142 index = AAC_MAX_LD; 7143 for (bus = 0; bus < softs->bus_max; bus++) { 7144 AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus); 7145 for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) { 7146 AAC_DEVCFG_BEGIN(softs, index); 7147 if (aac_config_tgt(softs, index)) 7148 total++; 7149 AAC_DEVCFG_END(softs, index); 7150 } 7151 } 7152 AACDB_PRINT(softs, CE_CONT, 7153 "?Total %d phys.
device(s) found", total); 7154 rval = NDI_SUCCESS; 7155 break; 7156 } 7157 } 7158 7159 if (rval == NDI_SUCCESS) 7160 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 7161 ndi_devi_exit(parent, circ); 7162 return (rval); 7163 } 7164 7165 static void 7166 aac_handle_dr(struct aac_drinfo *drp) 7167 { 7168 struct aac_softstate *softs = drp->softs; 7169 struct aac_device *dvp; 7170 dev_info_t *dip; 7171 int valid; 7172 int circ1 = 0; 7173 7174 DBCALLED(softs, 1); 7175 7176 /* Hold the nexus across the bus_config */ 7177 mutex_enter(&softs->io_lock); 7178 dvp = AAC_DEV(softs, drp->tgt); 7179 valid = AAC_DEV_IS_VALID(dvp); 7180 dip = dvp->dip; 7181 mutex_exit(&softs->io_lock); 7182 7183 switch (drp->event) { 7184 case AAC_EVT_ONLINE: 7185 case AAC_EVT_OFFLINE: 7186 /* Device onlined */ 7187 if (dip == NULL && valid) { 7188 ndi_devi_enter(softs->devinfo_p, &circ1); 7189 (void) aac_config_lun(softs, drp->tgt, 0, NULL); 7190 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined", 7191 softs->instance, drp->tgt, drp->lun); 7192 ndi_devi_exit(softs->devinfo_p, circ1); 7193 } 7194 /* Device offlined */ 7195 if (dip && !valid) { 7196 mutex_enter(&softs->io_lock); 7197 (void) aac_do_reset(softs); 7198 mutex_exit(&softs->io_lock); 7199 7200 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 7201 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined", 7202 softs->instance, drp->tgt, drp->lun); 7203 } 7204 break; 7205 } 7206 kmem_free(drp, sizeof (struct aac_drinfo)); 7207 } 7208 7209 static int 7210 aac_dr_event(struct aac_softstate *softs, int tgt, int lun, int event) 7211 { 7212 struct aac_drinfo *drp; 7213 7214 DBCALLED(softs, 1); 7215 7216 if (softs->taskq == NULL || 7217 (drp = kmem_zalloc(sizeof (struct aac_drinfo), KM_NOSLEEP)) == NULL) 7218 return (AACERR); 7219 7220 drp->softs = softs; 7221 drp->tgt = tgt; 7222 drp->lun = lun; 7223 drp->event = event; 7224 if ((ddi_taskq_dispatch(softs->taskq, (void (*)(void *))aac_handle_dr, 7225 drp, DDI_NOSLEEP)) != DDI_SUCCESS) { 7226 AACDB_PRINT(softs, CE_WARN, "DR task start failed"); 7227 kmem_free(drp, sizeof (struct aac_drinfo)); 7228 return (AACERR); 7229 } 7230 return (AACOK); 7231 } 7232 7233 #ifdef DEBUG 7234 7235 /* -------------------------debug aid functions-------------------------- */ 7236 7237 #define AAC_FIB_CMD_KEY_STRINGS \ 7238 TestCommandResponse, "TestCommandResponse", \ 7239 TestAdapterCommand, "TestAdapterCommand", \ 7240 LastTestCommand, "LastTestCommand", \ 7241 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \ 7242 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \ 7243 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \ 7244 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \ 7245 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \ 7246 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \ 7247 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \ 7248 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \ 7249 InterfaceShutdown, "InterfaceShutdown", \ 7250 DmaCommandFib, "DmaCommandFib", \ 7251 StartProfile, "StartProfile", \ 7252 TermProfile, "TermProfile", \ 7253 SpeedTest, "SpeedTest", \ 7254 TakeABreakPt, "TakeABreakPt", \ 7255 RequestPerfData, "RequestPerfData", \ 7256 SetInterruptDefTimer, "SetInterruptDefTimer", \ 7257 SetInterruptDefCount, "SetInterruptDefCount", \ 7258 GetInterruptDefStatus, "GetInterruptDefStatus", \ 7259 LastCommCommand, "LastCommCommand", \ 7260 NuFileSystem, "NuFileSystem", \ 7261 UFS, "UFS", \ 7262 HostFileSystem, "HostFileSystem", \ 7263 LastFileSystemCommand, 
"LastFileSystemCommand", \ 7264 ContainerCommand, "ContainerCommand", \ 7265 ContainerCommand64, "ContainerCommand64", \ 7266 ClusterCommand, "ClusterCommand", \ 7267 ScsiPortCommand, "ScsiPortCommand", \ 7268 ScsiPortCommandU64, "ScsiPortCommandU64", \ 7269 AifRequest, "AifRequest", \ 7270 CheckRevision, "CheckRevision", \ 7271 FsaHostShutdown, "FsaHostShutdown", \ 7272 RequestAdapterInfo, "RequestAdapterInfo", \ 7273 IsAdapterPaused, "IsAdapterPaused", \ 7274 SendHostTime, "SendHostTime", \ 7275 LastMiscCommand, "LastMiscCommand" 7276 7277 #define AAC_CTVM_SUBCMD_KEY_STRINGS \ 7278 VM_Null, "VM_Null", \ 7279 VM_NameServe, "VM_NameServe", \ 7280 VM_ContainerConfig, "VM_ContainerConfig", \ 7281 VM_Ioctl, "VM_Ioctl", \ 7282 VM_FilesystemIoctl, "VM_FilesystemIoctl", \ 7283 VM_CloseAll, "VM_CloseAll", \ 7284 VM_CtBlockRead, "VM_CtBlockRead", \ 7285 VM_CtBlockWrite, "VM_CtBlockWrite", \ 7286 VM_SliceBlockRead, "VM_SliceBlockRead", \ 7287 VM_SliceBlockWrite, "VM_SliceBlockWrite", \ 7288 VM_DriveBlockRead, "VM_DriveBlockRead", \ 7289 VM_DriveBlockWrite, "VM_DriveBlockWrite", \ 7290 VM_EnclosureMgt, "VM_EnclosureMgt", \ 7291 VM_Unused, "VM_Unused", \ 7292 VM_CtBlockVerify, "VM_CtBlockVerify", \ 7293 VM_CtPerf, "VM_CtPerf", \ 7294 VM_CtBlockRead64, "VM_CtBlockRead64", \ 7295 VM_CtBlockWrite64, "VM_CtBlockWrite64", \ 7296 VM_CtBlockVerify64, "VM_CtBlockVerify64", \ 7297 VM_CtHostRead64, "VM_CtHostRead64", \ 7298 VM_CtHostWrite64, "VM_CtHostWrite64", \ 7299 VM_NameServe64, "VM_NameServe64" 7300 7301 #define AAC_CT_SUBCMD_KEY_STRINGS \ 7302 CT_Null, "CT_Null", \ 7303 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \ 7304 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \ 7305 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \ 7306 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \ 7307 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \ 7308 CT_WRITE_MBR, "CT_WRITE_MBR", \ 7309 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \ 7310 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \ 7311 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \ 7312 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \ 7313 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \ 7314 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \ 7315 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \ 7316 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \ 7317 CT_READ_MBR, "CT_READ_MBR", \ 7318 CT_READ_PARTITION, "CT_READ_PARTITION", \ 7319 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \ 7320 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \ 7321 CT_SLICE_SIZE, "CT_SLICE_SIZE", \ 7322 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \ 7323 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \ 7324 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \ 7325 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \ 7326 CT_UNMIRROR, "CT_UNMIRROR", \ 7327 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \ 7328 CT_GEN_MIRROR, "CT_GEN_MIRROR", \ 7329 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \ 7330 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \ 7331 CT_MOVE2, "CT_MOVE2", \ 7332 CT_SPLIT, "CT_SPLIT", \ 7333 CT_SPLIT2, "CT_SPLIT2", \ 7334 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \ 7335 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \ 7336 CT_RECONFIG, "CT_RECONFIG", \ 7337 CT_BREAK2, "CT_BREAK2", \ 7338 CT_BREAK, "CT_BREAK", \ 7339 CT_MERGE2, "CT_MERGE2", \ 7340 CT_MERGE, "CT_MERGE", \ 7341 CT_FORCE_ERROR, "CT_FORCE_ERROR", \ 7342 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \ 7343 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \ 7344 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \ 7345 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \ 7346 CT_VOLUME_ADD, "CT_VOLUME_ADD", \ 7347 
CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \ 7348 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \ 7349 CT_COPY_STATUS, "CT_COPY_STATUS", \ 7350 CT_COPY, "CT_COPY", \ 7351 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \ 7352 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \ 7353 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \ 7354 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \ 7355 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \ 7356 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \ 7357 CT_SET, "CT_SET", \ 7358 CT_GET, "CT_GET", \ 7359 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \ 7360 CT_GET_DELAY, "CT_GET_DELAY", \ 7361 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \ 7362 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \ 7363 CT_SCRUB, "CT_SCRUB", \ 7364 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \ 7365 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \ 7366 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \ 7367 CT_PAUSE_IO, "CT_PAUSE_IO", \ 7368 CT_RELEASE_IO, "CT_RELEASE_IO", \ 7369 CT_SCRUB2, "CT_SCRUB2", \ 7370 CT_MCHECK, "CT_MCHECK", \ 7371 CT_CORRUPT, "CT_CORRUPT", \ 7372 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \ 7373 CT_PROMOTE, "CT_PROMOTE", \ 7374 CT_SET_DEAD, "CT_SET_DEAD", \ 7375 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \ 7376 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \ 7377 CT_GET_PARAM, "CT_GET_PARAM", \ 7378 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \ 7379 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \ 7380 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \ 7381 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \ 7382 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \ 7383 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \ 7384 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \ 7385 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \ 7386 CT_STOP_DATA, "CT_STOP_DATA", \ 7387 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \ 7388 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \ 7389 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \ 7390 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \ 7391 CT_GET_TIME, "CT_GET_TIME", \ 7392 CT_READ_DATA, "CT_READ_DATA", \ 7393 CT_CTR, "CT_CTR", \ 7394 CT_CTL, "CT_CTL", \ 7395 CT_DRAINIO, "CT_DRAINIO", \ 7396 CT_RELEASEIO, "CT_RELEASEIO", \ 7397 CT_GET_NVRAM, "CT_GET_NVRAM", \ 7398 CT_GET_MEMORY, "CT_GET_MEMORY", \ 7399 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \ 7400 CT_ADD_LEVEL, "CT_ADD_LEVEL", \ 7401 CT_NV_ZERO, "CT_NV_ZERO", \ 7402 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \ 7403 CT_THROTTLE_ON, "CT_THROTTLE_ON", \ 7404 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \ 7405 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \ 7406 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \ 7407 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \ 7408 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \ 7409 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \ 7410 CT_MONITOR, "CT_MONITOR", \ 7411 CT_GEN_MORPH, "CT_GEN_MORPH", \ 7412 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \ 7413 CT_CACHE_SET, "CT_CACHE_SET", \ 7414 CT_CACHE_STAT, "CT_CACHE_STAT", \ 7415 CT_TRACE_START, "CT_TRACE_START", \ 7416 CT_TRACE_STOP, "CT_TRACE_STOP", \ 7417 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \ 7418 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \ 7419 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \ 7420 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \ 7421 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \ 7422 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \ 7423 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \ 7424 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \ 7425 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \ 7426 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \ 7427 CT_STOP_DUMPS, "CT_STOP_DUMPS", \ 7428 CT_PANIC_ON_TAKE_A_BREAK, 
"CT_PANIC_ON_TAKE_A_BREAK", \ 7429 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \ 7430 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \ 7431 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \ 7432 CT_READ_NAME, "CT_READ_NAME", \ 7433 CT_WRITE_NAME, "CT_WRITE_NAME", \ 7434 CT_TOSS_CACHE, "CT_TOSS_CACHE", \ 7435 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \ 7436 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \ 7437 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \ 7438 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \ 7439 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \ 7440 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \ 7441 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \ 7442 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \ 7443 CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \ 7444 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \ 7445 CT_FLUSH, "CT_FLUSH", \ 7446 CT_REBUILD, "CT_REBUILD", \ 7447 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \ 7448 CT_RESTART, "CT_RESTART", \ 7449 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \ 7450 CT_TRACE_FLAG, "CT_TRACE_FLAG", \ 7451 CT_RESTART_MORPH, "CT_RESTART_MORPH", \ 7452 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \ 7453 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \ 7454 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \ 7455 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \ 7456 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \ 7457 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \ 7458 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \ 7459 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \ 7460 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \ 7461 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \ 7462 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \ 7463 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \ 7464 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \ 7465 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \ 7466 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \ 7467 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \ 7468 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \ 7469 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \ 7470 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \ 7471 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \ 7472 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \ 7473 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \ 7474 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \ 7475 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \ 7476 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \ 7477 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \ 7478 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \ 7479 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \ 7480 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \ 7481 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \ 7482 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \ 7483 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \ 7484 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \ 7485 CT_IS_CONTAINER_MEATADATA_STANDARD, \ 7486 "CT_IS_CONTAINER_MEATADATA_STANDARD", \ 7487 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \ 7488 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \ 7489 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \ 7490 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \ 7491 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \ 7492 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \ 7493 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \ 7494 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \ 7495 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \ 7496 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \ 7497 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \ 7498 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \ 7499 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", 
\ 7500 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \ 7501 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \ 7502 CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \ 7503 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \ 7504 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \ 7505 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE" 7506 7507 #define AAC_CL_SUBCMD_KEY_STRINGS \ 7508 CL_NULL, "CL_NULL", \ 7509 DS_INIT, "DS_INIT", \ 7510 DS_RESCAN, "DS_RESCAN", \ 7511 DS_CREATE, "DS_CREATE", \ 7512 DS_DELETE, "DS_DELETE", \ 7513 DS_ADD_DISK, "DS_ADD_DISK", \ 7514 DS_REMOVE_DISK, "DS_REMOVE_DISK", \ 7515 DS_MOVE_DISK, "DS_MOVE_DISK", \ 7516 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \ 7517 DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \ 7518 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \ 7519 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \ 7520 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \ 7521 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \ 7522 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \ 7523 DS_GET_DRIVES, "DS_GET_DRIVES", \ 7524 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \ 7525 DS_ONLINE, "DS_ONLINE", \ 7526 DS_OFFLINE, "DS_OFFLINE", \ 7527 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \ 7528 DS_FSAPRINT, "DS_FSAPRINT", \ 7529 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \ 7530 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \ 7531 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \ 7532 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \ 7533 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \ 7534 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \ 7535 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \ 7536 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \ 7537 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \ 7538 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \ 7539 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \ 7540 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \ 7541 CC_GET_BUSINFO, "CC_GET_BUSINFO", \ 7542 CC_GET_PORTINFO, "CC_GET_PORTINFO", \ 7543 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \ 7544 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \ 7545 CQ_QUORUM_OP, "CQ_QUORUM_OP" 7546 7547 #define AAC_AIF_SUBCMD_KEY_STRINGS \ 7548 AifCmdEventNotify, "AifCmdEventNotify", \ 7549 AifCmdJobProgress, "AifCmdJobProgress", \ 7550 AifCmdAPIReport, "AifCmdAPIReport", \ 7551 AifCmdDriverNotify, "AifCmdDriverNotify", \ 7552 AifReqJobList, "AifReqJobList", \ 7553 AifReqJobsForCtr, "AifReqJobsForCtr", \ 7554 AifReqJobsForScsi, "AifReqJobsForScsi", \ 7555 AifReqJobReport, "AifReqJobReport", \ 7556 AifReqTerminateJob, "AifReqTerminateJob", \ 7557 AifReqSuspendJob, "AifReqSuspendJob", \ 7558 AifReqResumeJob, "AifReqResumeJob", \ 7559 AifReqSendAPIReport, "AifReqSendAPIReport", \ 7560 AifReqAPIJobStart, "AifReqAPIJobStart", \ 7561 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \ 7562 AifReqAPIJobFinish, "AifReqAPIJobFinish" 7563 7564 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \ 7565 Reserved_IOCTL, "Reserved_IOCTL", \ 7566 GetDeviceHandle, "GetDeviceHandle", \ 7567 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \ 7568 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \ 7569 RescanBus, "RescanBus", \ 7570 GetDeviceProbeInfo, "GetDeviceProbeInfo", \ 7571 GetDeviceCapacity, "GetDeviceCapacity", \ 7572 GetContainerProbeInfo, "GetContainerProbeInfo", \ 7573 GetRequestedMemorySize, "GetRequestedMemorySize", \ 7574 GetBusInfo, "GetBusInfo", \ 7575 GetVendorSpecific, "GetVendorSpecific", \ 7576 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \ 7577 EnhancedGetBusInfo, 
"EnhancedGetBusInfo", \ 7578 SetupExtendedCounters, "SetupExtendedCounters", \ 7579 GetPerformanceCounters, "GetPerformanceCounters", \ 7580 ResetPerformanceCounters, "ResetPerformanceCounters", \ 7581 ReadModePage, "ReadModePage", \ 7582 WriteModePage, "WriteModePage", \ 7583 ReadDriveParameter, "ReadDriveParameter", \ 7584 WriteDriveParameter, "WriteDriveParameter", \ 7585 ResetAdapter, "ResetAdapter", \ 7586 ResetBus, "ResetBus", \ 7587 ResetBusDevice, "ResetBusDevice", \ 7588 ExecuteSrb, "ExecuteSrb", \ 7589 Create_IO_Task, "Create_IO_Task", \ 7590 Delete_IO_Task, "Delete_IO_Task", \ 7591 Get_IO_Task_Info, "Get_IO_Task_Info", \ 7592 Check_Task_Progress, "Check_Task_Progress", \ 7593 InjectError, "InjectError", \ 7594 GetDeviceDefectCounts, "GetDeviceDefectCounts", \ 7595 GetDeviceDefectInfo, "GetDeviceDefectInfo", \ 7596 GetDeviceStatus, "GetDeviceStatus", \ 7597 ClearDeviceStatus, "ClearDeviceStatus", \ 7598 DiskSpinControl, "DiskSpinControl", \ 7599 DiskSmartControl, "DiskSmartControl", \ 7600 WriteSame, "WriteSame", \ 7601 ReadWriteLong, "ReadWriteLong", \ 7602 FormatUnit, "FormatUnit", \ 7603 TargetDeviceControl, "TargetDeviceControl", \ 7604 TargetChannelControl, "TargetChannelControl", \ 7605 FlashNewCode, "FlashNewCode", \ 7606 DiskCheck, "DiskCheck", \ 7607 RequestSense, "RequestSense", \ 7608 DiskPERControl, "DiskPERControl", \ 7609 Read10, "Read10", \ 7610 Write10, "Write10" 7611 7612 #define AAC_AIFEN_KEY_STRINGS \ 7613 AifEnGeneric, "Generic", \ 7614 AifEnTaskComplete, "TaskComplete", \ 7615 AifEnConfigChange, "Config change", \ 7616 AifEnContainerChange, "Container change", \ 7617 AifEnDeviceFailure, "device failed", \ 7618 AifEnMirrorFailover, "Mirror failover", \ 7619 AifEnContainerEvent, "container event", \ 7620 AifEnFileSystemChange, "File system changed", \ 7621 AifEnConfigPause, "Container pause event", \ 7622 AifEnConfigResume, "Container resume event", \ 7623 AifEnFailoverChange, "Failover space assignment changed", \ 7624 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \ 7625 AifEnEnclosureManagement, "Enclosure management event", \ 7626 AifEnBatteryEvent, "battery event", \ 7627 AifEnAddContainer, "Add container", \ 7628 AifEnDeleteContainer, "Delete container", \ 7629 AifEnSMARTEvent, "SMART Event", \ 7630 AifEnBatteryNeedsRecond, "battery needs reconditioning", \ 7631 AifEnClusterEvent, "cluster event", \ 7632 AifEnDiskSetEvent, "disk set event occured", \ 7633 AifDenMorphComplete, "morph operation completed", \ 7634 AifDenVolumeExtendComplete, "VolumeExtendComplete" 7635 7636 struct aac_key_strings { 7637 int key; 7638 char *message; 7639 }; 7640 7641 extern struct scsi_key_strings scsi_cmds[]; 7642 7643 static struct aac_key_strings aac_fib_cmds[] = { 7644 AAC_FIB_CMD_KEY_STRINGS, 7645 -1, NULL 7646 }; 7647 7648 static struct aac_key_strings aac_ctvm_subcmds[] = { 7649 AAC_CTVM_SUBCMD_KEY_STRINGS, 7650 -1, NULL 7651 }; 7652 7653 static struct aac_key_strings aac_ct_subcmds[] = { 7654 AAC_CT_SUBCMD_KEY_STRINGS, 7655 -1, NULL 7656 }; 7657 7658 static struct aac_key_strings aac_cl_subcmds[] = { 7659 AAC_CL_SUBCMD_KEY_STRINGS, 7660 -1, NULL 7661 }; 7662 7663 static struct aac_key_strings aac_aif_subcmds[] = { 7664 AAC_AIF_SUBCMD_KEY_STRINGS, 7665 -1, NULL 7666 }; 7667 7668 static struct aac_key_strings aac_ioctl_subcmds[] = { 7669 AAC_IOCTL_SUBCMD_KEY_STRINGS, 7670 -1, NULL 7671 }; 7672 7673 static struct aac_key_strings aac_aifens[] = { 7674 AAC_AIFEN_KEY_STRINGS, 7675 -1, NULL 7676 }; 7677 7678 /* 7679 * The following function comes from Adaptec: 7680 * 7681 
* Get the firmware print buffer parameters from the firmware, 7682 * if the command was successful map in the address. 7683 */ 7684 static int 7685 aac_get_fw_debug_buffer(struct aac_softstate *softs) 7686 { 7687 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP, 7688 0, 0, 0, 0, NULL) == AACOK) { 7689 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1); 7690 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2); 7691 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3); 7692 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4); 7693 7694 if (mondrv_buf_size) { 7695 uint32_t offset = mondrv_buf_paddrl - \ 7696 softs->pci_mem_base_paddr; 7697 7698 /* 7699 * See if the address is already mapped in, and 7700 * if so set it up from the base address 7701 */ 7702 if ((mondrv_buf_paddrh == 0) && 7703 (offset + mondrv_buf_size < softs->map_size)) { 7704 mutex_enter(&aac_prt_mutex); 7705 softs->debug_buf_offset = offset; 7706 softs->debug_header_size = mondrv_hdr_size; 7707 softs->debug_buf_size = mondrv_buf_size; 7708 softs->debug_fw_flags = 0; 7709 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 7710 mutex_exit(&aac_prt_mutex); 7711 7712 return (AACOK); 7713 } 7714 } 7715 } 7716 return (AACERR); 7717 } 7718 7719 int 7720 aac_dbflag_on(struct aac_softstate *softs, int flag) 7721 { 7722 int debug_flags = softs ? softs->debug_flags : aac_debug_flags; 7723 7724 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \ 7725 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag)); 7726 } 7727 7728 static void 7729 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader) 7730 { 7731 if (noheader) { 7732 if (sl) { 7733 aac_fmt[0] = sl; 7734 cmn_err(lev, aac_fmt, aac_prt_buf); 7735 } else { 7736 cmn_err(lev, &aac_fmt[1], aac_prt_buf); 7737 } 7738 } else { 7739 if (sl) { 7740 aac_fmt_header[0] = sl; 7741 cmn_err(lev, aac_fmt_header, 7742 softs->vendor_name, softs->instance, 7743 aac_prt_buf); 7744 } else { 7745 cmn_err(lev, &aac_fmt_header[1], 7746 softs->vendor_name, softs->instance, 7747 aac_prt_buf); 7748 } 7749 } 7750 } 7751 7752 /* 7753 * The following function comes from Adaptec: 7754 * 7755 * Format and print out the data passed in to UART or console 7756 * as specified by debug flags. 7757 */ 7758 void 7759 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...) 7760 { 7761 va_list args; 7762 char sl; /* system log character */ 7763 7764 mutex_enter(&aac_prt_mutex); 7765 /* Set up parameters and call sprintf function to format the data */ 7766 if (strchr("^!?", fmt[0]) == NULL) { 7767 sl = 0; 7768 } else { 7769 sl = fmt[0]; 7770 fmt++; 7771 } 7772 va_start(args, fmt); 7773 (void) vsprintf(aac_prt_buf, fmt, args); 7774 va_end(args); 7775 7776 /* Make sure the softs structure has been passed in for this section */ 7777 if (softs) { 7778 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) && 7779 /* If we are set up for a Firmware print */ 7780 (softs->debug_buf_size)) { 7781 uint32_t count, i; 7782 7783 /* Make sure the string size is within boundaries */ 7784 count = strlen(aac_prt_buf); 7785 if (count > softs->debug_buf_size) 7786 count = (uint16_t)softs->debug_buf_size; 7787 7788 /* 7789 * Wait for no more than AAC_PRINT_TIMEOUT for the 7790 * previous message length to clear (the handshake). 
7791 */ 7792 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) { 7793 if (!PCI_MEM_GET32(softs, 7794 softs->debug_buf_offset + \ 7795 AAC_FW_DBG_STRLEN_OFFSET)) 7796 break; 7797 7798 drv_usecwait(1000); 7799 } 7800 7801 /* 7802 * If the length is clear, copy over the message, the 7803 * flags, and the length. Make sure the length is the 7804 * last because that is the signal for the Firmware to 7805 * pick it up. 7806 */ 7807 if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \ 7808 AAC_FW_DBG_STRLEN_OFFSET)) { 7809 PCI_MEM_REP_PUT8(softs, 7810 softs->debug_buf_offset + \ 7811 softs->debug_header_size, 7812 aac_prt_buf, count); 7813 PCI_MEM_PUT32(softs, 7814 softs->debug_buf_offset + \ 7815 AAC_FW_DBG_FLAGS_OFFSET, 7816 softs->debug_fw_flags); 7817 PCI_MEM_PUT32(softs, 7818 softs->debug_buf_offset + \ 7819 AAC_FW_DBG_STRLEN_OFFSET, count); 7820 } else { 7821 cmn_err(CE_WARN, "UART output fail"); 7822 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 7823 } 7824 } 7825 7826 /* 7827 * If the Kernel Debug Print flag is set, send it off 7828 * to the Kernel Debugger 7829 */ 7830 if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT) 7831 aac_cmn_err(softs, lev, sl, 7832 (softs->debug_flags & AACDB_FLAGS_NO_HEADERS)); 7833 } else { 7834 /* Driver not initialized yet, no firmware or header output */ 7835 if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT) 7836 aac_cmn_err(softs, lev, sl, 1); 7837 } 7838 mutex_exit(&aac_prt_mutex); 7839 } 7840 7841 /* 7842 * Translate command number to description string 7843 */ 7844 static char * 7845 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist) 7846 { 7847 int i; 7848 7849 for (i = 0; cmdlist[i].key != -1; i++) { 7850 if (cmd == cmdlist[i].key) 7851 return (cmdlist[i].message); 7852 } 7853 return (NULL); 7854 } 7855 7856 static void 7857 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 7858 { 7859 struct scsi_pkt *pkt = acp->pkt; 7860 struct scsi_address *ap = &pkt->pkt_address; 7861 int is_pd = 0; 7862 int ctl = ddi_get_instance(softs->devinfo_p); 7863 int tgt = ap->a_target; 7864 int lun = ap->a_lun; 7865 union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp; 7866 uchar_t cmd = cdbp->scc_cmd; 7867 char *desc; 7868 7869 if (tgt >= AAC_MAX_LD) { 7870 is_pd = 1; 7871 ctl = ((struct aac_nondasd *)acp->dvp)->bus; 7872 tgt = ((struct aac_nondasd *)acp->dvp)->tid; 7873 lun = 0; 7874 } 7875 7876 if ((desc = aac_cmd_name(cmd, 7877 (struct aac_key_strings *)scsi_cmds)) == NULL) { 7878 aac_printf(softs, CE_NOTE, 7879 "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s", 7880 cmd, ctl, tgt, lun, is_pd ? "(pd)" : ""); 7881 return; 7882 } 7883 7884 switch (cmd) { 7885 case SCMD_READ: 7886 case SCMD_WRITE: 7887 aac_printf(softs, CE_NOTE, 7888 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 7889 desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp), 7890 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 7891 ctl, tgt, lun, is_pd ? "(pd)" : ""); 7892 break; 7893 case SCMD_READ_G1: 7894 case SCMD_WRITE_G1: 7895 aac_printf(softs, CE_NOTE, 7896 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 7897 desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp), 7898 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 7899 ctl, tgt, lun, is_pd ? "(pd)" : ""); 7900 break; 7901 case SCMD_READ_G4: 7902 case SCMD_WRITE_G4: 7903 aac_printf(softs, CE_NOTE, 7904 "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s", 7905 desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp), 7906 GETG4COUNT(cdbp), 7907 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 7908 ctl, tgt, lun, is_pd ? 
"(pd)" : ""); 7909 break; 7910 case SCMD_READ_G5: 7911 case SCMD_WRITE_G5: 7912 aac_printf(softs, CE_NOTE, 7913 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 7914 desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp), 7915 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 7916 ctl, tgt, lun, is_pd ? "(pd)" : ""); 7917 break; 7918 default: 7919 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s", 7920 desc, ctl, tgt, lun, is_pd ? "(pd)" : ""); 7921 } 7922 } 7923 7924 void 7925 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp) 7926 { 7927 struct aac_cmd *acp = slotp->acp; 7928 struct aac_fib *fibp = slotp->fibp; 7929 ddi_acc_handle_t acc = slotp->fib_acc_handle; 7930 uint16_t fib_size; 7931 uint32_t fib_cmd, sub_cmd; 7932 char *cmdstr, *subcmdstr; 7933 char *caller; 7934 int i; 7935 7936 if (acp) { 7937 if (!(softs->debug_fib_flags & acp->fib_flags)) 7938 return; 7939 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD) 7940 caller = "SCMD"; 7941 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL) 7942 caller = "IOCTL"; 7943 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB) 7944 caller = "SRB"; 7945 else 7946 return; 7947 } else { 7948 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC)) 7949 return; 7950 caller = "SYNC"; 7951 } 7952 7953 fib_cmd = ddi_get16(acc, &fibp->Header.Command); 7954 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds); 7955 sub_cmd = (uint32_t)-1; 7956 subcmdstr = NULL; 7957 7958 /* Print FIB header */ 7959 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) { 7960 aac_printf(softs, CE_NOTE, "FIB> from %s", caller); 7961 aac_printf(softs, CE_NOTE, " XferState %d", 7962 ddi_get32(acc, &fibp->Header.XferState)); 7963 aac_printf(softs, CE_NOTE, " Command %d", 7964 ddi_get16(acc, &fibp->Header.Command)); 7965 aac_printf(softs, CE_NOTE, " StructType %d", 7966 ddi_get8(acc, &fibp->Header.StructType)); 7967 aac_printf(softs, CE_NOTE, " Flags 0x%x", 7968 ddi_get8(acc, &fibp->Header.Flags)); 7969 aac_printf(softs, CE_NOTE, " Size %d", 7970 ddi_get16(acc, &fibp->Header.Size)); 7971 aac_printf(softs, CE_NOTE, " SenderSize %d", 7972 ddi_get16(acc, &fibp->Header.SenderSize)); 7973 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x", 7974 ddi_get32(acc, &fibp->Header.SenderFibAddress)); 7975 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x", 7976 ddi_get32(acc, &fibp->Header.ReceiverFibAddress)); 7977 aac_printf(softs, CE_NOTE, " SenderData 0x%x", 7978 ddi_get32(acc, &fibp->Header.SenderData)); 7979 } 7980 7981 /* Print FIB data */ 7982 switch (fib_cmd) { 7983 case ContainerCommand: 7984 sub_cmd = ddi_get32(acc, 7985 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0])); 7986 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds); 7987 if (subcmdstr == NULL) 7988 break; 7989 7990 switch (sub_cmd) { 7991 case VM_ContainerConfig: { 7992 struct aac_Container *pContainer = 7993 (struct aac_Container *)fibp->data; 7994 7995 fib_cmd = sub_cmd; 7996 cmdstr = subcmdstr; 7997 sub_cmd = (uint32_t)-1; 7998 subcmdstr = NULL; 7999 8000 sub_cmd = ddi_get32(acc, 8001 &pContainer->CTCommand.command); 8002 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds); 8003 if (subcmdstr == NULL) 8004 break; 8005 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)", 8006 subcmdstr, 8007 ddi_get32(acc, &pContainer->CTCommand.param[0]), 8008 ddi_get32(acc, &pContainer->CTCommand.param[1]), 8009 ddi_get32(acc, &pContainer->CTCommand.param[2])); 8010 return; 8011 } 8012 8013 case VM_Ioctl: 8014 fib_cmd = sub_cmd; 8015 cmdstr = subcmdstr; 8016 sub_cmd = (uint32_t)-1; 8017 subcmdstr = NULL; 8018 8019 sub_cmd = ddi_get32(acc, 8020 (void 
*)&(((uint32_t *)(void *)&fibp->data[0])[4])); 8021 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds); 8022 break; 8023 8024 case VM_CtBlockRead: 8025 case VM_CtBlockWrite: { 8026 struct aac_blockread *br = 8027 (struct aac_blockread *)fibp->data; 8028 struct aac_sg_table *sg = &br->SgMap; 8029 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 8030 8031 aac_printf(softs, CE_NOTE, 8032 "FIB> %s Container %d 0x%x/%d", subcmdstr, 8033 ddi_get32(acc, &br->ContainerId), 8034 ddi_get32(acc, &br->BlockNumber), 8035 ddi_get32(acc, &br->ByteCount)); 8036 for (i = 0; i < sgcount; i++) 8037 aac_printf(softs, CE_NOTE, 8038 " %d: 0x%08x/%d", i, 8039 ddi_get32(acc, &sg->SgEntry[i].SgAddress), 8040 ddi_get32(acc, &sg->SgEntry[i]. \ 8041 SgByteCount)); 8042 return; 8043 } 8044 } 8045 break; 8046 8047 case ContainerCommand64: { 8048 struct aac_blockread64 *br = 8049 (struct aac_blockread64 *)fibp->data; 8050 struct aac_sg_table64 *sg = &br->SgMap64; 8051 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 8052 uint64_t sgaddr; 8053 8054 sub_cmd = br->Command; 8055 subcmdstr = NULL; 8056 if (sub_cmd == VM_CtHostRead64) 8057 subcmdstr = "VM_CtHostRead64"; 8058 else if (sub_cmd == VM_CtHostWrite64) 8059 subcmdstr = "VM_CtHostWrite64"; 8060 else 8061 break; 8062 8063 aac_printf(softs, CE_NOTE, 8064 "FIB> %s Container %d 0x%x/%d", subcmdstr, 8065 ddi_get16(acc, &br->ContainerId), 8066 ddi_get32(acc, &br->BlockNumber), 8067 ddi_get16(acc, &br->SectorCount)); 8068 for (i = 0; i < sgcount; i++) { 8069 sgaddr = ddi_get64(acc, 8070 &sg->SgEntry64[i].SgAddress); 8071 aac_printf(softs, CE_NOTE, 8072 " %d: 0x%08x.%08x/%d", i, 8073 AAC_MS32(sgaddr), AAC_LS32(sgaddr), 8074 ddi_get32(acc, &sg->SgEntry64[i]. \ 8075 SgByteCount)); 8076 } 8077 return; 8078 } 8079 8080 case RawIo: { 8081 struct aac_raw_io *io = (struct aac_raw_io *)fibp->data; 8082 struct aac_sg_tableraw *sg = &io->SgMapRaw; 8083 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 8084 uint64_t sgaddr; 8085 8086 aac_printf(softs, CE_NOTE, 8087 "FIB> RawIo Container %d 0x%llx/%d 0x%x", 8088 ddi_get16(acc, &io->ContainerId), 8089 ddi_get64(acc, &io->BlockNumber), 8090 ddi_get32(acc, &io->ByteCount), 8091 ddi_get16(acc, &io->Flags)); 8092 for (i = 0; i < sgcount; i++) { 8093 sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress); 8094 aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i, 8095 AAC_MS32(sgaddr), AAC_LS32(sgaddr), 8096 ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount)); 8097 } 8098 return; 8099 } 8100 8101 case ClusterCommand: 8102 sub_cmd = ddi_get32(acc, 8103 (void *)&(((uint32_t *)(void *)fibp->data)[0])); 8104 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds); 8105 break; 8106 8107 case AifRequest: 8108 sub_cmd = ddi_get32(acc, 8109 (void *)&(((uint32_t *)(void *)fibp->data)[0])); 8110 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds); 8111 break; 8112 8113 default: 8114 break; 8115 } 8116 8117 fib_size = ddi_get16(acc, &(fibp->Header.Size)); 8118 if (subcmdstr) 8119 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 8120 subcmdstr, fib_size); 8121 else if (cmdstr && sub_cmd == (uint32_t)-1) 8122 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 8123 cmdstr, fib_size); 8124 else if (cmdstr) 8125 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d", 8126 cmdstr, sub_cmd, fib_size); 8127 else 8128 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d", 8129 fib_cmd, fib_size); 8130 } 8131 8132 static void 8133 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif) 8134 { 8135 int aif_command; 8136 uint32_t aif_seqnumber; 8137 int aif_en_type; 
8138 char *str; 8139 8140 aif_command = LE_32(aif->command); 8141 aif_seqnumber = LE_32(aif->seqNumber); 8142 aif_en_type = LE_32(aif->data.EN.type); 8143 8144 switch (aif_command) { 8145 case AifCmdEventNotify: 8146 str = aac_cmd_name(aif_en_type, aac_aifens); 8147 if (str) 8148 aac_printf(softs, CE_NOTE, "AIF! %s", str); 8149 else 8150 aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)", 8151 aif_en_type); 8152 break; 8153 8154 case AifCmdJobProgress: 8155 switch (LE_32(aif->data.PR[0].status)) { 8156 case AifJobStsSuccess: 8157 str = "success"; break; 8158 case AifJobStsFinished: 8159 str = "finished"; break; 8160 case AifJobStsAborted: 8161 str = "aborted"; break; 8162 case AifJobStsFailed: 8163 str = "failed"; break; 8164 case AifJobStsSuspended: 8165 str = "suspended"; break; 8166 case AifJobStsRunning: 8167 str = "running"; break; 8168 default: 8169 str = "unknown"; break; 8170 } 8171 aac_printf(softs, CE_NOTE, 8172 "AIF! JobProgress (%d) - %s (%d, %d)", 8173 aif_seqnumber, str, 8174 LE_32(aif->data.PR[0].currentTick), 8175 LE_32(aif->data.PR[0].finalTick)); 8176 break; 8177 8178 case AifCmdAPIReport: 8179 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)", 8180 aif_seqnumber); 8181 break; 8182 8183 case AifCmdDriverNotify: 8184 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)", 8185 aif_seqnumber); 8186 break; 8187 8188 default: 8189 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)", 8190 aif_command, aif_seqnumber); 8191 break; 8192 } 8193 } 8194 8195 #endif /* DEBUG */ 8196
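/*
 * Illustrative sketch only, not part of the driver build: it shows the
 * sentinel-terminated key/string table pattern that the AAC_*_KEY_STRINGS
 * macros and aac_cmd_name() above rely on.  The demo_* names and the
 * user-space main() are hypothetical and exist only to demonstrate the
 * lookup; the real driver tables (aac_fib_cmds, aac_ct_subcmds, ...) are
 * built from the macros earlier in this file and looked up the same way.
 */
#if 0	/* standalone user-space example, never compiled with the driver */
#include <stdio.h>

struct demo_key_strings {
	int key;		/* command/sub-command number */
	const char *message;	/* human-readable name */
};

/* The table ends with a -1/NULL sentinel, like the aac_*_subcmds tables */
static struct demo_key_strings demo_cmds[] = {
	{ 1,	"DemoRead" },
	{ 2,	"DemoWrite" },
	{ 3,	"DemoFlush" },
	{ -1,	NULL }
};

/* Same walk as aac_cmd_name(): linear scan until the -1 sentinel */
static const char *
demo_cmd_name(int cmd, struct demo_key_strings *cmdlist)
{
	int i;

	for (i = 0; cmdlist[i].key != -1; i++) {
		if (cmd == cmdlist[i].key)
			return (cmdlist[i].message);
	}
	return (NULL);	/* caller then prints an "Unknown(0x%x)" style message */
}

int
main(void)
{
	const char *name = demo_cmd_name(2, demo_cmds);

	(void) printf("cmd 2 -> %s\n", name ? name : "Unknown");
	return (0);
}
#endif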