1 /* 2 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6 /* 7 * Copyright 2005-08 Adaptec, Inc. 8 * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner 9 * Copyright (c) 2000 Michael Smith 10 * Copyright (c) 2001 Scott Long 11 * Copyright (c) 2000 BSDi 12 * All rights reserved. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 #include <sys/modctl.h> 36 #include <sys/conf.h> 37 #include <sys/cmn_err.h> 38 #include <sys/ddi.h> 39 #include <sys/devops.h> 40 #include <sys/pci.h> 41 #include <sys/types.h> 42 #include <sys/ddidmareq.h> 43 #include <sys/scsi/scsi.h> 44 #include <sys/ksynch.h> 45 #include <sys/sunddi.h> 46 #include <sys/byteorder.h> 47 #include "aac_regs.h" 48 #include "aac.h" 49 50 /* 51 * FMA header files 52 */ 53 #include <sys/ddifm.h> 54 #include <sys/fm/protocol.h> 55 #include <sys/fm/util.h> 56 #include <sys/fm/io/ddi.h> 57 58 /* 59 * For minor nodes created by the SCSA framework, minor numbers are 60 * formed by left-shifting instance by INST_MINOR_SHIFT and OR in a 61 * number less than 64. 62 * 63 * To support cfgadm, need to confirm the SCSA framework by creating 64 * devctl/scsi and driver specific minor nodes under SCSA format, 65 * and calling scsi_hba_xxx() functions aacordingly. 66 */ 67 68 #define AAC_MINOR 32 69 #define INST2AAC(x) (((x) << INST_MINOR_SHIFT) | AAC_MINOR) 70 #define AAC_SCSA_MINOR(x) ((x) & TRAN_MINOR_MASK) 71 #define AAC_IS_SCSA_NODE(x) ((x) == DEVCTL_MINOR || (x) == SCSI_MINOR) 72 73 #define SD2TRAN(sd) ((sd)->sd_address.a_hba_tran) 74 #define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private) 75 #define AAC_DIP2TRAN(dip) ((scsi_hba_tran_t *)ddi_get_driver_private(dip)) 76 #define AAC_DIP2SOFTS(dip) (AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip))) 77 #define SD2AAC(sd) (AAC_TRAN2SOFTS(SD2TRAN(sd))) 78 #define AAC_PD(t) ((t) - AAC_MAX_LD) 79 #define AAC_DEV(softs, t) (((t) < AAC_MAX_LD) ? \ 80 &(softs)->containers[(t)].dev : \ 81 ((t) < AAC_MAX_DEV(softs)) ? \ 82 &(softs)->nondasds[AAC_PD(t)].dev : NULL) 83 #define AAC_DEVCFG_BEGIN(softs, tgt) \ 84 aac_devcfg((softs), (tgt), 1) 85 #define AAC_DEVCFG_END(softs, tgt) \ 86 aac_devcfg((softs), (tgt), 0) 87 #define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private) 88 #define AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \ 89 if (!(cond)) { \ 90 int count = (timeout) * 10; \ 91 while (count) { \ 92 drv_usecwait(100); \ 93 if (cond) \ 94 break; \ 95 count--; \ 96 } \ 97 (timeout) = (count + 9) / 10; \ 98 } \ 99 } 100 101 #define AAC_SENSE_DATA_DESCR_LEN \ 102 (sizeof (struct scsi_descr_sense_hdr) + \ 103 sizeof (struct scsi_information_sense_descr)) 104 #define AAC_ARQ64_LENGTH \ 105 (sizeof (struct scsi_arq_status) + \ 106 AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH) 107 108 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 109 #define AAC_GETGXADDR(cmdlen, cdbp) \ 110 ((cmdlen == 6) ? GETG0ADDR(cdbp) : \ 111 (cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \ 112 ((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp)) 113 114 #define AAC_CDB_INQUIRY_CMDDT 0x02 115 #define AAC_CDB_INQUIRY_EVPD 0x01 116 #define AAC_VPD_PAGE_CODE 1 117 #define AAC_VPD_PAGE_LENGTH 3 118 #define AAC_VPD_PAGE_DATA 4 119 #define AAC_VPD_ID_CODESET 0 120 #define AAC_VPD_ID_TYPE 1 121 #define AAC_VPD_ID_LENGTH 3 122 #define AAC_VPD_ID_DATA 4 123 124 #define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08 125 #define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08 126 #define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0 127 /* 00b - peripheral device addressing method */ 128 #define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00 129 /* 01b - flat space addressing method */ 130 #define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40 131 /* 10b - logical unit addressing method */ 132 #define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80 133 134 /* Return the size of FIB with data part type data_type */ 135 #define AAC_FIB_SIZEOF(data_type) \ 136 (sizeof (struct aac_fib_header) + sizeof (data_type)) 137 /* Return the container size defined in mir */ 138 #define AAC_MIR_SIZE(softs, acc, mir) \ 139 (((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \ 140 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \ 141 ((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \ 142 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity)) 143 144 /* The last entry of aac_cards[] is for unknown cards */ 145 #define AAC_UNKNOWN_CARD \ 146 (sizeof (aac_cards) / sizeof (struct aac_card_type) - 1) 147 #define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD) 148 #define BUF_IS_READ(bp) ((bp)->b_flags & B_READ) 149 #define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL) 150 #define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC)) 151 152 #define PCI_MEM_GET32(softs, off) \ 153 ddi_get32((softs)->pci_mem_handle, \ 154 (void *)((softs)->pci_mem_base_vaddr + (off))) 155 #define PCI_MEM_PUT32(softs, off, val) \ 156 ddi_put32((softs)->pci_mem_handle, \ 157 (void *)((softs)->pci_mem_base_vaddr + (off)), \ 158 (uint32_t)(val)) 159 #define PCI_MEM_GET16(softs, off) \ 160 ddi_get16((softs)->pci_mem_handle, \ 161 (void *)((softs)->pci_mem_base_vaddr + (off))) 162 #define PCI_MEM_PUT16(softs, off, val) \ 163 ddi_put16((softs)->pci_mem_handle, \ 164 (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val)) 165 /* Write host data at valp to device mem[off] repeatedly count times */ 166 #define PCI_MEM_REP_PUT8(softs, off, valp, count) \ 167 ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 168 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 169 count, DDI_DEV_AUTOINCR) 170 /* Read device data at mem[off] to host addr valp repeatedly count times */ 171 #define PCI_MEM_REP_GET8(softs, off, valp, count) \ 172 ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 173 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 174 count, DDI_DEV_AUTOINCR) 175 #define AAC_GET_FIELD8(acc, d, s, field) \ 176 (d)->field = ddi_get8(acc, (uint8_t *)&(s)->field) 177 #define AAC_GET_FIELD32(acc, d, s, field) \ 178 (d)->field = ddi_get32(acc, (uint32_t *)&(s)->field) 179 #define AAC_GET_FIELD64(acc, d, s, field) \ 180 (d)->field = ddi_get64(acc, (uint64_t *)&(s)->field) 181 #define AAC_REP_GET_FIELD8(acc, d, s, field, r) \ 182 ddi_rep_get8((acc), (uint8_t *)&(d)->field, \ 183 (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 184 #define AAC_REP_GET_FIELD32(acc, d, s, field, r) \ 185 ddi_rep_get32((acc), (uint32_t *)&(d)->field, \ 186 (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 187 188 #define AAC_ENABLE_INTR(softs) { \ 189 if (softs->flags & AAC_FLAGS_NEW_COMM) \ 190 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \ 191 else \ 192 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \ 193 } 194 195 #define AAC_DISABLE_INTR(softs) PCI_MEM_PUT32(softs, AAC_OIMR, ~0) 196 #define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask) 197 #define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR) 198 #define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val) 199 #define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE) 200 #define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val) 201 #define AAC_FWSTATUS_GET(softs) \ 202 ((softs)->aac_if.aif_get_fwstatus(softs)) 203 #define AAC_MAILBOX_GET(softs, mb) \ 204 ((softs)->aac_if.aif_get_mailbox((softs), (mb))) 205 #define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \ 206 ((softs)->aac_if.aif_set_mailbox((softs), (cmd), \ 207 (arg0), (arg1), (arg2), (arg3))) 208 209 #define AAC_THROTTLE_DRAIN -1 210 211 #define AAC_QUIESCE_TICK 1 /* 1 second */ 212 #define AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */ 213 #define AAC_DEFAULT_TICK 10 /* 10 seconds */ 214 #define AAC_SYNC_TICK (30*60) /* 30 minutes */ 215 216 /* Poll time for aac_do_poll_io() */ 217 #define AAC_POLL_TIME 60 /* 60 seconds */ 218 219 /* IOP reset */ 220 #define AAC_IOP_RESET_SUCCEED 0 /* IOP reset succeed */ 221 #define AAC_IOP_RESET_FAILED -1 /* IOP reset failed */ 222 #define AAC_IOP_RESET_ABNORMAL -2 /* Reset operation abnormal */ 223 224 /* 225 * Hardware access functions 226 */ 227 static int aac_rx_get_fwstatus(struct aac_softstate *); 228 static int aac_rx_get_mailbox(struct aac_softstate *, int); 229 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 230 uint32_t, uint32_t, uint32_t); 231 static int aac_rkt_get_fwstatus(struct aac_softstate *); 232 static int aac_rkt_get_mailbox(struct aac_softstate *, int); 233 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 234 uint32_t, uint32_t, uint32_t); 235 236 /* 237 * SCSA function prototypes 238 */ 239 static int aac_attach(dev_info_t *, ddi_attach_cmd_t); 240 static int aac_detach(dev_info_t *, ddi_detach_cmd_t); 241 static int aac_reset(dev_info_t *, ddi_reset_cmd_t); 242 static int aac_quiesce(dev_info_t *); 243 244 /* 245 * Interrupt handler functions 246 */ 247 static int aac_query_intrs(struct aac_softstate *, int); 248 static int aac_add_intrs(struct aac_softstate *); 249 static void aac_remove_intrs(struct aac_softstate *); 250 static uint_t aac_intr_old(caddr_t); 251 static uint_t aac_intr_new(caddr_t); 252 static uint_t aac_softintr(caddr_t); 253 254 /* 255 * Internal functions in attach 256 */ 257 static int aac_check_card_type(struct aac_softstate *); 258 static int aac_check_firmware(struct aac_softstate *); 259 static int aac_common_attach(struct aac_softstate *); 260 static void aac_common_detach(struct aac_softstate *); 261 static int aac_probe_containers(struct aac_softstate *); 262 static int aac_alloc_comm_space(struct aac_softstate *); 263 static int aac_setup_comm_space(struct aac_softstate *); 264 static void aac_free_comm_space(struct aac_softstate *); 265 static int aac_hba_setup(struct aac_softstate *); 266 267 /* 268 * Sync FIB operation functions 269 */ 270 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t, 271 uint32_t, uint32_t, uint32_t, uint32_t *); 272 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t); 273 274 /* 275 * Command queue operation functions 276 */ 277 static void aac_cmd_initq(struct aac_cmd_queue *); 278 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *); 279 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *); 280 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *); 281 282 /* 283 * FIB queue operation functions 284 */ 285 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t); 286 static int aac_fib_dequeue(struct aac_softstate *, int, int *); 287 288 /* 289 * Slot operation functions 290 */ 291 static int aac_create_slots(struct aac_softstate *); 292 static void aac_destroy_slots(struct aac_softstate *); 293 static void aac_alloc_fibs(struct aac_softstate *); 294 static void aac_destroy_fibs(struct aac_softstate *); 295 static struct aac_slot *aac_get_slot(struct aac_softstate *); 296 static void aac_release_slot(struct aac_softstate *, struct aac_slot *); 297 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *); 298 static void aac_free_fib(struct aac_slot *); 299 300 /* 301 * Internal functions 302 */ 303 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_slot *, 304 uint16_t, uint16_t); 305 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *); 306 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *); 307 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *); 308 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *); 309 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *); 310 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *); 311 static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *); 312 static void aac_start_waiting_io(struct aac_softstate *); 313 static void aac_drain_comp_q(struct aac_softstate *); 314 int aac_do_io(struct aac_softstate *, struct aac_cmd *); 315 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *); 316 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *); 317 static int aac_send_command(struct aac_softstate *, struct aac_slot *); 318 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *); 319 static int aac_dma_sync_ac(struct aac_cmd *); 320 static int aac_shutdown(struct aac_softstate *); 321 static int aac_reset_adapter(struct aac_softstate *); 322 static int aac_do_quiesce(struct aac_softstate *softs); 323 static int aac_do_unquiesce(struct aac_softstate *softs); 324 static void aac_unhold_bus(struct aac_softstate *, int); 325 static void aac_set_throttle(struct aac_softstate *, struct aac_device *, 326 int, int); 327 328 /* 329 * Adapter Initiated FIB handling function 330 */ 331 static int aac_handle_aif(struct aac_softstate *, struct aac_fib *); 332 333 /* 334 * Timeout handling thread function 335 */ 336 static void aac_daemon(void *); 337 338 /* 339 * IOCTL interface related functions 340 */ 341 static int aac_open(dev_t *, int, int, cred_t *); 342 static int aac_close(dev_t, int, int, cred_t *); 343 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); 344 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int); 345 346 /* 347 * FMA Prototypes 348 */ 349 static void aac_fm_init(struct aac_softstate *); 350 static void aac_fm_fini(struct aac_softstate *); 351 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *); 352 int aac_check_acc_handle(ddi_acc_handle_t); 353 int aac_check_dma_handle(ddi_dma_handle_t); 354 void aac_fm_ereport(struct aac_softstate *, char *); 355 356 /* 357 * Auto enumeration functions 358 */ 359 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t); 360 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t, 361 void *, dev_info_t **); 362 static int aac_dr_event(struct aac_softstate *, int, int, int); 363 364 #ifdef DEBUG 365 /* 366 * UART debug output support 367 */ 368 369 #define AAC_PRINT_BUFFER_SIZE 512 370 #define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. */ 371 372 #define AAC_FW_DBG_STRLEN_OFFSET 0x00 373 #define AAC_FW_DBG_FLAGS_OFFSET 0x04 374 #define AAC_FW_DBG_BLED_OFFSET 0x08 375 376 static int aac_get_fw_debug_buffer(struct aac_softstate *); 377 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *); 378 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *); 379 380 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE]; 381 static char aac_fmt[] = " %s"; 382 static char aac_fmt_header[] = " %s.%d: %s"; 383 static kmutex_t aac_prt_mutex; 384 385 /* 386 * Debug flags to be put into the softstate flags field 387 * when initialized 388 */ 389 uint32_t aac_debug_flags = 390 /* AACDB_FLAGS_KERNEL_PRINT | */ 391 /* AACDB_FLAGS_FW_PRINT | */ 392 /* AACDB_FLAGS_MISC | */ 393 /* AACDB_FLAGS_FUNC1 | */ 394 /* AACDB_FLAGS_FUNC2 | */ 395 /* AACDB_FLAGS_SCMD | */ 396 /* AACDB_FLAGS_AIF | */ 397 /* AACDB_FLAGS_FIB | */ 398 /* AACDB_FLAGS_IOCTL | */ 399 0; 400 uint32_t aac_debug_fib_flags = 401 /* AACDB_FLAGS_FIB_RW | */ 402 /* AACDB_FLAGS_FIB_IOCTL | */ 403 /* AACDB_FLAGS_FIB_SRB | */ 404 /* AACDB_FLAGS_FIB_SYNC | */ 405 /* AACDB_FLAGS_FIB_HEADER | */ 406 /* AACDB_FLAGS_FIB_TIMEOUT | */ 407 0; 408 409 #endif /* DEBUG */ 410 411 static struct cb_ops aac_cb_ops = { 412 aac_open, /* open */ 413 aac_close, /* close */ 414 nodev, /* strategy */ 415 nodev, /* print */ 416 nodev, /* dump */ 417 nodev, /* read */ 418 nodev, /* write */ 419 aac_ioctl, /* ioctl */ 420 nodev, /* devmap */ 421 nodev, /* mmap */ 422 nodev, /* segmap */ 423 nochpoll, /* poll */ 424 ddi_prop_op, /* cb_prop_op */ 425 NULL, /* streamtab */ 426 D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 427 CB_REV, /* cb_rev */ 428 nodev, /* async I/O read entry point */ 429 nodev /* async I/O write entry point */ 430 }; 431 432 static struct dev_ops aac_dev_ops = { 433 DEVO_REV, 434 0, 435 nodev, 436 nulldev, 437 nulldev, 438 aac_attach, 439 aac_detach, 440 aac_reset, 441 &aac_cb_ops, 442 NULL, 443 NULL, 444 aac_quiesce, 445 }; 446 447 static struct modldrv aac_modldrv = { 448 &mod_driverops, 449 "AAC Driver " AAC_DRIVER_VERSION, 450 &aac_dev_ops, 451 }; 452 453 static struct modlinkage aac_modlinkage = { 454 MODREV_1, 455 &aac_modldrv, 456 NULL 457 }; 458 459 static struct aac_softstate *aac_softstatep; 460 461 /* 462 * Supported card list 463 * ordered in vendor id, subvendor id, subdevice id, and device id 464 */ 465 static struct aac_card_type aac_cards[] = { 466 {0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX, 467 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 468 "Dell", "PERC 3/Di"}, 469 {0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX, 470 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 471 "Dell", "PERC 3/Di"}, 472 {0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX, 473 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 474 "Dell", "PERC 3/Si"}, 475 {0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX, 476 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 477 "Dell", "PERC 3/Di"}, 478 {0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX, 479 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 480 "Dell", "PERC 3/Si"}, 481 {0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX, 482 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 483 "Dell", "PERC 3/Di"}, 484 {0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX, 485 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 486 "Dell", "PERC 3/Di"}, 487 {0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX, 488 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 489 "Dell", "PERC 3/Di"}, 490 {0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX, 491 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 492 "Dell", "PERC 3/Di"}, 493 {0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX, 494 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 495 "Dell", "PERC 3/Di"}, 496 {0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX, 497 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 498 "Dell", "PERC 320/DC"}, 499 {0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX, 500 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"}, 501 502 {0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX, 503 0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"}, 504 {0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX, 505 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"}, 506 {0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT, 507 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"}, 508 509 {0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX, 510 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 511 {0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX, 512 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 513 514 {0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX, 515 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 516 "Adaptec", "2200S"}, 517 {0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX, 518 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 519 "Adaptec", "2120S"}, 520 {0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX, 521 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 522 "Adaptec", "2200S"}, 523 {0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX, 524 0, AAC_TYPE_SCSI, "Adaptec", "3230S"}, 525 {0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX, 526 0, AAC_TYPE_SCSI, "Adaptec", "3240S"}, 527 {0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX, 528 0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"}, 529 {0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX, 530 0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"}, 531 {0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT, 532 0, AAC_TYPE_SCSI, "Adaptec", "2230S"}, 533 {0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT, 534 0, AAC_TYPE_SCSI, "Adaptec", "2130S"}, 535 {0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX, 536 0, AAC_TYPE_SATA, "Adaptec", "2020SA"}, 537 {0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX, 538 0, AAC_TYPE_SATA, "Adaptec", "2025SA"}, 539 {0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX, 540 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"}, 541 {0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX, 542 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"}, 543 {0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX, 544 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"}, 545 {0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX, 546 0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"}, 547 {0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX, 548 0, AAC_TYPE_SCSI, "Adaptec", "2240S"}, 549 {0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX, 550 0, AAC_TYPE_SAS, "Adaptec", "4005SAS"}, 551 {0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX, 552 0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"}, 553 {0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX, 554 0, AAC_TYPE_SAS, "Adaptec", "4800SAS"}, 555 {0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX, 556 0, AAC_TYPE_SAS, "Adaptec", "4805SAS"}, 557 {0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT, 558 0, AAC_TYPE_SATA, "Adaptec", "2820SA"}, 559 {0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT, 560 0, AAC_TYPE_SATA, "Adaptec", "2620SA"}, 561 {0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT, 562 0, AAC_TYPE_SATA, "Adaptec", "2420SA"}, 563 {0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT, 564 0, AAC_TYPE_SATA, "ICP", "9024RO"}, 565 {0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT, 566 0, AAC_TYPE_SATA, "ICP", "9014RO"}, 567 {0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT, 568 0, AAC_TYPE_SATA, "ICP", "9047MA"}, 569 {0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT, 570 0, AAC_TYPE_SATA, "ICP", "9087MA"}, 571 {0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX, 572 0, AAC_TYPE_SAS, "ICP", "9085LI"}, 573 {0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX, 574 0, AAC_TYPE_SAS, "ICP", "5085BR"}, 575 {0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT, 576 0, AAC_TYPE_SATA, "ICP", "9067MA"}, 577 {0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX, 578 0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"}, 579 {0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX, 580 0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"}, 581 {0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX, 582 0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"}, 583 {0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX, 584 0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"}, 585 {0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX, 586 0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"}, 587 {0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX, 588 0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"}, 589 590 {0, 0, 0, 0, AAC_HWIF_UNKNOWN, 591 0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"}, 592 }; 593 594 /* 595 * Hardware access functions for i960 based cards 596 */ 597 static struct aac_interface aac_rx_interface = { 598 aac_rx_get_fwstatus, 599 aac_rx_get_mailbox, 600 aac_rx_set_mailbox 601 }; 602 603 /* 604 * Hardware access functions for Rocket based cards 605 */ 606 static struct aac_interface aac_rkt_interface = { 607 aac_rkt_get_fwstatus, 608 aac_rkt_get_mailbox, 609 aac_rkt_set_mailbox 610 }; 611 612 ddi_device_acc_attr_t aac_acc_attr = { 613 DDI_DEVICE_ATTR_V0, 614 DDI_STRUCTURE_LE_ACC, 615 DDI_STRICTORDER_ACC 616 }; 617 618 static struct { 619 int size; 620 int notify; 621 } aac_qinfo[] = { 622 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 623 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 624 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 625 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 626 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 627 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 628 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 629 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 630 }; 631 632 /* 633 * Default aac dma attributes 634 */ 635 static ddi_dma_attr_t aac_dma_attr = { 636 DMA_ATTR_V0, 637 0, /* lowest usable address */ 638 0xffffffffull, /* high DMA address range */ 639 0xffffffffull, /* DMA counter register */ 640 AAC_DMA_ALIGN, /* DMA address alignment */ 641 1, /* DMA burstsizes */ 642 1, /* min effective DMA size */ 643 0xffffffffull, /* max DMA xfer size */ 644 0xffffffffull, /* segment boundary */ 645 1, /* s/g list length */ 646 AAC_BLK_SIZE, /* granularity of device */ 647 0 /* DMA transfer flags */ 648 }; 649 650 struct aac_drinfo { 651 struct aac_softstate *softs; 652 int tgt; 653 int lun; 654 int event; 655 }; 656 657 static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */ 658 static uint32_t aac_timebase = 0; /* internal timer in seconds */ 659 static uint32_t aac_sync_time = 0; /* next time to sync. with firmware */ 660 661 /* 662 * Warlock directives 663 * 664 * Different variables with the same types have to be protected by the 665 * same mutex; otherwise, warlock will complain with "variables don't 666 * seem to be protected consistently". For example, 667 * aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected 668 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to 669 * declare them as protected explictly at aac_cmd_dequeue(). 670 */ 671 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \ 672 scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \ 673 mode_format mode_geometry mode_header aac_cmd)) 674 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \ 675 aac_sge)) 676 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \ 677 aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \ 678 aac_sg_table aac_srb)) 679 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry)) 680 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address)) 681 _NOTE(SCHEME_PROTECTS_DATA("unique to dr event", aac_drinfo)) 682 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf)) 683 684 int 685 _init(void) 686 { 687 int rval = 0; 688 689 #ifdef DEBUG 690 mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL); 691 #endif 692 DBCALLED(NULL, 1); 693 694 if ((rval = ddi_soft_state_init((void *)&aac_softstatep, 695 sizeof (struct aac_softstate), 0)) != 0) 696 goto error; 697 698 if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) { 699 ddi_soft_state_fini((void *)&aac_softstatep); 700 goto error; 701 } 702 703 if ((rval = mod_install(&aac_modlinkage)) != 0) { 704 ddi_soft_state_fini((void *)&aac_softstatep); 705 scsi_hba_fini(&aac_modlinkage); 706 goto error; 707 } 708 return (rval); 709 710 error: 711 AACDB_PRINT(NULL, CE_WARN, "Mod init error!"); 712 #ifdef DEBUG 713 mutex_destroy(&aac_prt_mutex); 714 #endif 715 return (rval); 716 } 717 718 int 719 _info(struct modinfo *modinfop) 720 { 721 DBCALLED(NULL, 1); 722 return (mod_info(&aac_modlinkage, modinfop)); 723 } 724 725 /* 726 * An HBA driver cannot be unload unless you reboot, 727 * so this function will be of no use. 728 */ 729 int 730 _fini(void) 731 { 732 int rval; 733 734 DBCALLED(NULL, 1); 735 736 if ((rval = mod_remove(&aac_modlinkage)) != 0) 737 goto error; 738 739 scsi_hba_fini(&aac_modlinkage); 740 ddi_soft_state_fini((void *)&aac_softstatep); 741 #ifdef DEBUG 742 mutex_destroy(&aac_prt_mutex); 743 #endif 744 return (0); 745 746 error: 747 AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!"); 748 return (rval); 749 } 750 751 int aac_use_msi = 0; 752 753 static int 754 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 755 { 756 int instance, i; 757 struct aac_softstate *softs = NULL; 758 int attach_state = 0; 759 char *data; 760 int intr_types; 761 762 DBCALLED(NULL, 1); 763 764 switch (cmd) { 765 case DDI_ATTACH: 766 break; 767 case DDI_RESUME: 768 return (DDI_FAILURE); 769 default: 770 return (DDI_FAILURE); 771 } 772 773 instance = ddi_get_instance(dip); 774 775 /* Get soft state */ 776 if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) { 777 AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state"); 778 goto error; 779 } 780 softs = ddi_get_soft_state(aac_softstatep, instance); 781 attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED; 782 783 softs->instance = instance; 784 softs->devinfo_p = dip; 785 softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr; 786 softs->addr_dma_attr.dma_attr_granular = 1; 787 softs->acc_attr = aac_acc_attr; 788 softs->card = AAC_UNKNOWN_CARD; 789 #ifdef DEBUG 790 softs->debug_flags = aac_debug_flags; 791 softs->debug_fib_flags = aac_debug_fib_flags; 792 #endif 793 794 /* Initialize FMA */ 795 aac_fm_init(softs); 796 797 /* Check the card type */ 798 if (aac_check_card_type(softs) == AACERR) { 799 AACDB_PRINT(softs, CE_WARN, "Card not supported"); 800 goto error; 801 } 802 /* We have found the right card and everything is OK */ 803 attach_state |= AAC_ATTACH_CARD_DETECTED; 804 805 /* Map PCI mem space */ 806 if (ddi_regs_map_setup(dip, 1, 807 (caddr_t *)&softs->pci_mem_base_vaddr, 0, 808 softs->map_size_min, &softs->acc_attr, 809 &softs->pci_mem_handle) != DDI_SUCCESS) 810 goto error; 811 812 softs->map_size = softs->map_size_min; 813 attach_state |= AAC_ATTACH_PCI_MEM_MAPPED; 814 815 AAC_DISABLE_INTR(softs); 816 817 /* Get the type of device intrrupts */ 818 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) { 819 AACDB_PRINT(softs, CE_WARN, 820 "ddi_intr_get_supported_types() failed"); 821 goto error; 822 } 823 AACDB_PRINT(softs, CE_NOTE, 824 "ddi_intr_get_supported_types() ret: 0x%x", intr_types); 825 826 /* Query interrupt, and alloc/init all needed struct */ 827 if ((intr_types & DDI_INTR_TYPE_MSI) && aac_use_msi) { 828 if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI) 829 != DDI_SUCCESS) { 830 AACDB_PRINT(softs, CE_WARN, 831 "MSI interrupt query failed"); 832 goto error; 833 } 834 softs->intr_type = DDI_INTR_TYPE_MSI; 835 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 836 if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED) 837 != DDI_SUCCESS) { 838 AACDB_PRINT(softs, CE_WARN, 839 "FIXED interrupt query failed"); 840 goto error; 841 } 842 softs->intr_type = DDI_INTR_TYPE_FIXED; 843 } else { 844 AACDB_PRINT(softs, CE_WARN, 845 "Device cannot suppport both FIXED and MSI interrupts"); 846 goto error; 847 } 848 849 /* Init mutexes */ 850 mutex_init(&softs->q_comp_mutex, NULL, 851 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri)); 852 cv_init(&softs->event, NULL, CV_DRIVER, NULL); 853 mutex_init(&softs->aifq_mutex, NULL, 854 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri)); 855 cv_init(&softs->aifv, NULL, CV_DRIVER, NULL); 856 cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL); 857 mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER, 858 DDI_INTR_PRI(softs->intr_pri)); 859 attach_state |= AAC_ATTACH_KMUTEX_INITED; 860 861 /* Check for legacy device naming support */ 862 softs->legacy = 1; /* default to use legacy name */ 863 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 864 "legacy-name-enable", &data) == DDI_SUCCESS)) { 865 if (strcmp(data, "no") == 0) { 866 AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled"); 867 softs->legacy = 0; 868 } 869 ddi_prop_free(data); 870 } 871 872 /* 873 * Everything has been set up till now, 874 * we will do some common attach. 875 */ 876 if (aac_common_attach(softs) == AACERR) 877 goto error; 878 attach_state |= AAC_ATTACH_COMM_SPACE_SETUP; 879 880 /* Check for buf breakup support */ 881 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 882 "breakup-enable", &data) == DDI_SUCCESS)) { 883 if (strcmp(data, "yes") == 0) { 884 AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled"); 885 softs->flags |= AAC_FLAGS_BRKUP; 886 } 887 ddi_prop_free(data); 888 } 889 softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer; 890 if (softs->flags & AAC_FLAGS_BRKUP) { 891 softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 892 DDI_PROP_DONTPASS, "dma-max", softs->dma_max); 893 } 894 895 /* Init the cmd queues */ 896 for (i = 0; i < AAC_CMDQ_NUM; i++) 897 aac_cmd_initq(&softs->q_wait[i]); 898 aac_cmd_initq(&softs->q_busy); 899 aac_cmd_initq(&softs->q_comp); 900 901 if (aac_hba_setup(softs) != AACOK) 902 goto error; 903 attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP; 904 905 /* Connect interrupt handlers */ 906 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id, 907 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) { 908 AACDB_PRINT(softs, CE_WARN, 909 "Can not setup soft interrupt handler!"); 910 goto error; 911 } 912 attach_state |= AAC_ATTACH_SOFT_INTR_SETUP; 913 914 if (aac_add_intrs(softs) != DDI_SUCCESS) { 915 AACDB_PRINT(softs, CE_WARN, 916 "Interrupt registration failed, intr type: %s", 917 softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED"); 918 goto error; 919 } 920 attach_state |= AAC_ATTACH_HARD_INTR_SETUP; 921 922 /* Create devctl/scsi nodes for cfgadm */ 923 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 924 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) { 925 AACDB_PRINT(softs, CE_WARN, "failed to create devctl node"); 926 goto error; 927 } 928 attach_state |= AAC_ATTACH_CREATE_DEVCTL; 929 930 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance), 931 DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 932 AACDB_PRINT(softs, CE_WARN, "failed to create scsi node"); 933 goto error; 934 } 935 attach_state |= AAC_ATTACH_CREATE_SCSI; 936 937 /* Create aac node for app. to issue ioctls */ 938 if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance), 939 DDI_PSEUDO, 0) != DDI_SUCCESS) { 940 AACDB_PRINT(softs, CE_WARN, "failed to create aac node"); 941 goto error; 942 } 943 944 /* Create a taskq for dealing with dr events */ 945 if ((softs->taskq = ddi_taskq_create(dip, "aac_dr_taskq", 1, 946 TASKQ_DEFAULTPRI, 0)) == NULL) { 947 AACDB_PRINT(softs, CE_WARN, "ddi_taskq_create failed"); 948 goto error; 949 } 950 951 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 952 softs->state = AAC_STATE_RUN; 953 954 /* Create a thread for command timeout */ 955 softs->timeout_id = timeout(aac_daemon, (void *)softs, 956 (60 * drv_usectohz(1000000))); 957 958 /* Common attach is OK, so we are attached! */ 959 AAC_ENABLE_INTR(softs); 960 ddi_report_dev(dip); 961 AACDB_PRINT(softs, CE_NOTE, "aac attached ok"); 962 return (DDI_SUCCESS); 963 964 error: 965 if (softs && softs->taskq) 966 ddi_taskq_destroy(softs->taskq); 967 if (attach_state & AAC_ATTACH_CREATE_SCSI) 968 ddi_remove_minor_node(dip, "scsi"); 969 if (attach_state & AAC_ATTACH_CREATE_DEVCTL) 970 ddi_remove_minor_node(dip, "devctl"); 971 if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP) 972 aac_common_detach(softs); 973 if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) { 974 (void) scsi_hba_detach(dip); 975 scsi_hba_tran_free(AAC_DIP2TRAN(dip)); 976 } 977 if (attach_state & AAC_ATTACH_HARD_INTR_SETUP) 978 aac_remove_intrs(softs); 979 if (attach_state & AAC_ATTACH_SOFT_INTR_SETUP) 980 ddi_remove_softintr(softs->softint_id); 981 if (attach_state & AAC_ATTACH_KMUTEX_INITED) { 982 mutex_destroy(&softs->q_comp_mutex); 983 cv_destroy(&softs->event); 984 mutex_destroy(&softs->aifq_mutex); 985 cv_destroy(&softs->aifv); 986 cv_destroy(&softs->drain_cv); 987 mutex_destroy(&softs->io_lock); 988 } 989 if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED) 990 ddi_regs_map_free(&softs->pci_mem_handle); 991 aac_fm_fini(softs); 992 if (attach_state & AAC_ATTACH_CARD_DETECTED) 993 softs->card = AACERR; 994 if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED) 995 ddi_soft_state_free(aac_softstatep, instance); 996 return (DDI_FAILURE); 997 } 998 999 static int 1000 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1001 { 1002 scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip); 1003 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 1004 1005 DBCALLED(softs, 1); 1006 1007 switch (cmd) { 1008 case DDI_DETACH: 1009 break; 1010 case DDI_SUSPEND: 1011 return (DDI_FAILURE); 1012 default: 1013 return (DDI_FAILURE); 1014 } 1015 1016 mutex_enter(&softs->io_lock); 1017 AAC_DISABLE_INTR(softs); 1018 softs->state = AAC_STATE_STOPPED; 1019 1020 mutex_exit(&softs->io_lock); 1021 (void) untimeout(softs->timeout_id); 1022 mutex_enter(&softs->io_lock); 1023 softs->timeout_id = 0; 1024 1025 ddi_taskq_destroy(softs->taskq); 1026 1027 ddi_remove_minor_node(dip, "aac"); 1028 ddi_remove_minor_node(dip, "scsi"); 1029 ddi_remove_minor_node(dip, "devctl"); 1030 1031 mutex_exit(&softs->io_lock); 1032 aac_remove_intrs(softs); 1033 ddi_remove_softintr(softs->softint_id); 1034 1035 aac_common_detach(softs); 1036 1037 (void) scsi_hba_detach(dip); 1038 scsi_hba_tran_free(tran); 1039 1040 mutex_destroy(&softs->q_comp_mutex); 1041 cv_destroy(&softs->event); 1042 mutex_destroy(&softs->aifq_mutex); 1043 cv_destroy(&softs->aifv); 1044 cv_destroy(&softs->drain_cv); 1045 mutex_destroy(&softs->io_lock); 1046 1047 ddi_regs_map_free(&softs->pci_mem_handle); 1048 aac_fm_fini(softs); 1049 softs->hwif = AAC_HWIF_UNKNOWN; 1050 softs->card = AAC_UNKNOWN_CARD; 1051 ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip)); 1052 1053 return (DDI_SUCCESS); 1054 } 1055 1056 /*ARGSUSED*/ 1057 static int 1058 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1059 { 1060 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1061 1062 DBCALLED(softs, 1); 1063 1064 mutex_enter(&softs->io_lock); 1065 (void) aac_shutdown(softs); 1066 mutex_exit(&softs->io_lock); 1067 1068 return (DDI_SUCCESS); 1069 } 1070 1071 /* 1072 * quiesce(9E) entry point. 1073 * 1074 * This function is called when the system is single-threaded at high 1075 * PIL with preemption disabled. Therefore, this function must not be 1076 * blocked. 1077 * 1078 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 1079 * DDI_FAILURE indicates an error condition and should almost never happen. 1080 */ 1081 static int 1082 aac_quiesce(dev_info_t *dip) 1083 { 1084 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1085 1086 if (softs == NULL) 1087 return (DDI_FAILURE); 1088 1089 AAC_DISABLE_INTR(softs); 1090 1091 return (DDI_SUCCESS); 1092 } 1093 1094 /* 1095 * Bring the controller down to a dormant state and detach all child devices. 1096 * This function is called before detach or system shutdown. 1097 * Note: we can assume that the q_wait on the controller is empty, as we 1098 * won't allow shutdown if any device is open. 1099 */ 1100 static int 1101 aac_shutdown(struct aac_softstate *softs) 1102 { 1103 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 1104 struct aac_close_command *cc = (struct aac_close_command *) \ 1105 &softs->sync_slot.fibp->data[0]; 1106 int rval; 1107 1108 ddi_put32(acc, &cc->Command, VM_CloseAll); 1109 ddi_put32(acc, &cc->ContainerId, 0xfffffffful); 1110 1111 /* Flush all caches, set FW to write through mode */ 1112 rval = aac_sync_fib(softs, ContainerCommand, 1113 AAC_FIB_SIZEOF(struct aac_close_command)); 1114 1115 AACDB_PRINT(softs, CE_NOTE, 1116 "shutting down aac %s", (rval == AACOK) ? "ok" : "fail"); 1117 return (rval); 1118 } 1119 1120 static uint_t 1121 aac_softintr(caddr_t arg) 1122 { 1123 struct aac_softstate *softs = (void *)arg; 1124 1125 if (!AAC_IS_Q_EMPTY(&softs->q_comp)) { 1126 aac_drain_comp_q(softs); 1127 return (DDI_INTR_CLAIMED); 1128 } else { 1129 return (DDI_INTR_UNCLAIMED); 1130 } 1131 } 1132 1133 /* 1134 * Setup auto sense data for pkt 1135 */ 1136 static void 1137 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key, 1138 uchar_t add_code, uchar_t qual_code, uint64_t info) 1139 { 1140 struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp); 1141 1142 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */ 1143 pkt->pkt_state |= STATE_ARQ_DONE; 1144 1145 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1146 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1147 arqstat->sts_rqpkt_resid = 0; 1148 arqstat->sts_rqpkt_state = 1149 STATE_GOT_BUS | 1150 STATE_GOT_TARGET | 1151 STATE_SENT_CMD | 1152 STATE_XFERRED_DATA; 1153 arqstat->sts_rqpkt_statistics = 0; 1154 1155 if (info <= 0xfffffffful) { 1156 arqstat->sts_sensedata.es_valid = 1; 1157 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE; 1158 arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT; 1159 arqstat->sts_sensedata.es_key = key; 1160 arqstat->sts_sensedata.es_add_code = add_code; 1161 arqstat->sts_sensedata.es_qual_code = qual_code; 1162 1163 arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF; 1164 arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF; 1165 arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF; 1166 arqstat->sts_sensedata.es_info_4 = info & 0xFF; 1167 } else { /* 64-bit LBA */ 1168 struct scsi_descr_sense_hdr *dsp; 1169 struct scsi_information_sense_descr *isd; 1170 1171 dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata; 1172 dsp->ds_class = CLASS_EXTENDED_SENSE; 1173 dsp->ds_code = CODE_FMT_DESCR_CURRENT; 1174 dsp->ds_key = key; 1175 dsp->ds_add_code = add_code; 1176 dsp->ds_qual_code = qual_code; 1177 dsp->ds_addl_sense_length = 1178 sizeof (struct scsi_information_sense_descr); 1179 1180 isd = (struct scsi_information_sense_descr *)(dsp+1); 1181 isd->isd_descr_type = DESCR_INFORMATION; 1182 isd->isd_valid = 1; 1183 isd->isd_information[0] = (info >> 56) & 0xFF; 1184 isd->isd_information[1] = (info >> 48) & 0xFF; 1185 isd->isd_information[2] = (info >> 40) & 0xFF; 1186 isd->isd_information[3] = (info >> 32) & 0xFF; 1187 isd->isd_information[4] = (info >> 24) & 0xFF; 1188 isd->isd_information[5] = (info >> 16) & 0xFF; 1189 isd->isd_information[6] = (info >> 8) & 0xFF; 1190 isd->isd_information[7] = (info) & 0xFF; 1191 } 1192 } 1193 1194 /* 1195 * Setup auto sense data for HARDWARE ERROR 1196 */ 1197 static void 1198 aac_set_arq_data_hwerr(struct aac_cmd *acp) 1199 { 1200 union scsi_cdb *cdbp; 1201 uint64_t err_blkno; 1202 1203 cdbp = (void *)acp->pkt->pkt_cdbp; 1204 err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp); 1205 aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno); 1206 } 1207 1208 /* 1209 * Setup auto sense data for UNIT ATTENTION 1210 */ 1211 /*ARGSUSED*/ 1212 static void 1213 aac_set_arq_data_reset(struct aac_softstate *softs, struct aac_cmd *acp) 1214 { 1215 struct aac_container *dvp = (struct aac_container *)acp->dvp; 1216 1217 ASSERT(dvp->dev.type == AAC_DEV_LD); 1218 1219 if (dvp->reset) { 1220 dvp->reset = 0; 1221 aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 0x29, 0x02, 0); 1222 } 1223 } 1224 1225 /* 1226 * Send a command to the adapter in New Comm. interface 1227 */ 1228 static int 1229 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp) 1230 { 1231 uint32_t index, device; 1232 1233 index = PCI_MEM_GET32(softs, AAC_IQUE); 1234 if (index == 0xffffffffUL) { 1235 index = PCI_MEM_GET32(softs, AAC_IQUE); 1236 if (index == 0xffffffffUL) 1237 return (AACERR); 1238 } 1239 1240 device = index; 1241 PCI_MEM_PUT32(softs, device, 1242 (uint32_t)(slotp->fib_phyaddr & 0xfffffffful)); 1243 device += 4; 1244 PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32)); 1245 device += 4; 1246 PCI_MEM_PUT32(softs, device, slotp->acp->fib_size); 1247 PCI_MEM_PUT32(softs, AAC_IQUE, index); 1248 return (AACOK); 1249 } 1250 1251 static void 1252 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp) 1253 { 1254 struct aac_device *dvp = acp->dvp; 1255 int q = AAC_CMDQ(acp); 1256 1257 if (acp->slotp) { /* outstanding cmd */ 1258 aac_release_slot(softs, acp->slotp); 1259 acp->slotp = NULL; 1260 if (dvp) { 1261 dvp->ncmds[q]--; 1262 if (dvp->throttle[q] == AAC_THROTTLE_DRAIN && 1263 dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC) 1264 aac_set_throttle(softs, dvp, q, 1265 softs->total_slots); 1266 } 1267 softs->bus_ncmds[q]--; 1268 (void) aac_cmd_delete(&softs->q_busy, acp); 1269 } else { /* cmd in waiting queue */ 1270 aac_cmd_delete(&softs->q_wait[q], acp); 1271 } 1272 1273 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */ 1274 mutex_enter(&softs->q_comp_mutex); 1275 aac_cmd_enqueue(&softs->q_comp, acp); 1276 mutex_exit(&softs->q_comp_mutex); 1277 } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */ 1278 cv_broadcast(&softs->event); 1279 } 1280 } 1281 1282 static void 1283 aac_handle_io(struct aac_softstate *softs, int index) 1284 { 1285 struct aac_slot *slotp; 1286 struct aac_cmd *acp; 1287 uint32_t fast; 1288 1289 fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE; 1290 index >>= 2; 1291 1292 /* Make sure firmware reported index is valid */ 1293 ASSERT(index >= 0 && index < softs->total_slots); 1294 slotp = &softs->io_slot[index]; 1295 ASSERT(slotp->index == index); 1296 acp = slotp->acp; 1297 1298 if (acp == NULL || acp->slotp != slotp) { 1299 cmn_err(CE_WARN, 1300 "Firmware error: invalid slot index received from FW"); 1301 return; 1302 } 1303 1304 acp->flags |= AAC_CMD_CMPLT; 1305 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU); 1306 1307 if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) { 1308 /* 1309 * For fast response IO, the firmware do not return any FIB 1310 * data, so we need to fill in the FIB status and state so that 1311 * FIB users can handle it correctly. 1312 */ 1313 if (fast) { 1314 uint32_t state; 1315 1316 state = ddi_get32(slotp->fib_acc_handle, 1317 &slotp->fibp->Header.XferState); 1318 /* 1319 * Update state for CPU not for device, no DMA sync 1320 * needed 1321 */ 1322 ddi_put32(slotp->fib_acc_handle, 1323 &slotp->fibp->Header.XferState, 1324 state | AAC_FIBSTATE_DONEADAP); 1325 ddi_put32(slotp->fib_acc_handle, 1326 (void *)&slotp->fibp->data[0], ST_OK); 1327 } 1328 1329 /* Handle completed ac */ 1330 acp->ac_comp(softs, acp); 1331 } else { 1332 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 1333 acp->flags |= AAC_CMD_ERR; 1334 if (acp->pkt) { 1335 acp->pkt->pkt_reason = CMD_TRAN_ERR; 1336 acp->pkt->pkt_statistics = 0; 1337 } 1338 } 1339 aac_end_io(softs, acp); 1340 } 1341 1342 /* 1343 * Interrupt handler for New Comm. interface 1344 * New Comm. interface use a different mechanism for interrupt. No explict 1345 * message queues, and driver need only accesses the mapped PCI mem space to 1346 * find the completed FIB or AIF. 1347 */ 1348 static int 1349 aac_process_intr_new(struct aac_softstate *softs) 1350 { 1351 uint32_t index; 1352 1353 index = AAC_OUTB_GET(softs); 1354 if (index == 0xfffffffful) 1355 index = AAC_OUTB_GET(softs); 1356 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 1357 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 1358 return (0); 1359 } 1360 if (index != 0xfffffffful) { 1361 do { 1362 if ((index & AAC_SENDERADDR_MASK_AIF) == 0) { 1363 aac_handle_io(softs, index); 1364 } else if (index != 0xfffffffeul) { 1365 struct aac_fib *fibp; /* FIB in AIF queue */ 1366 uint16_t fib_size, fib_size0; 1367 1368 /* 1369 * 0xfffffffe means that the controller wants 1370 * more work, ignore it for now. Otherwise, 1371 * AIF received. 1372 */ 1373 index &= ~2; 1374 1375 mutex_enter(&softs->aifq_mutex); 1376 /* 1377 * Copy AIF from adapter to the empty AIF slot 1378 */ 1379 fibp = &softs->aifq[softs->aifq_idx].d; 1380 fib_size0 = PCI_MEM_GET16(softs, index + \ 1381 offsetof(struct aac_fib, Header.Size)); 1382 fib_size = (fib_size0 > AAC_FIB_SIZE) ? 1383 AAC_FIB_SIZE : fib_size0; 1384 PCI_MEM_REP_GET8(softs, index, fibp, 1385 fib_size); 1386 1387 if (aac_check_acc_handle(softs-> \ 1388 pci_mem_handle) == DDI_SUCCESS) 1389 (void) aac_handle_aif(softs, fibp); 1390 else 1391 ddi_fm_service_impact(softs->devinfo_p, 1392 DDI_SERVICE_UNAFFECTED); 1393 mutex_exit(&softs->aifq_mutex); 1394 1395 /* 1396 * AIF memory is owned by the adapter, so let it 1397 * know that we are done with it. 1398 */ 1399 AAC_OUTB_SET(softs, index); 1400 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY); 1401 } 1402 1403 index = AAC_OUTB_GET(softs); 1404 } while (index != 0xfffffffful); 1405 1406 /* 1407 * Process waiting cmds before start new ones to 1408 * ensure first IOs are serviced first. 1409 */ 1410 aac_start_waiting_io(softs); 1411 return (AAC_DB_COMMAND_READY); 1412 } else { 1413 return (0); 1414 } 1415 } 1416 1417 static uint_t 1418 aac_intr_new(caddr_t arg) 1419 { 1420 struct aac_softstate *softs = (void *)arg; 1421 uint_t rval; 1422 1423 mutex_enter(&softs->io_lock); 1424 if (aac_process_intr_new(softs)) 1425 rval = DDI_INTR_CLAIMED; 1426 else 1427 rval = DDI_INTR_UNCLAIMED; 1428 mutex_exit(&softs->io_lock); 1429 1430 aac_drain_comp_q(softs); 1431 return (rval); 1432 } 1433 1434 /* 1435 * Interrupt handler for old interface 1436 * Explicit message queues are used to send FIB to and get completed FIB from 1437 * the adapter. Driver and adapter maitain the queues in the producer/consumer 1438 * manner. The driver has to query the queues to find the completed FIB. 1439 */ 1440 static int 1441 aac_process_intr_old(struct aac_softstate *softs) 1442 { 1443 uint16_t status; 1444 1445 status = AAC_STATUS_GET(softs); 1446 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 1447 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 1448 return (DDI_INTR_UNCLAIMED); 1449 } 1450 if (status & AAC_DB_RESPONSE_READY) { 1451 int slot_idx; 1452 1453 /* ACK the intr */ 1454 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY); 1455 (void) AAC_STATUS_GET(softs); 1456 while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q, 1457 &slot_idx) == AACOK) 1458 aac_handle_io(softs, slot_idx); 1459 1460 /* 1461 * Process waiting cmds before start new ones to 1462 * ensure first IOs are serviced first. 1463 */ 1464 aac_start_waiting_io(softs); 1465 return (AAC_DB_RESPONSE_READY); 1466 } else if (status & AAC_DB_COMMAND_READY) { 1467 int aif_idx; 1468 1469 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY); 1470 (void) AAC_STATUS_GET(softs); 1471 if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) == 1472 AACOK) { 1473 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 1474 struct aac_fib *fibp; /* FIB in AIF queue */ 1475 struct aac_fib *fibp0; /* FIB in communication space */ 1476 uint16_t fib_size, fib_size0; 1477 uint32_t fib_xfer_state; 1478 uint32_t addr, size; 1479 1480 ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS)); 1481 1482 #define AAC_SYNC_AIF(softs, aif_idx, type) \ 1483 { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \ 1484 offsetof(struct aac_comm_space, \ 1485 adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \ 1486 (type)); } 1487 1488 mutex_enter(&softs->aifq_mutex); 1489 /* Copy AIF from adapter to the empty AIF slot */ 1490 fibp = &softs->aifq[softs->aifq_idx].d; 1491 AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU); 1492 fibp0 = &softs->comm_space->adapter_fibs[aif_idx]; 1493 fib_size0 = ddi_get16(acc, &fibp0->Header.Size); 1494 fib_size = (fib_size0 > AAC_FIB_SIZE) ? 1495 AAC_FIB_SIZE : fib_size0; 1496 ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0, 1497 fib_size, DDI_DEV_AUTOINCR); 1498 1499 (void) aac_handle_aif(softs, fibp); 1500 mutex_exit(&softs->aifq_mutex); 1501 1502 /* Complete AIF back to adapter with good status */ 1503 fib_xfer_state = LE_32(fibp->Header.XferState); 1504 if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) { 1505 ddi_put32(acc, &fibp0->Header.XferState, 1506 fib_xfer_state | AAC_FIBSTATE_DONEHOST); 1507 ddi_put32(acc, (void *)&fibp0->data[0], ST_OK); 1508 if (fib_size0 > AAC_FIB_SIZE) 1509 ddi_put16(acc, &fibp0->Header.Size, 1510 AAC_FIB_SIZE); 1511 AAC_SYNC_AIF(softs, aif_idx, 1512 DDI_DMA_SYNC_FORDEV); 1513 } 1514 1515 /* Put the AIF response on the response queue */ 1516 addr = ddi_get32(acc, 1517 &softs->comm_space->adapter_fibs[aif_idx]. \ 1518 Header.SenderFibAddress); 1519 size = (uint32_t)ddi_get16(acc, 1520 &softs->comm_space->adapter_fibs[aif_idx]. \ 1521 Header.Size); 1522 ddi_put32(acc, 1523 &softs->comm_space->adapter_fibs[aif_idx]. \ 1524 Header.ReceiverFibAddress, addr); 1525 if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q, 1526 addr, size) == AACERR) 1527 cmn_err(CE_NOTE, "!AIF ack failed"); 1528 } 1529 return (AAC_DB_COMMAND_READY); 1530 } else if (status & AAC_DB_PRINTF_READY) { 1531 /* ACK the intr */ 1532 AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY); 1533 (void) AAC_STATUS_GET(softs); 1534 (void) ddi_dma_sync(softs->comm_space_dma_handle, 1535 offsetof(struct aac_comm_space, adapter_print_buf), 1536 AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU); 1537 if (aac_check_dma_handle(softs->comm_space_dma_handle) == 1538 DDI_SUCCESS) 1539 cmn_err(CE_NOTE, "MSG From Adapter: %s", 1540 softs->comm_space->adapter_print_buf); 1541 else 1542 ddi_fm_service_impact(softs->devinfo_p, 1543 DDI_SERVICE_UNAFFECTED); 1544 AAC_NOTIFY(softs, AAC_DB_PRINTF_READY); 1545 return (AAC_DB_PRINTF_READY); 1546 } else if (status & AAC_DB_COMMAND_NOT_FULL) { 1547 /* 1548 * Without these two condition statements, the OS could hang 1549 * after a while, especially if there are a lot of AIF's to 1550 * handle, for instance if a drive is pulled from an array 1551 * under heavy load. 1552 */ 1553 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1554 return (AAC_DB_COMMAND_NOT_FULL); 1555 } else if (status & AAC_DB_RESPONSE_NOT_FULL) { 1556 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1557 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL); 1558 return (AAC_DB_RESPONSE_NOT_FULL); 1559 } else { 1560 return (0); 1561 } 1562 } 1563 1564 static uint_t 1565 aac_intr_old(caddr_t arg) 1566 { 1567 struct aac_softstate *softs = (void *)arg; 1568 int rval; 1569 1570 mutex_enter(&softs->io_lock); 1571 if (aac_process_intr_old(softs)) 1572 rval = DDI_INTR_CLAIMED; 1573 else 1574 rval = DDI_INTR_UNCLAIMED; 1575 mutex_exit(&softs->io_lock); 1576 1577 aac_drain_comp_q(softs); 1578 return (rval); 1579 } 1580 1581 /* 1582 * Query FIXED or MSI interrupts 1583 */ 1584 static int 1585 aac_query_intrs(struct aac_softstate *softs, int intr_type) 1586 { 1587 dev_info_t *dip = softs->devinfo_p; 1588 int avail, actual, intr_size, count; 1589 int i, flag, ret; 1590 1591 AACDB_PRINT(softs, CE_NOTE, 1592 "aac_query_intrs:interrupt type 0x%x", intr_type); 1593 1594 /* Get number of interrupts */ 1595 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 1596 if ((ret != DDI_SUCCESS) || (count == 0)) { 1597 AACDB_PRINT(softs, CE_WARN, 1598 "ddi_intr_get_nintrs() failed, ret %d count %d", 1599 ret, count); 1600 return (DDI_FAILURE); 1601 } 1602 1603 /* Get number of available interrupts */ 1604 ret = ddi_intr_get_navail(dip, intr_type, &avail); 1605 if ((ret != DDI_SUCCESS) || (avail == 0)) { 1606 AACDB_PRINT(softs, CE_WARN, 1607 "ddi_intr_get_navail() failed, ret %d avail %d", 1608 ret, avail); 1609 return (DDI_FAILURE); 1610 } 1611 1612 AACDB_PRINT(softs, CE_NOTE, 1613 "ddi_intr_get_nvail returned %d, navail() returned %d", 1614 count, avail); 1615 1616 /* Allocate an array of interrupt handles */ 1617 intr_size = count * sizeof (ddi_intr_handle_t); 1618 softs->htable = kmem_alloc(intr_size, KM_SLEEP); 1619 1620 if (intr_type == DDI_INTR_TYPE_MSI) { 1621 count = 1; /* only one vector needed by now */ 1622 flag = DDI_INTR_ALLOC_STRICT; 1623 } else { /* must be DDI_INTR_TYPE_FIXED */ 1624 flag = DDI_INTR_ALLOC_NORMAL; 1625 } 1626 1627 /* Call ddi_intr_alloc() */ 1628 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0, 1629 count, &actual, flag); 1630 1631 if ((ret != DDI_SUCCESS) || (actual == 0)) { 1632 AACDB_PRINT(softs, CE_WARN, 1633 "ddi_intr_alloc() failed, ret = %d", ret); 1634 actual = 0; 1635 goto error; 1636 } 1637 1638 if (actual < count) { 1639 AACDB_PRINT(softs, CE_NOTE, 1640 "Requested: %d, Received: %d", count, actual); 1641 goto error; 1642 } 1643 1644 softs->intr_cnt = actual; 1645 1646 /* Get priority for first msi, assume remaining are all the same */ 1647 if ((ret = ddi_intr_get_pri(softs->htable[0], 1648 &softs->intr_pri)) != DDI_SUCCESS) { 1649 AACDB_PRINT(softs, CE_WARN, 1650 "ddi_intr_get_pri() failed, ret = %d", ret); 1651 goto error; 1652 } 1653 1654 /* Test for high level mutex */ 1655 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) { 1656 AACDB_PRINT(softs, CE_WARN, 1657 "aac_query_intrs: Hi level interrupt not supported"); 1658 goto error; 1659 } 1660 1661 return (DDI_SUCCESS); 1662 1663 error: 1664 /* Free already allocated intr */ 1665 for (i = 0; i < actual; i++) 1666 (void) ddi_intr_free(softs->htable[i]); 1667 1668 kmem_free(softs->htable, intr_size); 1669 return (DDI_FAILURE); 1670 } 1671 1672 1673 /* 1674 * Register FIXED or MSI interrupts, and enable them 1675 */ 1676 static int 1677 aac_add_intrs(struct aac_softstate *softs) 1678 { 1679 int i, ret; 1680 int intr_size, actual; 1681 ddi_intr_handler_t *aac_intr; 1682 1683 actual = softs->intr_cnt; 1684 intr_size = actual * sizeof (ddi_intr_handle_t); 1685 aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ? 1686 aac_intr_new : aac_intr_old); 1687 1688 /* Call ddi_intr_add_handler() */ 1689 for (i = 0; i < actual; i++) { 1690 if ((ret = ddi_intr_add_handler(softs->htable[i], 1691 aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) { 1692 cmn_err(CE_WARN, 1693 "ddi_intr_add_handler() failed ret = %d", ret); 1694 1695 /* Free already allocated intr */ 1696 for (i = 0; i < actual; i++) 1697 (void) ddi_intr_free(softs->htable[i]); 1698 1699 kmem_free(softs->htable, intr_size); 1700 return (DDI_FAILURE); 1701 } 1702 } 1703 1704 if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap)) 1705 != DDI_SUCCESS) { 1706 cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret); 1707 1708 /* Free already allocated intr */ 1709 for (i = 0; i < actual; i++) 1710 (void) ddi_intr_free(softs->htable[i]); 1711 1712 kmem_free(softs->htable, intr_size); 1713 return (DDI_FAILURE); 1714 } 1715 1716 /* Enable interrupts */ 1717 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1718 /* for MSI block enable */ 1719 (void) ddi_intr_block_enable(softs->htable, softs->intr_cnt); 1720 } else { 1721 /* Call ddi_intr_enable() for legacy/MSI non block enable */ 1722 for (i = 0; i < softs->intr_cnt; i++) 1723 (void) ddi_intr_enable(softs->htable[i]); 1724 } 1725 1726 return (DDI_SUCCESS); 1727 } 1728 1729 /* 1730 * Unregister FIXED or MSI interrupts 1731 */ 1732 static void 1733 aac_remove_intrs(struct aac_softstate *softs) 1734 { 1735 int i; 1736 1737 /* Disable all interrupts */ 1738 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1739 /* Call ddi_intr_block_disable() */ 1740 (void) ddi_intr_block_disable(softs->htable, softs->intr_cnt); 1741 } else { 1742 for (i = 0; i < softs->intr_cnt; i++) 1743 (void) ddi_intr_disable(softs->htable[i]); 1744 } 1745 1746 /* Call ddi_intr_remove_handler() */ 1747 for (i = 0; i < softs->intr_cnt; i++) { 1748 (void) ddi_intr_remove_handler(softs->htable[i]); 1749 (void) ddi_intr_free(softs->htable[i]); 1750 } 1751 1752 kmem_free(softs->htable, softs->intr_cnt * sizeof (ddi_intr_handle_t)); 1753 } 1754 1755 /* 1756 * Set pkt_reason and OR in pkt_statistics flag 1757 */ 1758 static void 1759 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp, 1760 uchar_t reason, uint_t stat) 1761 { 1762 #ifndef __lock_lint 1763 _NOTE(ARGUNUSED(softs)) 1764 #endif 1765 if (acp->pkt->pkt_reason == CMD_CMPLT) 1766 acp->pkt->pkt_reason = reason; 1767 acp->pkt->pkt_statistics |= stat; 1768 } 1769 1770 /* 1771 * Handle a finished pkt of soft SCMD 1772 */ 1773 static void 1774 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp) 1775 { 1776 ASSERT(acp->pkt); 1777 1778 acp->flags |= AAC_CMD_CMPLT; 1779 1780 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \ 1781 STATE_SENT_CMD | STATE_GOT_STATUS; 1782 if (acp->pkt->pkt_state & STATE_XFERRED_DATA) 1783 acp->pkt->pkt_resid = 0; 1784 1785 /* AAC_CMD_NO_INTR means no complete callback */ 1786 if (!(acp->flags & AAC_CMD_NO_INTR)) { 1787 mutex_enter(&softs->q_comp_mutex); 1788 aac_cmd_enqueue(&softs->q_comp, acp); 1789 mutex_exit(&softs->q_comp_mutex); 1790 ddi_trigger_softintr(softs->softint_id); 1791 } 1792 } 1793 1794 /* 1795 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old() 1796 */ 1797 1798 /* 1799 * Handle completed logical device IO command 1800 */ 1801 /*ARGSUSED*/ 1802 static void 1803 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1804 { 1805 struct aac_slot *slotp = acp->slotp; 1806 struct aac_blockread_response *resp; 1807 uint32_t status; 1808 1809 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1810 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1811 1812 acp->pkt->pkt_state |= STATE_GOT_STATUS; 1813 1814 /* 1815 * block_read/write has a similar response header, use blockread 1816 * response for both. 1817 */ 1818 resp = (struct aac_blockread_response *)&slotp->fibp->data[0]; 1819 status = ddi_get32(slotp->fib_acc_handle, &resp->Status); 1820 if (status == ST_OK) { 1821 acp->pkt->pkt_resid = 0; 1822 acp->pkt->pkt_state |= STATE_XFERRED_DATA; 1823 } else { 1824 aac_set_arq_data_hwerr(acp); 1825 } 1826 } 1827 1828 /* 1829 * Handle completed phys. device IO command 1830 */ 1831 static void 1832 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1833 { 1834 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 1835 struct aac_fib *fibp = acp->slotp->fibp; 1836 struct scsi_pkt *pkt = acp->pkt; 1837 struct aac_srb_reply *resp; 1838 uint32_t resp_status; 1839 1840 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1841 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1842 1843 resp = (struct aac_srb_reply *)&fibp->data[0]; 1844 resp_status = ddi_get32(acc, &resp->status); 1845 1846 /* First check FIB status */ 1847 if (resp_status == ST_OK) { 1848 uint32_t scsi_status; 1849 uint32_t srb_status; 1850 uint32_t data_xfer_length; 1851 1852 scsi_status = ddi_get32(acc, &resp->scsi_status); 1853 srb_status = ddi_get32(acc, &resp->srb_status); 1854 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length); 1855 1856 *pkt->pkt_scbp = (uint8_t)scsi_status; 1857 pkt->pkt_state |= STATE_GOT_STATUS; 1858 if (scsi_status == STATUS_GOOD) { 1859 uchar_t cmd = ((union scsi_cdb *)(void *) 1860 (pkt->pkt_cdbp))->scc_cmd; 1861 1862 /* Next check SRB status */ 1863 switch (srb_status & 0x3f) { 1864 case SRB_STATUS_DATA_OVERRUN: 1865 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \ 1866 "scmd=%d, xfer=%d, buflen=%d", 1867 (uint32_t)cmd, data_xfer_length, 1868 acp->bcount); 1869 1870 switch (cmd) { 1871 case SCMD_READ: 1872 case SCMD_WRITE: 1873 case SCMD_READ_G1: 1874 case SCMD_WRITE_G1: 1875 case SCMD_READ_G4: 1876 case SCMD_WRITE_G4: 1877 case SCMD_READ_G5: 1878 case SCMD_WRITE_G5: 1879 aac_set_pkt_reason(softs, acp, 1880 CMD_DATA_OVR, 0); 1881 break; 1882 } 1883 /*FALLTHRU*/ 1884 case SRB_STATUS_ERROR_RECOVERY: 1885 case SRB_STATUS_PENDING: 1886 case SRB_STATUS_SUCCESS: 1887 /* 1888 * pkt_resid should only be calculated if the 1889 * status is ERROR_RECOVERY/PENDING/SUCCESS/ 1890 * OVERRUN/UNDERRUN 1891 */ 1892 if (data_xfer_length) { 1893 pkt->pkt_state |= STATE_XFERRED_DATA; 1894 pkt->pkt_resid = acp->bcount - \ 1895 data_xfer_length; 1896 ASSERT(pkt->pkt_resid >= 0); 1897 } 1898 break; 1899 case SRB_STATUS_ABORTED: 1900 AACDB_PRINT(softs, CE_NOTE, 1901 "SRB_STATUS_ABORTED, xfer=%d, resid=%d", 1902 data_xfer_length, pkt->pkt_resid); 1903 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 1904 STAT_ABORTED); 1905 break; 1906 case SRB_STATUS_ABORT_FAILED: 1907 AACDB_PRINT(softs, CE_NOTE, 1908 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \ 1909 "resid=%d", data_xfer_length, 1910 pkt->pkt_resid); 1911 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL, 1912 0); 1913 break; 1914 case SRB_STATUS_PARITY_ERROR: 1915 AACDB_PRINT(softs, CE_NOTE, 1916 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \ 1917 "resid=%d", data_xfer_length, 1918 pkt->pkt_resid); 1919 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0); 1920 break; 1921 case SRB_STATUS_NO_DEVICE: 1922 case SRB_STATUS_INVALID_PATH_ID: 1923 case SRB_STATUS_INVALID_TARGET_ID: 1924 case SRB_STATUS_INVALID_LUN: 1925 case SRB_STATUS_SELECTION_TIMEOUT: 1926 #ifdef DEBUG 1927 if (AAC_DEV_IS_VALID(acp->dvp)) { 1928 AACDB_PRINT(softs, CE_NOTE, 1929 "SRB_STATUS_NO_DEVICE(%d), " \ 1930 "xfer=%d, resid=%d ", 1931 srb_status & 0x3f, 1932 data_xfer_length, pkt->pkt_resid); 1933 } 1934 #endif 1935 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0); 1936 break; 1937 case SRB_STATUS_COMMAND_TIMEOUT: 1938 case SRB_STATUS_TIMEOUT: 1939 AACDB_PRINT(softs, CE_NOTE, 1940 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \ 1941 "resid=%d", data_xfer_length, 1942 pkt->pkt_resid); 1943 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 1944 STAT_TIMEOUT); 1945 break; 1946 case SRB_STATUS_BUS_RESET: 1947 AACDB_PRINT(softs, CE_NOTE, 1948 "SRB_STATUS_BUS_RESET, xfer=%d, " \ 1949 "resid=%d", data_xfer_length, 1950 pkt->pkt_resid); 1951 aac_set_pkt_reason(softs, acp, CMD_RESET, 1952 STAT_BUS_RESET); 1953 break; 1954 default: 1955 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \ 1956 "xfer=%d, resid=%d", srb_status & 0x3f, 1957 data_xfer_length, pkt->pkt_resid); 1958 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1959 break; 1960 } 1961 } else if (scsi_status == STATUS_CHECK) { 1962 /* CHECK CONDITION */ 1963 struct scsi_arq_status *arqstat = 1964 (void *)(pkt->pkt_scbp); 1965 uint32_t sense_data_size; 1966 1967 pkt->pkt_state |= STATE_ARQ_DONE; 1968 1969 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1970 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1971 arqstat->sts_rqpkt_resid = 0; 1972 arqstat->sts_rqpkt_state = 1973 STATE_GOT_BUS | 1974 STATE_GOT_TARGET | 1975 STATE_SENT_CMD | 1976 STATE_XFERRED_DATA; 1977 arqstat->sts_rqpkt_statistics = 0; 1978 1979 sense_data_size = ddi_get32(acc, 1980 &resp->sense_data_size); 1981 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE); 1982 AACDB_PRINT(softs, CE_NOTE, 1983 "CHECK CONDITION: sense len=%d, xfer len=%d", 1984 sense_data_size, data_xfer_length); 1985 1986 if (sense_data_size > SENSE_LENGTH) 1987 sense_data_size = SENSE_LENGTH; 1988 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata, 1989 (uint8_t *)resp->sense_data, sense_data_size, 1990 DDI_DEV_AUTOINCR); 1991 } else { 1992 AACDB_PRINT(softs, CE_WARN, "invaild scsi status: " \ 1993 "scsi_status=%d, srb_status=%d", 1994 scsi_status, srb_status); 1995 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1996 } 1997 } else { 1998 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d", 1999 resp_status); 2000 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 2001 } 2002 } 2003 2004 /* 2005 * Handle completed IOCTL command 2006 */ 2007 /*ARGSUSED*/ 2008 void 2009 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2010 { 2011 struct aac_slot *slotp = acp->slotp; 2012 2013 /* 2014 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb() 2015 * may wait on softs->event, so use cv_broadcast() instead 2016 * of cv_signal(). 2017 */ 2018 ASSERT(acp->flags & AAC_CMD_SYNC); 2019 ASSERT(acp->flags & AAC_CMD_NO_CB); 2020 2021 /* Get the size of the response FIB from its FIB.Header.Size field */ 2022 acp->fib_size = ddi_get16(slotp->fib_acc_handle, 2023 &slotp->fibp->Header.Size); 2024 2025 ASSERT(acp->fib_size <= softs->aac_max_fib_size); 2026 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp, 2027 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR); 2028 } 2029 2030 /* 2031 * Handle completed Flush command 2032 */ 2033 /*ARGSUSED*/ 2034 static void 2035 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2036 { 2037 struct aac_slot *slotp = acp->slotp; 2038 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2039 struct aac_synchronize_reply *resp; 2040 uint32_t status; 2041 2042 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2043 2044 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2045 2046 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0]; 2047 status = ddi_get32(acc, &resp->Status); 2048 if (status != CT_OK) 2049 aac_set_arq_data_hwerr(acp); 2050 } 2051 2052 static void 2053 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2054 { 2055 struct aac_slot *slotp = acp->slotp; 2056 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2057 struct aac_Container_resp *resp; 2058 uint32_t status; 2059 2060 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2061 2062 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2063 2064 resp = (struct aac_Container_resp *)&slotp->fibp->data[0]; 2065 status = ddi_get32(acc, &resp->Status); 2066 if (status != 0) { 2067 AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit"); 2068 aac_set_arq_data_hwerr(acp); 2069 } 2070 } 2071 2072 /* 2073 * Access PCI space to see if the driver can support the card 2074 */ 2075 static int 2076 aac_check_card_type(struct aac_softstate *softs) 2077 { 2078 ddi_acc_handle_t pci_config_handle; 2079 int card_index; 2080 uint32_t pci_cmd; 2081 2082 /* Map pci configuration space */ 2083 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) != 2084 DDI_SUCCESS) { 2085 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space"); 2086 return (AACERR); 2087 } 2088 2089 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID); 2090 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID); 2091 softs->subvendid = pci_config_get16(pci_config_handle, 2092 PCI_CONF_SUBVENID); 2093 softs->subsysid = pci_config_get16(pci_config_handle, 2094 PCI_CONF_SUBSYSID); 2095 2096 card_index = 0; 2097 while (!CARD_IS_UNKNOWN(card_index)) { 2098 if ((aac_cards[card_index].vendor == softs->vendid) && 2099 (aac_cards[card_index].device == softs->devid) && 2100 (aac_cards[card_index].subvendor == softs->subvendid) && 2101 (aac_cards[card_index].subsys == softs->subsysid)) { 2102 break; 2103 } 2104 card_index++; 2105 } 2106 2107 softs->card = card_index; 2108 softs->hwif = aac_cards[card_index].hwif; 2109 2110 /* 2111 * Unknown aac card 2112 * do a generic match based on the VendorID and DeviceID to 2113 * support the new cards in the aac family 2114 */ 2115 if (CARD_IS_UNKNOWN(card_index)) { 2116 if (softs->vendid != 0x9005) { 2117 AACDB_PRINT(softs, CE_WARN, 2118 "Unknown vendor 0x%x", softs->vendid); 2119 goto error; 2120 } 2121 switch (softs->devid) { 2122 case 0x285: 2123 softs->hwif = AAC_HWIF_I960RX; 2124 break; 2125 case 0x286: 2126 softs->hwif = AAC_HWIF_RKT; 2127 break; 2128 default: 2129 AACDB_PRINT(softs, CE_WARN, 2130 "Unknown device \"pci9005,%x\"", softs->devid); 2131 goto error; 2132 } 2133 } 2134 2135 /* Set hardware dependent interface */ 2136 switch (softs->hwif) { 2137 case AAC_HWIF_I960RX: 2138 softs->aac_if = aac_rx_interface; 2139 softs->map_size_min = AAC_MAP_SIZE_MIN_RX; 2140 break; 2141 case AAC_HWIF_RKT: 2142 softs->aac_if = aac_rkt_interface; 2143 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT; 2144 break; 2145 default: 2146 AACDB_PRINT(softs, CE_WARN, 2147 "Unknown hardware interface %d", softs->hwif); 2148 goto error; 2149 } 2150 2151 /* Set card names */ 2152 (void *)strncpy(softs->vendor_name, aac_cards[card_index].vid, 2153 AAC_VENDOR_LEN); 2154 (void *)strncpy(softs->product_name, aac_cards[card_index].desc, 2155 AAC_PRODUCT_LEN); 2156 2157 /* Set up quirks */ 2158 softs->flags = aac_cards[card_index].quirks; 2159 2160 /* Force the busmaster enable bit on */ 2161 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2162 if ((pci_cmd & PCI_COMM_ME) == 0) { 2163 pci_cmd |= PCI_COMM_ME; 2164 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd); 2165 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2166 if ((pci_cmd & PCI_COMM_ME) == 0) { 2167 cmn_err(CE_CONT, "?Cannot enable busmaster bit"); 2168 goto error; 2169 } 2170 } 2171 2172 /* Set memory base to map */ 2173 softs->pci_mem_base_paddr = 0xfffffff0UL & \ 2174 pci_config_get32(pci_config_handle, PCI_CONF_BASE0); 2175 2176 pci_config_teardown(&pci_config_handle); 2177 2178 return (AACOK); /* card type detected */ 2179 error: 2180 pci_config_teardown(&pci_config_handle); 2181 return (AACERR); /* no matched card found */ 2182 } 2183 2184 /* 2185 * Check the firmware to determine the features to support and the FIB 2186 * parameters to use. 2187 */ 2188 static int 2189 aac_check_firmware(struct aac_softstate *softs) 2190 { 2191 uint32_t options; 2192 uint32_t atu_size; 2193 ddi_acc_handle_t pci_handle; 2194 uint8_t *data; 2195 uint32_t max_fibs; 2196 uint32_t max_fib_size; 2197 uint32_t sg_tablesize; 2198 uint32_t max_sectors; 2199 uint32_t status; 2200 2201 /* Get supported options */ 2202 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0, 2203 &status)) != AACOK) { 2204 if (status != SRB_STATUS_INVALID_REQUEST) { 2205 cmn_err(CE_CONT, 2206 "?Fatal error: request adapter info error"); 2207 return (AACERR); 2208 } 2209 options = 0; 2210 atu_size = 0; 2211 } else { 2212 options = AAC_MAILBOX_GET(softs, 1); 2213 atu_size = AAC_MAILBOX_GET(softs, 2); 2214 } 2215 2216 if (softs->state & AAC_STATE_RESET) { 2217 if ((softs->support_opt == options) && 2218 (softs->atu_size == atu_size)) 2219 return (AACOK); 2220 2221 cmn_err(CE_WARN, 2222 "?Fatal error: firmware changed, system needs reboot"); 2223 return (AACERR); 2224 } 2225 2226 /* 2227 * The following critical settings are initialized only once during 2228 * driver attachment. 2229 */ 2230 softs->support_opt = options; 2231 softs->atu_size = atu_size; 2232 2233 /* Process supported options */ 2234 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 2235 (softs->flags & AAC_FLAGS_NO4GB) == 0) { 2236 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window"); 2237 softs->flags |= AAC_FLAGS_4GB_WINDOW; 2238 } else { 2239 /* 2240 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space 2241 * only. IO is handled by the DMA engine which does not suffer 2242 * from the ATU window programming workarounds necessary for 2243 * CPU copy operations. 2244 */ 2245 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull; 2246 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull; 2247 } 2248 2249 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) { 2250 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address"); 2251 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 2252 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull; 2253 softs->flags |= AAC_FLAGS_SG_64BIT; 2254 } 2255 2256 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) { 2257 softs->flags |= AAC_FLAGS_ARRAY_64BIT; 2258 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size"); 2259 } 2260 2261 if (options & AAC_SUPPORTED_NONDASD) { 2262 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0, 2263 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) { 2264 if (strcmp((char *)data, "yes") == 0) { 2265 AACDB_PRINT(softs, CE_NOTE, 2266 "!Enable Non-DASD access"); 2267 softs->flags |= AAC_FLAGS_NONDASD; 2268 } 2269 ddi_prop_free(data); 2270 } 2271 } 2272 2273 /* Read preferred settings */ 2274 max_fib_size = 0; 2275 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF, 2276 0, 0, 0, 0, NULL)) == AACOK) { 2277 options = AAC_MAILBOX_GET(softs, 1); 2278 max_fib_size = (options & 0xffff); 2279 max_sectors = (options >> 16) << 1; 2280 options = AAC_MAILBOX_GET(softs, 2); 2281 sg_tablesize = (options >> 16); 2282 options = AAC_MAILBOX_GET(softs, 3); 2283 max_fibs = (options & 0xffff); 2284 } 2285 2286 /* Enable new comm. and rawio at the same time */ 2287 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) && 2288 (max_fib_size != 0)) { 2289 /* read out and save PCI MBR */ 2290 if ((atu_size > softs->map_size) && 2291 (ddi_regs_map_setup(softs->devinfo_p, 1, 2292 (caddr_t *)&data, 0, atu_size, &softs->acc_attr, 2293 &pci_handle) == DDI_SUCCESS)) { 2294 ddi_regs_map_free(&softs->pci_mem_handle); 2295 softs->pci_mem_handle = pci_handle; 2296 softs->pci_mem_base_vaddr = data; 2297 softs->map_size = atu_size; 2298 } 2299 if (atu_size == softs->map_size) { 2300 softs->flags |= AAC_FLAGS_NEW_COMM; 2301 AACDB_PRINT(softs, CE_NOTE, 2302 "!Enable New Comm. interface"); 2303 } 2304 } 2305 2306 /* Set FIB parameters */ 2307 if (softs->flags & AAC_FLAGS_NEW_COMM) { 2308 softs->aac_max_fibs = max_fibs; 2309 softs->aac_max_fib_size = max_fib_size; 2310 softs->aac_max_sectors = max_sectors; 2311 softs->aac_sg_tablesize = sg_tablesize; 2312 2313 softs->flags |= AAC_FLAGS_RAW_IO; 2314 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO"); 2315 } else { 2316 softs->aac_max_fibs = 2317 (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512; 2318 softs->aac_max_fib_size = AAC_FIB_SIZE; 2319 softs->aac_max_sectors = 128; /* 64K */ 2320 if (softs->flags & AAC_FLAGS_17SG) 2321 softs->aac_sg_tablesize = 17; 2322 else if (softs->flags & AAC_FLAGS_34SG) 2323 softs->aac_sg_tablesize = 34; 2324 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2325 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2326 sizeof (struct aac_blockwrite64) + 2327 sizeof (struct aac_sg_entry64)) / 2328 sizeof (struct aac_sg_entry64); 2329 else 2330 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2331 sizeof (struct aac_blockwrite) + 2332 sizeof (struct aac_sg_entry)) / 2333 sizeof (struct aac_sg_entry); 2334 } 2335 2336 if ((softs->flags & AAC_FLAGS_RAW_IO) && 2337 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) { 2338 softs->flags |= AAC_FLAGS_LBA_64BIT; 2339 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array"); 2340 } 2341 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize; 2342 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9; 2343 /* 2344 * 64K maximum segment size in scatter gather list is controlled by 2345 * the NEW_COMM bit in the adapter information. If not set, the card 2346 * can only accept a maximum of 64K. It is not recommended to permit 2347 * more than 128KB of total transfer size to the adapters because 2348 * performance is negatively impacted. 2349 * 2350 * For new comm, segment size equals max xfer size. For old comm, 2351 * we use 64K for both. 2352 */ 2353 softs->buf_dma_attr.dma_attr_count_max = 2354 softs->buf_dma_attr.dma_attr_maxxfer - 1; 2355 2356 /* Setup FIB operations */ 2357 if (softs->flags & AAC_FLAGS_RAW_IO) 2358 softs->aac_cmd_fib = aac_cmd_fib_rawio; 2359 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2360 softs->aac_cmd_fib = aac_cmd_fib_brw64; 2361 else 2362 softs->aac_cmd_fib = aac_cmd_fib_brw; 2363 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \ 2364 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32; 2365 2366 /* 64-bit LBA needs descriptor format sense data */ 2367 softs->slen = sizeof (struct scsi_arq_status); 2368 if ((softs->flags & AAC_FLAGS_LBA_64BIT) && 2369 softs->slen < AAC_ARQ64_LENGTH) 2370 softs->slen = AAC_ARQ64_LENGTH; 2371 2372 AACDB_PRINT(softs, CE_NOTE, 2373 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d", 2374 softs->aac_max_fibs, softs->aac_max_fib_size, 2375 softs->aac_max_sectors, softs->aac_sg_tablesize); 2376 2377 return (AACOK); 2378 } 2379 2380 static void 2381 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0, 2382 struct FsaRev *fsarev1) 2383 { 2384 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 2385 2386 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash); 2387 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type); 2388 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor); 2389 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major); 2390 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber); 2391 } 2392 2393 /* 2394 * The following function comes from Adaptec: 2395 * 2396 * Query adapter information and supplement adapter information 2397 */ 2398 static int 2399 aac_get_adapter_info(struct aac_softstate *softs, 2400 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr) 2401 { 2402 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2403 struct aac_fib *fibp = softs->sync_slot.fibp; 2404 struct aac_adapter_info *ainfp; 2405 struct aac_supplement_adapter_info *sinfp; 2406 2407 ddi_put8(acc, &fibp->data[0], 0); 2408 if (aac_sync_fib(softs, RequestAdapterInfo, 2409 sizeof (struct aac_fib_header)) != AACOK) { 2410 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed"); 2411 return (AACERR); 2412 } 2413 ainfp = (struct aac_adapter_info *)fibp->data; 2414 if (ainfr) { 2415 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2416 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase); 2417 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture); 2418 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant); 2419 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed); 2420 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem); 2421 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem); 2422 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem); 2423 aac_fsa_rev(softs, &ainfp->KernelRevision, 2424 &ainfr->KernelRevision); 2425 aac_fsa_rev(softs, &ainfp->MonitorRevision, 2426 &ainfr->MonitorRevision); 2427 aac_fsa_rev(softs, &ainfp->HardwareRevision, 2428 &ainfr->HardwareRevision); 2429 aac_fsa_rev(softs, &ainfp->BIOSRevision, 2430 &ainfr->BIOSRevision); 2431 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled); 2432 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask); 2433 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber); 2434 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform); 2435 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2436 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant); 2437 } 2438 if (sinfr) { 2439 if (!(softs->support_opt & 2440 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) { 2441 AACDB_PRINT(softs, CE_WARN, 2442 "SupplementAdapterInfo not supported"); 2443 return (AACERR); 2444 } 2445 ddi_put8(acc, &fibp->data[0], 0); 2446 if (aac_sync_fib(softs, RequestSupplementAdapterInfo, 2447 sizeof (struct aac_fib_header)) != AACOK) { 2448 AACDB_PRINT(softs, CE_WARN, 2449 "RequestSupplementAdapterInfo failed"); 2450 return (AACERR); 2451 } 2452 sinfp = (struct aac_supplement_adapter_info *)fibp->data; 2453 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1); 2454 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2); 2455 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize); 2456 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId); 2457 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts); 2458 AAC_GET_FIELD32(acc, sinfr, sinfp, Version); 2459 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits); 2460 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber); 2461 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3); 2462 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12); 2463 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts); 2464 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo, 2465 sizeof (struct vpd_info)); 2466 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision, 2467 &sinfr->FlashFirmwareRevision); 2468 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions); 2469 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision, 2470 &sinfr->FlashFirmwareBootRevision); 2471 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo, 2472 MFG_PCBA_SERIAL_NUMBER_WIDTH); 2473 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0], 2474 MFG_WWN_WIDTH); 2475 AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2); 2476 AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag); 2477 if (sinfr->ExpansionFlag == 1) { 2478 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3); 2479 AAC_GET_FIELD32(acc, sinfr, sinfp, 2480 SupportedPerformanceMode); 2481 AAC_REP_GET_FIELD32(acc, sinfr, sinfp, 2482 ReservedGrowth[0], 80); 2483 } 2484 } 2485 return (AACOK); 2486 } 2487 2488 static int 2489 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max, 2490 uint32_t *tgt_max) 2491 { 2492 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2493 struct aac_fib *fibp = softs->sync_slot.fibp; 2494 struct aac_ctcfg *c_cmd; 2495 struct aac_ctcfg_resp *c_resp; 2496 uint32_t scsi_method_id; 2497 struct aac_bus_info *cmd; 2498 struct aac_bus_info_response *resp; 2499 int rval; 2500 2501 /* Detect MethodId */ 2502 c_cmd = (struct aac_ctcfg *)&fibp->data[0]; 2503 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig); 2504 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD); 2505 ddi_put32(acc, &c_cmd->param, 0); 2506 rval = aac_sync_fib(softs, ContainerCommand, 2507 AAC_FIB_SIZEOF(struct aac_ctcfg)); 2508 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0]; 2509 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) { 2510 AACDB_PRINT(softs, CE_WARN, 2511 "VM_ContainerConfig command fail"); 2512 return (AACERR); 2513 } 2514 scsi_method_id = ddi_get32(acc, &c_resp->param); 2515 2516 /* Detect phys. bus count and max. target id first */ 2517 cmd = (struct aac_bus_info *)&fibp->data[0]; 2518 ddi_put32(acc, &cmd->Command, VM_Ioctl); 2519 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */ 2520 ddi_put32(acc, &cmd->MethodId, scsi_method_id); 2521 ddi_put32(acc, &cmd->ObjectId, 0); 2522 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo); 2523 /* 2524 * For VM_Ioctl, the firmware uses the Header.Size filled from the 2525 * driver as the size to be returned. Therefore the driver has to use 2526 * sizeof (struct aac_bus_info_response) because it is greater than 2527 * sizeof (struct aac_bus_info). 2528 */ 2529 rval = aac_sync_fib(softs, ContainerCommand, 2530 AAC_FIB_SIZEOF(struct aac_bus_info_response)); 2531 resp = (struct aac_bus_info_response *)cmd; 2532 2533 /* Scan all coordinates with INQUIRY */ 2534 if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) { 2535 AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail"); 2536 return (AACERR); 2537 } 2538 *bus_max = ddi_get32(acc, &resp->BusCount); 2539 *tgt_max = ddi_get32(acc, &resp->TargetsPerBus); 2540 return (AACOK); 2541 } 2542 2543 /* 2544 * The following function comes from Adaptec: 2545 * 2546 * Routine to be called during initialization of communications with 2547 * the adapter to handle possible adapter configuration issues. When 2548 * the adapter first boots up, it examines attached drives, etc, and 2549 * potentially comes up with a new or revised configuration (relative to 2550 * what's stored in it's NVRAM). Additionally it may discover problems 2551 * that make the current physical configuration unworkable (currently 2552 * applicable only to cluster configuration issues). 2553 * 2554 * If there are no configuration issues or the issues are considered 2555 * trival by the adapter, it will set it's configuration status to 2556 * "FSACT_CONTINUE" and execute the "commit confiuguration" action 2557 * automatically on it's own. 2558 * 2559 * However, if there are non-trivial issues, the adapter will set it's 2560 * internal configuration status to "FSACT_PAUSE" or "FASCT_ABORT" 2561 * and wait for some agent on the host to issue the "\ContainerCommand 2562 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the 2563 * adapter to commit the new/updated configuration and enable 2564 * un-inhibited operation. The host agent should first issue the 2565 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB 2566 * command to obtain information about config issues detected by 2567 * the adapter. 2568 * 2569 * Normally the adapter's PC BIOS will execute on the host following 2570 * adapter poweron and reset and will be responsible for querring the 2571 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG 2572 * command if appropriate. 2573 * 2574 * However, with the introduction of IOP reset support, the adapter may 2575 * boot up without the benefit of the adapter's PC BIOS host agent. 2576 * This routine is intended to take care of these issues in situations 2577 * where BIOS doesn't execute following adapter poweron or reset. The 2578 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so 2579 * there is no harm in doing this when it's already been done. 2580 */ 2581 static int 2582 aac_handle_adapter_config_issues(struct aac_softstate *softs) 2583 { 2584 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2585 struct aac_fib *fibp = softs->sync_slot.fibp; 2586 struct aac_Container *cmd; 2587 struct aac_Container_resp *resp; 2588 struct aac_cf_status_header *cfg_sts_hdr; 2589 uint32_t resp_status; 2590 uint32_t ct_status; 2591 uint32_t cfg_stat_action; 2592 int rval; 2593 2594 /* Get adapter config status */ 2595 cmd = (struct aac_Container *)&fibp->data[0]; 2596 2597 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2598 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2599 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS); 2600 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE], 2601 sizeof (struct aac_cf_status_header)); 2602 rval = aac_sync_fib(softs, ContainerCommand, 2603 AAC_FIB_SIZEOF(struct aac_Container)); 2604 resp = (struct aac_Container_resp *)cmd; 2605 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data; 2606 2607 resp_status = ddi_get32(acc, &resp->Status); 2608 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2609 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) { 2610 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action); 2611 2612 /* Commit configuration if it's reasonable to do so. */ 2613 if (cfg_stat_action <= CFACT_PAUSE) { 2614 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2615 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2616 ddi_put32(acc, &cmd->CTCommand.command, 2617 CT_COMMIT_CONFIG); 2618 rval = aac_sync_fib(softs, ContainerCommand, 2619 AAC_FIB_SIZEOF(struct aac_Container)); 2620 2621 resp_status = ddi_get32(acc, &resp->Status); 2622 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2623 if ((rval == AACOK) && (resp_status == 0) && 2624 (ct_status == CT_OK)) 2625 /* Successful completion */ 2626 rval = AACMPE_OK; 2627 else 2628 /* Auto-commit aborted due to error(s). */ 2629 rval = AACMPE_COMMIT_CONFIG; 2630 } else { 2631 /* 2632 * Auto-commit aborted due to adapter indicating 2633 * configuration issue(s) too dangerous to auto-commit. 2634 */ 2635 rval = AACMPE_CONFIG_STATUS; 2636 } 2637 } else { 2638 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted"); 2639 rval = AACMPE_CONFIG_STATUS; 2640 } 2641 return (rval); 2642 } 2643 2644 /* 2645 * Hardware initialization and resource allocation 2646 */ 2647 static int 2648 aac_common_attach(struct aac_softstate *softs) 2649 { 2650 uint32_t status; 2651 int i; 2652 2653 DBCALLED(softs, 1); 2654 2655 /* 2656 * Do a little check here to make sure there aren't any outstanding 2657 * FIBs in the message queue. At this point there should not be and 2658 * if there are they are probably left over from another instance of 2659 * the driver like when the system crashes and the crash dump driver 2660 * gets loaded. 2661 */ 2662 while (AAC_OUTB_GET(softs) != 0xfffffffful) 2663 ; 2664 2665 /* 2666 * Wait the card to complete booting up before do anything that 2667 * attempts to communicate with it. 2668 */ 2669 status = AAC_FWSTATUS_GET(softs); 2670 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC) 2671 goto error; 2672 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */ 2673 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i); 2674 if (i == 0) { 2675 cmn_err(CE_CONT, "?Fatal error: controller not ready"); 2676 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2677 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2678 goto error; 2679 } 2680 2681 /* Read and set card supported options and settings */ 2682 if (aac_check_firmware(softs) == AACERR) { 2683 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2684 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2685 goto error; 2686 } 2687 2688 /* Clear out all interrupts */ 2689 AAC_STATUS_CLR(softs, ~0); 2690 2691 /* Setup communication space with the card */ 2692 if (softs->comm_space_dma_handle == NULL) { 2693 if (aac_alloc_comm_space(softs) != AACOK) 2694 goto error; 2695 } 2696 if (aac_setup_comm_space(softs) != AACOK) { 2697 cmn_err(CE_CONT, "?Setup communication space failed"); 2698 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2699 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2700 goto error; 2701 } 2702 2703 #ifdef DEBUG 2704 if (aac_get_fw_debug_buffer(softs) != AACOK) 2705 cmn_err(CE_CONT, "?firmware UART trace not supported"); 2706 #endif 2707 2708 /* Allocate slots */ 2709 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) { 2710 cmn_err(CE_CONT, "?Fatal error: slots allocate failed"); 2711 goto error; 2712 } 2713 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots); 2714 2715 /* Allocate FIBs */ 2716 if (softs->total_fibs < softs->total_slots) { 2717 aac_alloc_fibs(softs); 2718 if (softs->total_fibs == 0) 2719 goto error; 2720 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated", 2721 softs->total_fibs); 2722 } 2723 2724 /* Get adapter names */ 2725 if (CARD_IS_UNKNOWN(softs->card)) { 2726 struct aac_supplement_adapter_info sinf; 2727 2728 if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) { 2729 cmn_err(CE_CONT, "?Query adapter information failed"); 2730 } else { 2731 softs->feature_bits = sinf.FeatureBits; 2732 softs->support_opt2 = sinf.SupportedOptions2; 2733 2734 char *p, *p0, *p1; 2735 2736 /* 2737 * Now find the controller name in supp_adapter_info-> 2738 * AdapterTypeText. Use the first word as the vendor 2739 * and the other words as the product name. 2740 */ 2741 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = " 2742 "\"%s\"", sinf.AdapterTypeText); 2743 p = sinf.AdapterTypeText; 2744 p0 = p1 = NULL; 2745 /* Skip heading spaces */ 2746 while (*p && (*p == ' ' || *p == '\t')) 2747 p++; 2748 p0 = p; 2749 while (*p && (*p != ' ' && *p != '\t')) 2750 p++; 2751 /* Remove middle spaces */ 2752 while (*p && (*p == ' ' || *p == '\t')) 2753 *p++ = 0; 2754 p1 = p; 2755 /* Remove trailing spaces */ 2756 p = p1 + strlen(p1) - 1; 2757 while (p > p1 && (*p == ' ' || *p == '\t')) 2758 *p-- = 0; 2759 if (*p0 && *p1) { 2760 (void *)strncpy(softs->vendor_name, p0, 2761 AAC_VENDOR_LEN); 2762 (void *)strncpy(softs->product_name, p1, 2763 AAC_PRODUCT_LEN); 2764 } else { 2765 cmn_err(CE_WARN, 2766 "?adapter name mis-formatted\n"); 2767 if (*p0) 2768 (void *)strncpy(softs->product_name, 2769 p0, AAC_PRODUCT_LEN); 2770 } 2771 } 2772 } 2773 2774 cmn_err(CE_NOTE, 2775 "!aac driver %d.%02d.%02d-%d, found card: " \ 2776 "%s %s(pci0x%x.%x.%x.%x) at 0x%x", 2777 AAC_DRIVER_MAJOR_VERSION, 2778 AAC_DRIVER_MINOR_VERSION, 2779 AAC_DRIVER_BUGFIX_LEVEL, 2780 AAC_DRIVER_BUILD, 2781 softs->vendor_name, softs->product_name, 2782 softs->vendid, softs->devid, softs->subvendid, softs->subsysid, 2783 softs->pci_mem_base_paddr); 2784 2785 /* Perform acceptance of adapter-detected config changes if possible */ 2786 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) { 2787 cmn_err(CE_CONT, "?Handle adapter config issues failed"); 2788 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2789 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2790 goto error; 2791 } 2792 2793 /* Setup containers (logical devices) */ 2794 if (aac_probe_containers(softs) != AACOK) { 2795 cmn_err(CE_CONT, "?Fatal error: get container info error"); 2796 goto error; 2797 } 2798 2799 /* Setup phys. devices */ 2800 if (softs->flags & AAC_FLAGS_NONDASD) { 2801 uint32_t bus_max, tgt_max; 2802 uint32_t bus, tgt; 2803 int index; 2804 2805 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) { 2806 cmn_err(CE_CONT, "?Fatal error: get bus info error"); 2807 goto error; 2808 } 2809 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d", 2810 bus_max, tgt_max); 2811 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) { 2812 if (softs->state & AAC_STATE_RESET) { 2813 cmn_err(CE_WARN, 2814 "?Fatal error: bus map changed"); 2815 goto error; 2816 } 2817 softs->bus_max = bus_max; 2818 softs->tgt_max = tgt_max; 2819 if (softs->nondasds) { 2820 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2821 sizeof (struct aac_nondasd)); 2822 } 2823 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \ 2824 sizeof (struct aac_nondasd), KM_SLEEP); 2825 2826 index = 0; 2827 for (bus = 0; bus < softs->bus_max; bus++) { 2828 for (tgt = 0; tgt < softs->tgt_max; tgt++) { 2829 struct aac_nondasd *dvp = 2830 &softs->nondasds[index++]; 2831 dvp->dev.type = AAC_DEV_PD; 2832 dvp->bus = bus; 2833 dvp->tid = tgt; 2834 } 2835 } 2836 } 2837 } 2838 2839 /* Check dma & acc handles allocated in attach */ 2840 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) { 2841 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2842 goto error; 2843 } 2844 2845 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 2846 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2847 goto error; 2848 } 2849 2850 for (i = 0; i < softs->total_slots; i++) { 2851 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) != 2852 DDI_SUCCESS) { 2853 ddi_fm_service_impact(softs->devinfo_p, 2854 DDI_SERVICE_LOST); 2855 goto error; 2856 } 2857 } 2858 2859 return (AACOK); 2860 error: 2861 if (softs->state & AAC_STATE_RESET) 2862 return (AACERR); 2863 if (softs->nondasds) { 2864 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2865 sizeof (struct aac_nondasd)); 2866 softs->nondasds = NULL; 2867 } 2868 if (softs->total_fibs > 0) 2869 aac_destroy_fibs(softs); 2870 if (softs->total_slots > 0) 2871 aac_destroy_slots(softs); 2872 if (softs->comm_space_dma_handle) 2873 aac_free_comm_space(softs); 2874 return (AACERR); 2875 } 2876 2877 /* 2878 * Hardware shutdown and resource release 2879 */ 2880 static void 2881 aac_common_detach(struct aac_softstate *softs) 2882 { 2883 DBCALLED(softs, 1); 2884 2885 (void) aac_shutdown(softs); 2886 2887 if (softs->nondasds) { 2888 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2889 sizeof (struct aac_nondasd)); 2890 softs->nondasds = NULL; 2891 } 2892 aac_destroy_fibs(softs); 2893 aac_destroy_slots(softs); 2894 aac_free_comm_space(softs); 2895 } 2896 2897 /* 2898 * Send a synchronous command to the controller and wait for a result. 2899 * Indicate if the controller completed the command with an error status. 2900 */ 2901 int 2902 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd, 2903 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, 2904 uint32_t *statusp) 2905 { 2906 int timeout; 2907 uint32_t status; 2908 2909 if (statusp != NULL) 2910 *statusp = SRB_STATUS_SUCCESS; 2911 2912 /* Fill in mailbox */ 2913 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3); 2914 2915 /* Ensure the sync command doorbell flag is cleared */ 2916 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 2917 2918 /* Then set it to signal the adapter */ 2919 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND); 2920 2921 /* Spin waiting for the command to complete */ 2922 timeout = AAC_IMMEDIATE_TIMEOUT * 1000; 2923 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout); 2924 if (!timeout) { 2925 AACDB_PRINT(softs, CE_WARN, 2926 "Sync command timed out after %d seconds (0x%x)!", 2927 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs)); 2928 return (AACERR); 2929 } 2930 2931 /* Clear the completion flag */ 2932 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 2933 2934 /* Get the command status */ 2935 status = AAC_MAILBOX_GET(softs, 0); 2936 if (statusp != NULL) 2937 *statusp = status; 2938 if (status != SRB_STATUS_SUCCESS) { 2939 AACDB_PRINT(softs, CE_WARN, 2940 "Sync command fail: status = 0x%x", status); 2941 return (AACERR); 2942 } 2943 2944 return (AACOK); 2945 } 2946 2947 /* 2948 * Send a synchronous FIB to the adapter and wait for its completion 2949 */ 2950 static int 2951 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize) 2952 { 2953 struct aac_slot *slotp = &softs->sync_slot; 2954 ddi_dma_handle_t dma = slotp->fib_dma_handle; 2955 uint32_t status; 2956 int rval; 2957 2958 /* Sync fib only supports 512 bytes */ 2959 if (fibsize > AAC_FIB_SIZE) 2960 return (AACERR); 2961 2962 /* 2963 * Setup sync fib 2964 * Need not reinitialize FIB header if it's already been filled 2965 * by others like aac_cmd_fib_scsi as aac_cmd. 2966 */ 2967 if (slotp->acp == NULL) 2968 aac_cmd_fib_header(softs, slotp, cmd, fibsize); 2969 2970 AACDB_PRINT_FIB(softs, &softs->sync_slot); 2971 2972 (void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib), 2973 fibsize, DDI_DMA_SYNC_FORDEV); 2974 2975 /* Give the FIB to the controller, wait for a response. */ 2976 rval = aac_sync_mbcommand(softs, AAC_MONKER_SYNCFIB, 2977 slotp->fib_phyaddr, 0, 0, 0, &status); 2978 if (rval == AACERR) { 2979 AACDB_PRINT(softs, CE_WARN, 2980 "Send sync fib to controller failed"); 2981 return (AACERR); 2982 } 2983 2984 (void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib), 2985 AAC_FIB_SIZE, DDI_DMA_SYNC_FORCPU); 2986 2987 if ((aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) || 2988 (aac_check_dma_handle(dma) != DDI_SUCCESS)) { 2989 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 2990 return (AACERR); 2991 } 2992 2993 return (AACOK); 2994 } 2995 2996 static void 2997 aac_cmd_initq(struct aac_cmd_queue *q) 2998 { 2999 q->q_head = NULL; 3000 q->q_tail = (struct aac_cmd *)&q->q_head; 3001 } 3002 3003 /* 3004 * Remove a cmd from the head of q 3005 */ 3006 static struct aac_cmd * 3007 aac_cmd_dequeue(struct aac_cmd_queue *q) 3008 { 3009 struct aac_cmd *acp; 3010 3011 _NOTE(ASSUMING_PROTECTED(*q)) 3012 3013 if ((acp = q->q_head) != NULL) { 3014 if ((q->q_head = acp->next) != NULL) 3015 acp->next = NULL; 3016 else 3017 q->q_tail = (struct aac_cmd *)&q->q_head; 3018 acp->prev = NULL; 3019 } 3020 return (acp); 3021 } 3022 3023 /* 3024 * Add a cmd to the tail of q 3025 */ 3026 static void 3027 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp) 3028 { 3029 ASSERT(acp->next == NULL); 3030 acp->prev = q->q_tail; 3031 q->q_tail->next = acp; 3032 q->q_tail = acp; 3033 } 3034 3035 /* 3036 * Remove the cmd ac from q 3037 */ 3038 static void 3039 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp) 3040 { 3041 if (acp->prev) { 3042 if ((acp->prev->next = acp->next) != NULL) { 3043 acp->next->prev = acp->prev; 3044 acp->next = NULL; 3045 } else { 3046 q->q_tail = acp->prev; 3047 } 3048 acp->prev = NULL; 3049 } 3050 /* ac is not in the queue */ 3051 } 3052 3053 /* 3054 * Atomically insert an entry into the nominated queue, returns 0 on success or 3055 * AACERR if the queue is full. 3056 * 3057 * Note: it would be more efficient to defer notifying the controller in 3058 * the case where we may be inserting several entries in rapid succession, 3059 * but implementing this usefully may be difficult (it would involve a 3060 * separate queue/notify interface). 3061 */ 3062 static int 3063 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr, 3064 uint32_t fib_size) 3065 { 3066 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3067 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3068 uint32_t pi, ci; 3069 3070 DBCALLED(softs, 2); 3071 3072 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q); 3073 3074 /* Get the producer/consumer indices */ 3075 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3076 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3077 DDI_DMA_SYNC_FORCPU); 3078 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3079 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3080 return (AACERR); 3081 } 3082 3083 pi = ddi_get32(acc, 3084 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3085 ci = ddi_get32(acc, 3086 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3087 3088 /* 3089 * Wrap the queue first before we check the queue to see 3090 * if it is full 3091 */ 3092 if (pi >= aac_qinfo[queue].size) 3093 pi = 0; 3094 3095 /* XXX queue full */ 3096 if ((pi + 1) == ci) 3097 return (AACERR); 3098 3099 /* Fill in queue entry */ 3100 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size); 3101 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr); 3102 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3103 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3104 DDI_DMA_SYNC_FORDEV); 3105 3106 /* Update producer index */ 3107 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX], 3108 pi + 1); 3109 (void) ddi_dma_sync(dma, 3110 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \ 3111 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3112 DDI_DMA_SYNC_FORDEV); 3113 3114 if (aac_qinfo[queue].notify != 0) 3115 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3116 return (AACOK); 3117 } 3118 3119 /* 3120 * Atomically remove one entry from the nominated queue, returns 0 on 3121 * success or AACERR if the queue is empty. 3122 */ 3123 static int 3124 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp) 3125 { 3126 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3127 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3128 uint32_t pi, ci; 3129 int unfull = 0; 3130 3131 DBCALLED(softs, 2); 3132 3133 ASSERT(idxp); 3134 3135 /* Get the producer/consumer indices */ 3136 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3137 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3138 DDI_DMA_SYNC_FORCPU); 3139 pi = ddi_get32(acc, 3140 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3141 ci = ddi_get32(acc, 3142 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3143 3144 /* Check for queue empty */ 3145 if (ci == pi) 3146 return (AACERR); 3147 3148 if (pi >= aac_qinfo[queue].size) 3149 pi = 0; 3150 3151 /* Check for queue full */ 3152 if (ci == pi + 1) 3153 unfull = 1; 3154 3155 /* 3156 * The controller does not wrap the queue, 3157 * so we have to do it by ourselves 3158 */ 3159 if (ci >= aac_qinfo[queue].size) 3160 ci = 0; 3161 3162 /* Fetch the entry */ 3163 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3164 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3165 DDI_DMA_SYNC_FORCPU); 3166 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3167 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3168 return (AACERR); 3169 } 3170 3171 switch (queue) { 3172 case AAC_HOST_NORM_RESP_Q: 3173 case AAC_HOST_HIGH_RESP_Q: 3174 *idxp = ddi_get32(acc, 3175 &(softs->qentries[queue] + ci)->aq_fib_addr); 3176 break; 3177 3178 case AAC_HOST_NORM_CMD_Q: 3179 case AAC_HOST_HIGH_CMD_Q: 3180 *idxp = ddi_get32(acc, 3181 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE; 3182 break; 3183 3184 default: 3185 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()"); 3186 return (AACERR); 3187 } 3188 3189 /* Update consumer index */ 3190 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX], 3191 ci + 1); 3192 (void) ddi_dma_sync(dma, 3193 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \ 3194 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3195 DDI_DMA_SYNC_FORDEV); 3196 3197 if (unfull && aac_qinfo[queue].notify != 0) 3198 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3199 return (AACOK); 3200 } 3201 3202 /* 3203 * Request information of the container cid 3204 */ 3205 static struct aac_mntinforesp * 3206 aac_get_container_info(struct aac_softstate *softs, int cid) 3207 { 3208 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3209 struct aac_fib *fibp = softs->sync_slot.fibp; 3210 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0]; 3211 struct aac_mntinforesp *mir; 3212 3213 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */ 3214 (softs->flags & AAC_FLAGS_LBA_64BIT) ? 3215 VM_NameServe64 : VM_NameServe); 3216 ddi_put32(acc, &mi->MntType, FT_FILESYS); 3217 ddi_put32(acc, &mi->MntCount, cid); 3218 3219 if (aac_sync_fib(softs, ContainerCommand, 3220 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) { 3221 AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid); 3222 return (NULL); 3223 } 3224 3225 mir = (struct aac_mntinforesp *)&fibp->data[0]; 3226 if (ddi_get32(acc, &mir->Status) == ST_OK) 3227 return (mir); 3228 return (NULL); 3229 } 3230 3231 static int 3232 aac_get_container_count(struct aac_softstate *softs, int *count) 3233 { 3234 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3235 struct aac_mntinforesp *mir; 3236 3237 if ((mir = aac_get_container_info(softs, 0)) == NULL) 3238 return (AACERR); 3239 *count = ddi_get32(acc, &mir->MntRespCount); 3240 if (*count > AAC_MAX_LD) { 3241 AACDB_PRINT(softs, CE_CONT, 3242 "container count(%d) > AAC_MAX_LD", *count); 3243 return (AACERR); 3244 } 3245 return (AACOK); 3246 } 3247 3248 static int 3249 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid) 3250 { 3251 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3252 struct aac_Container *ct = (struct aac_Container *) \ 3253 &softs->sync_slot.fibp->data[0]; 3254 3255 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE); 3256 ddi_put32(acc, &ct->Command, VM_ContainerConfig); 3257 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID); 3258 ddi_put32(acc, &ct->CTCommand.param[0], cid); 3259 3260 if (aac_sync_fib(softs, ContainerCommand, 3261 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR) 3262 return (AACERR); 3263 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK) 3264 return (AACERR); 3265 3266 *uid = ddi_get32(acc, &ct->CTCommand.param[1]); 3267 return (AACOK); 3268 } 3269 3270 static int 3271 aac_probe_container(struct aac_softstate *softs, uint32_t cid) 3272 { 3273 struct aac_container *dvp = &softs->containers[cid]; 3274 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3275 struct aac_mntinforesp *mir; 3276 uint64_t size; 3277 uint32_t uid; 3278 3279 /* Get container basic info */ 3280 if ((mir = aac_get_container_info(softs, cid)) == NULL) 3281 return (AACERR); 3282 3283 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) { 3284 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3285 AACDB_PRINT(softs, CE_NOTE, 3286 ">>> Container %d deleted", cid); 3287 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3288 (void) aac_dr_event(softs, dvp->cid, -1, 3289 AAC_EVT_OFFLINE); 3290 } 3291 } else { 3292 size = AAC_MIR_SIZE(softs, acc, mir); 3293 3294 /* Get container UID */ 3295 if (aac_get_container_uid(softs, cid, &uid) == AACERR) { 3296 AACDB_PRINT(softs, CE_CONT, 3297 "query container %d uid failed", cid); 3298 return (AACERR); 3299 } 3300 AACDB_PRINT(softs, CE_CONT, "uid=0x%08x", uid); 3301 3302 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3303 if (dvp->uid != uid) { 3304 AACDB_PRINT(softs, CE_WARN, 3305 ">>> Container %u uid changed to %d", 3306 cid, uid); 3307 dvp->uid = uid; 3308 } 3309 if (dvp->size != size) { 3310 AACDB_PRINT(softs, CE_NOTE, 3311 ">>> Container %u size changed to %"PRIu64, 3312 cid, size); 3313 dvp->size = size; 3314 } 3315 } else { /* Init new container */ 3316 AACDB_PRINT(softs, CE_NOTE, 3317 ">>> Container %d added: " \ 3318 "size=0x%x.%08x, type=%d, name=%s", 3319 cid, 3320 ddi_get32(acc, &mir->MntObj.CapacityHigh), 3321 ddi_get32(acc, &mir->MntObj.Capacity), 3322 ddi_get32(acc, &mir->MntObj.VolType), 3323 mir->MntObj.FileSystemName); 3324 dvp->dev.flags |= AAC_DFLAG_VALID; 3325 dvp->dev.type = AAC_DEV_LD; 3326 3327 dvp->cid = cid; 3328 dvp->uid = uid; 3329 dvp->size = size; 3330 dvp->locked = 0; 3331 dvp->deleted = 0; 3332 (void) aac_dr_event(softs, dvp->cid, -1, 3333 AAC_EVT_ONLINE); 3334 } 3335 } 3336 return (AACOK); 3337 } 3338 3339 /* 3340 * Do a rescan of all the possible containers and update the container list 3341 * with newly online/offline containers, and prepare for autoconfiguration. 3342 */ 3343 static int 3344 aac_probe_containers(struct aac_softstate *softs) 3345 { 3346 int i, count, total; 3347 3348 /* Loop over possible containers */ 3349 count = softs->container_count; 3350 if (aac_get_container_count(softs, &count) == AACERR) 3351 return (AACERR); 3352 for (i = total = 0; i < count; i++) { 3353 if (aac_probe_container(softs, i) == AACOK) 3354 total++; 3355 } 3356 if (count < softs->container_count) { 3357 struct aac_container *dvp; 3358 3359 for (dvp = &softs->containers[count]; 3360 dvp < &softs->containers[softs->container_count]; dvp++) { 3361 if (!AAC_DEV_IS_VALID(&dvp->dev)) 3362 continue; 3363 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted", 3364 dvp->cid); 3365 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3366 (void) aac_dr_event(softs, dvp->cid, -1, 3367 AAC_EVT_OFFLINE); 3368 } 3369 } 3370 softs->container_count = count; 3371 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total); 3372 return (AACOK); 3373 } 3374 3375 static int 3376 aac_alloc_comm_space(struct aac_softstate *softs) 3377 { 3378 size_t rlen; 3379 ddi_dma_cookie_t cookie; 3380 uint_t cookien; 3381 3382 /* Allocate DMA for comm. space */ 3383 if (ddi_dma_alloc_handle( 3384 softs->devinfo_p, 3385 &softs->addr_dma_attr, 3386 DDI_DMA_SLEEP, 3387 NULL, 3388 &softs->comm_space_dma_handle) != DDI_SUCCESS) { 3389 AACDB_PRINT(softs, CE_WARN, 3390 "Cannot alloc dma handle for communication area"); 3391 goto error; 3392 } 3393 if (ddi_dma_mem_alloc( 3394 softs->comm_space_dma_handle, 3395 sizeof (struct aac_comm_space), 3396 &softs->acc_attr, 3397 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3398 DDI_DMA_SLEEP, 3399 NULL, 3400 (caddr_t *)&softs->comm_space, 3401 &rlen, 3402 &softs->comm_space_acc_handle) != DDI_SUCCESS) { 3403 AACDB_PRINT(softs, CE_WARN, 3404 "Cannot alloc mem for communication area"); 3405 goto error; 3406 } 3407 if (ddi_dma_addr_bind_handle( 3408 softs->comm_space_dma_handle, 3409 NULL, 3410 (caddr_t)softs->comm_space, 3411 sizeof (struct aac_comm_space), 3412 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3413 DDI_DMA_SLEEP, 3414 NULL, 3415 &cookie, 3416 &cookien) != DDI_DMA_MAPPED) { 3417 AACDB_PRINT(softs, CE_WARN, 3418 "DMA bind failed for communication area"); 3419 goto error; 3420 } 3421 softs->comm_space_phyaddr = cookie.dmac_address; 3422 3423 /* Setup sync FIB space */ 3424 softs->sync_slot.fibp = &softs->comm_space->sync_fib; 3425 softs->sync_slot.fib_phyaddr = softs->comm_space_phyaddr + \ 3426 offsetof(struct aac_comm_space, sync_fib); 3427 softs->sync_slot.fib_acc_handle = softs->comm_space_acc_handle; 3428 softs->sync_slot.fib_dma_handle = softs->comm_space_dma_handle; 3429 3430 return (AACOK); 3431 error: 3432 if (softs->comm_space_acc_handle) { 3433 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3434 softs->comm_space_acc_handle = NULL; 3435 } 3436 if (softs->comm_space_dma_handle) { 3437 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3438 softs->comm_space_dma_handle = NULL; 3439 } 3440 return (AACERR); 3441 } 3442 3443 static void 3444 aac_free_comm_space(struct aac_softstate *softs) 3445 { 3446 softs->sync_slot.fibp = NULL; 3447 softs->sync_slot.fib_phyaddr = NULL; 3448 softs->sync_slot.fib_acc_handle = NULL; 3449 softs->sync_slot.fib_dma_handle = NULL; 3450 3451 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle); 3452 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3453 softs->comm_space_acc_handle = NULL; 3454 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3455 softs->comm_space_dma_handle = NULL; 3456 softs->comm_space_phyaddr = NULL; 3457 } 3458 3459 /* 3460 * Initialize the data structures that are required for the communication 3461 * interface to operate 3462 */ 3463 static int 3464 aac_setup_comm_space(struct aac_softstate *softs) 3465 { 3466 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3467 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3468 uint32_t comm_space_phyaddr; 3469 struct aac_adapter_init *initp; 3470 int qoffset; 3471 3472 comm_space_phyaddr = softs->comm_space_phyaddr; 3473 3474 /* Setup adapter init struct */ 3475 initp = &softs->comm_space->init_data; 3476 bzero(initp, sizeof (struct aac_adapter_init)); 3477 3478 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION); 3479 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time()); 3480 3481 /* Setup new/old comm. specific data */ 3482 if (softs->flags & AAC_FLAGS_RAW_IO) { 3483 uint32_t init_flags = 0; 3484 3485 if (softs->flags & AAC_FLAGS_NEW_COMM) 3486 init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED; 3487 /* AAC_SUPPORTED_POWER_MANAGEMENT */ 3488 init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM; 3489 init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME; 3490 3491 ddi_put32(acc, &initp->InitStructRevision, 3492 AAC_INIT_STRUCT_REVISION_4); 3493 ddi_put32(acc, &initp->InitFlags, init_flags); 3494 /* Setup the preferred settings */ 3495 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs); 3496 ddi_put32(acc, &initp->MaxIoSize, 3497 (softs->aac_max_sectors << 9)); 3498 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size); 3499 } else { 3500 /* 3501 * Tells the adapter about the physical location of various 3502 * important shared data structures 3503 */ 3504 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress, 3505 comm_space_phyaddr + \ 3506 offsetof(struct aac_comm_space, adapter_fibs)); 3507 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0); 3508 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE); 3509 ddi_put32(acc, &initp->AdapterFibsSize, 3510 AAC_ADAPTER_FIBS * AAC_FIB_SIZE); 3511 ddi_put32(acc, &initp->PrintfBufferAddress, 3512 comm_space_phyaddr + \ 3513 offsetof(struct aac_comm_space, adapter_print_buf)); 3514 ddi_put32(acc, &initp->PrintfBufferSize, 3515 AAC_ADAPTER_PRINT_BUFSIZE); 3516 ddi_put32(acc, &initp->MiniPortRevision, 3517 AAC_INIT_STRUCT_MINIPORT_REVISION); 3518 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN); 3519 3520 qoffset = (comm_space_phyaddr + \ 3521 offsetof(struct aac_comm_space, qtable)) % \ 3522 AAC_QUEUE_ALIGN; 3523 if (qoffset) 3524 qoffset = AAC_QUEUE_ALIGN - qoffset; 3525 softs->qtablep = (struct aac_queue_table *) \ 3526 ((char *)&softs->comm_space->qtable + qoffset); 3527 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \ 3528 offsetof(struct aac_comm_space, qtable) + qoffset); 3529 3530 /* Init queue table */ 3531 ddi_put32(acc, &softs->qtablep-> \ 3532 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3533 AAC_HOST_NORM_CMD_ENTRIES); 3534 ddi_put32(acc, &softs->qtablep-> \ 3535 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3536 AAC_HOST_NORM_CMD_ENTRIES); 3537 ddi_put32(acc, &softs->qtablep-> \ 3538 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3539 AAC_HOST_HIGH_CMD_ENTRIES); 3540 ddi_put32(acc, &softs->qtablep-> \ 3541 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3542 AAC_HOST_HIGH_CMD_ENTRIES); 3543 ddi_put32(acc, &softs->qtablep-> \ 3544 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3545 AAC_ADAP_NORM_CMD_ENTRIES); 3546 ddi_put32(acc, &softs->qtablep-> \ 3547 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3548 AAC_ADAP_NORM_CMD_ENTRIES); 3549 ddi_put32(acc, &softs->qtablep-> \ 3550 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3551 AAC_ADAP_HIGH_CMD_ENTRIES); 3552 ddi_put32(acc, &softs->qtablep-> \ 3553 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3554 AAC_ADAP_HIGH_CMD_ENTRIES); 3555 ddi_put32(acc, &softs->qtablep-> \ 3556 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3557 AAC_HOST_NORM_RESP_ENTRIES); 3558 ddi_put32(acc, &softs->qtablep-> \ 3559 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3560 AAC_HOST_NORM_RESP_ENTRIES); 3561 ddi_put32(acc, &softs->qtablep-> \ 3562 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3563 AAC_HOST_HIGH_RESP_ENTRIES); 3564 ddi_put32(acc, &softs->qtablep-> \ 3565 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3566 AAC_HOST_HIGH_RESP_ENTRIES); 3567 ddi_put32(acc, &softs->qtablep-> \ 3568 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3569 AAC_ADAP_NORM_RESP_ENTRIES); 3570 ddi_put32(acc, &softs->qtablep-> \ 3571 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3572 AAC_ADAP_NORM_RESP_ENTRIES); 3573 ddi_put32(acc, &softs->qtablep-> \ 3574 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3575 AAC_ADAP_HIGH_RESP_ENTRIES); 3576 ddi_put32(acc, &softs->qtablep-> \ 3577 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3578 AAC_ADAP_HIGH_RESP_ENTRIES); 3579 3580 /* Init queue entries */ 3581 softs->qentries[AAC_HOST_NORM_CMD_Q] = 3582 &softs->qtablep->qt_HostNormCmdQueue[0]; 3583 softs->qentries[AAC_HOST_HIGH_CMD_Q] = 3584 &softs->qtablep->qt_HostHighCmdQueue[0]; 3585 softs->qentries[AAC_ADAP_NORM_CMD_Q] = 3586 &softs->qtablep->qt_AdapNormCmdQueue[0]; 3587 softs->qentries[AAC_ADAP_HIGH_CMD_Q] = 3588 &softs->qtablep->qt_AdapHighCmdQueue[0]; 3589 softs->qentries[AAC_HOST_NORM_RESP_Q] = 3590 &softs->qtablep->qt_HostNormRespQueue[0]; 3591 softs->qentries[AAC_HOST_HIGH_RESP_Q] = 3592 &softs->qtablep->qt_HostHighRespQueue[0]; 3593 softs->qentries[AAC_ADAP_NORM_RESP_Q] = 3594 &softs->qtablep->qt_AdapNormRespQueue[0]; 3595 softs->qentries[AAC_ADAP_HIGH_RESP_Q] = 3596 &softs->qtablep->qt_AdapHighRespQueue[0]; 3597 } 3598 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV); 3599 3600 /* Send init structure to the card */ 3601 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT, 3602 comm_space_phyaddr + \ 3603 offsetof(struct aac_comm_space, init_data), 3604 0, 0, 0, NULL) == AACERR) { 3605 AACDB_PRINT(softs, CE_WARN, 3606 "Cannot send init structure to adapter"); 3607 return (AACERR); 3608 } 3609 3610 return (AACOK); 3611 } 3612 3613 static uchar_t * 3614 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf) 3615 { 3616 (void) memset(buf, ' ', AAC_VENDOR_LEN); 3617 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name)); 3618 return (buf + AAC_VENDOR_LEN); 3619 } 3620 3621 static uchar_t * 3622 aac_product_id(struct aac_softstate *softs, uchar_t *buf) 3623 { 3624 (void) memset(buf, ' ', AAC_PRODUCT_LEN); 3625 bcopy(softs->product_name, buf, strlen(softs->product_name)); 3626 return (buf + AAC_PRODUCT_LEN); 3627 } 3628 3629 /* 3630 * Construct unit serial number from container uid 3631 */ 3632 static uchar_t * 3633 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf) 3634 { 3635 int i, d; 3636 uint32_t uid; 3637 3638 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD); 3639 3640 uid = softs->containers[tgt].uid; 3641 for (i = 7; i >= 0; i--) { 3642 d = uid & 0xf; 3643 buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d; 3644 uid >>= 4; 3645 } 3646 return (buf + 8); 3647 } 3648 3649 /* 3650 * SPC-3 7.5 INQUIRY command implementation 3651 */ 3652 static void 3653 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt, 3654 union scsi_cdb *cdbp, struct buf *bp) 3655 { 3656 int tgt = pkt->pkt_address.a_target; 3657 char *b_addr = NULL; 3658 uchar_t page = cdbp->cdb_opaque[2]; 3659 3660 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) { 3661 /* Command Support Data is not supported */ 3662 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0); 3663 return; 3664 } 3665 3666 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3667 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3668 bp_mapin(bp); 3669 b_addr = bp->b_un.b_addr; 3670 } 3671 3672 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) { 3673 uchar_t *vpdp = (uchar_t *)b_addr; 3674 uchar_t *idp, *sp; 3675 3676 /* SPC-3 8.4 Vital product data parameters */ 3677 switch (page) { 3678 case 0x00: 3679 /* Supported VPD pages */ 3680 if (vpdp == NULL || 3681 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3)) 3682 return; 3683 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3684 vpdp[AAC_VPD_PAGE_CODE] = 0x00; 3685 vpdp[AAC_VPD_PAGE_LENGTH] = 3; 3686 3687 vpdp[AAC_VPD_PAGE_DATA] = 0x00; 3688 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80; 3689 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83; 3690 3691 pkt->pkt_state |= STATE_XFERRED_DATA; 3692 break; 3693 3694 case 0x80: 3695 /* Unit serial number page */ 3696 if (vpdp == NULL || 3697 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8)) 3698 return; 3699 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3700 vpdp[AAC_VPD_PAGE_CODE] = 0x80; 3701 vpdp[AAC_VPD_PAGE_LENGTH] = 8; 3702 3703 sp = &vpdp[AAC_VPD_PAGE_DATA]; 3704 (void) aac_lun_serialno(softs, tgt, sp); 3705 3706 pkt->pkt_state |= STATE_XFERRED_DATA; 3707 break; 3708 3709 case 0x83: 3710 /* Device identification page */ 3711 if (vpdp == NULL || 3712 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32)) 3713 return; 3714 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3715 vpdp[AAC_VPD_PAGE_CODE] = 0x83; 3716 3717 idp = &vpdp[AAC_VPD_PAGE_DATA]; 3718 bzero(idp, AAC_VPD_ID_LENGTH); 3719 idp[AAC_VPD_ID_CODESET] = 0x02; 3720 idp[AAC_VPD_ID_TYPE] = 0x01; 3721 3722 /* 3723 * SPC-3 Table 111 - Identifier type 3724 * One recommanded method of constructing the remainder 3725 * of identifier field is to concatenate the product 3726 * identification field from the standard INQUIRY data 3727 * field and the product serial number field from the 3728 * unit serial number page. 3729 */ 3730 sp = &idp[AAC_VPD_ID_DATA]; 3731 sp = aac_vendor_id(softs, sp); 3732 sp = aac_product_id(softs, sp); 3733 sp = aac_lun_serialno(softs, tgt, sp); 3734 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \ 3735 (uintptr_t)&idp[AAC_VPD_ID_DATA]; 3736 3737 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \ 3738 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA]; 3739 pkt->pkt_state |= STATE_XFERRED_DATA; 3740 break; 3741 3742 default: 3743 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3744 0x24, 0x00, 0); 3745 break; 3746 } 3747 } else { 3748 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr; 3749 size_t len = sizeof (struct scsi_inquiry); 3750 3751 if (page != 0) { 3752 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3753 0x24, 0x00, 0); 3754 return; 3755 } 3756 if (inqp == NULL || bp->b_bcount < len) 3757 return; 3758 3759 bzero(inqp, len); 3760 inqp->inq_len = AAC_ADDITIONAL_LEN; 3761 inqp->inq_ansi = AAC_ANSI_VER; 3762 inqp->inq_rdf = AAC_RESP_DATA_FORMAT; 3763 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid); 3764 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid); 3765 bcopy("V1.0", inqp->inq_revision, 4); 3766 inqp->inq_cmdque = 1; /* enable tagged-queuing */ 3767 /* 3768 * For "sd-max-xfer-size" property which may impact performance 3769 * when IO threads increase. 3770 */ 3771 inqp->inq_wbus32 = 1; 3772 3773 pkt->pkt_state |= STATE_XFERRED_DATA; 3774 } 3775 } 3776 3777 /* 3778 * SPC-3 7.10 MODE SENSE command implementation 3779 */ 3780 static void 3781 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt, 3782 union scsi_cdb *cdbp, struct buf *bp, int capacity) 3783 { 3784 uchar_t pagecode; 3785 struct mode_header *headerp; 3786 struct mode_header_g1 *g1_headerp; 3787 unsigned int ncyl; 3788 caddr_t sense_data; 3789 caddr_t next_page; 3790 size_t sdata_size; 3791 size_t pages_size; 3792 int unsupport_page = 0; 3793 3794 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE || 3795 cdbp->scc_cmd == SCMD_MODE_SENSE_G1); 3796 3797 if (!(bp && bp->b_un.b_addr && bp->b_bcount)) 3798 return; 3799 3800 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3801 bp_mapin(bp); 3802 pkt->pkt_state |= STATE_XFERRED_DATA; 3803 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F; 3804 3805 /* calculate the size of needed buffer */ 3806 if (cdbp->scc_cmd == SCMD_MODE_SENSE) 3807 sdata_size = MODE_HEADER_LENGTH; 3808 else /* must be SCMD_MODE_SENSE_G1 */ 3809 sdata_size = MODE_HEADER_LENGTH_G1; 3810 3811 pages_size = 0; 3812 switch (pagecode) { 3813 case SD_MODE_SENSE_PAGE3_CODE: 3814 pages_size += sizeof (struct mode_format); 3815 break; 3816 3817 case SD_MODE_SENSE_PAGE4_CODE: 3818 pages_size += sizeof (struct mode_geometry); 3819 break; 3820 3821 case MODEPAGE_CTRL_MODE: 3822 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3823 pages_size += sizeof (struct mode_control_scsi3); 3824 } else { 3825 unsupport_page = 1; 3826 } 3827 break; 3828 3829 case MODEPAGE_ALLPAGES: 3830 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3831 pages_size += sizeof (struct mode_format) + 3832 sizeof (struct mode_geometry) + 3833 sizeof (struct mode_control_scsi3); 3834 } else { 3835 pages_size += sizeof (struct mode_format) + 3836 sizeof (struct mode_geometry); 3837 } 3838 break; 3839 3840 default: 3841 /* unsupported pages */ 3842 unsupport_page = 1; 3843 } 3844 3845 /* allocate buffer to fill the send data */ 3846 sdata_size += pages_size; 3847 sense_data = kmem_zalloc(sdata_size, KM_SLEEP); 3848 3849 if (cdbp->scc_cmd == SCMD_MODE_SENSE) { 3850 headerp = (struct mode_header *)sense_data; 3851 headerp->length = MODE_HEADER_LENGTH + pages_size - 3852 sizeof (headerp->length); 3853 headerp->bdesc_length = 0; 3854 next_page = sense_data + sizeof (struct mode_header); 3855 } else { 3856 g1_headerp = (void *)sense_data; 3857 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size - 3858 sizeof (g1_headerp->length)); 3859 g1_headerp->bdesc_length = 0; 3860 next_page = sense_data + sizeof (struct mode_header_g1); 3861 } 3862 3863 if (unsupport_page) 3864 goto finish; 3865 3866 if (pagecode == SD_MODE_SENSE_PAGE3_CODE || 3867 pagecode == MODEPAGE_ALLPAGES) { 3868 /* SBC-3 7.1.3.3 Format device page */ 3869 struct mode_format *page3p; 3870 3871 page3p = (void *)next_page; 3872 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE; 3873 page3p->mode_page.length = sizeof (struct mode_format); 3874 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE); 3875 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK); 3876 3877 next_page += sizeof (struct mode_format); 3878 } 3879 3880 if (pagecode == SD_MODE_SENSE_PAGE4_CODE || 3881 pagecode == MODEPAGE_ALLPAGES) { 3882 /* SBC-3 7.1.3.8 Rigid disk device geometry page */ 3883 struct mode_geometry *page4p; 3884 3885 page4p = (void *)next_page; 3886 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE; 3887 page4p->mode_page.length = sizeof (struct mode_geometry); 3888 page4p->heads = AAC_NUMBER_OF_HEADS; 3889 page4p->rpm = BE_16(AAC_ROTATION_SPEED); 3890 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK); 3891 page4p->cyl_lb = ncyl & 0xff; 3892 page4p->cyl_mb = (ncyl >> 8) & 0xff; 3893 page4p->cyl_ub = (ncyl >> 16) & 0xff; 3894 3895 next_page += sizeof (struct mode_geometry); 3896 } 3897 3898 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) && 3899 softs->flags & AAC_FLAGS_LBA_64BIT) { 3900 /* 64-bit LBA need large sense data */ 3901 struct mode_control_scsi3 *mctl; 3902 3903 mctl = (void *)next_page; 3904 mctl->mode_page.code = MODEPAGE_CTRL_MODE; 3905 mctl->mode_page.length = 3906 sizeof (struct mode_control_scsi3) - 3907 sizeof (struct mode_page); 3908 mctl->d_sense = 1; 3909 } 3910 3911 finish: 3912 /* copyout the valid data. */ 3913 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount)); 3914 kmem_free(sense_data, sdata_size); 3915 } 3916 3917 static int 3918 aac_name_node(dev_info_t *dip, char *name, int len) 3919 { 3920 int tgt, lun; 3921 3922 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3923 DDI_PROP_DONTPASS, "target", -1); 3924 if (tgt == -1) 3925 return (DDI_FAILURE); 3926 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3927 DDI_PROP_DONTPASS, "lun", -1); 3928 if (lun == -1) 3929 return (DDI_FAILURE); 3930 3931 (void) snprintf(name, len, "%x,%x", tgt, lun); 3932 return (DDI_SUCCESS); 3933 } 3934 3935 /*ARGSUSED*/ 3936 static int 3937 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 3938 scsi_hba_tran_t *tran, struct scsi_device *sd) 3939 { 3940 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 3941 #if defined(DEBUG) || defined(__lock_lint) 3942 int ctl = ddi_get_instance(softs->devinfo_p); 3943 #endif 3944 uint16_t tgt = sd->sd_address.a_target; 3945 uint8_t lun = sd->sd_address.a_lun; 3946 struct aac_device *dvp; 3947 3948 DBCALLED(softs, 2); 3949 3950 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 3951 /* 3952 * If no persistent node exist, we don't allow .conf node 3953 * to be created. 3954 */ 3955 if (aac_find_child(softs, tgt, lun) != NULL) { 3956 if (ndi_merge_node(tgt_dip, aac_name_node) != 3957 DDI_SUCCESS) 3958 /* Create this .conf node */ 3959 return (DDI_SUCCESS); 3960 } 3961 return (DDI_FAILURE); 3962 } 3963 3964 /* 3965 * Only support container/phys. device that has been 3966 * detected and valid 3967 */ 3968 mutex_enter(&softs->io_lock); 3969 if (tgt >= AAC_MAX_DEV(softs)) { 3970 AACDB_PRINT_TRAN(softs, 3971 "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun); 3972 mutex_exit(&softs->io_lock); 3973 return (DDI_FAILURE); 3974 } 3975 3976 if (tgt < AAC_MAX_LD) { 3977 dvp = (struct aac_device *)&softs->containers[tgt]; 3978 if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) { 3979 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d", 3980 ctl, tgt, lun); 3981 mutex_exit(&softs->io_lock); 3982 return (DDI_FAILURE); 3983 } 3984 /* 3985 * Save the tgt_dip for the given target if one doesn't exist 3986 * already. Dip's for non-existance tgt's will be cleared in 3987 * tgt_free. 3988 */ 3989 if (softs->containers[tgt].dev.dip == NULL && 3990 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) 3991 softs->containers[tgt].dev.dip = tgt_dip; 3992 } else { 3993 dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)]; 3994 } 3995 3996 if (softs->flags & AAC_FLAGS_BRKUP) { 3997 if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip, 3998 "buf_break", 1) != DDI_PROP_SUCCESS) { 3999 cmn_err(CE_CONT, "unable to create " 4000 "property for t%dL%d (buf_break)", tgt, lun); 4001 } 4002 } 4003 4004 AACDB_PRINT(softs, CE_NOTE, 4005 "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun, 4006 (dvp->type == AAC_DEV_PD) ? "pd" : "ld"); 4007 mutex_exit(&softs->io_lock); 4008 return (DDI_SUCCESS); 4009 } 4010 4011 static void 4012 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 4013 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 4014 { 4015 #ifndef __lock_lint 4016 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran)) 4017 #endif 4018 4019 struct aac_softstate *softs = SD2AAC(sd); 4020 int tgt = sd->sd_address.a_target; 4021 4022 mutex_enter(&softs->io_lock); 4023 if (tgt < AAC_MAX_LD) { 4024 if (softs->containers[tgt].dev.dip == tgt_dip) 4025 softs->containers[tgt].dev.dip = NULL; 4026 } else { 4027 softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID; 4028 } 4029 mutex_exit(&softs->io_lock); 4030 } 4031 4032 /* 4033 * Check if the firmware is Up And Running. If it is in the Kernel Panic 4034 * state, (BlinkLED code + 1) is returned. 4035 * 0 -- firmware up and running 4036 * -1 -- firmware dead 4037 * >0 -- firmware kernel panic 4038 */ 4039 static int 4040 aac_check_adapter_health(struct aac_softstate *softs) 4041 { 4042 int rval; 4043 4044 rval = PCI_MEM_GET32(softs, AAC_OMR0); 4045 4046 if (rval & AAC_KERNEL_UP_AND_RUNNING) { 4047 rval = 0; 4048 } else if (rval & AAC_KERNEL_PANIC) { 4049 cmn_err(CE_WARN, "firmware panic"); 4050 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */ 4051 } else { 4052 cmn_err(CE_WARN, "firmware dead"); 4053 rval = -1; 4054 } 4055 return (rval); 4056 } 4057 4058 static void 4059 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp, 4060 uchar_t reason) 4061 { 4062 acp->flags |= AAC_CMD_ABORT; 4063 4064 if (acp->pkt) { 4065 /* 4066 * Each lun should generate a unit attention 4067 * condition when reset. 4068 * Phys. drives are treated as logical ones 4069 * during error recovery. 4070 */ 4071 if (acp->slotp) { /* outstanding cmd */ 4072 acp->pkt->pkt_state |= STATE_GOT_STATUS; 4073 aac_set_arq_data_reset(softs, acp); 4074 } 4075 4076 switch (reason) { 4077 case CMD_TIMEOUT: 4078 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p", 4079 acp); 4080 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 4081 STAT_TIMEOUT | STAT_BUS_RESET); 4082 break; 4083 case CMD_RESET: 4084 /* aac support only RESET_ALL */ 4085 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp); 4086 aac_set_pkt_reason(softs, acp, CMD_RESET, 4087 STAT_BUS_RESET); 4088 break; 4089 case CMD_ABORTED: 4090 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p", 4091 acp); 4092 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 4093 STAT_ABORTED); 4094 break; 4095 } 4096 } 4097 aac_end_io(softs, acp); 4098 } 4099 4100 /* 4101 * Abort all the pending commands of type iocmd or just the command pkt 4102 * corresponding to pkt 4103 */ 4104 static void 4105 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt, 4106 int reason) 4107 { 4108 struct aac_cmd *ac_arg, *acp; 4109 int i; 4110 4111 if (pkt == NULL) { 4112 ac_arg = NULL; 4113 } else { 4114 ac_arg = PKT2AC(pkt); 4115 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ? 4116 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC; 4117 } 4118 4119 /* 4120 * a) outstanding commands on the controller 4121 * Note: should abort outstanding commands only after one 4122 * IOP reset has been done. 4123 */ 4124 if (iocmd & AAC_IOCMD_OUTSTANDING) { 4125 struct aac_cmd *acp; 4126 4127 for (i = 0; i < AAC_MAX_LD; i++) { 4128 if (AAC_DEV_IS_VALID(&softs->containers[i].dev)) 4129 softs->containers[i].reset = 1; 4130 } 4131 while ((acp = softs->q_busy.q_head) != NULL) 4132 aac_abort_iocmd(softs, acp, reason); 4133 } 4134 4135 /* b) commands in the waiting queues */ 4136 for (i = 0; i < AAC_CMDQ_NUM; i++) { 4137 if (iocmd & (1 << i)) { 4138 if (ac_arg) { 4139 aac_abort_iocmd(softs, ac_arg, reason); 4140 } else { 4141 while ((acp = softs->q_wait[i].q_head) != NULL) 4142 aac_abort_iocmd(softs, acp, reason); 4143 } 4144 } 4145 } 4146 } 4147 4148 /* 4149 * The draining thread is shared among quiesce threads. It terminates 4150 * when the adapter is quiesced or stopped by aac_stop_drain(). 4151 */ 4152 static void 4153 aac_check_drain(void *arg) 4154 { 4155 struct aac_softstate *softs = arg; 4156 4157 mutex_enter(&softs->io_lock); 4158 if (softs->ndrains) { 4159 softs->drain_timeid = 0; 4160 /* 4161 * If both ASYNC and SYNC bus throttle are held, 4162 * wake up threads only when both are drained out. 4163 */ 4164 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 || 4165 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) && 4166 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 || 4167 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0)) 4168 cv_broadcast(&softs->drain_cv); 4169 else 4170 softs->drain_timeid = timeout(aac_check_drain, softs, 4171 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4172 } 4173 mutex_exit(&softs->io_lock); 4174 } 4175 4176 /* 4177 * If not draining the outstanding cmds, drain them. Otherwise, 4178 * only update ndrains. 4179 */ 4180 static void 4181 aac_start_drain(struct aac_softstate *softs) 4182 { 4183 if (softs->ndrains == 0) { 4184 ASSERT(softs->drain_timeid == 0); 4185 softs->drain_timeid = timeout(aac_check_drain, softs, 4186 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4187 } 4188 softs->ndrains++; 4189 } 4190 4191 /* 4192 * Stop the draining thread when no other threads use it any longer. 4193 * Side effect: io_lock may be released in the middle. 4194 */ 4195 static void 4196 aac_stop_drain(struct aac_softstate *softs) 4197 { 4198 softs->ndrains--; 4199 if (softs->ndrains == 0) { 4200 if (softs->drain_timeid != 0) { 4201 timeout_id_t tid = softs->drain_timeid; 4202 4203 softs->drain_timeid = 0; 4204 mutex_exit(&softs->io_lock); 4205 (void) untimeout(tid); 4206 mutex_enter(&softs->io_lock); 4207 } 4208 } 4209 } 4210 4211 /* 4212 * The following function comes from Adaptec: 4213 * 4214 * Once do an IOP reset, basically the driver have to re-initialize the card 4215 * as if up from a cold boot, and the driver is responsible for any IO that 4216 * is outstanding to the adapter at the time of the IOP RESET. And prepare 4217 * for IOP RESET by making the init code modular with the ability to call it 4218 * from multiple places. 4219 */ 4220 static int 4221 aac_reset_adapter(struct aac_softstate *softs) 4222 { 4223 int health; 4224 uint32_t status; 4225 int rval = AAC_IOP_RESET_FAILED; 4226 4227 DBCALLED(softs, 1); 4228 4229 ASSERT(softs->state & AAC_STATE_RESET); 4230 4231 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0); 4232 /* Disable interrupt */ 4233 AAC_DISABLE_INTR(softs); 4234 4235 health = aac_check_adapter_health(softs); 4236 if (health == -1) { 4237 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 4238 goto finish; 4239 } 4240 if (health == 0) /* flush drives if possible */ 4241 (void) aac_shutdown(softs); 4242 4243 /* Execute IOP reset */ 4244 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0, 4245 &status)) != AACOK) { 4246 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 4247 struct aac_fib *fibp; 4248 struct aac_pause_command *pc; 4249 4250 if ((status & 0xf) == 0xf) { 4251 uint32_t wait_count; 4252 4253 /* 4254 * Sunrise Lake has dual cores and we must drag the 4255 * other core with us to reset simultaneously. There 4256 * are 2 bits in the Inbound Reset Control and Status 4257 * Register (offset 0x38) of the Sunrise Lake to reset 4258 * the chip without clearing out the PCI configuration 4259 * info (COMMAND & BARS). 4260 */ 4261 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST); 4262 4263 /* 4264 * We need to wait for 5 seconds before accessing the MU 4265 * again 10000 * 100us = 1000,000us = 1000ms = 1s 4266 */ 4267 wait_count = 5 * 10000; 4268 while (wait_count) { 4269 drv_usecwait(100); /* delay 100 microseconds */ 4270 wait_count--; 4271 } 4272 } else { 4273 if (status == SRB_STATUS_INVALID_REQUEST) 4274 cmn_err(CE_WARN, "!IOP_RESET not supported"); 4275 else /* probably timeout */ 4276 cmn_err(CE_WARN, "!IOP_RESET failed"); 4277 4278 /* Unwind aac_shutdown() */ 4279 fibp = softs->sync_slot.fibp; 4280 pc = (struct aac_pause_command *)&fibp->data[0]; 4281 4282 bzero(pc, sizeof (*pc)); 4283 ddi_put32(acc, &pc->Command, VM_ContainerConfig); 4284 ddi_put32(acc, &pc->Type, CT_PAUSE_IO); 4285 ddi_put32(acc, &pc->Timeout, 1); 4286 ddi_put32(acc, &pc->Min, 1); 4287 ddi_put32(acc, &pc->NoRescan, 1); 4288 4289 (void) aac_sync_fib(softs, ContainerCommand, 4290 AAC_FIB_SIZEOF(struct aac_pause_command)); 4291 4292 if (aac_check_adapter_health(softs) != 0) 4293 ddi_fm_service_impact(softs->devinfo_p, 4294 DDI_SERVICE_LOST); 4295 else 4296 /* 4297 * IOP reset not supported or IOP not reseted 4298 */ 4299 rval = AAC_IOP_RESET_ABNORMAL; 4300 goto finish; 4301 } 4302 } 4303 4304 /* 4305 * Re-read and renegotiate the FIB parameters, as one of the actions 4306 * that can result from an IOP reset is the running of a new firmware 4307 * image. 4308 */ 4309 if (aac_common_attach(softs) != AACOK) 4310 goto finish; 4311 4312 rval = AAC_IOP_RESET_SUCCEED; 4313 4314 finish: 4315 AAC_ENABLE_INTR(softs); 4316 return (rval); 4317 } 4318 4319 static void 4320 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q, 4321 int throttle) 4322 { 4323 /* 4324 * If the bus is draining/quiesced, no changes to the throttles 4325 * are allowed. All throttles should have been set to 0. 4326 */ 4327 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains) 4328 return; 4329 dvp->throttle[q] = throttle; 4330 } 4331 4332 static void 4333 aac_hold_bus(struct aac_softstate *softs, int iocmds) 4334 { 4335 int i, q; 4336 4337 /* Hold bus by holding every device on the bus */ 4338 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4339 if (iocmds & (1 << q)) { 4340 softs->bus_throttle[q] = 0; 4341 for (i = 0; i < AAC_MAX_LD; i++) 4342 aac_set_throttle(softs, 4343 &softs->containers[i].dev, q, 0); 4344 for (i = 0; i < AAC_MAX_PD(softs); i++) 4345 aac_set_throttle(softs, 4346 &softs->nondasds[i].dev, q, 0); 4347 } 4348 } 4349 } 4350 4351 static void 4352 aac_unhold_bus(struct aac_softstate *softs, int iocmds) 4353 { 4354 int i, q; 4355 4356 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4357 if (iocmds & (1 << q)) { 4358 /* 4359 * Should not unhold AAC_IOCMD_ASYNC bus, if it has been 4360 * quiesced or being drained by possibly some quiesce 4361 * threads. 4362 */ 4363 if (q == AAC_CMDQ_ASYNC && ((softs->state & 4364 AAC_STATE_QUIESCED) || softs->ndrains)) 4365 continue; 4366 softs->bus_throttle[q] = softs->total_slots; 4367 for (i = 0; i < AAC_MAX_LD; i++) 4368 aac_set_throttle(softs, 4369 &softs->containers[i].dev, 4370 q, softs->total_slots); 4371 for (i = 0; i < AAC_MAX_PD(softs); i++) 4372 aac_set_throttle(softs, &softs->nondasds[i].dev, 4373 q, softs->total_slots); 4374 } 4375 } 4376 } 4377 4378 static int 4379 aac_do_reset(struct aac_softstate *softs) 4380 { 4381 int health; 4382 int rval; 4383 4384 softs->state |= AAC_STATE_RESET; 4385 health = aac_check_adapter_health(softs); 4386 4387 /* 4388 * Hold off new io commands and wait all outstanding io 4389 * commands to complete. 4390 */ 4391 if (health == 0) { 4392 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC]; 4393 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC]; 4394 4395 if (sync_cmds == 0 && async_cmds == 0) { 4396 rval = AAC_IOP_RESET_SUCCEED; 4397 goto finish; 4398 } 4399 /* 4400 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds 4401 * to complete the outstanding io commands 4402 */ 4403 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10; 4404 int (*intr_handler)(struct aac_softstate *); 4405 4406 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4407 /* 4408 * Poll the adapter by ourselves in case interrupt is disabled 4409 * and to avoid releasing the io_lock. 4410 */ 4411 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 4412 aac_process_intr_new : aac_process_intr_old; 4413 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] || 4414 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) { 4415 drv_usecwait(100); 4416 (void) intr_handler(softs); 4417 timeout--; 4418 } 4419 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4420 4421 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 && 4422 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) { 4423 /* Cmds drained out */ 4424 rval = AAC_IOP_RESET_SUCCEED; 4425 goto finish; 4426 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds || 4427 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) { 4428 /* Cmds not drained out, adapter overloaded */ 4429 rval = AAC_IOP_RESET_ABNORMAL; 4430 goto finish; 4431 } 4432 } 4433 4434 /* 4435 * If a longer waiting time still can't drain any outstanding io 4436 * commands, do IOP reset. 4437 */ 4438 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED) 4439 softs->state |= AAC_STATE_DEAD; 4440 4441 finish: 4442 softs->state &= ~AAC_STATE_RESET; 4443 return (rval); 4444 } 4445 4446 static int 4447 aac_tran_reset(struct scsi_address *ap, int level) 4448 { 4449 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4450 int rval; 4451 4452 DBCALLED(softs, 1); 4453 4454 if (level != RESET_ALL) { 4455 cmn_err(CE_NOTE, "!reset target/lun not supported"); 4456 return (0); 4457 } 4458 4459 mutex_enter(&softs->io_lock); 4460 switch (rval = aac_do_reset(softs)) { 4461 case AAC_IOP_RESET_SUCCEED: 4462 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC, 4463 NULL, CMD_RESET); 4464 aac_start_waiting_io(softs); 4465 break; 4466 case AAC_IOP_RESET_FAILED: 4467 /* Abort IOCTL cmds when adapter is dead */ 4468 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET); 4469 break; 4470 case AAC_IOP_RESET_ABNORMAL: 4471 aac_start_waiting_io(softs); 4472 } 4473 mutex_exit(&softs->io_lock); 4474 4475 aac_drain_comp_q(softs); 4476 return (rval == 0); 4477 } 4478 4479 static int 4480 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 4481 { 4482 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4483 4484 DBCALLED(softs, 1); 4485 4486 mutex_enter(&softs->io_lock); 4487 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED); 4488 mutex_exit(&softs->io_lock); 4489 4490 aac_drain_comp_q(softs); 4491 return (1); 4492 } 4493 4494 void 4495 aac_free_dmamap(struct aac_cmd *acp) 4496 { 4497 /* Free dma mapping */ 4498 if (acp->flags & AAC_CMD_DMA_VALID) { 4499 ASSERT(acp->buf_dma_handle); 4500 (void) ddi_dma_unbind_handle(acp->buf_dma_handle); 4501 acp->flags &= ~AAC_CMD_DMA_VALID; 4502 } 4503 4504 if (acp->abp != NULL) { /* free non-aligned buf DMA */ 4505 ASSERT(acp->buf_dma_handle); 4506 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp) 4507 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr, 4508 (uint8_t *)acp->abp, acp->bp->b_bcount, 4509 DDI_DEV_AUTOINCR); 4510 ddi_dma_mem_free(&acp->abh); 4511 acp->abp = NULL; 4512 } 4513 4514 if (acp->buf_dma_handle) { 4515 ddi_dma_free_handle(&acp->buf_dma_handle); 4516 acp->buf_dma_handle = NULL; 4517 } 4518 } 4519 4520 static void 4521 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 4522 { 4523 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported", 4524 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd); 4525 aac_free_dmamap(acp); 4526 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0); 4527 aac_soft_callback(softs, acp); 4528 } 4529 4530 /* 4531 * Handle command to logical device 4532 */ 4533 static int 4534 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp) 4535 { 4536 struct aac_container *dvp; 4537 struct scsi_pkt *pkt; 4538 union scsi_cdb *cdbp; 4539 struct buf *bp; 4540 int rval; 4541 4542 dvp = (struct aac_container *)acp->dvp; 4543 pkt = acp->pkt; 4544 cdbp = (void *)pkt->pkt_cdbp; 4545 bp = acp->bp; 4546 4547 switch (cdbp->scc_cmd) { 4548 case SCMD_INQUIRY: /* inquiry */ 4549 aac_free_dmamap(acp); 4550 aac_inquiry(softs, pkt, cdbp, bp); 4551 aac_soft_callback(softs, acp); 4552 rval = TRAN_ACCEPT; 4553 break; 4554 4555 case SCMD_READ_CAPACITY: /* read capacity */ 4556 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4557 struct scsi_capacity cap; 4558 uint64_t last_lba; 4559 4560 /* check 64-bit LBA */ 4561 last_lba = dvp->size - 1; 4562 if (last_lba > 0xffffffffull) { 4563 cap.capacity = 0xfffffffful; 4564 } else { 4565 cap.capacity = BE_32(last_lba); 4566 } 4567 cap.lbasize = BE_32(AAC_SECTOR_SIZE); 4568 4569 aac_free_dmamap(acp); 4570 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4571 bp_mapin(bp); 4572 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8)); 4573 pkt->pkt_state |= STATE_XFERRED_DATA; 4574 } 4575 aac_soft_callback(softs, acp); 4576 rval = TRAN_ACCEPT; 4577 break; 4578 4579 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */ 4580 /* Check if containers need 64-bit LBA support */ 4581 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) { 4582 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4583 struct scsi_capacity_16 cap16; 4584 int cap_len = sizeof (struct scsi_capacity_16); 4585 4586 bzero(&cap16, cap_len); 4587 cap16.sc_capacity = BE_64(dvp->size - 1); 4588 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE); 4589 4590 aac_free_dmamap(acp); 4591 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4592 bp_mapin(bp); 4593 bcopy(&cap16, bp->b_un.b_addr, 4594 min(bp->b_bcount, cap_len)); 4595 pkt->pkt_state |= STATE_XFERRED_DATA; 4596 } 4597 aac_soft_callback(softs, acp); 4598 } else { 4599 aac_unknown_scmd(softs, acp); 4600 } 4601 rval = TRAN_ACCEPT; 4602 break; 4603 4604 case SCMD_READ_G4: /* read_16 */ 4605 case SCMD_WRITE_G4: /* write_16 */ 4606 if (softs->flags & AAC_FLAGS_RAW_IO) { 4607 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 4608 acp->blkno = ((uint64_t) \ 4609 GETG4ADDR(cdbp) << 32) | \ 4610 (uint32_t)GETG4ADDRTL(cdbp); 4611 goto do_io; 4612 } 4613 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported"); 4614 aac_unknown_scmd(softs, acp); 4615 rval = TRAN_ACCEPT; 4616 break; 4617 4618 case SCMD_READ: /* read_6 */ 4619 case SCMD_WRITE: /* write_6 */ 4620 acp->blkno = GETG0ADDR(cdbp); 4621 goto do_io; 4622 4623 case SCMD_READ_G5: /* read_12 */ 4624 case SCMD_WRITE_G5: /* write_12 */ 4625 acp->blkno = GETG5ADDR(cdbp); 4626 goto do_io; 4627 4628 case SCMD_READ_G1: /* read_10 */ 4629 case SCMD_WRITE_G1: /* write_10 */ 4630 acp->blkno = (uint32_t)GETG1ADDR(cdbp); 4631 do_io: 4632 if (acp->flags & AAC_CMD_DMA_VALID) { 4633 uint64_t cnt_size = dvp->size; 4634 4635 /* 4636 * If LBA > array size AND rawio, the 4637 * adapter may hang. So check it before 4638 * sending. 4639 * NOTE: (blkno + blkcnt) may overflow 4640 */ 4641 if ((acp->blkno < cnt_size) && 4642 ((acp->blkno + acp->bcount / 4643 AAC_BLK_SIZE) <= cnt_size)) { 4644 rval = aac_do_io(softs, acp); 4645 } else { 4646 /* 4647 * Request exceeds the capacity of disk, 4648 * set error block number to last LBA 4649 * + 1. 4650 */ 4651 aac_set_arq_data(pkt, 4652 KEY_ILLEGAL_REQUEST, 0x21, 4653 0x00, cnt_size); 4654 aac_soft_callback(softs, acp); 4655 rval = TRAN_ACCEPT; 4656 } 4657 } else if (acp->bcount == 0) { 4658 /* For 0 length IO, just return ok */ 4659 aac_soft_callback(softs, acp); 4660 rval = TRAN_ACCEPT; 4661 } else { 4662 rval = TRAN_BADPKT; 4663 } 4664 break; 4665 4666 case SCMD_MODE_SENSE: /* mode_sense_6 */ 4667 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */ 4668 int capacity; 4669 4670 aac_free_dmamap(acp); 4671 if (dvp->size > 0xffffffffull) 4672 capacity = 0xfffffffful; /* 64-bit LBA */ 4673 else 4674 capacity = dvp->size; 4675 aac_mode_sense(softs, pkt, cdbp, bp, capacity); 4676 aac_soft_callback(softs, acp); 4677 rval = TRAN_ACCEPT; 4678 break; 4679 } 4680 4681 case SCMD_START_STOP: 4682 if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) { 4683 acp->aac_cmd_fib = aac_cmd_fib_startstop; 4684 acp->ac_comp = aac_startstop_complete; 4685 rval = aac_do_io(softs, acp); 4686 break; 4687 } 4688 /* FALLTHRU */ 4689 case SCMD_TEST_UNIT_READY: 4690 case SCMD_REQUEST_SENSE: 4691 case SCMD_FORMAT: 4692 aac_free_dmamap(acp); 4693 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4694 if (acp->flags & AAC_CMD_BUF_READ) { 4695 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4696 bp_mapin(bp); 4697 bzero(bp->b_un.b_addr, bp->b_bcount); 4698 } 4699 pkt->pkt_state |= STATE_XFERRED_DATA; 4700 } 4701 aac_soft_callback(softs, acp); 4702 rval = TRAN_ACCEPT; 4703 break; 4704 4705 case SCMD_SYNCHRONIZE_CACHE: 4706 acp->flags |= AAC_CMD_NTAG; 4707 acp->aac_cmd_fib = aac_cmd_fib_sync; 4708 acp->ac_comp = aac_synccache_complete; 4709 rval = aac_do_io(softs, acp); 4710 break; 4711 4712 case SCMD_DOORLOCK: 4713 aac_free_dmamap(acp); 4714 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0; 4715 aac_soft_callback(softs, acp); 4716 rval = TRAN_ACCEPT; 4717 break; 4718 4719 default: /* unknown command */ 4720 aac_unknown_scmd(softs, acp); 4721 rval = TRAN_ACCEPT; 4722 break; 4723 } 4724 4725 return (rval); 4726 } 4727 4728 static int 4729 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) 4730 { 4731 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4732 struct aac_cmd *acp = PKT2AC(pkt); 4733 struct aac_device *dvp = acp->dvp; 4734 int rval; 4735 4736 DBCALLED(softs, 2); 4737 4738 /* 4739 * Reinitialize some fields of ac and pkt; the packet may 4740 * have been resubmitted 4741 */ 4742 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \ 4743 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID; 4744 acp->timeout = acp->pkt->pkt_time; 4745 if (pkt->pkt_flags & FLAG_NOINTR) 4746 acp->flags |= AAC_CMD_NO_INTR; 4747 #ifdef DEBUG 4748 acp->fib_flags = AACDB_FLAGS_FIB_SCMD; 4749 #endif 4750 pkt->pkt_reason = CMD_CMPLT; 4751 pkt->pkt_state = 0; 4752 pkt->pkt_statistics = 0; 4753 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */ 4754 4755 if (acp->flags & AAC_CMD_DMA_VALID) { 4756 pkt->pkt_resid = acp->bcount; 4757 /* Consistent packets need to be sync'ed first */ 4758 if ((acp->flags & AAC_CMD_CONSISTENT) && 4759 (acp->flags & AAC_CMD_BUF_WRITE)) 4760 if (aac_dma_sync_ac(acp) != AACOK) { 4761 ddi_fm_service_impact(softs->devinfo_p, 4762 DDI_SERVICE_UNAFFECTED); 4763 return (TRAN_BADPKT); 4764 } 4765 } else { 4766 pkt->pkt_resid = 0; 4767 } 4768 4769 mutex_enter(&softs->io_lock); 4770 AACDB_PRINT_SCMD(softs, acp); 4771 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) && 4772 !(softs->state & AAC_STATE_DEAD)) { 4773 if (dvp->type == AAC_DEV_LD) { 4774 if (ap->a_lun == 0) 4775 rval = aac_tran_start_ld(softs, acp); 4776 else 4777 goto error; 4778 } else { 4779 rval = aac_do_io(softs, acp); 4780 } 4781 } else { 4782 error: 4783 #ifdef DEBUG 4784 if (!(softs->state & AAC_STATE_DEAD)) { 4785 AACDB_PRINT_TRAN(softs, 4786 "Cannot send cmd to target t%dL%d: %s", 4787 ap->a_target, ap->a_lun, 4788 "target invalid"); 4789 } else { 4790 AACDB_PRINT(softs, CE_WARN, 4791 "Cannot send cmd to target t%dL%d: %s", 4792 ap->a_target, ap->a_lun, 4793 "adapter dead"); 4794 } 4795 #endif 4796 rval = TRAN_FATAL_ERROR; 4797 } 4798 mutex_exit(&softs->io_lock); 4799 return (rval); 4800 } 4801 4802 static int 4803 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom) 4804 { 4805 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4806 struct aac_device *dvp; 4807 int rval; 4808 4809 DBCALLED(softs, 2); 4810 4811 /* We don't allow inquiring about capabilities for other targets */ 4812 if (cap == NULL || whom == 0) { 4813 AACDB_PRINT(softs, CE_WARN, 4814 "GetCap> %s not supported: whom=%d", cap, whom); 4815 return (-1); 4816 } 4817 4818 mutex_enter(&softs->io_lock); 4819 dvp = AAC_DEV(softs, ap->a_target); 4820 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 4821 mutex_exit(&softs->io_lock); 4822 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap", 4823 ap->a_target, ap->a_lun); 4824 return (-1); 4825 } 4826 4827 switch (scsi_hba_lookup_capstr(cap)) { 4828 case SCSI_CAP_ARQ: /* auto request sense */ 4829 rval = 1; 4830 break; 4831 case SCSI_CAP_UNTAGGED_QING: 4832 case SCSI_CAP_TAGGED_QING: 4833 rval = 1; 4834 break; 4835 case SCSI_CAP_DMA_MAX: 4836 rval = softs->dma_max; 4837 break; 4838 default: 4839 rval = -1; 4840 break; 4841 } 4842 mutex_exit(&softs->io_lock); 4843 4844 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d", 4845 cap, ap->a_target, ap->a_lun, rval); 4846 return (rval); 4847 } 4848 4849 /*ARGSUSED*/ 4850 static int 4851 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 4852 { 4853 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4854 struct aac_device *dvp; 4855 int rval; 4856 4857 DBCALLED(softs, 2); 4858 4859 /* We don't allow inquiring about capabilities for other targets */ 4860 if (cap == NULL || whom == 0) { 4861 AACDB_PRINT(softs, CE_WARN, 4862 "SetCap> %s not supported: whom=%d", cap, whom); 4863 return (-1); 4864 } 4865 4866 mutex_enter(&softs->io_lock); 4867 dvp = AAC_DEV(softs, ap->a_target); 4868 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 4869 mutex_exit(&softs->io_lock); 4870 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap", 4871 ap->a_target, ap->a_lun); 4872 return (-1); 4873 } 4874 4875 switch (scsi_hba_lookup_capstr(cap)) { 4876 case SCSI_CAP_ARQ: 4877 /* Force auto request sense */ 4878 rval = (value == 1) ? 1 : 0; 4879 break; 4880 case SCSI_CAP_UNTAGGED_QING: 4881 case SCSI_CAP_TAGGED_QING: 4882 rval = (value == 1) ? 1 : 0; 4883 break; 4884 default: 4885 rval = -1; 4886 break; 4887 } 4888 mutex_exit(&softs->io_lock); 4889 4890 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d", 4891 cap, ap->a_target, ap->a_lun, value, rval); 4892 return (rval); 4893 } 4894 4895 static void 4896 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 4897 { 4898 struct aac_cmd *acp = PKT2AC(pkt); 4899 4900 DBCALLED(NULL, 2); 4901 4902 if (acp->sgt) { 4903 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 4904 acp->left_cookien); 4905 } 4906 aac_free_dmamap(acp); 4907 ASSERT(acp->slotp == NULL); 4908 scsi_hba_pkt_free(ap, pkt); 4909 } 4910 4911 int 4912 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp, 4913 struct buf *bp, int flags, int (*cb)(), caddr_t arg) 4914 { 4915 int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP; 4916 uint_t oldcookiec; 4917 int bioerr; 4918 int rval; 4919 4920 oldcookiec = acp->left_cookien; 4921 4922 /* Move window to build s/g map */ 4923 if (acp->total_nwin > 0) { 4924 if (++acp->cur_win < acp->total_nwin) { 4925 off_t off; 4926 size_t len; 4927 4928 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win, 4929 &off, &len, &acp->cookie, &acp->left_cookien); 4930 if (rval == DDI_SUCCESS) 4931 goto get_dma_cookies; 4932 AACDB_PRINT(softs, CE_WARN, 4933 "ddi_dma_getwin() fail %d", rval); 4934 return (AACERR); 4935 } 4936 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer"); 4937 return (AACERR); 4938 } 4939 4940 /* We need to transfer data, so we alloc DMA resources for this pkt */ 4941 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) { 4942 uint_t dma_flags = 0; 4943 struct aac_sge *sge; 4944 4945 /* 4946 * We will still use this point to fake some 4947 * infomation in tran_start 4948 */ 4949 acp->bp = bp; 4950 4951 /* Set dma flags */ 4952 if (BUF_IS_READ(bp)) { 4953 dma_flags |= DDI_DMA_READ; 4954 acp->flags |= AAC_CMD_BUF_READ; 4955 } else { 4956 dma_flags |= DDI_DMA_WRITE; 4957 acp->flags |= AAC_CMD_BUF_WRITE; 4958 } 4959 if (flags & PKT_CONSISTENT) 4960 dma_flags |= DDI_DMA_CONSISTENT; 4961 if (flags & PKT_DMA_PARTIAL) 4962 dma_flags |= DDI_DMA_PARTIAL; 4963 4964 /* Alloc buf dma handle */ 4965 if (!acp->buf_dma_handle) { 4966 rval = ddi_dma_alloc_handle(softs->devinfo_p, 4967 &softs->buf_dma_attr, cb, arg, 4968 &acp->buf_dma_handle); 4969 if (rval != DDI_SUCCESS) { 4970 AACDB_PRINT(softs, CE_WARN, 4971 "Can't allocate DMA handle, errno=%d", 4972 rval); 4973 goto error_out; 4974 } 4975 } 4976 4977 /* Bind buf */ 4978 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) { 4979 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle, 4980 bp, dma_flags, cb, arg, &acp->cookie, 4981 &acp->left_cookien); 4982 } else { 4983 size_t bufsz; 4984 4985 AACDB_PRINT_TRAN(softs, 4986 "non-aligned buffer: addr=0x%p, cnt=%lu", 4987 (void *)bp->b_un.b_addr, bp->b_bcount); 4988 if (bp->b_flags & (B_PAGEIO|B_PHYS)) 4989 bp_mapin(bp); 4990 4991 rval = ddi_dma_mem_alloc(acp->buf_dma_handle, 4992 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN), 4993 &softs->acc_attr, DDI_DMA_STREAMING, 4994 cb, arg, &acp->abp, &bufsz, &acp->abh); 4995 4996 if (rval != DDI_SUCCESS) { 4997 AACDB_PRINT(softs, CE_NOTE, 4998 "Cannot alloc DMA to non-aligned buf"); 4999 bioerr = 0; 5000 goto error_out; 5001 } 5002 5003 if (acp->flags & AAC_CMD_BUF_WRITE) 5004 ddi_rep_put8(acp->abh, 5005 (uint8_t *)bp->b_un.b_addr, 5006 (uint8_t *)acp->abp, bp->b_bcount, 5007 DDI_DEV_AUTOINCR); 5008 5009 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle, 5010 NULL, acp->abp, bufsz, dma_flags, cb, arg, 5011 &acp->cookie, &acp->left_cookien); 5012 } 5013 5014 switch (rval) { 5015 case DDI_DMA_PARTIAL_MAP: 5016 if (ddi_dma_numwin(acp->buf_dma_handle, 5017 &acp->total_nwin) == DDI_FAILURE) { 5018 AACDB_PRINT(softs, CE_WARN, 5019 "Cannot get number of DMA windows"); 5020 bioerr = 0; 5021 goto error_out; 5022 } 5023 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5024 acp->left_cookien); 5025 acp->cur_win = 0; 5026 break; 5027 5028 case DDI_DMA_MAPPED: 5029 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5030 acp->left_cookien); 5031 acp->cur_win = 0; 5032 acp->total_nwin = 1; 5033 break; 5034 5035 case DDI_DMA_NORESOURCES: 5036 bioerr = 0; 5037 AACDB_PRINT(softs, CE_WARN, 5038 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES"); 5039 goto error_out; 5040 case DDI_DMA_BADATTR: 5041 case DDI_DMA_NOMAPPING: 5042 bioerr = EFAULT; 5043 AACDB_PRINT(softs, CE_WARN, 5044 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING"); 5045 goto error_out; 5046 case DDI_DMA_TOOBIG: 5047 bioerr = EINVAL; 5048 AACDB_PRINT(softs, CE_WARN, 5049 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)", 5050 bp->b_bcount); 5051 goto error_out; 5052 default: 5053 bioerr = EINVAL; 5054 AACDB_PRINT(softs, CE_WARN, 5055 "Cannot bind buf for DMA: %d", rval); 5056 goto error_out; 5057 } 5058 acp->flags |= AAC_CMD_DMA_VALID; 5059 5060 get_dma_cookies: 5061 ASSERT(acp->left_cookien > 0); 5062 if (acp->left_cookien > softs->aac_sg_tablesize) { 5063 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d", 5064 acp->left_cookien); 5065 bioerr = EINVAL; 5066 goto error_out; 5067 } 5068 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) { 5069 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 5070 oldcookiec); 5071 acp->sgt = NULL; 5072 } 5073 if (acp->sgt == NULL) { 5074 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \ 5075 acp->left_cookien, kf); 5076 if (acp->sgt == NULL) { 5077 AACDB_PRINT(softs, CE_WARN, 5078 "sgt kmem_alloc fail"); 5079 bioerr = ENOMEM; 5080 goto error_out; 5081 } 5082 } 5083 5084 sge = &acp->sgt[0]; 5085 sge->bcount = acp->cookie.dmac_size; 5086 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5087 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5088 acp->bcount = acp->cookie.dmac_size; 5089 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) { 5090 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie); 5091 sge->bcount = acp->cookie.dmac_size; 5092 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5093 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5094 acp->bcount += acp->cookie.dmac_size; 5095 } 5096 5097 /* 5098 * Note: The old DMA engine do not correctly handle 5099 * dma_attr_maxxfer attribute. So we have to ensure 5100 * it by ourself. 5101 */ 5102 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) { 5103 AACDB_PRINT(softs, CE_NOTE, 5104 "large xfer size received %d\n", acp->bcount); 5105 bioerr = EINVAL; 5106 goto error_out; 5107 } 5108 5109 acp->total_xfer += acp->bcount; 5110 5111 if (acp->pkt) { 5112 /* Return remaining byte count */ 5113 if (acp->total_xfer <= bp->b_bcount) { 5114 acp->pkt->pkt_resid = bp->b_bcount - \ 5115 acp->total_xfer; 5116 } else { 5117 /* 5118 * Allocated DMA size is greater than the buf 5119 * size of bp. This is caused by devices like 5120 * tape. we have extra bytes allocated, but 5121 * the packet residual has to stay correct. 5122 */ 5123 acp->pkt->pkt_resid = 0; 5124 } 5125 AACDB_PRINT_TRAN(softs, 5126 "bp=0x%p, xfered=%d/%d, resid=%d", 5127 (void *)bp->b_un.b_addr, (int)acp->total_xfer, 5128 (int)bp->b_bcount, (int)acp->pkt->pkt_resid); 5129 } 5130 } 5131 return (AACOK); 5132 5133 error_out: 5134 bioerror(bp, bioerr); 5135 return (AACERR); 5136 } 5137 5138 static struct scsi_pkt * 5139 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 5140 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags, 5141 int (*callback)(), caddr_t arg) 5142 { 5143 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5144 struct aac_cmd *acp, *new_acp; 5145 5146 DBCALLED(softs, 2); 5147 5148 /* Allocate pkt */ 5149 if (pkt == NULL) { 5150 int slen; 5151 5152 /* Force auto request sense */ 5153 slen = (statuslen > softs->slen) ? statuslen : softs->slen; 5154 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen, 5155 slen, tgtlen, sizeof (struct aac_cmd), callback, arg); 5156 if (pkt == NULL) { 5157 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed"); 5158 return (NULL); 5159 } 5160 acp = new_acp = PKT2AC(pkt); 5161 acp->pkt = pkt; 5162 acp->cmdlen = cmdlen; 5163 5164 if (ap->a_target < AAC_MAX_LD) { 5165 acp->dvp = &softs->containers[ap->a_target].dev; 5166 acp->aac_cmd_fib = softs->aac_cmd_fib; 5167 acp->ac_comp = aac_ld_complete; 5168 } else { 5169 _NOTE(ASSUMING_PROTECTED(softs->nondasds)) 5170 5171 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev; 5172 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi; 5173 acp->ac_comp = aac_pd_complete; 5174 } 5175 } else { 5176 acp = PKT2AC(pkt); 5177 new_acp = NULL; 5178 } 5179 5180 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK) 5181 return (pkt); 5182 5183 if (new_acp) 5184 aac_tran_destroy_pkt(ap, pkt); 5185 return (NULL); 5186 } 5187 5188 /* 5189 * tran_sync_pkt(9E) - explicit DMA synchronization 5190 */ 5191 /*ARGSUSED*/ 5192 static void 5193 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5194 { 5195 struct aac_cmd *acp = PKT2AC(pkt); 5196 5197 DBCALLED(NULL, 2); 5198 5199 if (aac_dma_sync_ac(acp) != AACOK) 5200 ddi_fm_service_impact( 5201 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p, 5202 DDI_SERVICE_UNAFFECTED); 5203 } 5204 5205 /* 5206 * tran_dmafree(9E) - deallocate DMA resources allocated for command 5207 */ 5208 /*ARGSUSED*/ 5209 static void 5210 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 5211 { 5212 struct aac_cmd *acp = PKT2AC(pkt); 5213 5214 DBCALLED(NULL, 2); 5215 5216 aac_free_dmamap(acp); 5217 } 5218 5219 static int 5220 aac_do_quiesce(struct aac_softstate *softs) 5221 { 5222 aac_hold_bus(softs, AAC_IOCMD_ASYNC); 5223 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 5224 aac_start_drain(softs); 5225 do { 5226 if (cv_wait_sig(&softs->drain_cv, 5227 &softs->io_lock) == 0) { 5228 /* Quiesce has been interrupted */ 5229 aac_stop_drain(softs); 5230 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5231 aac_start_waiting_io(softs); 5232 return (AACERR); 5233 } 5234 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]); 5235 aac_stop_drain(softs); 5236 } 5237 5238 softs->state |= AAC_STATE_QUIESCED; 5239 return (AACOK); 5240 } 5241 5242 static int 5243 aac_tran_quiesce(dev_info_t *dip) 5244 { 5245 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5246 int rval; 5247 5248 DBCALLED(softs, 1); 5249 5250 mutex_enter(&softs->io_lock); 5251 if (aac_do_quiesce(softs) == AACOK) 5252 rval = 0; 5253 else 5254 rval = 1; 5255 mutex_exit(&softs->io_lock); 5256 return (rval); 5257 } 5258 5259 static int 5260 aac_do_unquiesce(struct aac_softstate *softs) 5261 { 5262 softs->state &= ~AAC_STATE_QUIESCED; 5263 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5264 5265 aac_start_waiting_io(softs); 5266 return (AACOK); 5267 } 5268 5269 static int 5270 aac_tran_unquiesce(dev_info_t *dip) 5271 { 5272 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5273 int rval; 5274 5275 DBCALLED(softs, 1); 5276 5277 mutex_enter(&softs->io_lock); 5278 if (aac_do_unquiesce(softs) == AACOK) 5279 rval = 0; 5280 else 5281 rval = 1; 5282 mutex_exit(&softs->io_lock); 5283 return (rval); 5284 } 5285 5286 static int 5287 aac_hba_setup(struct aac_softstate *softs) 5288 { 5289 scsi_hba_tran_t *hba_tran; 5290 int rval; 5291 5292 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP); 5293 if (hba_tran == NULL) 5294 return (AACERR); 5295 hba_tran->tran_hba_private = softs; 5296 hba_tran->tran_tgt_init = aac_tran_tgt_init; 5297 hba_tran->tran_tgt_free = aac_tran_tgt_free; 5298 hba_tran->tran_tgt_probe = scsi_hba_probe; 5299 hba_tran->tran_start = aac_tran_start; 5300 hba_tran->tran_getcap = aac_tran_getcap; 5301 hba_tran->tran_setcap = aac_tran_setcap; 5302 hba_tran->tran_init_pkt = aac_tran_init_pkt; 5303 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt; 5304 hba_tran->tran_reset = aac_tran_reset; 5305 hba_tran->tran_abort = aac_tran_abort; 5306 hba_tran->tran_sync_pkt = aac_tran_sync_pkt; 5307 hba_tran->tran_dmafree = aac_tran_dmafree; 5308 hba_tran->tran_quiesce = aac_tran_quiesce; 5309 hba_tran->tran_unquiesce = aac_tran_unquiesce; 5310 hba_tran->tran_bus_config = aac_tran_bus_config; 5311 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr, 5312 hba_tran, 0); 5313 if (rval != DDI_SUCCESS) { 5314 scsi_hba_tran_free(hba_tran); 5315 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed"); 5316 return (AACERR); 5317 } 5318 5319 softs->hba_tran = hba_tran; 5320 return (AACOK); 5321 } 5322 5323 /* 5324 * FIB setup operations 5325 */ 5326 5327 /* 5328 * Init FIB header 5329 */ 5330 static void 5331 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_slot *slotp, 5332 uint16_t cmd, uint16_t fib_size) 5333 { 5334 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5335 struct aac_fib *fibp = slotp->fibp; 5336 uint32_t xfer_state; 5337 5338 xfer_state = 5339 AAC_FIBSTATE_HOSTOWNED | 5340 AAC_FIBSTATE_INITIALISED | 5341 AAC_FIBSTATE_EMPTY | 5342 AAC_FIBSTATE_FROMHOST | 5343 AAC_FIBSTATE_REXPECTED | 5344 AAC_FIBSTATE_NORM; 5345 if (slotp->acp && !(slotp->acp->flags & AAC_CMD_SYNC)) { 5346 xfer_state |= 5347 AAC_FIBSTATE_ASYNC | 5348 AAC_FIBSTATE_FAST_RESPONSE /* enable fast io */; 5349 ddi_put16(acc, &fibp->Header.SenderSize, 5350 softs->aac_max_fib_size); 5351 } else { 5352 ddi_put16(acc, &fibp->Header.SenderSize, AAC_FIB_SIZE); 5353 } 5354 5355 ddi_put32(acc, &fibp->Header.XferState, xfer_state); 5356 ddi_put16(acc, &fibp->Header.Command, cmd); 5357 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB); 5358 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */ 5359 ddi_put16(acc, &fibp->Header.Size, fib_size); 5360 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2)); 5361 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5362 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */ 5363 } 5364 5365 /* 5366 * Init FIB for raw IO command 5367 */ 5368 static void 5369 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp) 5370 { 5371 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5372 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0]; 5373 struct aac_sg_entryraw *sgp; 5374 struct aac_sge *sge; 5375 5376 /* Calculate FIB size */ 5377 acp->fib_size = sizeof (struct aac_fib_header) + \ 5378 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \ 5379 sizeof (struct aac_sg_entryraw); 5380 5381 aac_cmd_fib_header(softs, acp->slotp, RawIo, acp->fib_size); 5382 5383 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0); 5384 ddi_put16(acc, &io->BpTotal, 0); 5385 ddi_put16(acc, &io->BpComplete, 0); 5386 5387 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno)); 5388 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno)); 5389 ddi_put16(acc, &io->ContainerId, 5390 ((struct aac_container *)acp->dvp)->cid); 5391 5392 /* Fill SG table */ 5393 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien); 5394 ddi_put32(acc, &io->ByteCount, acp->bcount); 5395 5396 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0]; 5397 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5398 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5399 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5400 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5401 sgp->Next = 0; 5402 sgp->Prev = 0; 5403 sgp->Flags = 0; 5404 } 5405 } 5406 5407 /* Init FIB for 64-bit block IO command */ 5408 static void 5409 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp) 5410 { 5411 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5412 struct aac_blockread64 *br = (struct aac_blockread64 *) \ 5413 &acp->slotp->fibp->data[0]; 5414 struct aac_sg_entry64 *sgp; 5415 struct aac_sge *sge; 5416 5417 acp->fib_size = sizeof (struct aac_fib_header) + \ 5418 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \ 5419 sizeof (struct aac_sg_entry64); 5420 5421 aac_cmd_fib_header(softs, acp->slotp, ContainerCommand64, 5422 acp->fib_size); 5423 5424 /* 5425 * The definitions for aac_blockread64 and aac_blockwrite64 5426 * are the same. 5427 */ 5428 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5429 ddi_put16(acc, &br->ContainerId, 5430 ((struct aac_container *)acp->dvp)->cid); 5431 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ? 5432 VM_CtHostRead64 : VM_CtHostWrite64); 5433 ddi_put16(acc, &br->Pad, 0); 5434 ddi_put16(acc, &br->Flags, 0); 5435 5436 /* Fill SG table */ 5437 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien); 5438 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE); 5439 5440 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0]; 5441 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5442 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5443 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5444 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5445 } 5446 } 5447 5448 /* Init FIB for block IO command */ 5449 static void 5450 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp) 5451 { 5452 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5453 struct aac_blockread *br = (struct aac_blockread *) \ 5454 &acp->slotp->fibp->data[0]; 5455 struct aac_sg_entry *sgp; 5456 struct aac_sge *sge = &acp->sgt[0]; 5457 5458 if (acp->flags & AAC_CMD_BUF_READ) { 5459 acp->fib_size = sizeof (struct aac_fib_header) + \ 5460 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \ 5461 sizeof (struct aac_sg_entry); 5462 5463 ddi_put32(acc, &br->Command, VM_CtBlockRead); 5464 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien); 5465 sgp = &br->SgMap.SgEntry[0]; 5466 } else { 5467 struct aac_blockwrite *bw = (struct aac_blockwrite *)br; 5468 5469 acp->fib_size = sizeof (struct aac_fib_header) + \ 5470 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \ 5471 sizeof (struct aac_sg_entry); 5472 5473 ddi_put32(acc, &bw->Command, VM_CtBlockWrite); 5474 ddi_put32(acc, &bw->Stable, CUNSTABLE); 5475 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien); 5476 sgp = &bw->SgMap.SgEntry[0]; 5477 } 5478 aac_cmd_fib_header(softs, acp->slotp, ContainerCommand, acp->fib_size); 5479 5480 /* 5481 * aac_blockread and aac_blockwrite have the similar 5482 * structure head, so use br for bw here 5483 */ 5484 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5485 ddi_put32(acc, &br->ContainerId, 5486 ((struct aac_container *)acp->dvp)->cid); 5487 ddi_put32(acc, &br->ByteCount, acp->bcount); 5488 5489 /* Fill SG table */ 5490 for (sge = &acp->sgt[0]; 5491 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5492 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5493 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5494 } 5495 } 5496 5497 /*ARGSUSED*/ 5498 void 5499 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp) 5500 { 5501 struct aac_slot *slotp = acp->slotp; 5502 struct aac_fib *fibp = slotp->fibp; 5503 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5504 5505 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp, 5506 acp->fib_size, /* only copy data of needed length */ 5507 DDI_DEV_AUTOINCR); 5508 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5509 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2); 5510 } 5511 5512 static void 5513 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp) 5514 { 5515 struct aac_slot *slotp = acp->slotp; 5516 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5517 struct aac_synchronize_command *sync = 5518 (struct aac_synchronize_command *)&slotp->fibp->data[0]; 5519 5520 acp->fib_size = sizeof (struct aac_fib_header) + \ 5521 sizeof (struct aac_synchronize_command); 5522 5523 aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size); 5524 ddi_put32(acc, &sync->Command, VM_ContainerConfig); 5525 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE); 5526 ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid); 5527 ddi_put32(acc, &sync->Count, 5528 sizeof (((struct aac_synchronize_reply *)0)->Data)); 5529 } 5530 5531 /* 5532 * Start/Stop unit (Power Management) 5533 */ 5534 static void 5535 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp) 5536 { 5537 struct aac_slot *slotp = acp->slotp; 5538 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5539 struct aac_Container *cmd = 5540 (struct aac_Container *)&slotp->fibp->data[0]; 5541 union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp; 5542 5543 acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container); 5544 5545 aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size); 5546 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 5547 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 5548 ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT); 5549 ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \ 5550 AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT); 5551 ddi_put32(acc, &cmd->CTCommand.param[1], 5552 ((struct aac_container *)acp->dvp)->cid); 5553 ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1); 5554 } 5555 5556 /* 5557 * Init FIB for pass-through SCMD 5558 */ 5559 static void 5560 aac_cmd_fib_srb(struct aac_cmd *acp) 5561 { 5562 struct aac_slot *slotp = acp->slotp; 5563 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5564 struct aac_srb *srb = (struct aac_srb *)&slotp->fibp->data[0]; 5565 uint8_t *cdb; 5566 5567 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi); 5568 ddi_put32(acc, &srb->retry_limit, 0); 5569 ddi_put32(acc, &srb->cdb_size, acp->cmdlen); 5570 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */ 5571 if (acp->fibp == NULL) { 5572 if (acp->flags & AAC_CMD_BUF_READ) 5573 ddi_put32(acc, &srb->flags, SRB_DataIn); 5574 else if (acp->flags & AAC_CMD_BUF_WRITE) 5575 ddi_put32(acc, &srb->flags, SRB_DataOut); 5576 ddi_put32(acc, &srb->channel, 5577 ((struct aac_nondasd *)acp->dvp)->bus); 5578 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid); 5579 ddi_put32(acc, &srb->lun, 0); 5580 cdb = acp->pkt->pkt_cdbp; 5581 } else { 5582 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0]; 5583 5584 ddi_put32(acc, &srb->flags, srb0->flags); 5585 ddi_put32(acc, &srb->channel, srb0->channel); 5586 ddi_put32(acc, &srb->id, srb0->id); 5587 ddi_put32(acc, &srb->lun, srb0->lun); 5588 cdb = srb0->cdb; 5589 } 5590 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR); 5591 } 5592 5593 static void 5594 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp) 5595 { 5596 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5597 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5598 struct aac_sg_entry *sgp; 5599 struct aac_sge *sge; 5600 5601 acp->fib_size = sizeof (struct aac_fib_header) + \ 5602 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5603 acp->left_cookien * sizeof (struct aac_sg_entry); 5604 5605 /* Fill FIB and SRB headers, and copy cdb */ 5606 aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommand, acp->fib_size); 5607 aac_cmd_fib_srb(acp); 5608 5609 /* Fill SG table */ 5610 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5611 ddi_put32(acc, &srb->count, acp->bcount); 5612 5613 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0]; 5614 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5615 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5616 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5617 } 5618 } 5619 5620 static void 5621 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp) 5622 { 5623 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5624 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5625 struct aac_sg_entry64 *sgp; 5626 struct aac_sge *sge; 5627 5628 acp->fib_size = sizeof (struct aac_fib_header) + \ 5629 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5630 acp->left_cookien * sizeof (struct aac_sg_entry64); 5631 5632 /* Fill FIB and SRB headers, and copy cdb */ 5633 aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommandU64, 5634 acp->fib_size); 5635 aac_cmd_fib_srb(acp); 5636 5637 /* Fill SG table */ 5638 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5639 ddi_put32(acc, &srb->count, acp->bcount); 5640 5641 for (sge = &acp->sgt[0], 5642 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0]; 5643 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5644 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5645 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5646 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5647 } 5648 } 5649 5650 static int 5651 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 5652 { 5653 struct aac_slot *slotp; 5654 5655 if (slotp = aac_get_slot(softs)) { 5656 acp->slotp = slotp; 5657 slotp->acp = acp; 5658 acp->aac_cmd_fib(softs, acp); 5659 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, 5660 DDI_DMA_SYNC_FORDEV); 5661 return (AACOK); 5662 } 5663 return (AACERR); 5664 } 5665 5666 static int 5667 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp) 5668 { 5669 struct aac_device *dvp = acp->dvp; 5670 int q = AAC_CMDQ(acp); 5671 5672 if (dvp) { 5673 if (dvp->ncmds[q] < dvp->throttle[q]) { 5674 if (!(acp->flags & AAC_CMD_NTAG) || 5675 dvp->ncmds[q] == 0) { 5676 do_bind: 5677 return (aac_cmd_slot_bind(softs, acp)); 5678 } 5679 ASSERT(q == AAC_CMDQ_ASYNC); 5680 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC, 5681 AAC_THROTTLE_DRAIN); 5682 } 5683 } else { 5684 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) 5685 goto do_bind; 5686 } 5687 return (AACERR); 5688 } 5689 5690 static void 5691 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp) 5692 { 5693 struct aac_slot *slotp = acp->slotp; 5694 int q = AAC_CMDQ(acp); 5695 int rval; 5696 5697 /* Set ac and pkt */ 5698 if (acp->pkt) { /* ac from ioctl has no pkt */ 5699 acp->pkt->pkt_state |= 5700 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD; 5701 } 5702 if (acp->timeout) /* 0 indicates no timeout */ 5703 acp->timeout += aac_timebase + aac_tick; 5704 5705 if (acp->dvp) 5706 acp->dvp->ncmds[q]++; 5707 softs->bus_ncmds[q]++; 5708 aac_cmd_enqueue(&softs->q_busy, acp); 5709 5710 AACDB_PRINT_FIB(softs, slotp); 5711 5712 if (softs->flags & AAC_FLAGS_NEW_COMM) { 5713 rval = aac_send_command(softs, slotp); 5714 } else { 5715 /* 5716 * If fib can not be enqueued, the adapter is in an abnormal 5717 * state, there will be no interrupt to us. 5718 */ 5719 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q, 5720 slotp->fib_phyaddr, acp->fib_size); 5721 } 5722 5723 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) 5724 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 5725 5726 /* 5727 * NOTE: We send command only when slots availabe, so should never 5728 * reach here. 5729 */ 5730 if (rval != AACOK) { 5731 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed"); 5732 if (acp->pkt) { 5733 acp->pkt->pkt_state &= ~STATE_SENT_CMD; 5734 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0); 5735 } 5736 aac_end_io(softs, acp); 5737 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB))) 5738 ddi_trigger_softintr(softs->softint_id); 5739 } 5740 } 5741 5742 static void 5743 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q) 5744 { 5745 struct aac_cmd *acp, *next_acp; 5746 5747 /* Serve as many waiting io's as possible */ 5748 for (acp = q->q_head; acp; acp = next_acp) { 5749 next_acp = acp->next; 5750 if (aac_bind_io(softs, acp) == AACOK) { 5751 aac_cmd_delete(q, acp); 5752 aac_start_io(softs, acp); 5753 } 5754 if (softs->free_io_slot_head == NULL) 5755 break; 5756 } 5757 } 5758 5759 static void 5760 aac_start_waiting_io(struct aac_softstate *softs) 5761 { 5762 /* 5763 * Sync FIB io is served before async FIB io so that io requests 5764 * sent by interactive userland commands get responded asap. 5765 */ 5766 if (softs->q_wait[AAC_CMDQ_SYNC].q_head) 5767 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]); 5768 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head) 5769 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]); 5770 } 5771 5772 static void 5773 aac_drain_comp_q(struct aac_softstate *softs) 5774 { 5775 struct aac_cmd *acp; 5776 struct scsi_pkt *pkt; 5777 5778 /*CONSTCOND*/ 5779 while (1) { 5780 mutex_enter(&softs->q_comp_mutex); 5781 acp = aac_cmd_dequeue(&softs->q_comp); 5782 mutex_exit(&softs->q_comp_mutex); 5783 if (acp != NULL) { 5784 ASSERT(acp->pkt != NULL); 5785 pkt = acp->pkt; 5786 5787 if (pkt->pkt_reason == CMD_CMPLT) { 5788 /* 5789 * Consistent packets need to be sync'ed first 5790 */ 5791 if ((acp->flags & AAC_CMD_CONSISTENT) && 5792 (acp->flags & AAC_CMD_BUF_READ)) { 5793 if (aac_dma_sync_ac(acp) != AACOK) { 5794 ddi_fm_service_impact( 5795 softs->devinfo_p, 5796 DDI_SERVICE_UNAFFECTED); 5797 pkt->pkt_reason = CMD_TRAN_ERR; 5798 pkt->pkt_statistics = 0; 5799 } 5800 } 5801 if ((aac_check_acc_handle(softs-> \ 5802 comm_space_acc_handle) != DDI_SUCCESS) || 5803 (aac_check_acc_handle(softs-> \ 5804 pci_mem_handle) != DDI_SUCCESS)) { 5805 ddi_fm_service_impact(softs->devinfo_p, 5806 DDI_SERVICE_UNAFFECTED); 5807 ddi_fm_acc_err_clear(softs-> \ 5808 pci_mem_handle, DDI_FME_VER0); 5809 pkt->pkt_reason = CMD_TRAN_ERR; 5810 pkt->pkt_statistics = 0; 5811 } 5812 if (aac_check_dma_handle(softs-> \ 5813 comm_space_dma_handle) != DDI_SUCCESS) { 5814 ddi_fm_service_impact(softs->devinfo_p, 5815 DDI_SERVICE_UNAFFECTED); 5816 pkt->pkt_reason = CMD_TRAN_ERR; 5817 pkt->pkt_statistics = 0; 5818 } 5819 } 5820 scsi_hba_pkt_comp(pkt); 5821 } else { 5822 break; 5823 } 5824 } 5825 } 5826 5827 static int 5828 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp) 5829 { 5830 size_t rlen; 5831 ddi_dma_cookie_t cookie; 5832 uint_t cookien; 5833 5834 /* Allocate FIB dma resource */ 5835 if (ddi_dma_alloc_handle( 5836 softs->devinfo_p, 5837 &softs->addr_dma_attr, 5838 DDI_DMA_SLEEP, 5839 NULL, 5840 &slotp->fib_dma_handle) != DDI_SUCCESS) { 5841 AACDB_PRINT(softs, CE_WARN, 5842 "Cannot alloc dma handle for slot fib area"); 5843 goto error; 5844 } 5845 if (ddi_dma_mem_alloc( 5846 slotp->fib_dma_handle, 5847 softs->aac_max_fib_size, 5848 &softs->acc_attr, 5849 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 5850 DDI_DMA_SLEEP, 5851 NULL, 5852 (caddr_t *)&slotp->fibp, 5853 &rlen, 5854 &slotp->fib_acc_handle) != DDI_SUCCESS) { 5855 AACDB_PRINT(softs, CE_WARN, 5856 "Cannot alloc mem for slot fib area"); 5857 goto error; 5858 } 5859 if (ddi_dma_addr_bind_handle( 5860 slotp->fib_dma_handle, 5861 NULL, 5862 (caddr_t)slotp->fibp, 5863 softs->aac_max_fib_size, 5864 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 5865 DDI_DMA_SLEEP, 5866 NULL, 5867 &cookie, 5868 &cookien) != DDI_DMA_MAPPED) { 5869 AACDB_PRINT(softs, CE_WARN, 5870 "dma bind failed for slot fib area"); 5871 goto error; 5872 } 5873 5874 /* Check dma handles allocated in fib attach */ 5875 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) { 5876 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 5877 goto error; 5878 } 5879 5880 /* Check acc handles allocated in fib attach */ 5881 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) { 5882 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 5883 goto error; 5884 } 5885 5886 slotp->fib_phyaddr = cookie.dmac_laddress; 5887 return (AACOK); 5888 5889 error: 5890 if (slotp->fib_acc_handle) { 5891 ddi_dma_mem_free(&slotp->fib_acc_handle); 5892 slotp->fib_acc_handle = NULL; 5893 } 5894 if (slotp->fib_dma_handle) { 5895 ddi_dma_free_handle(&slotp->fib_dma_handle); 5896 slotp->fib_dma_handle = NULL; 5897 } 5898 return (AACERR); 5899 } 5900 5901 static void 5902 aac_free_fib(struct aac_slot *slotp) 5903 { 5904 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle); 5905 ddi_dma_mem_free(&slotp->fib_acc_handle); 5906 slotp->fib_acc_handle = NULL; 5907 ddi_dma_free_handle(&slotp->fib_dma_handle); 5908 slotp->fib_dma_handle = NULL; 5909 slotp->fib_phyaddr = 0; 5910 } 5911 5912 static void 5913 aac_alloc_fibs(struct aac_softstate *softs) 5914 { 5915 int i; 5916 struct aac_slot *slotp; 5917 5918 for (i = 0; i < softs->total_slots && 5919 softs->total_fibs < softs->total_slots; i++) { 5920 slotp = &(softs->io_slot[i]); 5921 if (slotp->fib_phyaddr) 5922 continue; 5923 if (aac_alloc_fib(softs, slotp) != AACOK) 5924 break; 5925 5926 /* Insert the slot to the free slot list */ 5927 aac_release_slot(softs, slotp); 5928 softs->total_fibs++; 5929 } 5930 } 5931 5932 static void 5933 aac_destroy_fibs(struct aac_softstate *softs) 5934 { 5935 struct aac_slot *slotp; 5936 5937 while ((slotp = softs->free_io_slot_head) != NULL) { 5938 ASSERT(slotp->fib_phyaddr); 5939 softs->free_io_slot_head = slotp->next; 5940 aac_free_fib(slotp); 5941 ASSERT(slotp->index == (slotp - softs->io_slot)); 5942 softs->total_fibs--; 5943 } 5944 ASSERT(softs->total_fibs == 0); 5945 } 5946 5947 static int 5948 aac_create_slots(struct aac_softstate *softs) 5949 { 5950 int i; 5951 5952 softs->total_slots = softs->aac_max_fibs; 5953 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \ 5954 softs->total_slots, KM_SLEEP); 5955 if (softs->io_slot == NULL) { 5956 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot"); 5957 return (AACERR); 5958 } 5959 for (i = 0; i < softs->total_slots; i++) 5960 softs->io_slot[i].index = i; 5961 softs->free_io_slot_head = NULL; 5962 softs->total_fibs = 0; 5963 return (AACOK); 5964 } 5965 5966 static void 5967 aac_destroy_slots(struct aac_softstate *softs) 5968 { 5969 ASSERT(softs->free_io_slot_head == NULL); 5970 5971 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \ 5972 softs->total_slots); 5973 softs->io_slot = NULL; 5974 softs->total_slots = 0; 5975 } 5976 5977 struct aac_slot * 5978 aac_get_slot(struct aac_softstate *softs) 5979 { 5980 struct aac_slot *slotp; 5981 5982 if ((slotp = softs->free_io_slot_head) != NULL) { 5983 softs->free_io_slot_head = slotp->next; 5984 slotp->next = NULL; 5985 } 5986 return (slotp); 5987 } 5988 5989 static void 5990 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp) 5991 { 5992 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots)); 5993 ASSERT(slotp == &softs->io_slot[slotp->index]); 5994 5995 slotp->acp = NULL; 5996 slotp->next = softs->free_io_slot_head; 5997 softs->free_io_slot_head = slotp; 5998 } 5999 6000 int 6001 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp) 6002 { 6003 if (aac_bind_io(softs, acp) == AACOK) 6004 aac_start_io(softs, acp); 6005 else 6006 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp); 6007 6008 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) 6009 return (TRAN_ACCEPT); 6010 /* 6011 * Because sync FIB is always 512 bytes and used for critical 6012 * functions, async FIB is used for poll IO. 6013 */ 6014 if (acp->flags & AAC_CMD_NO_INTR) { 6015 if (aac_do_poll_io(softs, acp) == AACOK) 6016 return (TRAN_ACCEPT); 6017 } else { 6018 if (aac_do_sync_io(softs, acp) == AACOK) 6019 return (TRAN_ACCEPT); 6020 } 6021 return (TRAN_BADPKT); 6022 } 6023 6024 static int 6025 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp) 6026 { 6027 int (*intr_handler)(struct aac_softstate *); 6028 6029 /* 6030 * Interrupt is disabled, we have to poll the adapter by ourselves. 6031 */ 6032 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 6033 aac_process_intr_new : aac_process_intr_old; 6034 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) { 6035 int i = AAC_POLL_TIME * 1000; 6036 6037 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i); 6038 if (i == 0) 6039 aac_cmd_timeout(softs, acp); 6040 } 6041 6042 ddi_trigger_softintr(softs->softint_id); 6043 6044 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR)) 6045 return (AACOK); 6046 return (AACERR); 6047 } 6048 6049 static int 6050 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp) 6051 { 6052 ASSERT(softs && acp); 6053 6054 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) 6055 cv_wait(&softs->event, &softs->io_lock); 6056 6057 if (acp->flags & AAC_CMD_CMPLT) 6058 return (AACOK); 6059 return (AACERR); 6060 } 6061 6062 static int 6063 aac_dma_sync_ac(struct aac_cmd *acp) 6064 { 6065 if (acp->buf_dma_handle) { 6066 if (acp->flags & AAC_CMD_BUF_WRITE) { 6067 if (acp->abp != NULL) 6068 ddi_rep_put8(acp->abh, 6069 (uint8_t *)acp->bp->b_un.b_addr, 6070 (uint8_t *)acp->abp, acp->bp->b_bcount, 6071 DDI_DEV_AUTOINCR); 6072 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6073 DDI_DMA_SYNC_FORDEV); 6074 } else { 6075 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6076 DDI_DMA_SYNC_FORCPU); 6077 if (aac_check_dma_handle(acp->buf_dma_handle) != 6078 DDI_SUCCESS) 6079 return (AACERR); 6080 if (acp->abp != NULL) 6081 ddi_rep_get8(acp->abh, 6082 (uint8_t *)acp->bp->b_un.b_addr, 6083 (uint8_t *)acp->abp, acp->bp->b_bcount, 6084 DDI_DEV_AUTOINCR); 6085 } 6086 } 6087 return (AACOK); 6088 } 6089 6090 /* 6091 * The following function comes from Adaptec: 6092 * 6093 * When driver sees a particular event that means containers are changed, it 6094 * will rescan containers. However a change may not be complete until some 6095 * other event is received. For example, creating or deleting an array will 6096 * incur as many as six AifEnConfigChange events which would generate six 6097 * container rescans. To diminish rescans, driver set a flag to wait for 6098 * another particular event. When sees that events come in, it will do rescan. 6099 */ 6100 static int 6101 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp) 6102 { 6103 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 6104 uint16_t fib_command; 6105 struct aac_aif_command *aif; 6106 int en_type; 6107 int devcfg_needed; 6108 int current, next; 6109 6110 fib_command = LE_16(fibp->Header.Command); 6111 if (fib_command != AifRequest) { 6112 cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x", 6113 fib_command); 6114 return (AACERR); 6115 } 6116 6117 /* Update internal container state */ 6118 aif = (struct aac_aif_command *)&fibp->data[0]; 6119 6120 AACDB_PRINT_AIF(softs, aif); 6121 devcfg_needed = 0; 6122 en_type = LE_32((uint32_t)aif->data.EN.type); 6123 6124 switch (LE_32((uint32_t)aif->command)) { 6125 case AifCmdDriverNotify: { 6126 int cid = LE_32(aif->data.EN.data.ECC.container[0]); 6127 6128 switch (en_type) { 6129 case AifDenMorphComplete: 6130 case AifDenVolumeExtendComplete: 6131 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev)) 6132 softs->devcfg_wait_on = AifEnConfigChange; 6133 break; 6134 } 6135 if (softs->devcfg_wait_on == en_type) 6136 devcfg_needed = 1; 6137 break; 6138 } 6139 6140 case AifCmdEventNotify: 6141 switch (en_type) { 6142 case AifEnAddContainer: 6143 case AifEnDeleteContainer: 6144 softs->devcfg_wait_on = AifEnConfigChange; 6145 break; 6146 case AifEnContainerChange: 6147 if (!softs->devcfg_wait_on) 6148 softs->devcfg_wait_on = AifEnConfigChange; 6149 break; 6150 case AifEnContainerEvent: 6151 if (ddi_get32(acc, &aif-> \ 6152 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE) 6153 devcfg_needed = 1; 6154 break; 6155 } 6156 if (softs->devcfg_wait_on == en_type) 6157 devcfg_needed = 1; 6158 break; 6159 6160 case AifCmdJobProgress: 6161 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) { 6162 int pr_status; 6163 uint32_t pr_ftick, pr_ctick; 6164 6165 pr_status = LE_32((uint32_t)aif->data.PR[0].status); 6166 pr_ctick = LE_32(aif->data.PR[0].currentTick); 6167 pr_ftick = LE_32(aif->data.PR[0].finalTick); 6168 6169 if ((pr_ctick == pr_ftick) || 6170 (pr_status == AifJobStsSuccess)) 6171 softs->devcfg_wait_on = AifEnContainerChange; 6172 else if ((pr_ctick == 0) && 6173 (pr_status == AifJobStsRunning)) 6174 softs->devcfg_wait_on = AifEnContainerChange; 6175 } 6176 break; 6177 } 6178 6179 if (devcfg_needed) { 6180 softs->devcfg_wait_on = 0; 6181 (void) aac_probe_containers(softs); 6182 } 6183 6184 /* Modify AIF contexts */ 6185 current = softs->aifq_idx; 6186 next = (current + 1) % AAC_AIFQ_LENGTH; 6187 if (next == 0) { 6188 struct aac_fib_context *ctx; 6189 6190 softs->aifq_wrap = 1; 6191 for (ctx = softs->fibctx; ctx; ctx = ctx->next) { 6192 if (next == ctx->ctx_idx) { 6193 ctx->ctx_filled = 1; 6194 } else if (current == ctx->ctx_idx && ctx->ctx_filled) { 6195 ctx->ctx_idx = next; 6196 AACDB_PRINT(softs, CE_NOTE, 6197 "-- AIF queue(%x) overrun", ctx->unique); 6198 } 6199 } 6200 } 6201 softs->aifq_idx = next; 6202 6203 /* Wakeup applications */ 6204 cv_broadcast(&softs->aifv); 6205 return (AACOK); 6206 } 6207 6208 /* 6209 * Timeout recovery 6210 */ 6211 /*ARGSUSED*/ 6212 static void 6213 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp) 6214 { 6215 #ifdef DEBUG 6216 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT; 6217 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp); 6218 AACDB_PRINT_FIB(softs, acp->slotp); 6219 #endif 6220 6221 /* 6222 * Besides the firmware in unhealthy state, an overloaded 6223 * adapter may also incur pkt timeout. 6224 * There is a chance for an adapter with a slower IOP to take 6225 * longer than 60 seconds to process the commands, such as when 6226 * to perform IOs. So the adapter is doing a build on a RAID-5 6227 * while being required longer completion times should be 6228 * tolerated. 6229 */ 6230 switch (aac_do_reset(softs)) { 6231 case AAC_IOP_RESET_SUCCEED: 6232 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET); 6233 aac_start_waiting_io(softs); 6234 break; 6235 case AAC_IOP_RESET_FAILED: 6236 /* Abort all waiting cmds when adapter is dead */ 6237 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT); 6238 break; 6239 case AAC_IOP_RESET_ABNORMAL: 6240 aac_start_waiting_io(softs); 6241 } 6242 } 6243 6244 /* 6245 * The following function comes from Adaptec: 6246 * 6247 * Time sync. command added to synchronize time with firmware every 30 6248 * minutes (required for correct AIF timestamps etc.) 6249 */ 6250 static int 6251 aac_sync_tick(struct aac_softstate *softs) 6252 { 6253 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 6254 struct aac_fib *fibp = softs->sync_slot.fibp; 6255 6256 ddi_put32(acc, (void *)&fibp->data[0], ddi_get_time()); 6257 return (aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t))); 6258 } 6259 6260 static void 6261 aac_daemon(void *arg) 6262 { 6263 struct aac_softstate *softs = (struct aac_softstate *)arg; 6264 struct aac_cmd *acp; 6265 6266 DBCALLED(softs, 2); 6267 6268 mutex_enter(&softs->io_lock); 6269 /* Check slot for timeout pkts */ 6270 aac_timebase += aac_tick; 6271 for (acp = softs->q_busy.q_head; acp; acp = acp->next) { 6272 if (acp->timeout) { 6273 if (acp->timeout <= aac_timebase) { 6274 aac_cmd_timeout(softs, acp); 6275 ddi_trigger_softintr(softs->softint_id); 6276 } 6277 break; 6278 } 6279 } 6280 6281 /* Time sync. with firmware every AAC_SYNC_TICK */ 6282 if (aac_sync_time <= aac_timebase) { 6283 aac_sync_time = aac_timebase; 6284 if (aac_sync_tick(softs) != AACOK) 6285 aac_sync_time += aac_tick << 1; /* retry shortly */ 6286 else 6287 aac_sync_time += AAC_SYNC_TICK; 6288 } 6289 6290 if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0)) 6291 softs->timeout_id = timeout(aac_daemon, (void *)softs, 6292 (aac_tick * drv_usectohz(1000000))); 6293 mutex_exit(&softs->io_lock); 6294 } 6295 6296 /* 6297 * Architecture dependent functions 6298 */ 6299 static int 6300 aac_rx_get_fwstatus(struct aac_softstate *softs) 6301 { 6302 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6303 } 6304 6305 static int 6306 aac_rx_get_mailbox(struct aac_softstate *softs, int mb) 6307 { 6308 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4)); 6309 } 6310 6311 static void 6312 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6313 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6314 { 6315 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd); 6316 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0); 6317 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1); 6318 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2); 6319 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3); 6320 } 6321 6322 static int 6323 aac_rkt_get_fwstatus(struct aac_softstate *softs) 6324 { 6325 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6326 } 6327 6328 static int 6329 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb) 6330 { 6331 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4)); 6332 } 6333 6334 static void 6335 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6336 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6337 { 6338 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd); 6339 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0); 6340 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1); 6341 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2); 6342 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3); 6343 } 6344 6345 /* 6346 * cb_ops functions 6347 */ 6348 static int 6349 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred) 6350 { 6351 struct aac_softstate *softs; 6352 int minor0, minor; 6353 int instance; 6354 6355 DBCALLED(NULL, 2); 6356 6357 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6358 return (EINVAL); 6359 6360 minor0 = getminor(*devp); 6361 minor = AAC_SCSA_MINOR(minor0); 6362 6363 if (AAC_IS_SCSA_NODE(minor)) 6364 return (scsi_hba_open(devp, flag, otyp, cred)); 6365 6366 instance = MINOR2INST(minor0); 6367 if (instance >= AAC_MAX_ADAPTERS) 6368 return (ENXIO); 6369 6370 softs = ddi_get_soft_state(aac_softstatep, instance); 6371 if (softs == NULL) 6372 return (ENXIO); 6373 6374 return (0); 6375 } 6376 6377 /*ARGSUSED*/ 6378 static int 6379 aac_close(dev_t dev, int flag, int otyp, cred_t *cred) 6380 { 6381 int minor0, minor; 6382 int instance; 6383 6384 DBCALLED(NULL, 2); 6385 6386 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6387 return (EINVAL); 6388 6389 minor0 = getminor(dev); 6390 minor = AAC_SCSA_MINOR(minor0); 6391 6392 if (AAC_IS_SCSA_NODE(minor)) 6393 return (scsi_hba_close(dev, flag, otyp, cred)); 6394 6395 instance = MINOR2INST(minor0); 6396 if (instance >= AAC_MAX_ADAPTERS) 6397 return (ENXIO); 6398 6399 return (0); 6400 } 6401 6402 static int 6403 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, 6404 int *rval_p) 6405 { 6406 struct aac_softstate *softs; 6407 int minor0, minor; 6408 int instance; 6409 6410 DBCALLED(NULL, 2); 6411 6412 if (drv_priv(cred_p) != 0) 6413 return (EPERM); 6414 6415 minor0 = getminor(dev); 6416 minor = AAC_SCSA_MINOR(minor0); 6417 6418 if (AAC_IS_SCSA_NODE(minor)) 6419 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p)); 6420 6421 instance = MINOR2INST(minor0); 6422 if (instance < AAC_MAX_ADAPTERS) { 6423 softs = ddi_get_soft_state(aac_softstatep, instance); 6424 return (aac_do_ioctl(softs, dev, cmd, arg, flag)); 6425 } 6426 return (ENXIO); 6427 } 6428 6429 /* 6430 * The IO fault service error handling callback function 6431 */ 6432 /*ARGSUSED*/ 6433 static int 6434 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6435 { 6436 /* 6437 * as the driver can always deal with an error in any dma or 6438 * access handle, we can just return the fme_status value. 6439 */ 6440 pci_ereport_post(dip, err, NULL); 6441 return (err->fme_status); 6442 } 6443 6444 /* 6445 * aac_fm_init - initialize fma capabilities and register with IO 6446 * fault services. 6447 */ 6448 static void 6449 aac_fm_init(struct aac_softstate *softs) 6450 { 6451 /* 6452 * Need to change iblock to priority for new MSI intr 6453 */ 6454 ddi_iblock_cookie_t fm_ibc; 6455 6456 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p, 6457 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 6458 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 6459 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 6460 6461 /* Only register with IO Fault Services if we have some capability */ 6462 if (softs->fm_capabilities) { 6463 /* Adjust access and dma attributes for FMA */ 6464 softs->acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC; 6465 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6466 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6467 6468 /* 6469 * Register capabilities with IO Fault Services. 6470 * fm_capabilities will be updated to indicate 6471 * capabilities actually supported (not requested.) 6472 */ 6473 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc); 6474 6475 /* 6476 * Initialize pci ereport capabilities if ereport 6477 * capable (should always be.) 6478 */ 6479 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6480 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6481 pci_ereport_setup(softs->devinfo_p); 6482 } 6483 6484 /* 6485 * Register error callback if error callback capable. 6486 */ 6487 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6488 ddi_fm_handler_register(softs->devinfo_p, 6489 aac_fm_error_cb, (void *) softs); 6490 } 6491 } 6492 } 6493 6494 /* 6495 * aac_fm_fini - Releases fma capabilities and un-registers with IO 6496 * fault services. 6497 */ 6498 static void 6499 aac_fm_fini(struct aac_softstate *softs) 6500 { 6501 /* Only unregister FMA capabilities if registered */ 6502 if (softs->fm_capabilities) { 6503 /* 6504 * Un-register error callback if error callback capable. 6505 */ 6506 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6507 ddi_fm_handler_unregister(softs->devinfo_p); 6508 } 6509 6510 /* 6511 * Release any resources allocated by pci_ereport_setup() 6512 */ 6513 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6514 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6515 pci_ereport_teardown(softs->devinfo_p); 6516 } 6517 6518 /* Unregister from IO Fault Services */ 6519 ddi_fm_fini(softs->devinfo_p); 6520 6521 /* Adjust access and dma attributes for FMA */ 6522 softs->acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC; 6523 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 6524 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 6525 } 6526 } 6527 6528 int 6529 aac_check_acc_handle(ddi_acc_handle_t handle) 6530 { 6531 ddi_fm_error_t de; 6532 6533 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 6534 return (de.fme_status); 6535 } 6536 6537 int 6538 aac_check_dma_handle(ddi_dma_handle_t handle) 6539 { 6540 ddi_fm_error_t de; 6541 6542 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 6543 return (de.fme_status); 6544 } 6545 6546 void 6547 aac_fm_ereport(struct aac_softstate *softs, char *detail) 6548 { 6549 uint64_t ena; 6550 char buf[FM_MAX_CLASS]; 6551 6552 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6553 ena = fm_ena_generate(0, FM_ENA_FMT1); 6554 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) { 6555 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP, 6556 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 6557 } 6558 } 6559 6560 /* 6561 * Autoconfiguration support 6562 */ 6563 static int 6564 aac_parse_devname(char *devnm, int *tgt, int *lun) 6565 { 6566 char devbuf[SCSI_MAXNAMELEN]; 6567 char *addr; 6568 char *p, *tp, *lp; 6569 long num; 6570 6571 /* Parse dev name and address */ 6572 (void) strcpy(devbuf, devnm); 6573 addr = ""; 6574 for (p = devbuf; *p != '\0'; p++) { 6575 if (*p == '@') { 6576 addr = p + 1; 6577 *p = '\0'; 6578 } else if (*p == ':') { 6579 *p = '\0'; 6580 break; 6581 } 6582 } 6583 6584 /* Parse taget and lun */ 6585 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 6586 if (*p == ',') { 6587 lp = p + 1; 6588 *p = '\0'; 6589 break; 6590 } 6591 } 6592 if (tgt && tp) { 6593 if (ddi_strtol(tp, NULL, 0x10, &num)) 6594 return (AACERR); 6595 *tgt = (int)num; 6596 } 6597 if (lun && lp) { 6598 if (ddi_strtol(lp, NULL, 0x10, &num)) 6599 return (AACERR); 6600 *lun = (int)num; 6601 } 6602 return (AACOK); 6603 } 6604 6605 static dev_info_t * 6606 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun) 6607 { 6608 dev_info_t *child = NULL; 6609 char addr[SCSI_MAXNAMELEN]; 6610 char tmp[MAXNAMELEN]; 6611 6612 if (tgt < AAC_MAX_LD) { 6613 if (lun == 0) { 6614 struct aac_device *dvp = &softs->containers[tgt].dev; 6615 6616 child = dvp->dip; 6617 } 6618 } else { 6619 (void) sprintf(addr, "%x,%x", tgt, lun); 6620 for (child = ddi_get_child(softs->devinfo_p); 6621 child; child = ddi_get_next_sibling(child)) { 6622 /* We don't care about non-persistent node */ 6623 if (ndi_dev_is_persistent_node(child) == 0) 6624 continue; 6625 6626 if (aac_name_node(child, tmp, MAXNAMELEN) != 6627 DDI_SUCCESS) 6628 continue; 6629 if (strcmp(addr, tmp) == 0) 6630 break; 6631 } 6632 } 6633 return (child); 6634 } 6635 6636 static int 6637 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd, 6638 dev_info_t **dipp) 6639 { 6640 char *nodename = NULL; 6641 char **compatible = NULL; 6642 int ncompatible = 0; 6643 char *childname; 6644 dev_info_t *ldip = NULL; 6645 int tgt = sd->sd_address.a_target; 6646 int lun = sd->sd_address.a_lun; 6647 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 6648 int rval; 6649 6650 DBCALLED(softs, 2); 6651 6652 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 6653 NULL, &nodename, &compatible, &ncompatible); 6654 if (nodename == NULL) { 6655 AACDB_PRINT(softs, CE_WARN, 6656 "found no comptible driver for t%dL%d", tgt, lun); 6657 rval = NDI_FAILURE; 6658 goto finish; 6659 } 6660 childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename; 6661 6662 /* Create dev node */ 6663 rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID, 6664 &ldip); 6665 if (rval == NDI_SUCCESS) { 6666 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) 6667 != DDI_PROP_SUCCESS) { 6668 AACDB_PRINT(softs, CE_WARN, "unable to create " 6669 "property for t%dL%d (target)", tgt, lun); 6670 rval = NDI_FAILURE; 6671 goto finish; 6672 } 6673 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) 6674 != DDI_PROP_SUCCESS) { 6675 AACDB_PRINT(softs, CE_WARN, "unable to create " 6676 "property for t%dL%d (lun)", tgt, lun); 6677 rval = NDI_FAILURE; 6678 goto finish; 6679 } 6680 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 6681 "compatible", compatible, ncompatible) 6682 != DDI_PROP_SUCCESS) { 6683 AACDB_PRINT(softs, CE_WARN, "unable to create " 6684 "property for t%dL%d (compatible)", tgt, lun); 6685 rval = NDI_FAILURE; 6686 goto finish; 6687 } 6688 6689 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 6690 if (rval != NDI_SUCCESS) { 6691 AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d", 6692 tgt, lun); 6693 ndi_prop_remove_all(ldip); 6694 (void) ndi_devi_free(ldip); 6695 } 6696 } 6697 finish: 6698 if (dipp) 6699 *dipp = ldip; 6700 6701 scsi_hba_nodename_compatible_free(nodename, compatible); 6702 return (rval); 6703 } 6704 6705 /*ARGSUSED*/ 6706 static int 6707 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd) 6708 { 6709 int tgt = sd->sd_address.a_target; 6710 int lun = sd->sd_address.a_lun; 6711 6712 DBCALLED(softs, 2); 6713 6714 if (tgt < AAC_MAX_LD) { 6715 int rval; 6716 6717 if (lun == 0) { 6718 mutex_enter(&softs->io_lock); 6719 rval = aac_probe_container(softs, tgt); 6720 mutex_exit(&softs->io_lock); 6721 if (rval == AACOK) { 6722 if (scsi_hba_probe(sd, NULL) == 6723 SCSIPROBE_EXISTS) 6724 return (NDI_SUCCESS); 6725 } 6726 } 6727 return (NDI_FAILURE); 6728 } else { 6729 int dtype; 6730 6731 if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS) 6732 return (NDI_FAILURE); 6733 6734 dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 6735 6736 AACDB_PRINT(softs, CE_NOTE, 6737 "Phys. device found: tgt %d dtype %d: %s", 6738 tgt, dtype, sd->sd_inq->inq_vid); 6739 6740 /* Only non-DASD exposed */ 6741 if (dtype != DTYPE_RODIRECT /* CDROM */ && 6742 dtype != DTYPE_SEQUENTIAL /* TAPE */ && 6743 dtype != DTYPE_ESI /* SES */) 6744 return (NDI_FAILURE); 6745 6746 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt); 6747 mutex_enter(&softs->io_lock); 6748 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID; 6749 mutex_exit(&softs->io_lock); 6750 return (NDI_SUCCESS); 6751 } 6752 } 6753 6754 static int 6755 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun, 6756 dev_info_t **ldip) 6757 { 6758 struct scsi_device sd; 6759 dev_info_t *child; 6760 int rval; 6761 6762 DBCALLED(softs, 2); 6763 6764 if ((child = aac_find_child(softs, tgt, lun)) != NULL) { 6765 if (ldip) 6766 *ldip = child; 6767 return (NDI_SUCCESS); 6768 } 6769 6770 bzero(&sd, sizeof (struct scsi_device)); 6771 sd.sd_address.a_hba_tran = softs->hba_tran; 6772 sd.sd_address.a_target = (uint16_t)tgt; 6773 sd.sd_address.a_lun = (uint8_t)lun; 6774 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS) 6775 rval = aac_config_child(softs, &sd, ldip); 6776 scsi_unprobe(&sd); 6777 return (rval); 6778 } 6779 6780 static int 6781 aac_config_tgt(struct aac_softstate *softs, int tgt) 6782 { 6783 struct scsi_address ap; 6784 struct buf *bp = NULL; 6785 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE; 6786 int list_len = 0; 6787 int lun_total = 0; 6788 dev_info_t *ldip; 6789 int i; 6790 6791 ap.a_hba_tran = softs->hba_tran; 6792 ap.a_target = (uint16_t)tgt; 6793 ap.a_lun = 0; 6794 6795 for (i = 0; i < 2; i++) { 6796 struct scsi_pkt *pkt; 6797 uchar_t *cdb; 6798 uchar_t *p; 6799 uint32_t data; 6800 6801 if (bp == NULL) { 6802 if ((bp = scsi_alloc_consistent_buf(&ap, NULL, 6803 buf_len, B_READ, NULL_FUNC, NULL)) == NULL) 6804 return (AACERR); 6805 } 6806 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5, 6807 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, 6808 NULL, NULL)) == NULL) { 6809 scsi_free_consistent_buf(bp); 6810 return (AACERR); 6811 } 6812 cdb = pkt->pkt_cdbp; 6813 bzero(cdb, CDB_GROUP5); 6814 cdb[0] = SCMD_REPORT_LUNS; 6815 6816 /* Convert buffer len from local to LE_32 */ 6817 data = buf_len; 6818 for (p = &cdb[9]; p > &cdb[5]; p--) { 6819 *p = data & 0xff; 6820 data >>= 8; 6821 } 6822 6823 if (scsi_poll(pkt) < 0 || 6824 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) { 6825 scsi_destroy_pkt(pkt); 6826 break; 6827 } 6828 6829 /* Convert list_len from LE_32 to local */ 6830 for (p = (uchar_t *)bp->b_un.b_addr; 6831 p < (uchar_t *)bp->b_un.b_addr + 4; p++) { 6832 data <<= 8; 6833 data |= *p; 6834 } 6835 list_len = data; 6836 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) { 6837 scsi_free_consistent_buf(bp); 6838 bp = NULL; 6839 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE; 6840 } 6841 scsi_destroy_pkt(pkt); 6842 } 6843 if (i >= 2) { 6844 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr + 6845 AAC_SCSI_RPTLUNS_HEAD_SIZE); 6846 6847 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) { 6848 uint16_t lun; 6849 6850 /* Determine report luns addressing type */ 6851 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) { 6852 /* 6853 * Vendors in the field have been found to be 6854 * concatenating bus/target/lun to equal the 6855 * complete lun value instead of switching to 6856 * flat space addressing 6857 */ 6858 case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL: 6859 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT: 6860 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE: 6861 lun = ((buf[0] & 0x3f) << 8) | buf[1]; 6862 if (lun > UINT8_MAX) { 6863 AACDB_PRINT(softs, CE_WARN, 6864 "abnormal lun number: %d", lun); 6865 break; 6866 } 6867 if (aac_config_lun(softs, tgt, lun, &ldip) == 6868 NDI_SUCCESS) 6869 lun_total++; 6870 break; 6871 } 6872 6873 buf += AAC_SCSI_RPTLUNS_ADDR_SIZE; 6874 } 6875 } else { 6876 /* The target may do not support SCMD_REPORT_LUNS. */ 6877 if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS) 6878 lun_total++; 6879 } 6880 scsi_free_consistent_buf(bp); 6881 return (lun_total); 6882 } 6883 6884 static void 6885 aac_devcfg(struct aac_softstate *softs, int tgt, int en) 6886 { 6887 struct aac_device *dvp; 6888 6889 mutex_enter(&softs->io_lock); 6890 dvp = AAC_DEV(softs, tgt); 6891 if (en) 6892 dvp->flags |= AAC_DFLAG_CONFIGURING; 6893 else 6894 dvp->flags &= ~AAC_DFLAG_CONFIGURING; 6895 mutex_exit(&softs->io_lock); 6896 } 6897 6898 static int 6899 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op, 6900 void *arg, dev_info_t **childp) 6901 { 6902 struct aac_softstate *softs; 6903 int circ = 0; 6904 int rval; 6905 6906 if ((softs = ddi_get_soft_state(aac_softstatep, 6907 ddi_get_instance(parent))) == NULL) 6908 return (NDI_FAILURE); 6909 6910 /* Commands for bus config should be blocked as the bus is quiesced */ 6911 mutex_enter(&softs->io_lock); 6912 if (softs->state & AAC_STATE_QUIESCED) { 6913 AACDB_PRINT(softs, CE_NOTE, 6914 "bus_config abroted because bus is quiesced"); 6915 mutex_exit(&softs->io_lock); 6916 return (NDI_FAILURE); 6917 } 6918 mutex_exit(&softs->io_lock); 6919 6920 DBCALLED(softs, 1); 6921 6922 /* Hold the nexus across the bus_config */ 6923 ndi_devi_enter(parent, &circ); 6924 switch (op) { 6925 case BUS_CONFIG_ONE: { 6926 int tgt, lun; 6927 6928 if (aac_parse_devname(arg, &tgt, &lun) != AACOK) { 6929 rval = NDI_FAILURE; 6930 break; 6931 } 6932 6933 AAC_DEVCFG_BEGIN(softs, tgt); 6934 rval = aac_config_lun(softs, tgt, lun, childp); 6935 AAC_DEVCFG_END(softs, tgt); 6936 break; 6937 } 6938 6939 case BUS_CONFIG_DRIVER: 6940 case BUS_CONFIG_ALL: { 6941 uint32_t bus, tgt; 6942 int index, total; 6943 6944 for (tgt = 0; tgt < AAC_MAX_LD; tgt++) { 6945 AAC_DEVCFG_BEGIN(softs, tgt); 6946 (void) aac_config_lun(softs, tgt, 0, NULL); 6947 AAC_DEVCFG_END(softs, tgt); 6948 } 6949 6950 /* Config the non-DASD devices connected to the card */ 6951 total = 0; 6952 index = AAC_MAX_LD; 6953 for (bus = 0; bus < softs->bus_max; bus++) { 6954 AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus); 6955 for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) { 6956 AAC_DEVCFG_BEGIN(softs, index); 6957 if (aac_config_tgt(softs, index)) 6958 total++; 6959 AAC_DEVCFG_END(softs, index); 6960 } 6961 } 6962 AACDB_PRINT(softs, CE_CONT, 6963 "?Total %d phys. device(s) found", total); 6964 rval = NDI_SUCCESS; 6965 break; 6966 } 6967 } 6968 6969 if (rval == NDI_SUCCESS) 6970 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 6971 ndi_devi_exit(parent, circ); 6972 return (rval); 6973 } 6974 6975 static void 6976 aac_handle_dr(struct aac_drinfo *drp) 6977 { 6978 struct aac_softstate *softs = drp->softs; 6979 struct aac_device *dvp; 6980 dev_info_t *dip; 6981 int valid; 6982 int circ1 = 0; 6983 6984 DBCALLED(softs, 1); 6985 6986 /* Hold the nexus across the bus_config */ 6987 mutex_enter(&softs->io_lock); 6988 dvp = AAC_DEV(softs, drp->tgt); 6989 valid = AAC_DEV_IS_VALID(dvp); 6990 dip = dvp->dip; 6991 mutex_exit(&softs->io_lock); 6992 6993 switch (drp->event) { 6994 case AAC_EVT_ONLINE: 6995 case AAC_EVT_OFFLINE: 6996 /* Device onlined */ 6997 if (dip == NULL && valid) { 6998 ndi_devi_enter(softs->devinfo_p, &circ1); 6999 (void) aac_config_lun(softs, drp->tgt, 0, NULL); 7000 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined", 7001 softs->instance, drp->tgt, drp->lun); 7002 ndi_devi_exit(softs->devinfo_p, circ1); 7003 } 7004 /* Device offlined */ 7005 if (dip && !valid) { 7006 mutex_enter(&softs->io_lock); 7007 (void) aac_do_reset(softs); 7008 mutex_exit(&softs->io_lock); 7009 7010 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 7011 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined", 7012 softs->instance, drp->tgt, drp->lun); 7013 } 7014 break; 7015 } 7016 kmem_free(drp, sizeof (struct aac_drinfo)); 7017 } 7018 7019 static int 7020 aac_dr_event(struct aac_softstate *softs, int tgt, int lun, int event) 7021 { 7022 struct aac_drinfo *drp; 7023 7024 DBCALLED(softs, 1); 7025 7026 if (softs->taskq == NULL || 7027 (drp = kmem_zalloc(sizeof (struct aac_drinfo), KM_NOSLEEP)) == NULL) 7028 return (AACERR); 7029 7030 drp->softs = softs; 7031 drp->tgt = tgt; 7032 drp->lun = lun; 7033 drp->event = event; 7034 if ((ddi_taskq_dispatch(softs->taskq, (void (*)(void *))aac_handle_dr, 7035 drp, DDI_NOSLEEP)) != DDI_SUCCESS) { 7036 AACDB_PRINT(softs, CE_WARN, "DR task start failed"); 7037 kmem_free(drp, sizeof (struct aac_drinfo)); 7038 return (AACERR); 7039 } 7040 return (AACOK); 7041 } 7042 7043 #ifdef DEBUG 7044 7045 /* -------------------------debug aid functions-------------------------- */ 7046 7047 #define AAC_FIB_CMD_KEY_STRINGS \ 7048 TestCommandResponse, "TestCommandResponse", \ 7049 TestAdapterCommand, "TestAdapterCommand", \ 7050 LastTestCommand, "LastTestCommand", \ 7051 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \ 7052 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \ 7053 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \ 7054 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \ 7055 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \ 7056 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \ 7057 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \ 7058 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \ 7059 InterfaceShutdown, "InterfaceShutdown", \ 7060 DmaCommandFib, "DmaCommandFib", \ 7061 StartProfile, "StartProfile", \ 7062 TermProfile, "TermProfile", \ 7063 SpeedTest, "SpeedTest", \ 7064 TakeABreakPt, "TakeABreakPt", \ 7065 RequestPerfData, "RequestPerfData", \ 7066 SetInterruptDefTimer, "SetInterruptDefTimer", \ 7067 SetInterruptDefCount, "SetInterruptDefCount", \ 7068 GetInterruptDefStatus, "GetInterruptDefStatus", \ 7069 LastCommCommand, "LastCommCommand", \ 7070 NuFileSystem, "NuFileSystem", \ 7071 UFS, "UFS", \ 7072 HostFileSystem, "HostFileSystem", \ 7073 LastFileSystemCommand, "LastFileSystemCommand", \ 7074 ContainerCommand, "ContainerCommand", \ 7075 ContainerCommand64, "ContainerCommand64", \ 7076 ClusterCommand, "ClusterCommand", \ 7077 ScsiPortCommand, "ScsiPortCommand", \ 7078 ScsiPortCommandU64, "ScsiPortCommandU64", \ 7079 AifRequest, "AifRequest", \ 7080 CheckRevision, "CheckRevision", \ 7081 FsaHostShutdown, "FsaHostShutdown", \ 7082 RequestAdapterInfo, "RequestAdapterInfo", \ 7083 IsAdapterPaused, "IsAdapterPaused", \ 7084 SendHostTime, "SendHostTime", \ 7085 LastMiscCommand, "LastMiscCommand" 7086 7087 #define AAC_CTVM_SUBCMD_KEY_STRINGS \ 7088 VM_Null, "VM_Null", \ 7089 VM_NameServe, "VM_NameServe", \ 7090 VM_ContainerConfig, "VM_ContainerConfig", \ 7091 VM_Ioctl, "VM_Ioctl", \ 7092 VM_FilesystemIoctl, "VM_FilesystemIoctl", \ 7093 VM_CloseAll, "VM_CloseAll", \ 7094 VM_CtBlockRead, "VM_CtBlockRead", \ 7095 VM_CtBlockWrite, "VM_CtBlockWrite", \ 7096 VM_SliceBlockRead, "VM_SliceBlockRead", \ 7097 VM_SliceBlockWrite, "VM_SliceBlockWrite", \ 7098 VM_DriveBlockRead, "VM_DriveBlockRead", \ 7099 VM_DriveBlockWrite, "VM_DriveBlockWrite", \ 7100 VM_EnclosureMgt, "VM_EnclosureMgt", \ 7101 VM_Unused, "VM_Unused", \ 7102 VM_CtBlockVerify, "VM_CtBlockVerify", \ 7103 VM_CtPerf, "VM_CtPerf", \ 7104 VM_CtBlockRead64, "VM_CtBlockRead64", \ 7105 VM_CtBlockWrite64, "VM_CtBlockWrite64", \ 7106 VM_CtBlockVerify64, "VM_CtBlockVerify64", \ 7107 VM_CtHostRead64, "VM_CtHostRead64", \ 7108 VM_CtHostWrite64, "VM_CtHostWrite64", \ 7109 VM_NameServe64, "VM_NameServe64" 7110 7111 #define AAC_CT_SUBCMD_KEY_STRINGS \ 7112 CT_Null, "CT_Null", \ 7113 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \ 7114 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \ 7115 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \ 7116 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \ 7117 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \ 7118 CT_WRITE_MBR, "CT_WRITE_MBR", \ 7119 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \ 7120 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \ 7121 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \ 7122 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \ 7123 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \ 7124 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \ 7125 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \ 7126 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \ 7127 CT_READ_MBR, "CT_READ_MBR", \ 7128 CT_READ_PARTITION, "CT_READ_PARTITION", \ 7129 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \ 7130 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \ 7131 CT_SLICE_SIZE, "CT_SLICE_SIZE", \ 7132 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \ 7133 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \ 7134 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \ 7135 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \ 7136 CT_UNMIRROR, "CT_UNMIRROR", \ 7137 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \ 7138 CT_GEN_MIRROR, "CT_GEN_MIRROR", \ 7139 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \ 7140 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \ 7141 CT_MOVE2, "CT_MOVE2", \ 7142 CT_SPLIT, "CT_SPLIT", \ 7143 CT_SPLIT2, "CT_SPLIT2", \ 7144 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \ 7145 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \ 7146 CT_RECONFIG, "CT_RECONFIG", \ 7147 CT_BREAK2, "CT_BREAK2", \ 7148 CT_BREAK, "CT_BREAK", \ 7149 CT_MERGE2, "CT_MERGE2", \ 7150 CT_MERGE, "CT_MERGE", \ 7151 CT_FORCE_ERROR, "CT_FORCE_ERROR", \ 7152 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \ 7153 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \ 7154 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \ 7155 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \ 7156 CT_VOLUME_ADD, "CT_VOLUME_ADD", \ 7157 CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \ 7158 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \ 7159 CT_COPY_STATUS, "CT_COPY_STATUS", \ 7160 CT_COPY, "CT_COPY", \ 7161 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \ 7162 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \ 7163 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \ 7164 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \ 7165 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \ 7166 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \ 7167 CT_SET, "CT_SET", \ 7168 CT_GET, "CT_GET", \ 7169 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \ 7170 CT_GET_DELAY, "CT_GET_DELAY", \ 7171 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \ 7172 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \ 7173 CT_SCRUB, "CT_SCRUB", \ 7174 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \ 7175 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \ 7176 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \ 7177 CT_PAUSE_IO, "CT_PAUSE_IO", \ 7178 CT_RELEASE_IO, "CT_RELEASE_IO", \ 7179 CT_SCRUB2, "CT_SCRUB2", \ 7180 CT_MCHECK, "CT_MCHECK", \ 7181 CT_CORRUPT, "CT_CORRUPT", \ 7182 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \ 7183 CT_PROMOTE, "CT_PROMOTE", \ 7184 CT_SET_DEAD, "CT_SET_DEAD", \ 7185 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \ 7186 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \ 7187 CT_GET_PARAM, "CT_GET_PARAM", \ 7188 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \ 7189 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \ 7190 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \ 7191 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \ 7192 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \ 7193 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \ 7194 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \ 7195 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \ 7196 CT_STOP_DATA, "CT_STOP_DATA", \ 7197 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \ 7198 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \ 7199 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \ 7200 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \ 7201 CT_GET_TIME, "CT_GET_TIME", \ 7202 CT_READ_DATA, "CT_READ_DATA", \ 7203 CT_CTR, "CT_CTR", \ 7204 CT_CTL, "CT_CTL", \ 7205 CT_DRAINIO, "CT_DRAINIO", \ 7206 CT_RELEASEIO, "CT_RELEASEIO", \ 7207 CT_GET_NVRAM, "CT_GET_NVRAM", \ 7208 CT_GET_MEMORY, "CT_GET_MEMORY", \ 7209 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \ 7210 CT_ADD_LEVEL, "CT_ADD_LEVEL", \ 7211 CT_NV_ZERO, "CT_NV_ZERO", \ 7212 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \ 7213 CT_THROTTLE_ON, "CT_THROTTLE_ON", \ 7214 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \ 7215 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \ 7216 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \ 7217 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \ 7218 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \ 7219 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \ 7220 CT_MONITOR, "CT_MONITOR", \ 7221 CT_GEN_MORPH, "CT_GEN_MORPH", \ 7222 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \ 7223 CT_CACHE_SET, "CT_CACHE_SET", \ 7224 CT_CACHE_STAT, "CT_CACHE_STAT", \ 7225 CT_TRACE_START, "CT_TRACE_START", \ 7226 CT_TRACE_STOP, "CT_TRACE_STOP", \ 7227 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \ 7228 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \ 7229 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \ 7230 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \ 7231 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \ 7232 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \ 7233 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \ 7234 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \ 7235 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \ 7236 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \ 7237 CT_STOP_DUMPS, "CT_STOP_DUMPS", \ 7238 CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \ 7239 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \ 7240 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \ 7241 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \ 7242 CT_READ_NAME, "CT_READ_NAME", \ 7243 CT_WRITE_NAME, "CT_WRITE_NAME", \ 7244 CT_TOSS_CACHE, "CT_TOSS_CACHE", \ 7245 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \ 7246 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \ 7247 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \ 7248 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \ 7249 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \ 7250 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \ 7251 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \ 7252 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \ 7253 CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \ 7254 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \ 7255 CT_FLUSH, "CT_FLUSH", \ 7256 CT_REBUILD, "CT_REBUILD", \ 7257 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \ 7258 CT_RESTART, "CT_RESTART", \ 7259 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \ 7260 CT_TRACE_FLAG, "CT_TRACE_FLAG", \ 7261 CT_RESTART_MORPH, "CT_RESTART_MORPH", \ 7262 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \ 7263 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \ 7264 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \ 7265 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \ 7266 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \ 7267 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \ 7268 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \ 7269 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \ 7270 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \ 7271 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \ 7272 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \ 7273 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \ 7274 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \ 7275 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \ 7276 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \ 7277 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \ 7278 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \ 7279 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \ 7280 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \ 7281 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \ 7282 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \ 7283 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \ 7284 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \ 7285 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \ 7286 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \ 7287 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \ 7288 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \ 7289 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \ 7290 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \ 7291 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \ 7292 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \ 7293 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \ 7294 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \ 7295 CT_IS_CONTAINER_MEATADATA_STANDARD, \ 7296 "CT_IS_CONTAINER_MEATADATA_STANDARD", \ 7297 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \ 7298 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \ 7299 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \ 7300 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \ 7301 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \ 7302 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \ 7303 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \ 7304 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \ 7305 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \ 7306 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \ 7307 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \ 7308 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \ 7309 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \ 7310 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \ 7311 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \ 7312 CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \ 7313 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \ 7314 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \ 7315 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE" 7316 7317 #define AAC_CL_SUBCMD_KEY_STRINGS \ 7318 CL_NULL, "CL_NULL", \ 7319 DS_INIT, "DS_INIT", \ 7320 DS_RESCAN, "DS_RESCAN", \ 7321 DS_CREATE, "DS_CREATE", \ 7322 DS_DELETE, "DS_DELETE", \ 7323 DS_ADD_DISK, "DS_ADD_DISK", \ 7324 DS_REMOVE_DISK, "DS_REMOVE_DISK", \ 7325 DS_MOVE_DISK, "DS_MOVE_DISK", \ 7326 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \ 7327 DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \ 7328 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \ 7329 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \ 7330 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \ 7331 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \ 7332 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \ 7333 DS_GET_DRIVES, "DS_GET_DRIVES", \ 7334 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \ 7335 DS_ONLINE, "DS_ONLINE", \ 7336 DS_OFFLINE, "DS_OFFLINE", \ 7337 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \ 7338 DS_FSAPRINT, "DS_FSAPRINT", \ 7339 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \ 7340 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \ 7341 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \ 7342 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \ 7343 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \ 7344 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \ 7345 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \ 7346 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \ 7347 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \ 7348 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \ 7349 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \ 7350 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \ 7351 CC_GET_BUSINFO, "CC_GET_BUSINFO", \ 7352 CC_GET_PORTINFO, "CC_GET_PORTINFO", \ 7353 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \ 7354 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \ 7355 CQ_QUORUM_OP, "CQ_QUORUM_OP" 7356 7357 #define AAC_AIF_SUBCMD_KEY_STRINGS \ 7358 AifCmdEventNotify, "AifCmdEventNotify", \ 7359 AifCmdJobProgress, "AifCmdJobProgress", \ 7360 AifCmdAPIReport, "AifCmdAPIReport", \ 7361 AifCmdDriverNotify, "AifCmdDriverNotify", \ 7362 AifReqJobList, "AifReqJobList", \ 7363 AifReqJobsForCtr, "AifReqJobsForCtr", \ 7364 AifReqJobsForScsi, "AifReqJobsForScsi", \ 7365 AifReqJobReport, "AifReqJobReport", \ 7366 AifReqTerminateJob, "AifReqTerminateJob", \ 7367 AifReqSuspendJob, "AifReqSuspendJob", \ 7368 AifReqResumeJob, "AifReqResumeJob", \ 7369 AifReqSendAPIReport, "AifReqSendAPIReport", \ 7370 AifReqAPIJobStart, "AifReqAPIJobStart", \ 7371 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \ 7372 AifReqAPIJobFinish, "AifReqAPIJobFinish" 7373 7374 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \ 7375 Reserved_IOCTL, "Reserved_IOCTL", \ 7376 GetDeviceHandle, "GetDeviceHandle", \ 7377 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \ 7378 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \ 7379 RescanBus, "RescanBus", \ 7380 GetDeviceProbeInfo, "GetDeviceProbeInfo", \ 7381 GetDeviceCapacity, "GetDeviceCapacity", \ 7382 GetContainerProbeInfo, "GetContainerProbeInfo", \ 7383 GetRequestedMemorySize, "GetRequestedMemorySize", \ 7384 GetBusInfo, "GetBusInfo", \ 7385 GetVendorSpecific, "GetVendorSpecific", \ 7386 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \ 7387 EnhancedGetBusInfo, "EnhancedGetBusInfo", \ 7388 SetupExtendedCounters, "SetupExtendedCounters", \ 7389 GetPerformanceCounters, "GetPerformanceCounters", \ 7390 ResetPerformanceCounters, "ResetPerformanceCounters", \ 7391 ReadModePage, "ReadModePage", \ 7392 WriteModePage, "WriteModePage", \ 7393 ReadDriveParameter, "ReadDriveParameter", \ 7394 WriteDriveParameter, "WriteDriveParameter", \ 7395 ResetAdapter, "ResetAdapter", \ 7396 ResetBus, "ResetBus", \ 7397 ResetBusDevice, "ResetBusDevice", \ 7398 ExecuteSrb, "ExecuteSrb", \ 7399 Create_IO_Task, "Create_IO_Task", \ 7400 Delete_IO_Task, "Delete_IO_Task", \ 7401 Get_IO_Task_Info, "Get_IO_Task_Info", \ 7402 Check_Task_Progress, "Check_Task_Progress", \ 7403 InjectError, "InjectError", \ 7404 GetDeviceDefectCounts, "GetDeviceDefectCounts", \ 7405 GetDeviceDefectInfo, "GetDeviceDefectInfo", \ 7406 GetDeviceStatus, "GetDeviceStatus", \ 7407 ClearDeviceStatus, "ClearDeviceStatus", \ 7408 DiskSpinControl, "DiskSpinControl", \ 7409 DiskSmartControl, "DiskSmartControl", \ 7410 WriteSame, "WriteSame", \ 7411 ReadWriteLong, "ReadWriteLong", \ 7412 FormatUnit, "FormatUnit", \ 7413 TargetDeviceControl, "TargetDeviceControl", \ 7414 TargetChannelControl, "TargetChannelControl", \ 7415 FlashNewCode, "FlashNewCode", \ 7416 DiskCheck, "DiskCheck", \ 7417 RequestSense, "RequestSense", \ 7418 DiskPERControl, "DiskPERControl", \ 7419 Read10, "Read10", \ 7420 Write10, "Write10" 7421 7422 #define AAC_AIFEN_KEY_STRINGS \ 7423 AifEnGeneric, "Generic", \ 7424 AifEnTaskComplete, "TaskComplete", \ 7425 AifEnConfigChange, "Config change", \ 7426 AifEnContainerChange, "Container change", \ 7427 AifEnDeviceFailure, "device failed", \ 7428 AifEnMirrorFailover, "Mirror failover", \ 7429 AifEnContainerEvent, "container event", \ 7430 AifEnFileSystemChange, "File system changed", \ 7431 AifEnConfigPause, "Container pause event", \ 7432 AifEnConfigResume, "Container resume event", \ 7433 AifEnFailoverChange, "Failover space assignment changed", \ 7434 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \ 7435 AifEnEnclosureManagement, "Enclosure management event", \ 7436 AifEnBatteryEvent, "battery event", \ 7437 AifEnAddContainer, "Add container", \ 7438 AifEnDeleteContainer, "Delete container", \ 7439 AifEnSMARTEvent, "SMART Event", \ 7440 AifEnBatteryNeedsRecond, "battery needs reconditioning", \ 7441 AifEnClusterEvent, "cluster event", \ 7442 AifEnDiskSetEvent, "disk set event occured", \ 7443 AifDenMorphComplete, "morph operation completed", \ 7444 AifDenVolumeExtendComplete, "VolumeExtendComplete" 7445 7446 struct aac_key_strings { 7447 int key; 7448 char *message; 7449 }; 7450 7451 extern struct scsi_key_strings scsi_cmds[]; 7452 7453 static struct aac_key_strings aac_fib_cmds[] = { 7454 AAC_FIB_CMD_KEY_STRINGS, 7455 -1, NULL 7456 }; 7457 7458 static struct aac_key_strings aac_ctvm_subcmds[] = { 7459 AAC_CTVM_SUBCMD_KEY_STRINGS, 7460 -1, NULL 7461 }; 7462 7463 static struct aac_key_strings aac_ct_subcmds[] = { 7464 AAC_CT_SUBCMD_KEY_STRINGS, 7465 -1, NULL 7466 }; 7467 7468 static struct aac_key_strings aac_cl_subcmds[] = { 7469 AAC_CL_SUBCMD_KEY_STRINGS, 7470 -1, NULL 7471 }; 7472 7473 static struct aac_key_strings aac_aif_subcmds[] = { 7474 AAC_AIF_SUBCMD_KEY_STRINGS, 7475 -1, NULL 7476 }; 7477 7478 static struct aac_key_strings aac_ioctl_subcmds[] = { 7479 AAC_IOCTL_SUBCMD_KEY_STRINGS, 7480 -1, NULL 7481 }; 7482 7483 static struct aac_key_strings aac_aifens[] = { 7484 AAC_AIFEN_KEY_STRINGS, 7485 -1, NULL 7486 }; 7487 7488 /* 7489 * The following function comes from Adaptec: 7490 * 7491 * Get the firmware print buffer parameters from the firmware, 7492 * if the command was successful map in the address. 7493 */ 7494 static int 7495 aac_get_fw_debug_buffer(struct aac_softstate *softs) 7496 { 7497 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP, 7498 0, 0, 0, 0, NULL) == AACOK) { 7499 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1); 7500 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2); 7501 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3); 7502 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4); 7503 7504 if (mondrv_buf_size) { 7505 uint32_t offset = mondrv_buf_paddrl - \ 7506 softs->pci_mem_base_paddr; 7507 7508 /* 7509 * See if the address is already mapped in, and 7510 * if so set it up from the base address 7511 */ 7512 if ((mondrv_buf_paddrh == 0) && 7513 (offset + mondrv_buf_size < softs->map_size)) { 7514 mutex_enter(&aac_prt_mutex); 7515 softs->debug_buf_offset = offset; 7516 softs->debug_header_size = mondrv_hdr_size; 7517 softs->debug_buf_size = mondrv_buf_size; 7518 softs->debug_fw_flags = 0; 7519 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 7520 mutex_exit(&aac_prt_mutex); 7521 7522 return (AACOK); 7523 } 7524 } 7525 } 7526 return (AACERR); 7527 } 7528 7529 int 7530 aac_dbflag_on(struct aac_softstate *softs, int flag) 7531 { 7532 int debug_flags = softs ? softs->debug_flags : aac_debug_flags; 7533 7534 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \ 7535 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag)); 7536 } 7537 7538 static void 7539 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader) 7540 { 7541 if (noheader) { 7542 if (sl) { 7543 aac_fmt[0] = sl; 7544 cmn_err(lev, aac_fmt, aac_prt_buf); 7545 } else { 7546 cmn_err(lev, &aac_fmt[1], aac_prt_buf); 7547 } 7548 } else { 7549 if (sl) { 7550 aac_fmt_header[0] = sl; 7551 cmn_err(lev, aac_fmt_header, 7552 softs->vendor_name, softs->instance, 7553 aac_prt_buf); 7554 } else { 7555 cmn_err(lev, &aac_fmt_header[1], 7556 softs->vendor_name, softs->instance, 7557 aac_prt_buf); 7558 } 7559 } 7560 } 7561 7562 /* 7563 * The following function comes from Adaptec: 7564 * 7565 * Format and print out the data passed in to UART or console 7566 * as specified by debug flags. 7567 */ 7568 void 7569 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...) 7570 { 7571 va_list args; 7572 char sl; /* system log character */ 7573 7574 mutex_enter(&aac_prt_mutex); 7575 /* Set up parameters and call sprintf function to format the data */ 7576 if (strchr("^!?", fmt[0]) == NULL) { 7577 sl = 0; 7578 } else { 7579 sl = fmt[0]; 7580 fmt++; 7581 } 7582 va_start(args, fmt); 7583 (void) vsprintf(aac_prt_buf, fmt, args); 7584 va_end(args); 7585 7586 /* Make sure the softs structure has been passed in for this section */ 7587 if (softs) { 7588 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) && 7589 /* If we are set up for a Firmware print */ 7590 (softs->debug_buf_size)) { 7591 uint32_t count, i; 7592 7593 /* Make sure the string size is within boundaries */ 7594 count = strlen(aac_prt_buf); 7595 if (count > softs->debug_buf_size) 7596 count = (uint16_t)softs->debug_buf_size; 7597 7598 /* 7599 * Wait for no more than AAC_PRINT_TIMEOUT for the 7600 * previous message length to clear (the handshake). 7601 */ 7602 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) { 7603 if (!PCI_MEM_GET32(softs, 7604 softs->debug_buf_offset + \ 7605 AAC_FW_DBG_STRLEN_OFFSET)) 7606 break; 7607 7608 drv_usecwait(1000); 7609 } 7610 7611 /* 7612 * If the length is clear, copy over the message, the 7613 * flags, and the length. Make sure the length is the 7614 * last because that is the signal for the Firmware to 7615 * pick it up. 7616 */ 7617 if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \ 7618 AAC_FW_DBG_STRLEN_OFFSET)) { 7619 PCI_MEM_REP_PUT8(softs, 7620 softs->debug_buf_offset + \ 7621 softs->debug_header_size, 7622 aac_prt_buf, count); 7623 PCI_MEM_PUT32(softs, 7624 softs->debug_buf_offset + \ 7625 AAC_FW_DBG_FLAGS_OFFSET, 7626 softs->debug_fw_flags); 7627 PCI_MEM_PUT32(softs, 7628 softs->debug_buf_offset + \ 7629 AAC_FW_DBG_STRLEN_OFFSET, count); 7630 } else { 7631 cmn_err(CE_WARN, "UART output fail"); 7632 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 7633 } 7634 } 7635 7636 /* 7637 * If the Kernel Debug Print flag is set, send it off 7638 * to the Kernel Debugger 7639 */ 7640 if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT) 7641 aac_cmn_err(softs, lev, sl, 7642 (softs->debug_flags & AACDB_FLAGS_NO_HEADERS)); 7643 } else { 7644 /* Driver not initialized yet, no firmware or header output */ 7645 if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT) 7646 aac_cmn_err(softs, lev, sl, 1); 7647 } 7648 mutex_exit(&aac_prt_mutex); 7649 } 7650 7651 /* 7652 * Translate command number to description string 7653 */ 7654 static char * 7655 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist) 7656 { 7657 int i; 7658 7659 for (i = 0; cmdlist[i].key != -1; i++) { 7660 if (cmd == cmdlist[i].key) 7661 return (cmdlist[i].message); 7662 } 7663 return (NULL); 7664 } 7665 7666 static void 7667 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 7668 { 7669 struct scsi_pkt *pkt = acp->pkt; 7670 struct scsi_address *ap = &pkt->pkt_address; 7671 int is_pd = 0; 7672 int ctl = ddi_get_instance(softs->devinfo_p); 7673 int tgt = ap->a_target; 7674 int lun = ap->a_lun; 7675 union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp; 7676 uchar_t cmd = cdbp->scc_cmd; 7677 char *desc; 7678 7679 if (tgt >= AAC_MAX_LD) { 7680 is_pd = 1; 7681 ctl = ((struct aac_nondasd *)acp->dvp)->bus; 7682 tgt = ((struct aac_nondasd *)acp->dvp)->tid; 7683 lun = 0; 7684 } 7685 7686 if ((desc = aac_cmd_name(cmd, 7687 (struct aac_key_strings *)scsi_cmds)) == NULL) { 7688 aac_printf(softs, CE_NOTE, 7689 "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s", 7690 cmd, ctl, tgt, lun, is_pd ? "(pd)" : ""); 7691 return; 7692 } 7693 7694 switch (cmd) { 7695 case SCMD_READ: 7696 case SCMD_WRITE: 7697 aac_printf(softs, CE_NOTE, 7698 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 7699 desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp), 7700 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 7701 ctl, tgt, lun, is_pd ? "(pd)" : ""); 7702 break; 7703 case SCMD_READ_G1: 7704 case SCMD_WRITE_G1: 7705 aac_printf(softs, CE_NOTE, 7706 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 7707 desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp), 7708 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 7709 ctl, tgt, lun, is_pd ? "(pd)" : ""); 7710 break; 7711 case SCMD_READ_G4: 7712 case SCMD_WRITE_G4: 7713 aac_printf(softs, CE_NOTE, 7714 "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s", 7715 desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp), 7716 GETG4COUNT(cdbp), 7717 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 7718 ctl, tgt, lun, is_pd ? "(pd)" : ""); 7719 break; 7720 case SCMD_READ_G5: 7721 case SCMD_WRITE_G5: 7722 aac_printf(softs, CE_NOTE, 7723 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 7724 desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp), 7725 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 7726 ctl, tgt, lun, is_pd ? "(pd)" : ""); 7727 break; 7728 default: 7729 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s", 7730 desc, ctl, tgt, lun, is_pd ? "(pd)" : ""); 7731 } 7732 } 7733 7734 void 7735 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp) 7736 { 7737 struct aac_cmd *acp = slotp->acp; 7738 struct aac_fib *fibp = slotp->fibp; 7739 ddi_acc_handle_t acc = slotp->fib_acc_handle; 7740 uint16_t fib_size; 7741 uint32_t fib_cmd, sub_cmd; 7742 char *cmdstr, *subcmdstr; 7743 char *caller; 7744 int i; 7745 7746 if (acp) { 7747 if (!(softs->debug_fib_flags & acp->fib_flags)) 7748 return; 7749 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD) 7750 caller = "SCMD"; 7751 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL) 7752 caller = "IOCTL"; 7753 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB) 7754 caller = "SRB"; 7755 else 7756 return; 7757 } else { 7758 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC)) 7759 return; 7760 caller = "SYNC"; 7761 } 7762 7763 fib_cmd = ddi_get16(acc, &fibp->Header.Command); 7764 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds); 7765 sub_cmd = (uint32_t)-1; 7766 subcmdstr = NULL; 7767 7768 /* Print FIB header */ 7769 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) { 7770 aac_printf(softs, CE_NOTE, "FIB> from %s", caller); 7771 aac_printf(softs, CE_NOTE, " XferState %d", 7772 ddi_get32(acc, &fibp->Header.XferState)); 7773 aac_printf(softs, CE_NOTE, " Command %d", 7774 ddi_get16(acc, &fibp->Header.Command)); 7775 aac_printf(softs, CE_NOTE, " StructType %d", 7776 ddi_get8(acc, &fibp->Header.StructType)); 7777 aac_printf(softs, CE_NOTE, " Flags 0x%x", 7778 ddi_get8(acc, &fibp->Header.Flags)); 7779 aac_printf(softs, CE_NOTE, " Size %d", 7780 ddi_get16(acc, &fibp->Header.Size)); 7781 aac_printf(softs, CE_NOTE, " SenderSize %d", 7782 ddi_get16(acc, &fibp->Header.SenderSize)); 7783 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x", 7784 ddi_get32(acc, &fibp->Header.SenderFibAddress)); 7785 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x", 7786 ddi_get32(acc, &fibp->Header.ReceiverFibAddress)); 7787 aac_printf(softs, CE_NOTE, " SenderData 0x%x", 7788 ddi_get32(acc, &fibp->Header.SenderData)); 7789 } 7790 7791 /* Print FIB data */ 7792 switch (fib_cmd) { 7793 case ContainerCommand: 7794 sub_cmd = ddi_get32(acc, 7795 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0])); 7796 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds); 7797 if (subcmdstr == NULL) 7798 break; 7799 7800 switch (sub_cmd) { 7801 case VM_ContainerConfig: { 7802 struct aac_Container *pContainer = 7803 (struct aac_Container *)fibp->data; 7804 7805 fib_cmd = sub_cmd; 7806 cmdstr = subcmdstr; 7807 sub_cmd = (uint32_t)-1; 7808 subcmdstr = NULL; 7809 7810 sub_cmd = ddi_get32(acc, 7811 &pContainer->CTCommand.command); 7812 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds); 7813 if (subcmdstr == NULL) 7814 break; 7815 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)", 7816 subcmdstr, 7817 ddi_get32(acc, &pContainer->CTCommand.param[0]), 7818 ddi_get32(acc, &pContainer->CTCommand.param[1]), 7819 ddi_get32(acc, &pContainer->CTCommand.param[2])); 7820 return; 7821 } 7822 7823 case VM_Ioctl: 7824 fib_cmd = sub_cmd; 7825 cmdstr = subcmdstr; 7826 sub_cmd = (uint32_t)-1; 7827 subcmdstr = NULL; 7828 7829 sub_cmd = ddi_get32(acc, 7830 (void *)&(((uint32_t *)(void *)&fibp->data[0])[4])); 7831 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds); 7832 break; 7833 7834 case VM_CtBlockRead: 7835 case VM_CtBlockWrite: { 7836 struct aac_blockread *br = 7837 (struct aac_blockread *)fibp->data; 7838 struct aac_sg_table *sg = &br->SgMap; 7839 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 7840 7841 aac_printf(softs, CE_NOTE, 7842 "FIB> %s Container %d 0x%x/%d", subcmdstr, 7843 ddi_get32(acc, &br->ContainerId), 7844 ddi_get32(acc, &br->BlockNumber), 7845 ddi_get32(acc, &br->ByteCount)); 7846 for (i = 0; i < sgcount; i++) 7847 aac_printf(softs, CE_NOTE, 7848 " %d: 0x%08x/%d", i, 7849 ddi_get32(acc, &sg->SgEntry[i].SgAddress), 7850 ddi_get32(acc, &sg->SgEntry[i]. \ 7851 SgByteCount)); 7852 return; 7853 } 7854 } 7855 break; 7856 7857 case ContainerCommand64: { 7858 struct aac_blockread64 *br = 7859 (struct aac_blockread64 *)fibp->data; 7860 struct aac_sg_table64 *sg = &br->SgMap64; 7861 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 7862 uint64_t sgaddr; 7863 7864 sub_cmd = br->Command; 7865 subcmdstr = NULL; 7866 if (sub_cmd == VM_CtHostRead64) 7867 subcmdstr = "VM_CtHostRead64"; 7868 else if (sub_cmd == VM_CtHostWrite64) 7869 subcmdstr = "VM_CtHostWrite64"; 7870 else 7871 break; 7872 7873 aac_printf(softs, CE_NOTE, 7874 "FIB> %s Container %d 0x%x/%d", subcmdstr, 7875 ddi_get16(acc, &br->ContainerId), 7876 ddi_get32(acc, &br->BlockNumber), 7877 ddi_get16(acc, &br->SectorCount)); 7878 for (i = 0; i < sgcount; i++) { 7879 sgaddr = ddi_get64(acc, 7880 &sg->SgEntry64[i].SgAddress); 7881 aac_printf(softs, CE_NOTE, 7882 " %d: 0x%08x.%08x/%d", i, 7883 AAC_MS32(sgaddr), AAC_LS32(sgaddr), 7884 ddi_get32(acc, &sg->SgEntry64[i]. \ 7885 SgByteCount)); 7886 } 7887 return; 7888 } 7889 7890 case RawIo: { 7891 struct aac_raw_io *io = (struct aac_raw_io *)fibp->data; 7892 struct aac_sg_tableraw *sg = &io->SgMapRaw; 7893 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 7894 uint64_t sgaddr; 7895 7896 aac_printf(softs, CE_NOTE, 7897 "FIB> RawIo Container %d 0x%llx/%d 0x%x", 7898 ddi_get16(acc, &io->ContainerId), 7899 ddi_get64(acc, &io->BlockNumber), 7900 ddi_get32(acc, &io->ByteCount), 7901 ddi_get16(acc, &io->Flags)); 7902 for (i = 0; i < sgcount; i++) { 7903 sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress); 7904 aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i, 7905 AAC_MS32(sgaddr), AAC_LS32(sgaddr), 7906 ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount)); 7907 } 7908 return; 7909 } 7910 7911 case ClusterCommand: 7912 sub_cmd = ddi_get32(acc, 7913 (void *)&(((uint32_t *)(void *)fibp->data)[0])); 7914 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds); 7915 break; 7916 7917 case AifRequest: 7918 sub_cmd = ddi_get32(acc, 7919 (void *)&(((uint32_t *)(void *)fibp->data)[0])); 7920 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds); 7921 break; 7922 7923 default: 7924 break; 7925 } 7926 7927 fib_size = ddi_get16(acc, &(fibp->Header.Size)); 7928 if (subcmdstr) 7929 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 7930 subcmdstr, fib_size); 7931 else if (cmdstr && sub_cmd == (uint32_t)-1) 7932 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 7933 cmdstr, fib_size); 7934 else if (cmdstr) 7935 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d", 7936 cmdstr, sub_cmd, fib_size); 7937 else 7938 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d", 7939 fib_cmd, fib_size); 7940 } 7941 7942 static void 7943 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif) 7944 { 7945 int aif_command; 7946 uint32_t aif_seqnumber; 7947 int aif_en_type; 7948 char *str; 7949 7950 aif_command = LE_32(aif->command); 7951 aif_seqnumber = LE_32(aif->seqNumber); 7952 aif_en_type = LE_32(aif->data.EN.type); 7953 7954 switch (aif_command) { 7955 case AifCmdEventNotify: 7956 str = aac_cmd_name(aif_en_type, aac_aifens); 7957 if (str) 7958 aac_printf(softs, CE_NOTE, "AIF! %s", str); 7959 else 7960 aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)", 7961 aif_en_type); 7962 break; 7963 7964 case AifCmdJobProgress: 7965 switch (LE_32(aif->data.PR[0].status)) { 7966 case AifJobStsSuccess: 7967 str = "success"; break; 7968 case AifJobStsFinished: 7969 str = "finished"; break; 7970 case AifJobStsAborted: 7971 str = "aborted"; break; 7972 case AifJobStsFailed: 7973 str = "failed"; break; 7974 case AifJobStsSuspended: 7975 str = "suspended"; break; 7976 case AifJobStsRunning: 7977 str = "running"; break; 7978 default: 7979 str = "unknown"; break; 7980 } 7981 aac_printf(softs, CE_NOTE, 7982 "AIF! JobProgress (%d) - %s (%d, %d)", 7983 aif_seqnumber, str, 7984 LE_32(aif->data.PR[0].currentTick), 7985 LE_32(aif->data.PR[0].finalTick)); 7986 break; 7987 7988 case AifCmdAPIReport: 7989 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)", 7990 aif_seqnumber); 7991 break; 7992 7993 case AifCmdDriverNotify: 7994 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)", 7995 aif_seqnumber); 7996 break; 7997 7998 default: 7999 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)", 8000 aif_command, aif_seqnumber); 8001 break; 8002 } 8003 } 8004 8005 #endif /* DEBUG */ 8006