1 /* 2 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6 /* 7 * Copyright 2005-08 Adaptec, Inc. 8 * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner 9 * Copyright (c) 2000 Michael Smith 10 * Copyright (c) 2001 Scott Long 11 * Copyright (c) 2000 BSDi 12 * All rights reserved. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 #include <sys/modctl.h> 36 #include <sys/conf.h> 37 #include <sys/cmn_err.h> 38 #include <sys/ddi.h> 39 #include <sys/devops.h> 40 #include <sys/pci.h> 41 #include <sys/types.h> 42 #include <sys/ddidmareq.h> 43 #include <sys/scsi/scsi.h> 44 #include <sys/ksynch.h> 45 #include <sys/sunddi.h> 46 #include <sys/byteorder.h> 47 #include "aac_regs.h" 48 #include "aac.h" 49 50 /* 51 * FMA header files 52 */ 53 #include <sys/ddifm.h> 54 #include <sys/fm/protocol.h> 55 #include <sys/fm/util.h> 56 #include <sys/fm/io/ddi.h> 57 58 /* 59 * For minor nodes created by the SCSA framework, minor numbers are 60 * formed by left-shifting instance by INST_MINOR_SHIFT and OR in a 61 * number less than 64. 62 * 63 * To support cfgadm, need to confirm the SCSA framework by creating 64 * devctl/scsi and driver specific minor nodes under SCSA format, 65 * and calling scsi_hba_xxx() functions aacordingly. 66 */ 67 68 #define AAC_MINOR 32 69 #define INST2AAC(x) (((x) << INST_MINOR_SHIFT) | AAC_MINOR) 70 #define AAC_SCSA_MINOR(x) ((x) & TRAN_MINOR_MASK) 71 #define AAC_IS_SCSA_NODE(x) ((x) == DEVCTL_MINOR || (x) == SCSI_MINOR) 72 73 #define SD2TRAN(sd) ((sd)->sd_address.a_hba_tran) 74 #define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private) 75 #define AAC_DIP2TRAN(dip) ((scsi_hba_tran_t *)ddi_get_driver_private(dip)) 76 #define AAC_DIP2SOFTS(dip) (AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip))) 77 #define SD2AAC(sd) (AAC_TRAN2SOFTS(SD2TRAN(sd))) 78 #define AAC_PD(t) ((t) - AAC_MAX_LD) 79 #define AAC_DEV(softs, t) (((t) < AAC_MAX_LD) ? \ 80 &(softs)->containers[(t)].dev : \ 81 ((t) < AAC_MAX_DEV(softs)) ? 
\ 82 &(softs)->nondasds[AAC_PD(t)].dev : NULL) 83 #define AAC_DEVCFG_BEGIN(softs, tgt) \ 84 aac_devcfg((softs), (tgt), 1) 85 #define AAC_DEVCFG_END(softs, tgt) \ 86 aac_devcfg((softs), (tgt), 0) 87 #define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private) 88 #define AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \ 89 if (!(cond)) { \ 90 int count = (timeout) * 10; \ 91 while (count) { \ 92 drv_usecwait(100); \ 93 if (cond) \ 94 break; \ 95 count--; \ 96 } \ 97 (timeout) = (count + 9) / 10; \ 98 } \ 99 } 100 101 #define AAC_SENSE_DATA_DESCR_LEN \ 102 (sizeof (struct scsi_descr_sense_hdr) + \ 103 sizeof (struct scsi_information_sense_descr)) 104 #define AAC_ARQ64_LENGTH \ 105 (sizeof (struct scsi_arq_status) + \ 106 AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH) 107 108 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 109 #define AAC_GETGXADDR(cmdlen, cdbp) \ 110 ((cmdlen == 6) ? GETG0ADDR(cdbp) : \ 111 (cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \ 112 ((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp)) 113 114 #define AAC_CDB_INQUIRY_CMDDT 0x02 115 #define AAC_CDB_INQUIRY_EVPD 0x01 116 #define AAC_VPD_PAGE_CODE 1 117 #define AAC_VPD_PAGE_LENGTH 3 118 #define AAC_VPD_PAGE_DATA 4 119 #define AAC_VPD_ID_CODESET 0 120 #define AAC_VPD_ID_TYPE 1 121 #define AAC_VPD_ID_LENGTH 3 122 #define AAC_VPD_ID_DATA 4 123 124 #define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08 125 #define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08 126 #define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0 127 /* 00b - peripheral device addressing method */ 128 #define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00 129 /* 01b - flat space addressing method */ 130 #define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40 131 /* 10b - logical unit addressing method */ 132 #define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80 133 134 /* Return the size of FIB with data part type data_type */ 135 #define AAC_FIB_SIZEOF(data_type) \ 136 (sizeof (struct aac_fib_header) + sizeof (data_type)) 137 /* Return the container size defined in mir */ 138 #define AAC_MIR_SIZE(softs, acc, mir) \ 139 (((softs)->flags & AAC_FLAGS_LBA_64BIT) ? 
\ 140 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \ 141 ((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \ 142 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity)) 143 144 /* The last entry of aac_cards[] is for unknown cards */ 145 #define AAC_UNKNOWN_CARD \ 146 (sizeof (aac_cards) / sizeof (struct aac_card_type) - 1) 147 #define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD) 148 #define BUF_IS_READ(bp) ((bp)->b_flags & B_READ) 149 #define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL) 150 #define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC)) 151 152 #define PCI_MEM_GET32(softs, off) \ 153 ddi_get32((softs)->pci_mem_handle, \ 154 (void *)((softs)->pci_mem_base_vaddr + (off))) 155 #define PCI_MEM_PUT32(softs, off, val) \ 156 ddi_put32((softs)->pci_mem_handle, \ 157 (void *)((softs)->pci_mem_base_vaddr + (off)), \ 158 (uint32_t)(val)) 159 #define PCI_MEM_GET16(softs, off) \ 160 ddi_get16((softs)->pci_mem_handle, \ 161 (void *)((softs)->pci_mem_base_vaddr + (off))) 162 #define PCI_MEM_PUT16(softs, off, val) \ 163 ddi_put16((softs)->pci_mem_handle, \ 164 (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val)) 165 /* Write host data at valp to device mem[off] repeatedly count times */ 166 #define PCI_MEM_REP_PUT8(softs, off, valp, count) \ 167 ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 168 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 169 count, DDI_DEV_AUTOINCR) 170 /* Read device data at mem[off] to host addr valp repeatedly count times */ 171 #define PCI_MEM_REP_GET8(softs, off, valp, count) \ 172 ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 173 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 174 count, DDI_DEV_AUTOINCR) 175 #define AAC_GET_FIELD8(acc, d, s, field) \ 176 (d)->field = ddi_get8(acc, (uint8_t *)&(s)->field) 177 #define AAC_GET_FIELD32(acc, d, s, field) \ 178 (d)->field = ddi_get32(acc, (uint32_t *)&(s)->field) 179 #define AAC_GET_FIELD64(acc, d, s, field) \ 180 (d)->field = ddi_get64(acc, (uint64_t *)&(s)->field) 181 #define AAC_REP_GET_FIELD8(acc, d, s, field, r) \ 182 ddi_rep_get8((acc), (uint8_t *)&(d)->field, \ 183 (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 184 #define AAC_REP_GET_FIELD32(acc, d, s, field, r) \ 185 ddi_rep_get32((acc), (uint32_t *)&(d)->field, \ 186 (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 187 188 #define AAC_ENABLE_INTR(softs) { \ 189 if (softs->flags & AAC_FLAGS_NEW_COMM) \ 190 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \ 191 else \ 192 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \ 193 } 194 195 #define AAC_DISABLE_INTR(softs) PCI_MEM_PUT32(softs, AAC_OIMR, ~0) 196 #define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask) 197 #define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR) 198 #define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val) 199 #define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE) 200 #define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val) 201 #define AAC_FWSTATUS_GET(softs) \ 202 ((softs)->aac_if.aif_get_fwstatus(softs)) 203 #define AAC_MAILBOX_GET(softs, mb) \ 204 ((softs)->aac_if.aif_get_mailbox((softs), (mb))) 205 #define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \ 206 ((softs)->aac_if.aif_set_mailbox((softs), (cmd), \ 207 (arg0), (arg1), (arg2), (arg3))) 208 209 #define AAC_THROTTLE_DRAIN -1 210 211 #define AAC_QUIESCE_TICK 1 /* 1 second */ 212 #define AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */ 213 #define AAC_DEFAULT_TICK 10 /* 10 seconds */ 214 #define AAC_SYNC_TICK (30*60) /* 30 
minutes */ 215 216 /* Poll time for aac_do_poll_io() */ 217 #define AAC_POLL_TIME 60 /* 60 seconds */ 218 219 /* IOP reset */ 220 #define AAC_IOP_RESET_SUCCEED 0 /* IOP reset succeed */ 221 #define AAC_IOP_RESET_FAILED -1 /* IOP reset failed */ 222 #define AAC_IOP_RESET_ABNORMAL -2 /* Reset operation abnormal */ 223 224 /* 225 * Hardware access functions 226 */ 227 static int aac_rx_get_fwstatus(struct aac_softstate *); 228 static int aac_rx_get_mailbox(struct aac_softstate *, int); 229 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 230 uint32_t, uint32_t, uint32_t); 231 static int aac_rkt_get_fwstatus(struct aac_softstate *); 232 static int aac_rkt_get_mailbox(struct aac_softstate *, int); 233 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 234 uint32_t, uint32_t, uint32_t); 235 236 /* 237 * SCSA function prototypes 238 */ 239 static int aac_attach(dev_info_t *, ddi_attach_cmd_t); 240 static int aac_detach(dev_info_t *, ddi_detach_cmd_t); 241 static int aac_reset(dev_info_t *, ddi_reset_cmd_t); 242 static int aac_quiesce(dev_info_t *); 243 244 /* 245 * Interrupt handler functions 246 */ 247 static int aac_query_intrs(struct aac_softstate *, int); 248 static int aac_add_intrs(struct aac_softstate *); 249 static void aac_remove_intrs(struct aac_softstate *); 250 static uint_t aac_intr_old(caddr_t); 251 static uint_t aac_intr_new(caddr_t); 252 static uint_t aac_softintr(caddr_t); 253 254 /* 255 * Internal functions in attach 256 */ 257 static int aac_check_card_type(struct aac_softstate *); 258 static int aac_check_firmware(struct aac_softstate *); 259 static int aac_common_attach(struct aac_softstate *); 260 static void aac_common_detach(struct aac_softstate *); 261 static int aac_probe_containers(struct aac_softstate *); 262 static int aac_alloc_comm_space(struct aac_softstate *); 263 static int aac_setup_comm_space(struct aac_softstate *); 264 static void aac_free_comm_space(struct aac_softstate *); 265 static int aac_hba_setup(struct aac_softstate *); 266 267 /* 268 * Sync FIB operation functions 269 */ 270 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t, 271 uint32_t, uint32_t, uint32_t, uint32_t *); 272 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t); 273 274 /* 275 * Command queue operation functions 276 */ 277 static void aac_cmd_initq(struct aac_cmd_queue *); 278 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *); 279 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *); 280 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *); 281 282 /* 283 * FIB queue operation functions 284 */ 285 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t); 286 static int aac_fib_dequeue(struct aac_softstate *, int, int *); 287 288 /* 289 * Slot operation functions 290 */ 291 static int aac_create_slots(struct aac_softstate *); 292 static void aac_destroy_slots(struct aac_softstate *); 293 static void aac_alloc_fibs(struct aac_softstate *); 294 static void aac_destroy_fibs(struct aac_softstate *); 295 static struct aac_slot *aac_get_slot(struct aac_softstate *); 296 static void aac_release_slot(struct aac_softstate *, struct aac_slot *); 297 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *); 298 static void aac_free_fib(struct aac_slot *); 299 300 /* 301 * Internal functions 302 */ 303 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_slot *, 304 uint16_t, uint16_t); 305 static void aac_cmd_fib_rawio(struct 
aac_softstate *, struct aac_cmd *); 306 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *); 307 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *); 308 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *); 309 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *); 310 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *); 311 static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *); 312 static void aac_start_waiting_io(struct aac_softstate *); 313 static void aac_drain_comp_q(struct aac_softstate *); 314 int aac_do_io(struct aac_softstate *, struct aac_cmd *); 315 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *); 316 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *); 317 static int aac_send_command(struct aac_softstate *, struct aac_slot *); 318 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *); 319 static int aac_dma_sync_ac(struct aac_cmd *); 320 static int aac_shutdown(struct aac_softstate *); 321 static int aac_reset_adapter(struct aac_softstate *); 322 static int aac_do_quiesce(struct aac_softstate *softs); 323 static int aac_do_unquiesce(struct aac_softstate *softs); 324 static void aac_unhold_bus(struct aac_softstate *, int); 325 static void aac_set_throttle(struct aac_softstate *, struct aac_device *, 326 int, int); 327 328 /* 329 * Adapter Initiated FIB handling function 330 */ 331 static int aac_handle_aif(struct aac_softstate *, struct aac_fib *); 332 333 /* 334 * Timeout handling thread function 335 */ 336 static void aac_daemon(void *); 337 338 /* 339 * IOCTL interface related functions 340 */ 341 static int aac_open(dev_t *, int, int, cred_t *); 342 static int aac_close(dev_t, int, int, cred_t *); 343 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); 344 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int); 345 346 /* 347 * FMA Prototypes 348 */ 349 static void aac_fm_init(struct aac_softstate *); 350 static void aac_fm_fini(struct aac_softstate *); 351 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *); 352 int aac_check_acc_handle(ddi_acc_handle_t); 353 int aac_check_dma_handle(ddi_dma_handle_t); 354 void aac_fm_ereport(struct aac_softstate *, char *); 355 356 /* 357 * Auto enumeration functions 358 */ 359 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t); 360 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t, 361 void *, dev_info_t **); 362 static int aac_dr_event(struct aac_softstate *, int, int, int); 363 364 #ifdef DEBUG 365 /* 366 * UART debug output support 367 */ 368 369 #define AAC_PRINT_BUFFER_SIZE 512 370 #define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. 
*/ 371 372 #define AAC_FW_DBG_STRLEN_OFFSET 0x00 373 #define AAC_FW_DBG_FLAGS_OFFSET 0x04 374 #define AAC_FW_DBG_BLED_OFFSET 0x08 375 376 static int aac_get_fw_debug_buffer(struct aac_softstate *); 377 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *); 378 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *); 379 380 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE]; 381 static char aac_fmt[] = " %s"; 382 static char aac_fmt_header[] = " %s.%d: %s"; 383 static kmutex_t aac_prt_mutex; 384 385 /* 386 * Debug flags to be put into the softstate flags field 387 * when initialized 388 */ 389 uint32_t aac_debug_flags = 390 /* AACDB_FLAGS_KERNEL_PRINT | */ 391 /* AACDB_FLAGS_FW_PRINT | */ 392 /* AACDB_FLAGS_MISC | */ 393 /* AACDB_FLAGS_FUNC1 | */ 394 /* AACDB_FLAGS_FUNC2 | */ 395 /* AACDB_FLAGS_SCMD | */ 396 /* AACDB_FLAGS_AIF | */ 397 /* AACDB_FLAGS_FIB | */ 398 /* AACDB_FLAGS_IOCTL | */ 399 0; 400 uint32_t aac_debug_fib_flags = 401 /* AACDB_FLAGS_FIB_RW | */ 402 /* AACDB_FLAGS_FIB_IOCTL | */ 403 /* AACDB_FLAGS_FIB_SRB | */ 404 /* AACDB_FLAGS_FIB_SYNC | */ 405 /* AACDB_FLAGS_FIB_HEADER | */ 406 /* AACDB_FLAGS_FIB_TIMEOUT | */ 407 0; 408 409 #endif /* DEBUG */ 410 411 static struct cb_ops aac_cb_ops = { 412 aac_open, /* open */ 413 aac_close, /* close */ 414 nodev, /* strategy */ 415 nodev, /* print */ 416 nodev, /* dump */ 417 nodev, /* read */ 418 nodev, /* write */ 419 aac_ioctl, /* ioctl */ 420 nodev, /* devmap */ 421 nodev, /* mmap */ 422 nodev, /* segmap */ 423 nochpoll, /* poll */ 424 ddi_prop_op, /* cb_prop_op */ 425 NULL, /* streamtab */ 426 D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 427 CB_REV, /* cb_rev */ 428 nodev, /* async I/O read entry point */ 429 nodev /* async I/O write entry point */ 430 }; 431 432 static struct dev_ops aac_dev_ops = { 433 DEVO_REV, 434 0, 435 nodev, 436 nulldev, 437 nulldev, 438 aac_attach, 439 aac_detach, 440 aac_reset, 441 &aac_cb_ops, 442 NULL, 443 NULL, 444 aac_quiesce, 445 }; 446 447 static struct modldrv aac_modldrv = { 448 &mod_driverops, 449 "AAC Driver " AAC_DRIVER_VERSION, 450 &aac_dev_ops, 451 }; 452 453 static struct modlinkage aac_modlinkage = { 454 MODREV_1, 455 &aac_modldrv, 456 NULL 457 }; 458 459 static struct aac_softstate *aac_softstatep; 460 461 /* 462 * Supported card list 463 * ordered in vendor id, subvendor id, subdevice id, and device id 464 */ 465 static struct aac_card_type aac_cards[] = { 466 {0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX, 467 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 468 "Dell", "PERC 3/Di"}, 469 {0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX, 470 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 471 "Dell", "PERC 3/Di"}, 472 {0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX, 473 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 474 "Dell", "PERC 3/Si"}, 475 {0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX, 476 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 477 "Dell", "PERC 3/Di"}, 478 {0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX, 479 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 480 "Dell", "PERC 3/Si"}, 481 {0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX, 482 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 483 "Dell", "PERC 3/Di"}, 484 {0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX, 485 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 486 "Dell", "PERC 3/Di"}, 487 {0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX, 488 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 489 "Dell", "PERC 3/Di"}, 490 {0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX, 491 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, 
AAC_TYPE_SCSI, 492 "Dell", "PERC 3/Di"}, 493 {0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX, 494 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 495 "Dell", "PERC 3/Di"}, 496 {0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX, 497 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 498 "Dell", "PERC 320/DC"}, 499 {0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX, 500 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"}, 501 502 {0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX, 503 0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"}, 504 {0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX, 505 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"}, 506 {0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT, 507 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"}, 508 509 {0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX, 510 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 511 {0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX, 512 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 513 514 {0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX, 515 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 516 "Adaptec", "2200S"}, 517 {0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX, 518 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 519 "Adaptec", "2120S"}, 520 {0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX, 521 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 522 "Adaptec", "2200S"}, 523 {0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX, 524 0, AAC_TYPE_SCSI, "Adaptec", "3230S"}, 525 {0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX, 526 0, AAC_TYPE_SCSI, "Adaptec", "3240S"}, 527 {0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX, 528 0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"}, 529 {0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX, 530 0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"}, 531 {0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT, 532 0, AAC_TYPE_SCSI, "Adaptec", "2230S"}, 533 {0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT, 534 0, AAC_TYPE_SCSI, "Adaptec", "2130S"}, 535 {0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX, 536 0, AAC_TYPE_SATA, "Adaptec", "2020SA"}, 537 {0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX, 538 0, AAC_TYPE_SATA, "Adaptec", "2025SA"}, 539 {0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX, 540 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"}, 541 {0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX, 542 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"}, 543 {0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX, 544 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"}, 545 {0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX, 546 0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"}, 547 {0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX, 548 0, AAC_TYPE_SCSI, "Adaptec", "2240S"}, 549 {0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX, 550 0, AAC_TYPE_SAS, "Adaptec", "4005SAS"}, 551 {0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX, 552 0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"}, 553 {0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX, 554 0, AAC_TYPE_SAS, "Adaptec", "4800SAS"}, 555 {0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX, 556 0, AAC_TYPE_SAS, "Adaptec", "4805SAS"}, 557 {0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT, 558 0, AAC_TYPE_SATA, "Adaptec", "2820SA"}, 559 {0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT, 560 0, AAC_TYPE_SATA, "Adaptec", "2620SA"}, 561 {0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT, 562 0, AAC_TYPE_SATA, "Adaptec", "2420SA"}, 563 {0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT, 564 0, AAC_TYPE_SATA, "ICP", "9024RO"}, 565 {0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT, 566 0, AAC_TYPE_SATA, "ICP", "9014RO"}, 567 {0x9005, 0x286, 0x9005, 
0x2a0, AAC_HWIF_RKT, 568 0, AAC_TYPE_SATA, "ICP", "9047MA"}, 569 {0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT, 570 0, AAC_TYPE_SATA, "ICP", "9087MA"}, 571 {0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX, 572 0, AAC_TYPE_SAS, "ICP", "9085LI"}, 573 {0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX, 574 0, AAC_TYPE_SAS, "ICP", "5085BR"}, 575 {0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT, 576 0, AAC_TYPE_SATA, "ICP", "9067MA"}, 577 {0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX, 578 0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"}, 579 {0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX, 580 0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"}, 581 {0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX, 582 0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"}, 583 {0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX, 584 0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"}, 585 {0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX, 586 0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"}, 587 {0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX, 588 0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"}, 589 590 {0, 0, 0, 0, AAC_HWIF_UNKNOWN, 591 0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"}, 592 }; 593 594 /* 595 * Hardware access functions for i960 based cards 596 */ 597 static struct aac_interface aac_rx_interface = { 598 aac_rx_get_fwstatus, 599 aac_rx_get_mailbox, 600 aac_rx_set_mailbox 601 }; 602 603 /* 604 * Hardware access functions for Rocket based cards 605 */ 606 static struct aac_interface aac_rkt_interface = { 607 aac_rkt_get_fwstatus, 608 aac_rkt_get_mailbox, 609 aac_rkt_set_mailbox 610 }; 611 612 ddi_device_acc_attr_t aac_acc_attr = { 613 DDI_DEVICE_ATTR_V1, 614 DDI_STRUCTURE_LE_ACC, 615 DDI_STRICTORDER_ACC, 616 DDI_DEFAULT_ACC 617 }; 618 619 static struct { 620 int size; 621 int notify; 622 } aac_qinfo[] = { 623 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 624 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 625 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 626 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 627 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 628 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 629 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 630 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 631 }; 632 633 /* 634 * Default aac dma attributes 635 */ 636 static ddi_dma_attr_t aac_dma_attr = { 637 DMA_ATTR_V0, 638 0, /* lowest usable address */ 639 0xffffffffull, /* high DMA address range */ 640 0xffffffffull, /* DMA counter register */ 641 AAC_DMA_ALIGN, /* DMA address alignment */ 642 1, /* DMA burstsizes */ 643 1, /* min effective DMA size */ 644 0xffffffffull, /* max DMA xfer size */ 645 0xffffffffull, /* segment boundary */ 646 1, /* s/g list length */ 647 AAC_BLK_SIZE, /* granularity of device */ 648 0 /* DMA transfer flags */ 649 }; 650 651 struct aac_drinfo { 652 struct aac_softstate *softs; 653 int tgt; 654 int lun; 655 int event; 656 }; 657 658 static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */ 659 static uint32_t aac_timebase = 0; /* internal timer in seconds */ 660 static uint32_t aac_sync_time = 0; /* next time to sync. with firmware */ 661 662 /* 663 * Warlock directives 664 * 665 * Different variables with the same types have to be protected by the 666 * same mutex; otherwise, warlock will complain with "variables don't 667 * seem to be protected consistently". For example, 668 * aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected 669 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to 670 * declare them as protected explictly at aac_cmd_dequeue(). 
671 */ 672 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \ 673 scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \ 674 mode_format mode_geometry mode_header aac_cmd)) 675 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \ 676 aac_sge)) 677 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \ 678 aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \ 679 aac_sg_table aac_srb)) 680 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry)) 681 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address)) 682 _NOTE(SCHEME_PROTECTS_DATA("unique to dr event", aac_drinfo)) 683 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf)) 684 685 int 686 _init(void) 687 { 688 int rval = 0; 689 690 #ifdef DEBUG 691 mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL); 692 #endif 693 DBCALLED(NULL, 1); 694 695 if ((rval = ddi_soft_state_init((void *)&aac_softstatep, 696 sizeof (struct aac_softstate), 0)) != 0) 697 goto error; 698 699 if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) { 700 ddi_soft_state_fini((void *)&aac_softstatep); 701 goto error; 702 } 703 704 if ((rval = mod_install(&aac_modlinkage)) != 0) { 705 ddi_soft_state_fini((void *)&aac_softstatep); 706 scsi_hba_fini(&aac_modlinkage); 707 goto error; 708 } 709 return (rval); 710 711 error: 712 AACDB_PRINT(NULL, CE_WARN, "Mod init error!"); 713 #ifdef DEBUG 714 mutex_destroy(&aac_prt_mutex); 715 #endif 716 return (rval); 717 } 718 719 int 720 _info(struct modinfo *modinfop) 721 { 722 DBCALLED(NULL, 1); 723 return (mod_info(&aac_modlinkage, modinfop)); 724 } 725 726 /* 727 * An HBA driver cannot be unload unless you reboot, 728 * so this function will be of no use. 729 */ 730 int 731 _fini(void) 732 { 733 int rval; 734 735 DBCALLED(NULL, 1); 736 737 if ((rval = mod_remove(&aac_modlinkage)) != 0) 738 goto error; 739 740 scsi_hba_fini(&aac_modlinkage); 741 ddi_soft_state_fini((void *)&aac_softstatep); 742 #ifdef DEBUG 743 mutex_destroy(&aac_prt_mutex); 744 #endif 745 return (0); 746 747 error: 748 AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!"); 749 return (rval); 750 } 751 752 int aac_use_msi = 0; 753 754 static int 755 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 756 { 757 int instance, i; 758 struct aac_softstate *softs = NULL; 759 int attach_state = 0; 760 char *data; 761 int intr_types; 762 763 DBCALLED(NULL, 1); 764 765 switch (cmd) { 766 case DDI_ATTACH: 767 break; 768 case DDI_RESUME: 769 return (DDI_FAILURE); 770 default: 771 return (DDI_FAILURE); 772 } 773 774 instance = ddi_get_instance(dip); 775 776 /* Get soft state */ 777 if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) { 778 AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state"); 779 goto error; 780 } 781 softs = ddi_get_soft_state(aac_softstatep, instance); 782 attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED; 783 784 softs->instance = instance; 785 softs->devinfo_p = dip; 786 softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr; 787 softs->addr_dma_attr.dma_attr_granular = 1; 788 softs->acc_attr = aac_acc_attr; 789 softs->reg_attr = aac_acc_attr; 790 softs->card = AAC_UNKNOWN_CARD; 791 #ifdef DEBUG 792 softs->debug_flags = aac_debug_flags; 793 softs->debug_fib_flags = aac_debug_fib_flags; 794 #endif 795 796 /* Initialize FMA */ 797 aac_fm_init(softs); 798 799 /* Check the card type */ 800 if (aac_check_card_type(softs) == AACERR) { 801 AACDB_PRINT(softs, CE_WARN, "Card not supported"); 802 goto error; 803 } 
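/*
 * Note: each attach step that completes below ORs a flag bit into
 * attach_state, and the common "error:" exit at the bottom of aac_attach()
 * checks those bits to tear down only the steps that actually completed,
 * e.g.
 *	if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
 *		ddi_regs_map_free(&softs->pci_mem_handle);
 */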
804 /* We have found the right card and everything is OK */
805 attach_state |= AAC_ATTACH_CARD_DETECTED;
806
807 /* Map PCI mem space */
808 if (ddi_regs_map_setup(dip, 1,
809 (caddr_t *)&softs->pci_mem_base_vaddr, 0,
810 softs->map_size_min, &softs->reg_attr,
811 &softs->pci_mem_handle) != DDI_SUCCESS)
812 goto error;
813
814 softs->map_size = softs->map_size_min;
815 attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
816
817 AAC_DISABLE_INTR(softs);
818
819 /* Get the interrupt types supported by the device */
820 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
821 AACDB_PRINT(softs, CE_WARN,
822 "ddi_intr_get_supported_types() failed");
823 goto error;
824 }
825 AACDB_PRINT(softs, CE_NOTE,
826 "ddi_intr_get_supported_types() ret: 0x%x", intr_types);
827
828 /* Query interrupts, and alloc/init all needed structures */
829 if ((intr_types & DDI_INTR_TYPE_MSI) && aac_use_msi) {
830 if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
831 != DDI_SUCCESS) {
832 AACDB_PRINT(softs, CE_WARN,
833 "MSI interrupt query failed");
834 goto error;
835 }
836 softs->intr_type = DDI_INTR_TYPE_MSI;
837 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
838 if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
839 != DDI_SUCCESS) {
840 AACDB_PRINT(softs, CE_WARN,
841 "FIXED interrupt query failed");
842 goto error;
843 }
844 softs->intr_type = DDI_INTR_TYPE_FIXED;
845 } else {
846 AACDB_PRINT(softs, CE_WARN,
847 "Device supports neither FIXED nor MSI interrupts");
848 goto error;
849 }
850
851 /* Init mutexes */
852 mutex_init(&softs->q_comp_mutex, NULL,
853 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
854 cv_init(&softs->event, NULL, CV_DRIVER, NULL);
855 mutex_init(&softs->aifq_mutex, NULL,
856 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
857 cv_init(&softs->aifv, NULL, CV_DRIVER, NULL);
858 cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
859 mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
860 DDI_INTR_PRI(softs->intr_pri));
861 attach_state |= AAC_ATTACH_KMUTEX_INITED;
862
863 /* Check for legacy device naming support */
864 softs->legacy = 1; /* default to use legacy name */
865 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
866 "legacy-name-enable", &data) == DDI_SUCCESS)) {
867 if (strcmp(data, "no") == 0) {
868 AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
869 softs->legacy = 0;
870 }
871 ddi_prop_free(data);
872 }
873
874 /*
875 * Everything has been set up so far;
876 * now do the common attach work.
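 * If this succeeds, AAC_ATTACH_COMM_SPACE_SETUP is recorded just below so
 * that the error path can undo it with aac_common_detach().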
877 */ 878 if (aac_common_attach(softs) == AACERR) 879 goto error; 880 attach_state |= AAC_ATTACH_COMM_SPACE_SETUP; 881 882 /* Check for buf breakup support */ 883 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 884 "breakup-enable", &data) == DDI_SUCCESS)) { 885 if (strcmp(data, "yes") == 0) { 886 AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled"); 887 softs->flags |= AAC_FLAGS_BRKUP; 888 } 889 ddi_prop_free(data); 890 } 891 softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer; 892 if (softs->flags & AAC_FLAGS_BRKUP) { 893 softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 894 DDI_PROP_DONTPASS, "dma-max", softs->dma_max); 895 } 896 897 /* Init the cmd queues */ 898 for (i = 0; i < AAC_CMDQ_NUM; i++) 899 aac_cmd_initq(&softs->q_wait[i]); 900 aac_cmd_initq(&softs->q_busy); 901 aac_cmd_initq(&softs->q_comp); 902 903 if (aac_hba_setup(softs) != AACOK) 904 goto error; 905 attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP; 906 907 /* Connect interrupt handlers */ 908 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id, 909 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) { 910 AACDB_PRINT(softs, CE_WARN, 911 "Can not setup soft interrupt handler!"); 912 goto error; 913 } 914 attach_state |= AAC_ATTACH_SOFT_INTR_SETUP; 915 916 if (aac_add_intrs(softs) != DDI_SUCCESS) { 917 AACDB_PRINT(softs, CE_WARN, 918 "Interrupt registration failed, intr type: %s", 919 softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED"); 920 goto error; 921 } 922 attach_state |= AAC_ATTACH_HARD_INTR_SETUP; 923 924 /* Create devctl/scsi nodes for cfgadm */ 925 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 926 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) { 927 AACDB_PRINT(softs, CE_WARN, "failed to create devctl node"); 928 goto error; 929 } 930 attach_state |= AAC_ATTACH_CREATE_DEVCTL; 931 932 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance), 933 DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 934 AACDB_PRINT(softs, CE_WARN, "failed to create scsi node"); 935 goto error; 936 } 937 attach_state |= AAC_ATTACH_CREATE_SCSI; 938 939 /* Create aac node for app. to issue ioctls */ 940 if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance), 941 DDI_PSEUDO, 0) != DDI_SUCCESS) { 942 AACDB_PRINT(softs, CE_WARN, "failed to create aac node"); 943 goto error; 944 } 945 946 /* Create a taskq for dealing with dr events */ 947 if ((softs->taskq = ddi_taskq_create(dip, "aac_dr_taskq", 1, 948 TASKQ_DEFAULTPRI, 0)) == NULL) { 949 AACDB_PRINT(softs, CE_WARN, "ddi_taskq_create failed"); 950 goto error; 951 } 952 953 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 954 softs->state = AAC_STATE_RUN; 955 956 /* Create a thread for command timeout */ 957 softs->timeout_id = timeout(aac_daemon, (void *)softs, 958 (60 * drv_usectohz(1000000))); 959 960 /* Common attach is OK, so we are attached! 
*/ 961 AAC_ENABLE_INTR(softs); 962 ddi_report_dev(dip); 963 AACDB_PRINT(softs, CE_NOTE, "aac attached ok"); 964 return (DDI_SUCCESS); 965 966 error: 967 if (softs && softs->taskq) 968 ddi_taskq_destroy(softs->taskq); 969 if (attach_state & AAC_ATTACH_CREATE_SCSI) 970 ddi_remove_minor_node(dip, "scsi"); 971 if (attach_state & AAC_ATTACH_CREATE_DEVCTL) 972 ddi_remove_minor_node(dip, "devctl"); 973 if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP) 974 aac_common_detach(softs); 975 if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) { 976 (void) scsi_hba_detach(dip); 977 scsi_hba_tran_free(AAC_DIP2TRAN(dip)); 978 } 979 if (attach_state & AAC_ATTACH_HARD_INTR_SETUP) 980 aac_remove_intrs(softs); 981 if (attach_state & AAC_ATTACH_SOFT_INTR_SETUP) 982 ddi_remove_softintr(softs->softint_id); 983 if (attach_state & AAC_ATTACH_KMUTEX_INITED) { 984 mutex_destroy(&softs->q_comp_mutex); 985 cv_destroy(&softs->event); 986 mutex_destroy(&softs->aifq_mutex); 987 cv_destroy(&softs->aifv); 988 cv_destroy(&softs->drain_cv); 989 mutex_destroy(&softs->io_lock); 990 } 991 if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED) 992 ddi_regs_map_free(&softs->pci_mem_handle); 993 aac_fm_fini(softs); 994 if (attach_state & AAC_ATTACH_CARD_DETECTED) 995 softs->card = AACERR; 996 if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED) 997 ddi_soft_state_free(aac_softstatep, instance); 998 return (DDI_FAILURE); 999 } 1000 1001 static int 1002 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1003 { 1004 scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip); 1005 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 1006 1007 DBCALLED(softs, 1); 1008 1009 switch (cmd) { 1010 case DDI_DETACH: 1011 break; 1012 case DDI_SUSPEND: 1013 return (DDI_FAILURE); 1014 default: 1015 return (DDI_FAILURE); 1016 } 1017 1018 mutex_enter(&softs->io_lock); 1019 AAC_DISABLE_INTR(softs); 1020 softs->state = AAC_STATE_STOPPED; 1021 1022 mutex_exit(&softs->io_lock); 1023 (void) untimeout(softs->timeout_id); 1024 mutex_enter(&softs->io_lock); 1025 softs->timeout_id = 0; 1026 1027 ddi_taskq_destroy(softs->taskq); 1028 1029 ddi_remove_minor_node(dip, "aac"); 1030 ddi_remove_minor_node(dip, "scsi"); 1031 ddi_remove_minor_node(dip, "devctl"); 1032 1033 mutex_exit(&softs->io_lock); 1034 aac_remove_intrs(softs); 1035 ddi_remove_softintr(softs->softint_id); 1036 1037 aac_common_detach(softs); 1038 1039 (void) scsi_hba_detach(dip); 1040 scsi_hba_tran_free(tran); 1041 1042 mutex_destroy(&softs->q_comp_mutex); 1043 cv_destroy(&softs->event); 1044 mutex_destroy(&softs->aifq_mutex); 1045 cv_destroy(&softs->aifv); 1046 cv_destroy(&softs->drain_cv); 1047 mutex_destroy(&softs->io_lock); 1048 1049 ddi_regs_map_free(&softs->pci_mem_handle); 1050 aac_fm_fini(softs); 1051 softs->hwif = AAC_HWIF_UNKNOWN; 1052 softs->card = AAC_UNKNOWN_CARD; 1053 ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip)); 1054 1055 return (DDI_SUCCESS); 1056 } 1057 1058 /*ARGSUSED*/ 1059 static int 1060 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1061 { 1062 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1063 1064 DBCALLED(softs, 1); 1065 1066 mutex_enter(&softs->io_lock); 1067 (void) aac_shutdown(softs); 1068 mutex_exit(&softs->io_lock); 1069 1070 return (DDI_SUCCESS); 1071 } 1072 1073 /* 1074 * quiesce(9E) entry point. 1075 * 1076 * This function is called when the system is single-threaded at high 1077 * PIL with preemption disabled. Therefore, this function must not be 1078 * blocked. 1079 * 1080 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 
1081 * DDI_FAILURE indicates an error condition and should almost never happen. 1082 */ 1083 static int 1084 aac_quiesce(dev_info_t *dip) 1085 { 1086 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1087 1088 if (softs == NULL) 1089 return (DDI_FAILURE); 1090 1091 AAC_DISABLE_INTR(softs); 1092 1093 return (DDI_SUCCESS); 1094 } 1095 1096 /* 1097 * Bring the controller down to a dormant state and detach all child devices. 1098 * This function is called before detach or system shutdown. 1099 * Note: we can assume that the q_wait on the controller is empty, as we 1100 * won't allow shutdown if any device is open. 1101 */ 1102 static int 1103 aac_shutdown(struct aac_softstate *softs) 1104 { 1105 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 1106 struct aac_close_command *cc = (struct aac_close_command *) \ 1107 &softs->sync_slot.fibp->data[0]; 1108 int rval; 1109 1110 ddi_put32(acc, &cc->Command, VM_CloseAll); 1111 ddi_put32(acc, &cc->ContainerId, 0xfffffffful); 1112 1113 /* Flush all caches, set FW to write through mode */ 1114 rval = aac_sync_fib(softs, ContainerCommand, 1115 AAC_FIB_SIZEOF(struct aac_close_command)); 1116 1117 AACDB_PRINT(softs, CE_NOTE, 1118 "shutting down aac %s", (rval == AACOK) ? "ok" : "fail"); 1119 return (rval); 1120 } 1121 1122 static uint_t 1123 aac_softintr(caddr_t arg) 1124 { 1125 struct aac_softstate *softs = (void *)arg; 1126 1127 if (!AAC_IS_Q_EMPTY(&softs->q_comp)) { 1128 aac_drain_comp_q(softs); 1129 return (DDI_INTR_CLAIMED); 1130 } else { 1131 return (DDI_INTR_UNCLAIMED); 1132 } 1133 } 1134 1135 /* 1136 * Setup auto sense data for pkt 1137 */ 1138 static void 1139 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key, 1140 uchar_t add_code, uchar_t qual_code, uint64_t info) 1141 { 1142 struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp); 1143 1144 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */ 1145 pkt->pkt_state |= STATE_ARQ_DONE; 1146 1147 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1148 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1149 arqstat->sts_rqpkt_resid = 0; 1150 arqstat->sts_rqpkt_state = 1151 STATE_GOT_BUS | 1152 STATE_GOT_TARGET | 1153 STATE_SENT_CMD | 1154 STATE_XFERRED_DATA; 1155 arqstat->sts_rqpkt_statistics = 0; 1156 1157 if (info <= 0xfffffffful) { 1158 arqstat->sts_sensedata.es_valid = 1; 1159 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE; 1160 arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT; 1161 arqstat->sts_sensedata.es_key = key; 1162 arqstat->sts_sensedata.es_add_code = add_code; 1163 arqstat->sts_sensedata.es_qual_code = qual_code; 1164 1165 arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF; 1166 arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF; 1167 arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF; 1168 arqstat->sts_sensedata.es_info_4 = info & 0xFF; 1169 } else { /* 64-bit LBA */ 1170 struct scsi_descr_sense_hdr *dsp; 1171 struct scsi_information_sense_descr *isd; 1172 1173 dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata; 1174 dsp->ds_class = CLASS_EXTENDED_SENSE; 1175 dsp->ds_code = CODE_FMT_DESCR_CURRENT; 1176 dsp->ds_key = key; 1177 dsp->ds_add_code = add_code; 1178 dsp->ds_qual_code = qual_code; 1179 dsp->ds_addl_sense_length = 1180 sizeof (struct scsi_information_sense_descr); 1181 1182 isd = (struct scsi_information_sense_descr *)(dsp+1); 1183 isd->isd_descr_type = DESCR_INFORMATION; 1184 isd->isd_valid = 1; 1185 isd->isd_information[0] = (info >> 56) & 0xFF; 1186 isd->isd_information[1] = (info >> 48) & 0xFF; 1187 isd->isd_information[2] = (info >> 40) & 
0xFF; 1188 isd->isd_information[3] = (info >> 32) & 0xFF; 1189 isd->isd_information[4] = (info >> 24) & 0xFF; 1190 isd->isd_information[5] = (info >> 16) & 0xFF; 1191 isd->isd_information[6] = (info >> 8) & 0xFF; 1192 isd->isd_information[7] = (info) & 0xFF; 1193 } 1194 } 1195 1196 /* 1197 * Setup auto sense data for HARDWARE ERROR 1198 */ 1199 static void 1200 aac_set_arq_data_hwerr(struct aac_cmd *acp) 1201 { 1202 union scsi_cdb *cdbp; 1203 uint64_t err_blkno; 1204 1205 cdbp = (void *)acp->pkt->pkt_cdbp; 1206 err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp); 1207 aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno); 1208 } 1209 1210 /* 1211 * Setup auto sense data for UNIT ATTENTION 1212 */ 1213 /*ARGSUSED*/ 1214 static void 1215 aac_set_arq_data_reset(struct aac_softstate *softs, struct aac_cmd *acp) 1216 { 1217 struct aac_container *dvp = (struct aac_container *)acp->dvp; 1218 1219 ASSERT(dvp->dev.type == AAC_DEV_LD); 1220 1221 if (dvp->reset) { 1222 dvp->reset = 0; 1223 aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 0x29, 0x02, 0); 1224 } 1225 } 1226 1227 /* 1228 * Send a command to the adapter in New Comm. interface 1229 */ 1230 static int 1231 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp) 1232 { 1233 uint32_t index, device; 1234 1235 index = PCI_MEM_GET32(softs, AAC_IQUE); 1236 if (index == 0xffffffffUL) { 1237 index = PCI_MEM_GET32(softs, AAC_IQUE); 1238 if (index == 0xffffffffUL) 1239 return (AACERR); 1240 } 1241 1242 device = index; 1243 PCI_MEM_PUT32(softs, device, 1244 (uint32_t)(slotp->fib_phyaddr & 0xfffffffful)); 1245 device += 4; 1246 PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32)); 1247 device += 4; 1248 PCI_MEM_PUT32(softs, device, slotp->acp->fib_size); 1249 PCI_MEM_PUT32(softs, AAC_IQUE, index); 1250 return (AACOK); 1251 } 1252 1253 static void 1254 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp) 1255 { 1256 struct aac_device *dvp = acp->dvp; 1257 int q = AAC_CMDQ(acp); 1258 1259 if (acp->slotp) { /* outstanding cmd */ 1260 aac_release_slot(softs, acp->slotp); 1261 acp->slotp = NULL; 1262 if (dvp) { 1263 dvp->ncmds[q]--; 1264 if (dvp->throttle[q] == AAC_THROTTLE_DRAIN && 1265 dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC) 1266 aac_set_throttle(softs, dvp, q, 1267 softs->total_slots); 1268 } 1269 softs->bus_ncmds[q]--; 1270 (void) aac_cmd_delete(&softs->q_busy, acp); 1271 } else { /* cmd in waiting queue */ 1272 aac_cmd_delete(&softs->q_wait[q], acp); 1273 } 1274 1275 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */ 1276 mutex_enter(&softs->q_comp_mutex); 1277 aac_cmd_enqueue(&softs->q_comp, acp); 1278 mutex_exit(&softs->q_comp_mutex); 1279 } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */ 1280 cv_broadcast(&softs->event); 1281 } 1282 } 1283 1284 static void 1285 aac_handle_io(struct aac_softstate *softs, int index) 1286 { 1287 struct aac_slot *slotp; 1288 struct aac_cmd *acp; 1289 uint32_t fast; 1290 1291 fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE; 1292 index >>= 2; 1293 1294 /* Make sure firmware reported index is valid */ 1295 ASSERT(index >= 0 && index < softs->total_slots); 1296 slotp = &softs->io_slot[index]; 1297 ASSERT(slotp->index == index); 1298 acp = slotp->acp; 1299 1300 if (acp == NULL || acp->slotp != slotp) { 1301 cmn_err(CE_WARN, 1302 "Firmware error: invalid slot index received from FW"); 1303 return; 1304 } 1305 1306 acp->flags |= AAC_CMD_CMPLT; 1307 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU); 1308 1309 if 
(aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1310 /*
1311 * For fast response IO, the firmware does not return any FIB
1312 * data, so we need to fill in the FIB status and state so that
1313 * FIB users can handle it correctly.
1314 */
1315 if (fast) {
1316 uint32_t state;
1317
1318 state = ddi_get32(slotp->fib_acc_handle,
1319 &slotp->fibp->Header.XferState);
1320 /*
1321 * Update state for the CPU, not for the device; no DMA sync
1322 * needed
1323 */
1324 ddi_put32(slotp->fib_acc_handle,
1325 &slotp->fibp->Header.XferState,
1326 state | AAC_FIBSTATE_DONEADAP);
1327 ddi_put32(slotp->fib_acc_handle,
1328 (void *)&slotp->fibp->data[0], ST_OK);
1329 }
1330
1331 /* Handle completed ac */
1332 acp->ac_comp(softs, acp);
1333 } else {
1334 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1335 acp->flags |= AAC_CMD_ERR;
1336 if (acp->pkt) {
1337 acp->pkt->pkt_reason = CMD_TRAN_ERR;
1338 acp->pkt->pkt_statistics = 0;
1339 }
1340 }
1341 aac_end_io(softs, acp);
1342 }
1343
1344 /*
1345 * Interrupt handler for the New Comm. interface
1346 * The New Comm. interface uses a different interrupt mechanism: there are no
1347 * explicit message queues, and the driver only needs to access the mapped PCI
1348 * memory space to find the completed FIB or AIF.
1349 */
1350 static int
1351 aac_process_intr_new(struct aac_softstate *softs)
1352 {
1353 uint32_t index;
1354
1355 index = AAC_OUTB_GET(softs);
1356 if (index == 0xfffffffful)
1357 index = AAC_OUTB_GET(softs);
1358 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1359 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1360 return (0);
1361 }
1362 if (index != 0xfffffffful) {
1363 do {
1364 if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1365 aac_handle_io(softs, index);
1366 } else if (index != 0xfffffffeul) {
1367 struct aac_fib *fibp; /* FIB in AIF queue */
1368 uint16_t fib_size, fib_size0;
1369
1370 /*
1371 * 0xfffffffe means that the controller wants
1372 * more work, ignore it for now. Otherwise,
1373 * AIF received.
1374 */
1375 index &= ~2;
1376
1377 mutex_enter(&softs->aifq_mutex);
1378 /*
1379 * Copy AIF from adapter to the empty AIF slot
1380 */
1381 fibp = &softs->aifq[softs->aifq_idx].d;
1382 fib_size0 = PCI_MEM_GET16(softs, index + \
1383 offsetof(struct aac_fib, Header.Size));
1384 fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1385 AAC_FIB_SIZE : fib_size0;
1386 PCI_MEM_REP_GET8(softs, index, fibp,
1387 fib_size);
1388
1389 if (aac_check_acc_handle(softs-> \
1390 pci_mem_handle) == DDI_SUCCESS)
1391 (void) aac_handle_aif(softs, fibp);
1392 else
1393 ddi_fm_service_impact(softs->devinfo_p,
1394 DDI_SERVICE_UNAFFECTED);
1395 mutex_exit(&softs->aifq_mutex);
1396
1397 /*
1398 * AIF memory is owned by the adapter, so let it
1399 * know that we are done with it.
1400 */
1401 AAC_OUTB_SET(softs, index);
1402 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1403 }
1404
1405 index = AAC_OUTB_GET(softs);
1406 } while (index != 0xfffffffful);
1407
1408 /*
1409 * Process waiting cmds before starting new ones to
1410 * ensure earlier IOs are serviced first.
1411 */
1412 aac_start_waiting_io(softs);
1413 return (AAC_DB_COMMAND_READY);
1414 } else {
1415 return (0);
1416 }
1417 }
1418
1419 static uint_t
1420 aac_intr_new(caddr_t arg)
1421 {
1422 struct aac_softstate *softs = (void *)arg;
1423 uint_t rval;
1424
1425 mutex_enter(&softs->io_lock);
1426 if (aac_process_intr_new(softs))
1427 rval = DDI_INTR_CLAIMED;
1428 else
1429 rval = DDI_INTR_UNCLAIMED;
1430 mutex_exit(&softs->io_lock);
1431
1432 aac_drain_comp_q(softs);
1433 return (rval);
1434 }
1435
1436 /*
1437 * Interrupt handler for the old interface
1438 * Explicit message queues are used to send FIBs to, and get completed FIBs
1439 * from, the adapter. The driver and adapter maintain the queues in a
1440 * producer/consumer manner. The driver has to query the queues to find the completed FIBs.
1441 */
1442 static int
1443 aac_process_intr_old(struct aac_softstate *softs)
1444 {
1445 uint16_t status;
1446
1447 status = AAC_STATUS_GET(softs);
1448 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1449 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1450 return (DDI_INTR_UNCLAIMED);
1451 }
1452 if (status & AAC_DB_RESPONSE_READY) {
1453 int slot_idx;
1454
1455 /* ACK the intr */
1456 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1457 (void) AAC_STATUS_GET(softs);
1458 while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1459 &slot_idx) == AACOK)
1460 aac_handle_io(softs, slot_idx);
1461
1462 /*
1463 * Process waiting cmds before starting new ones to
1464 * ensure earlier IOs are serviced first.
1465 */
1466 aac_start_waiting_io(softs);
1467 return (AAC_DB_RESPONSE_READY);
1468 } else if (status & AAC_DB_COMMAND_READY) {
1469 int aif_idx;
1470
1471 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
1472 (void) AAC_STATUS_GET(softs);
1473 if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
1474 AACOK) {
1475 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1476 struct aac_fib *fibp; /* FIB in AIF queue */
1477 struct aac_fib *fibp0; /* FIB in communication space */
1478 uint16_t fib_size, fib_size0;
1479 uint32_t fib_xfer_state;
1480 uint32_t addr, size;
1481
1482 ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
1483
1484 #define AAC_SYNC_AIF(softs, aif_idx, type) \
1485 { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
1486 offsetof(struct aac_comm_space, \
1487 adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
1488 (type)); }
1489
1490 mutex_enter(&softs->aifq_mutex);
1491 /* Copy AIF from adapter to the empty AIF slot */
1492 fibp = &softs->aifq[softs->aifq_idx].d;
1493 AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
1494 fibp0 = &softs->comm_space->adapter_fibs[aif_idx];
1495 fib_size0 = ddi_get16(acc, &fibp0->Header.Size);
1496 fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1497 AAC_FIB_SIZE : fib_size0; 1498 ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0, 1499 fib_size, DDI_DEV_AUTOINCR); 1500 1501 (void) aac_handle_aif(softs, fibp); 1502 mutex_exit(&softs->aifq_mutex); 1503 1504 /* Complete AIF back to adapter with good status */ 1505 fib_xfer_state = LE_32(fibp->Header.XferState); 1506 if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) { 1507 ddi_put32(acc, &fibp0->Header.XferState, 1508 fib_xfer_state | AAC_FIBSTATE_DONEHOST); 1509 ddi_put32(acc, (void *)&fibp0->data[0], ST_OK); 1510 if (fib_size0 > AAC_FIB_SIZE) 1511 ddi_put16(acc, &fibp0->Header.Size, 1512 AAC_FIB_SIZE); 1513 AAC_SYNC_AIF(softs, aif_idx, 1514 DDI_DMA_SYNC_FORDEV); 1515 } 1516 1517 /* Put the AIF response on the response queue */ 1518 addr = ddi_get32(acc, 1519 &softs->comm_space->adapter_fibs[aif_idx]. \ 1520 Header.SenderFibAddress); 1521 size = (uint32_t)ddi_get16(acc, 1522 &softs->comm_space->adapter_fibs[aif_idx]. \ 1523 Header.Size); 1524 ddi_put32(acc, 1525 &softs->comm_space->adapter_fibs[aif_idx]. \ 1526 Header.ReceiverFibAddress, addr); 1527 if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q, 1528 addr, size) == AACERR) 1529 cmn_err(CE_NOTE, "!AIF ack failed"); 1530 } 1531 return (AAC_DB_COMMAND_READY); 1532 } else if (status & AAC_DB_PRINTF_READY) { 1533 /* ACK the intr */ 1534 AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY); 1535 (void) AAC_STATUS_GET(softs); 1536 (void) ddi_dma_sync(softs->comm_space_dma_handle, 1537 offsetof(struct aac_comm_space, adapter_print_buf), 1538 AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU); 1539 if (aac_check_dma_handle(softs->comm_space_dma_handle) == 1540 DDI_SUCCESS) 1541 cmn_err(CE_NOTE, "MSG From Adapter: %s", 1542 softs->comm_space->adapter_print_buf); 1543 else 1544 ddi_fm_service_impact(softs->devinfo_p, 1545 DDI_SERVICE_UNAFFECTED); 1546 AAC_NOTIFY(softs, AAC_DB_PRINTF_READY); 1547 return (AAC_DB_PRINTF_READY); 1548 } else if (status & AAC_DB_COMMAND_NOT_FULL) { 1549 /* 1550 * Without these two condition statements, the OS could hang 1551 * after a while, especially if there are a lot of AIF's to 1552 * handle, for instance if a drive is pulled from an array 1553 * under heavy load. 
1554 */ 1555 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1556 return (AAC_DB_COMMAND_NOT_FULL); 1557 } else if (status & AAC_DB_RESPONSE_NOT_FULL) { 1558 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1559 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL); 1560 return (AAC_DB_RESPONSE_NOT_FULL); 1561 } else { 1562 return (0); 1563 } 1564 } 1565 1566 static uint_t 1567 aac_intr_old(caddr_t arg) 1568 { 1569 struct aac_softstate *softs = (void *)arg; 1570 int rval; 1571 1572 mutex_enter(&softs->io_lock); 1573 if (aac_process_intr_old(softs)) 1574 rval = DDI_INTR_CLAIMED; 1575 else 1576 rval = DDI_INTR_UNCLAIMED; 1577 mutex_exit(&softs->io_lock); 1578 1579 aac_drain_comp_q(softs); 1580 return (rval); 1581 } 1582 1583 /* 1584 * Query FIXED or MSI interrupts 1585 */ 1586 static int 1587 aac_query_intrs(struct aac_softstate *softs, int intr_type) 1588 { 1589 dev_info_t *dip = softs->devinfo_p; 1590 int avail, actual, intr_size, count; 1591 int i, flag, ret; 1592 1593 AACDB_PRINT(softs, CE_NOTE, 1594 "aac_query_intrs:interrupt type 0x%x", intr_type); 1595 1596 /* Get number of interrupts */ 1597 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 1598 if ((ret != DDI_SUCCESS) || (count == 0)) { 1599 AACDB_PRINT(softs, CE_WARN, 1600 "ddi_intr_get_nintrs() failed, ret %d count %d", 1601 ret, count); 1602 return (DDI_FAILURE); 1603 } 1604 1605 /* Get number of available interrupts */ 1606 ret = ddi_intr_get_navail(dip, intr_type, &avail); 1607 if ((ret != DDI_SUCCESS) || (avail == 0)) { 1608 AACDB_PRINT(softs, CE_WARN, 1609 "ddi_intr_get_navail() failed, ret %d avail %d", 1610 ret, avail); 1611 return (DDI_FAILURE); 1612 } 1613 1614 AACDB_PRINT(softs, CE_NOTE, 1615 "ddi_intr_get_nvail returned %d, navail() returned %d", 1616 count, avail); 1617 1618 /* Allocate an array of interrupt handles */ 1619 intr_size = count * sizeof (ddi_intr_handle_t); 1620 softs->htable = kmem_alloc(intr_size, KM_SLEEP); 1621 1622 if (intr_type == DDI_INTR_TYPE_MSI) { 1623 count = 1; /* only one vector needed by now */ 1624 flag = DDI_INTR_ALLOC_STRICT; 1625 } else { /* must be DDI_INTR_TYPE_FIXED */ 1626 flag = DDI_INTR_ALLOC_NORMAL; 1627 } 1628 1629 /* Call ddi_intr_alloc() */ 1630 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0, 1631 count, &actual, flag); 1632 1633 if ((ret != DDI_SUCCESS) || (actual == 0)) { 1634 AACDB_PRINT(softs, CE_WARN, 1635 "ddi_intr_alloc() failed, ret = %d", ret); 1636 actual = 0; 1637 goto error; 1638 } 1639 1640 if (actual < count) { 1641 AACDB_PRINT(softs, CE_NOTE, 1642 "Requested: %d, Received: %d", count, actual); 1643 goto error; 1644 } 1645 1646 softs->intr_cnt = actual; 1647 1648 /* Get priority for first msi, assume remaining are all the same */ 1649 if ((ret = ddi_intr_get_pri(softs->htable[0], 1650 &softs->intr_pri)) != DDI_SUCCESS) { 1651 AACDB_PRINT(softs, CE_WARN, 1652 "ddi_intr_get_pri() failed, ret = %d", ret); 1653 goto error; 1654 } 1655 1656 /* Test for high level mutex */ 1657 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) { 1658 AACDB_PRINT(softs, CE_WARN, 1659 "aac_query_intrs: Hi level interrupt not supported"); 1660 goto error; 1661 } 1662 1663 return (DDI_SUCCESS); 1664 1665 error: 1666 /* Free already allocated intr */ 1667 for (i = 0; i < actual; i++) 1668 (void) ddi_intr_free(softs->htable[i]); 1669 1670 kmem_free(softs->htable, intr_size); 1671 return (DDI_FAILURE); 1672 } 1673 1674 1675 /* 1676 * Register FIXED or MSI interrupts, and enable them 1677 */ 1678 static int 1679 aac_add_intrs(struct aac_softstate *softs) 1680 { 1681 int i, ret; 1682 int 
intr_size, actual; 1683 ddi_intr_handler_t *aac_intr; 1684 1685 actual = softs->intr_cnt; 1686 intr_size = actual * sizeof (ddi_intr_handle_t); 1687 aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ? 1688 aac_intr_new : aac_intr_old); 1689 1690 /* Call ddi_intr_add_handler() */ 1691 for (i = 0; i < actual; i++) { 1692 if ((ret = ddi_intr_add_handler(softs->htable[i], 1693 aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) { 1694 cmn_err(CE_WARN, 1695 "ddi_intr_add_handler() failed ret = %d", ret); 1696 1697 /* Free already allocated intr */ 1698 for (i = 0; i < actual; i++) 1699 (void) ddi_intr_free(softs->htable[i]); 1700 1701 kmem_free(softs->htable, intr_size); 1702 return (DDI_FAILURE); 1703 } 1704 } 1705 1706 if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap)) 1707 != DDI_SUCCESS) { 1708 cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret); 1709 1710 /* Free already allocated intr */ 1711 for (i = 0; i < actual; i++) 1712 (void) ddi_intr_free(softs->htable[i]); 1713 1714 kmem_free(softs->htable, intr_size); 1715 return (DDI_FAILURE); 1716 } 1717 1718 /* Enable interrupts */ 1719 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1720 /* for MSI block enable */ 1721 (void) ddi_intr_block_enable(softs->htable, softs->intr_cnt); 1722 } else { 1723 /* Call ddi_intr_enable() for legacy/MSI non block enable */ 1724 for (i = 0; i < softs->intr_cnt; i++) 1725 (void) ddi_intr_enable(softs->htable[i]); 1726 } 1727 1728 return (DDI_SUCCESS); 1729 } 1730 1731 /* 1732 * Unregister FIXED or MSI interrupts 1733 */ 1734 static void 1735 aac_remove_intrs(struct aac_softstate *softs) 1736 { 1737 int i; 1738 1739 /* Disable all interrupts */ 1740 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1741 /* Call ddi_intr_block_disable() */ 1742 (void) ddi_intr_block_disable(softs->htable, softs->intr_cnt); 1743 } else { 1744 for (i = 0; i < softs->intr_cnt; i++) 1745 (void) ddi_intr_disable(softs->htable[i]); 1746 } 1747 1748 /* Call ddi_intr_remove_handler() */ 1749 for (i = 0; i < softs->intr_cnt; i++) { 1750 (void) ddi_intr_remove_handler(softs->htable[i]); 1751 (void) ddi_intr_free(softs->htable[i]); 1752 } 1753 1754 kmem_free(softs->htable, softs->intr_cnt * sizeof (ddi_intr_handle_t)); 1755 } 1756 1757 /* 1758 * Set pkt_reason and OR in pkt_statistics flag 1759 */ 1760 static void 1761 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp, 1762 uchar_t reason, uint_t stat) 1763 { 1764 #ifndef __lock_lint 1765 _NOTE(ARGUNUSED(softs)) 1766 #endif 1767 if (acp->pkt->pkt_reason == CMD_CMPLT) 1768 acp->pkt->pkt_reason = reason; 1769 acp->pkt->pkt_statistics |= stat; 1770 } 1771 1772 /* 1773 * Handle a finished pkt of soft SCMD 1774 */ 1775 static void 1776 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp) 1777 { 1778 ASSERT(acp->pkt); 1779 1780 acp->flags |= AAC_CMD_CMPLT; 1781 1782 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \ 1783 STATE_SENT_CMD | STATE_GOT_STATUS; 1784 if (acp->pkt->pkt_state & STATE_XFERRED_DATA) 1785 acp->pkt->pkt_resid = 0; 1786 1787 /* AAC_CMD_NO_INTR means no complete callback */ 1788 if (!(acp->flags & AAC_CMD_NO_INTR)) { 1789 mutex_enter(&softs->q_comp_mutex); 1790 aac_cmd_enqueue(&softs->q_comp, acp); 1791 mutex_exit(&softs->q_comp_mutex); 1792 ddi_trigger_softintr(softs->softint_id); 1793 } 1794 } 1795 1796 /* 1797 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old() 1798 */ 1799 1800 /* 1801 * Handle completed logical device IO command 1802 */ 1803 /*ARGSUSED*/ 1804 static void 1805 
aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1806 { 1807 struct aac_slot *slotp = acp->slotp; 1808 struct aac_blockread_response *resp; 1809 uint32_t status; 1810 1811 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1812 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1813 1814 acp->pkt->pkt_state |= STATE_GOT_STATUS; 1815 1816 /* 1817 * block_read/write has a similar response header, use blockread 1818 * response for both. 1819 */ 1820 resp = (struct aac_blockread_response *)&slotp->fibp->data[0]; 1821 status = ddi_get32(slotp->fib_acc_handle, &resp->Status); 1822 if (status == ST_OK) { 1823 acp->pkt->pkt_resid = 0; 1824 acp->pkt->pkt_state |= STATE_XFERRED_DATA; 1825 } else { 1826 aac_set_arq_data_hwerr(acp); 1827 } 1828 } 1829 1830 /* 1831 * Handle completed phys. device IO command 1832 */ 1833 static void 1834 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1835 { 1836 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 1837 struct aac_fib *fibp = acp->slotp->fibp; 1838 struct scsi_pkt *pkt = acp->pkt; 1839 struct aac_srb_reply *resp; 1840 uint32_t resp_status; 1841 1842 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1843 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1844 1845 resp = (struct aac_srb_reply *)&fibp->data[0]; 1846 resp_status = ddi_get32(acc, &resp->status); 1847 1848 /* First check FIB status */ 1849 if (resp_status == ST_OK) { 1850 uint32_t scsi_status; 1851 uint32_t srb_status; 1852 uint32_t data_xfer_length; 1853 1854 scsi_status = ddi_get32(acc, &resp->scsi_status); 1855 srb_status = ddi_get32(acc, &resp->srb_status); 1856 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length); 1857 1858 *pkt->pkt_scbp = (uint8_t)scsi_status; 1859 pkt->pkt_state |= STATE_GOT_STATUS; 1860 if (scsi_status == STATUS_GOOD) { 1861 uchar_t cmd = ((union scsi_cdb *)(void *) 1862 (pkt->pkt_cdbp))->scc_cmd; 1863 1864 /* Next check SRB status */ 1865 switch (srb_status & 0x3f) { 1866 case SRB_STATUS_DATA_OVERRUN: 1867 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \ 1868 "scmd=%d, xfer=%d, buflen=%d", 1869 (uint32_t)cmd, data_xfer_length, 1870 acp->bcount); 1871 1872 switch (cmd) { 1873 case SCMD_READ: 1874 case SCMD_WRITE: 1875 case SCMD_READ_G1: 1876 case SCMD_WRITE_G1: 1877 case SCMD_READ_G4: 1878 case SCMD_WRITE_G4: 1879 case SCMD_READ_G5: 1880 case SCMD_WRITE_G5: 1881 aac_set_pkt_reason(softs, acp, 1882 CMD_DATA_OVR, 0); 1883 break; 1884 } 1885 /*FALLTHRU*/ 1886 case SRB_STATUS_ERROR_RECOVERY: 1887 case SRB_STATUS_PENDING: 1888 case SRB_STATUS_SUCCESS: 1889 /* 1890 * pkt_resid should only be calculated if the 1891 * status is ERROR_RECOVERY/PENDING/SUCCESS/ 1892 * OVERRUN/UNDERRUN 1893 */ 1894 if (data_xfer_length) { 1895 pkt->pkt_state |= STATE_XFERRED_DATA; 1896 pkt->pkt_resid = acp->bcount - \ 1897 data_xfer_length; 1898 ASSERT(pkt->pkt_resid >= 0); 1899 } 1900 break; 1901 case SRB_STATUS_ABORTED: 1902 AACDB_PRINT(softs, CE_NOTE, 1903 "SRB_STATUS_ABORTED, xfer=%d, resid=%d", 1904 data_xfer_length, pkt->pkt_resid); 1905 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 1906 STAT_ABORTED); 1907 break; 1908 case SRB_STATUS_ABORT_FAILED: 1909 AACDB_PRINT(softs, CE_NOTE, 1910 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \ 1911 "resid=%d", data_xfer_length, 1912 pkt->pkt_resid); 1913 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL, 1914 0); 1915 break; 1916 case SRB_STATUS_PARITY_ERROR: 1917 AACDB_PRINT(softs, CE_NOTE, 1918 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \ 1919 "resid=%d", data_xfer_length, 1920 pkt->pkt_resid); 1921 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0); 1922 break; 1923 case 
SRB_STATUS_NO_DEVICE: 1924 case SRB_STATUS_INVALID_PATH_ID: 1925 case SRB_STATUS_INVALID_TARGET_ID: 1926 case SRB_STATUS_INVALID_LUN: 1927 case SRB_STATUS_SELECTION_TIMEOUT: 1928 #ifdef DEBUG 1929 if (AAC_DEV_IS_VALID(acp->dvp)) { 1930 AACDB_PRINT(softs, CE_NOTE, 1931 "SRB_STATUS_NO_DEVICE(%d), " \ 1932 "xfer=%d, resid=%d ", 1933 srb_status & 0x3f, 1934 data_xfer_length, pkt->pkt_resid); 1935 } 1936 #endif 1937 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0); 1938 break; 1939 case SRB_STATUS_COMMAND_TIMEOUT: 1940 case SRB_STATUS_TIMEOUT: 1941 AACDB_PRINT(softs, CE_NOTE, 1942 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \ 1943 "resid=%d", data_xfer_length, 1944 pkt->pkt_resid); 1945 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 1946 STAT_TIMEOUT); 1947 break; 1948 case SRB_STATUS_BUS_RESET: 1949 AACDB_PRINT(softs, CE_NOTE, 1950 "SRB_STATUS_BUS_RESET, xfer=%d, " \ 1951 "resid=%d", data_xfer_length, 1952 pkt->pkt_resid); 1953 aac_set_pkt_reason(softs, acp, CMD_RESET, 1954 STAT_BUS_RESET); 1955 break; 1956 default: 1957 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \ 1958 "xfer=%d, resid=%d", srb_status & 0x3f, 1959 data_xfer_length, pkt->pkt_resid); 1960 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1961 break; 1962 } 1963 } else if (scsi_status == STATUS_CHECK) { 1964 /* CHECK CONDITION */ 1965 struct scsi_arq_status *arqstat = 1966 (void *)(pkt->pkt_scbp); 1967 uint32_t sense_data_size; 1968 1969 pkt->pkt_state |= STATE_ARQ_DONE; 1970 1971 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1972 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1973 arqstat->sts_rqpkt_resid = 0; 1974 arqstat->sts_rqpkt_state = 1975 STATE_GOT_BUS | 1976 STATE_GOT_TARGET | 1977 STATE_SENT_CMD | 1978 STATE_XFERRED_DATA; 1979 arqstat->sts_rqpkt_statistics = 0; 1980 1981 sense_data_size = ddi_get32(acc, 1982 &resp->sense_data_size); 1983 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE); 1984 AACDB_PRINT(softs, CE_NOTE, 1985 "CHECK CONDITION: sense len=%d, xfer len=%d", 1986 sense_data_size, data_xfer_length); 1987 1988 if (sense_data_size > SENSE_LENGTH) 1989 sense_data_size = SENSE_LENGTH; 1990 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata, 1991 (uint8_t *)resp->sense_data, sense_data_size, 1992 DDI_DEV_AUTOINCR); 1993 } else { 1994 AACDB_PRINT(softs, CE_WARN, "invalid scsi status: " \ 1995 "scsi_status=%d, srb_status=%d", 1996 scsi_status, srb_status); 1997 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1998 } 1999 } else { 2000 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d", 2001 resp_status); 2002 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 2003 } 2004 } 2005 2006 /* 2007 * Handle completed IOCTL command 2008 */ 2009 /*ARGSUSED*/ 2010 void 2011 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2012 { 2013 struct aac_slot *slotp = acp->slotp; 2014 2015 /* 2016 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb() 2017 * may wait on softs->event, so use cv_broadcast() instead 2018 * of cv_signal().
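 *
 * Illustrative sketch only of the waiter side (the real waiters are the
 * ioctl paths named above; the mutex name and the exact flag tested here
 * are assumptions for illustration, not this driver's code):
 *
 *	mutex_enter(&softs->event_mutex);
 *	while (!(acp->flags & AAC_CMD_CMPLT))
 *		cv_wait(&softs->event, &softs->event_mutex);
 *	mutex_exit(&softs->event_mutex);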
2019 */ 2020 ASSERT(acp->flags & AAC_CMD_SYNC); 2021 ASSERT(acp->flags & AAC_CMD_NO_CB); 2022 2023 /* Get the size of the response FIB from its FIB.Header.Size field */ 2024 acp->fib_size = ddi_get16(slotp->fib_acc_handle, 2025 &slotp->fibp->Header.Size); 2026 2027 ASSERT(acp->fib_size <= softs->aac_max_fib_size); 2028 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp, 2029 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR); 2030 } 2031 2032 /* 2033 * Handle completed Flush command 2034 */ 2035 /*ARGSUSED*/ 2036 static void 2037 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2038 { 2039 struct aac_slot *slotp = acp->slotp; 2040 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2041 struct aac_synchronize_reply *resp; 2042 uint32_t status; 2043 2044 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2045 2046 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2047 2048 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0]; 2049 status = ddi_get32(acc, &resp->Status); 2050 if (status != CT_OK) 2051 aac_set_arq_data_hwerr(acp); 2052 } 2053 2054 static void 2055 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2056 { 2057 struct aac_slot *slotp = acp->slotp; 2058 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2059 struct aac_Container_resp *resp; 2060 uint32_t status; 2061 2062 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2063 2064 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2065 2066 resp = (struct aac_Container_resp *)&slotp->fibp->data[0]; 2067 status = ddi_get32(acc, &resp->Status); 2068 if (status != 0) { 2069 AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit"); 2070 aac_set_arq_data_hwerr(acp); 2071 } 2072 } 2073 2074 /* 2075 * Access PCI space to see if the driver can support the card 2076 */ 2077 static int 2078 aac_check_card_type(struct aac_softstate *softs) 2079 { 2080 ddi_acc_handle_t pci_config_handle; 2081 int card_index; 2082 uint32_t pci_cmd; 2083 2084 /* Map pci configuration space */ 2085 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) != 2086 DDI_SUCCESS) { 2087 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space"); 2088 return (AACERR); 2089 } 2090 2091 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID); 2092 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID); 2093 softs->subvendid = pci_config_get16(pci_config_handle, 2094 PCI_CONF_SUBVENID); 2095 softs->subsysid = pci_config_get16(pci_config_handle, 2096 PCI_CONF_SUBSYSID); 2097 2098 card_index = 0; 2099 while (!CARD_IS_UNKNOWN(card_index)) { 2100 if ((aac_cards[card_index].vendor == softs->vendid) && 2101 (aac_cards[card_index].device == softs->devid) && 2102 (aac_cards[card_index].subvendor == softs->subvendid) && 2103 (aac_cards[card_index].subsys == softs->subsysid)) { 2104 break; 2105 } 2106 card_index++; 2107 } 2108 2109 softs->card = card_index; 2110 softs->hwif = aac_cards[card_index].hwif; 2111 2112 /* 2113 * Unknown aac card 2114 * do a generic match based on the VendorID and DeviceID to 2115 * support the new cards in the aac family 2116 */ 2117 if (CARD_IS_UNKNOWN(card_index)) { 2118 if (softs->vendid != 0x9005) { 2119 AACDB_PRINT(softs, CE_WARN, 2120 "Unknown vendor 0x%x", softs->vendid); 2121 goto error; 2122 } 2123 switch (softs->devid) { 2124 case 0x285: 2125 softs->hwif = AAC_HWIF_I960RX; 2126 break; 2127 case 0x286: 2128 softs->hwif = AAC_HWIF_RKT; 2129 break; 2130 default: 2131 AACDB_PRINT(softs, CE_WARN, 2132 "Unknown device \"pci9005,%x\"", softs->devid); 2133 goto error; 2134 } 2135 } 2136 2137 /* Set hardware 
dependent interface */ 2138 switch (softs->hwif) { 2139 case AAC_HWIF_I960RX: 2140 softs->aac_if = aac_rx_interface; 2141 softs->map_size_min = AAC_MAP_SIZE_MIN_RX; 2142 break; 2143 case AAC_HWIF_RKT: 2144 softs->aac_if = aac_rkt_interface; 2145 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT; 2146 break; 2147 default: 2148 AACDB_PRINT(softs, CE_WARN, 2149 "Unknown hardware interface %d", softs->hwif); 2150 goto error; 2151 } 2152 2153 /* Set card names */ 2154 (void *)strncpy(softs->vendor_name, aac_cards[card_index].vid, 2155 AAC_VENDOR_LEN); 2156 (void *)strncpy(softs->product_name, aac_cards[card_index].desc, 2157 AAC_PRODUCT_LEN); 2158 2159 /* Set up quirks */ 2160 softs->flags = aac_cards[card_index].quirks; 2161 2162 /* Force the busmaster enable bit on */ 2163 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2164 if ((pci_cmd & PCI_COMM_ME) == 0) { 2165 pci_cmd |= PCI_COMM_ME; 2166 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd); 2167 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2168 if ((pci_cmd & PCI_COMM_ME) == 0) { 2169 cmn_err(CE_CONT, "?Cannot enable busmaster bit"); 2170 goto error; 2171 } 2172 } 2173 2174 /* Set memory base to map */ 2175 softs->pci_mem_base_paddr = 0xfffffff0UL & \ 2176 pci_config_get32(pci_config_handle, PCI_CONF_BASE0); 2177 2178 pci_config_teardown(&pci_config_handle); 2179 2180 return (AACOK); /* card type detected */ 2181 error: 2182 pci_config_teardown(&pci_config_handle); 2183 return (AACERR); /* no matched card found */ 2184 } 2185 2186 /* 2187 * Check the firmware to determine the features to support and the FIB 2188 * parameters to use. 2189 */ 2190 static int 2191 aac_check_firmware(struct aac_softstate *softs) 2192 { 2193 uint32_t options; 2194 uint32_t atu_size; 2195 ddi_acc_handle_t pci_handle; 2196 uint8_t *data; 2197 uint32_t max_fibs; 2198 uint32_t max_fib_size; 2199 uint32_t sg_tablesize; 2200 uint32_t max_sectors; 2201 uint32_t status; 2202 2203 /* Get supported options */ 2204 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0, 2205 &status)) != AACOK) { 2206 if (status != SRB_STATUS_INVALID_REQUEST) { 2207 cmn_err(CE_CONT, 2208 "?Fatal error: request adapter info error"); 2209 return (AACERR); 2210 } 2211 options = 0; 2212 atu_size = 0; 2213 } else { 2214 options = AAC_MAILBOX_GET(softs, 1); 2215 atu_size = AAC_MAILBOX_GET(softs, 2); 2216 } 2217 2218 if (softs->state & AAC_STATE_RESET) { 2219 if ((softs->support_opt == options) && 2220 (softs->atu_size == atu_size)) 2221 return (AACOK); 2222 2223 cmn_err(CE_WARN, 2224 "?Fatal error: firmware changed, system needs reboot"); 2225 return (AACERR); 2226 } 2227 2228 /* 2229 * The following critical settings are initialized only once during 2230 * driver attachment. 2231 */ 2232 softs->support_opt = options; 2233 softs->atu_size = atu_size; 2234 2235 /* Process supported options */ 2236 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 2237 (softs->flags & AAC_FLAGS_NO4GB) == 0) { 2238 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window"); 2239 softs->flags |= AAC_FLAGS_4GB_WINDOW; 2240 } else { 2241 /* 2242 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space 2243 * only. IO is handled by the DMA engine which does not suffer 2244 * from the ATU window programming workarounds necessary for 2245 * CPU copy operations. 
2246 */ 2247 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull; 2248 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull; 2249 } 2250 2251 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) { 2252 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address"); 2253 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 2254 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull; 2255 softs->flags |= AAC_FLAGS_SG_64BIT; 2256 } 2257 2258 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) { 2259 softs->flags |= AAC_FLAGS_ARRAY_64BIT; 2260 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size"); 2261 } 2262 2263 if (options & AAC_SUPPORTED_NONDASD) { 2264 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0, 2265 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) { 2266 if (strcmp((char *)data, "yes") == 0) { 2267 AACDB_PRINT(softs, CE_NOTE, 2268 "!Enable Non-DASD access"); 2269 softs->flags |= AAC_FLAGS_NONDASD; 2270 } 2271 ddi_prop_free(data); 2272 } 2273 } 2274 2275 /* Read preferred settings */ 2276 max_fib_size = 0; 2277 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF, 2278 0, 0, 0, 0, NULL)) == AACOK) { 2279 options = AAC_MAILBOX_GET(softs, 1); 2280 max_fib_size = (options & 0xffff); 2281 max_sectors = (options >> 16) << 1; 2282 options = AAC_MAILBOX_GET(softs, 2); 2283 sg_tablesize = (options >> 16); 2284 options = AAC_MAILBOX_GET(softs, 3); 2285 max_fibs = (options & 0xffff); 2286 } 2287 2288 /* Enable new comm. and rawio at the same time */ 2289 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) && 2290 (max_fib_size != 0)) { 2291 /* read out and save PCI MBR */ 2292 if ((atu_size > softs->map_size) && 2293 (ddi_regs_map_setup(softs->devinfo_p, 1, 2294 (caddr_t *)&data, 0, atu_size, &softs->reg_attr, 2295 &pci_handle) == DDI_SUCCESS)) { 2296 ddi_regs_map_free(&softs->pci_mem_handle); 2297 softs->pci_mem_handle = pci_handle; 2298 softs->pci_mem_base_vaddr = data; 2299 softs->map_size = atu_size; 2300 } 2301 if (atu_size == softs->map_size) { 2302 softs->flags |= AAC_FLAGS_NEW_COMM; 2303 AACDB_PRINT(softs, CE_NOTE, 2304 "!Enable New Comm. interface"); 2305 } 2306 } 2307 2308 /* Set FIB parameters */ 2309 if (softs->flags & AAC_FLAGS_NEW_COMM) { 2310 softs->aac_max_fibs = max_fibs; 2311 softs->aac_max_fib_size = max_fib_size; 2312 softs->aac_max_sectors = max_sectors; 2313 softs->aac_sg_tablesize = sg_tablesize; 2314 2315 softs->flags |= AAC_FLAGS_RAW_IO; 2316 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO"); 2317 } else { 2318 softs->aac_max_fibs = 2319 (softs->flags & AAC_FLAGS_256FIBS) ? 
256 : 512; 2320 softs->aac_max_fib_size = AAC_FIB_SIZE; 2321 softs->aac_max_sectors = 128; /* 64K */ 2322 if (softs->flags & AAC_FLAGS_17SG) 2323 softs->aac_sg_tablesize = 17; 2324 else if (softs->flags & AAC_FLAGS_34SG) 2325 softs->aac_sg_tablesize = 34; 2326 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2327 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2328 sizeof (struct aac_blockwrite64) + 2329 sizeof (struct aac_sg_entry64)) / 2330 sizeof (struct aac_sg_entry64); 2331 else 2332 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2333 sizeof (struct aac_blockwrite) + 2334 sizeof (struct aac_sg_entry)) / 2335 sizeof (struct aac_sg_entry); 2336 } 2337 2338 if ((softs->flags & AAC_FLAGS_RAW_IO) && 2339 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) { 2340 softs->flags |= AAC_FLAGS_LBA_64BIT; 2341 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array"); 2342 } 2343 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize; 2344 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9; 2345 /* 2346 * 64K maximum segment size in scatter gather list is controlled by 2347 * the NEW_COMM bit in the adapter information. If not set, the card 2348 * can only accept a maximum of 64K. It is not recommended to permit 2349 * more than 128KB of total transfer size to the adapters because 2350 * performance is negatively impacted. 2351 * 2352 * For new comm, segment size equals max xfer size. For old comm, 2353 * we use 64K for both. 2354 */ 2355 softs->buf_dma_attr.dma_attr_count_max = 2356 softs->buf_dma_attr.dma_attr_maxxfer - 1; 2357 2358 /* Setup FIB operations */ 2359 if (softs->flags & AAC_FLAGS_RAW_IO) 2360 softs->aac_cmd_fib = aac_cmd_fib_rawio; 2361 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2362 softs->aac_cmd_fib = aac_cmd_fib_brw64; 2363 else 2364 softs->aac_cmd_fib = aac_cmd_fib_brw; 2365 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? 
\ 2366 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32; 2367 2368 /* 64-bit LBA needs descriptor format sense data */ 2369 softs->slen = sizeof (struct scsi_arq_status); 2370 if ((softs->flags & AAC_FLAGS_LBA_64BIT) && 2371 softs->slen < AAC_ARQ64_LENGTH) 2372 softs->slen = AAC_ARQ64_LENGTH; 2373 2374 AACDB_PRINT(softs, CE_NOTE, 2375 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d", 2376 softs->aac_max_fibs, softs->aac_max_fib_size, 2377 softs->aac_max_sectors, softs->aac_sg_tablesize); 2378 2379 return (AACOK); 2380 } 2381 2382 static void 2383 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0, 2384 struct FsaRev *fsarev1) 2385 { 2386 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 2387 2388 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash); 2389 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type); 2390 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor); 2391 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major); 2392 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber); 2393 } 2394 2395 /* 2396 * The following function comes from Adaptec: 2397 * 2398 * Query adapter information and supplement adapter information 2399 */ 2400 static int 2401 aac_get_adapter_info(struct aac_softstate *softs, 2402 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr) 2403 { 2404 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2405 struct aac_fib *fibp = softs->sync_slot.fibp; 2406 struct aac_adapter_info *ainfp; 2407 struct aac_supplement_adapter_info *sinfp; 2408 2409 ddi_put8(acc, &fibp->data[0], 0); 2410 if (aac_sync_fib(softs, RequestAdapterInfo, 2411 sizeof (struct aac_fib_header)) != AACOK) { 2412 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed"); 2413 return (AACERR); 2414 } 2415 ainfp = (struct aac_adapter_info *)fibp->data; 2416 if (ainfr) { 2417 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2418 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase); 2419 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture); 2420 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant); 2421 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed); 2422 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem); 2423 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem); 2424 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem); 2425 aac_fsa_rev(softs, &ainfp->KernelRevision, 2426 &ainfr->KernelRevision); 2427 aac_fsa_rev(softs, &ainfp->MonitorRevision, 2428 &ainfr->MonitorRevision); 2429 aac_fsa_rev(softs, &ainfp->HardwareRevision, 2430 &ainfr->HardwareRevision); 2431 aac_fsa_rev(softs, &ainfp->BIOSRevision, 2432 &ainfr->BIOSRevision); 2433 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled); 2434 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask); 2435 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber); 2436 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform); 2437 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2438 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant); 2439 } 2440 if (sinfr) { 2441 if (!(softs->support_opt & 2442 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) { 2443 AACDB_PRINT(softs, CE_WARN, 2444 "SupplementAdapterInfo not supported"); 2445 return (AACERR); 2446 } 2447 ddi_put8(acc, &fibp->data[0], 0); 2448 if (aac_sync_fib(softs, RequestSupplementAdapterInfo, 2449 sizeof (struct aac_fib_header)) != AACOK) { 2450 AACDB_PRINT(softs, CE_WARN, 2451 "RequestSupplementAdapterInfo failed"); 2452 return (AACERR); 2453 } 2454 sinfp = (struct aac_supplement_adapter_info *)fibp->data; 2455 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1); 2456 
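		/*
		 * The AAC_GET_FIELD*() / AAC_REP_GET_FIELD*() macros copy the
		 * reply field by field out of the DMA-mapped FIB through the
		 * access handle, so device endianness is handled by the DDI
		 * accessors.  Roughly equivalent to the following sketch (the
		 * real macros live in aac.h):
		 *
		 *	sinfr->Version = ddi_get32(acc, &sinfp->Version);
		 */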
AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2); 2457 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize); 2458 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId); 2459 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts); 2460 AAC_GET_FIELD32(acc, sinfr, sinfp, Version); 2461 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits); 2462 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber); 2463 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3); 2464 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12); 2465 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts); 2466 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo, 2467 sizeof (struct vpd_info)); 2468 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision, 2469 &sinfr->FlashFirmwareRevision); 2470 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions); 2471 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision, 2472 &sinfr->FlashFirmwareBootRevision); 2473 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo, 2474 MFG_PCBA_SERIAL_NUMBER_WIDTH); 2475 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0], 2476 MFG_WWN_WIDTH); 2477 AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2); 2478 AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag); 2479 if (sinfr->ExpansionFlag == 1) { 2480 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3); 2481 AAC_GET_FIELD32(acc, sinfr, sinfp, 2482 SupportedPerformanceMode); 2483 AAC_REP_GET_FIELD32(acc, sinfr, sinfp, 2484 ReservedGrowth[0], 80); 2485 } 2486 } 2487 return (AACOK); 2488 } 2489 2490 static int 2491 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max, 2492 uint32_t *tgt_max) 2493 { 2494 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2495 struct aac_fib *fibp = softs->sync_slot.fibp; 2496 struct aac_ctcfg *c_cmd; 2497 struct aac_ctcfg_resp *c_resp; 2498 uint32_t scsi_method_id; 2499 struct aac_bus_info *cmd; 2500 struct aac_bus_info_response *resp; 2501 int rval; 2502 2503 /* Detect MethodId */ 2504 c_cmd = (struct aac_ctcfg *)&fibp->data[0]; 2505 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig); 2506 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD); 2507 ddi_put32(acc, &c_cmd->param, 0); 2508 rval = aac_sync_fib(softs, ContainerCommand, 2509 AAC_FIB_SIZEOF(struct aac_ctcfg)); 2510 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0]; 2511 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) { 2512 AACDB_PRINT(softs, CE_WARN, 2513 "VM_ContainerConfig command fail"); 2514 return (AACERR); 2515 } 2516 scsi_method_id = ddi_get32(acc, &c_resp->param); 2517 2518 /* Detect phys. bus count and max. target id first */ 2519 cmd = (struct aac_bus_info *)&fibp->data[0]; 2520 ddi_put32(acc, &cmd->Command, VM_Ioctl); 2521 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */ 2522 ddi_put32(acc, &cmd->MethodId, scsi_method_id); 2523 ddi_put32(acc, &cmd->ObjectId, 0); 2524 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo); 2525 /* 2526 * For VM_Ioctl, the firmware uses the Header.Size filled from the 2527 * driver as the size to be returned. Therefore the driver has to use 2528 * sizeof (struct aac_bus_info_response) because it is greater than 2529 * sizeof (struct aac_bus_info). 
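 * Concretely, the aac_sync_fib() call below passes
 * AAC_FIB_SIZEOF(struct aac_bus_info_response) even though only an
 * aac_bus_info request was filled in, so that the BusCount and
 * TargetsPerBus fields of the reply are not truncated.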
2530 */ 2531 rval = aac_sync_fib(softs, ContainerCommand, 2532 AAC_FIB_SIZEOF(struct aac_bus_info_response)); 2533 resp = (struct aac_bus_info_response *)cmd; 2534 2535 /* Scan all coordinates with INQUIRY */ 2536 if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) { 2537 AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail"); 2538 return (AACERR); 2539 } 2540 *bus_max = ddi_get32(acc, &resp->BusCount); 2541 *tgt_max = ddi_get32(acc, &resp->TargetsPerBus); 2542 return (AACOK); 2543 } 2544 2545 /* 2546 * The following function comes from Adaptec: 2547 * 2548 * Routine to be called during initialization of communications with 2549 * the adapter to handle possible adapter configuration issues. When 2550 * the adapter first boots up, it examines attached drives, etc, and 2551 * potentially comes up with a new or revised configuration (relative to 2552 * what's stored in its NVRAM). Additionally it may discover problems 2553 * that make the current physical configuration unworkable (currently 2554 * applicable only to cluster configuration issues). 2555 * 2556 * If there are no configuration issues or the issues are considered 2557 * trivial by the adapter, it will set its configuration status to 2558 * "FSACT_CONTINUE" and execute the "commit configuration" action 2559 * automatically on its own. 2560 * 2561 * However, if there are non-trivial issues, the adapter will set its 2562 * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT" 2563 * and wait for some agent on the host to issue the "\ContainerCommand 2564 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the 2565 * adapter to commit the new/updated configuration and enable 2566 * un-inhibited operation. The host agent should first issue the 2567 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB 2568 * command to obtain information about config issues detected by 2569 * the adapter. 2570 * 2571 * Normally the adapter's PC BIOS will execute on the host following 2572 * adapter poweron and reset and will be responsible for querying the 2573 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG 2574 * command if appropriate. 2575 * 2576 * However, with the introduction of IOP reset support, the adapter may 2577 * boot up without the benefit of the adapter's PC BIOS host agent. 2578 * This routine is intended to take care of these issues in situations 2579 * where BIOS doesn't execute following adapter poweron or reset. The 2580 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so 2581 * there is no harm in doing this when it's already been done.
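 *
 * In short, the routine below issues CT_GET_CONFIG_STATUS and, if the
 * reported action is no more severe than CFACT_PAUSE, follows up with
 * CT_COMMIT_CONFIG; anything worse is returned as AACMPE_CONFIG_STATUS
 * so that attach fails rather than auto-committing a risky configuration.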
2582 */ 2583 static int 2584 aac_handle_adapter_config_issues(struct aac_softstate *softs) 2585 { 2586 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2587 struct aac_fib *fibp = softs->sync_slot.fibp; 2588 struct aac_Container *cmd; 2589 struct aac_Container_resp *resp; 2590 struct aac_cf_status_header *cfg_sts_hdr; 2591 uint32_t resp_status; 2592 uint32_t ct_status; 2593 uint32_t cfg_stat_action; 2594 int rval; 2595 2596 /* Get adapter config status */ 2597 cmd = (struct aac_Container *)&fibp->data[0]; 2598 2599 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2600 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2601 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS); 2602 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE], 2603 sizeof (struct aac_cf_status_header)); 2604 rval = aac_sync_fib(softs, ContainerCommand, 2605 AAC_FIB_SIZEOF(struct aac_Container)); 2606 resp = (struct aac_Container_resp *)cmd; 2607 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data; 2608 2609 resp_status = ddi_get32(acc, &resp->Status); 2610 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2611 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) { 2612 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action); 2613 2614 /* Commit configuration if it's reasonable to do so. */ 2615 if (cfg_stat_action <= CFACT_PAUSE) { 2616 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2617 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2618 ddi_put32(acc, &cmd->CTCommand.command, 2619 CT_COMMIT_CONFIG); 2620 rval = aac_sync_fib(softs, ContainerCommand, 2621 AAC_FIB_SIZEOF(struct aac_Container)); 2622 2623 resp_status = ddi_get32(acc, &resp->Status); 2624 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2625 if ((rval == AACOK) && (resp_status == 0) && 2626 (ct_status == CT_OK)) 2627 /* Successful completion */ 2628 rval = AACMPE_OK; 2629 else 2630 /* Auto-commit aborted due to error(s). */ 2631 rval = AACMPE_COMMIT_CONFIG; 2632 } else { 2633 /* 2634 * Auto-commit aborted due to adapter indicating 2635 * configuration issue(s) too dangerous to auto-commit. 2636 */ 2637 rval = AACMPE_CONFIG_STATUS; 2638 } 2639 } else { 2640 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted"); 2641 rval = AACMPE_CONFIG_STATUS; 2642 } 2643 return (rval); 2644 } 2645 2646 /* 2647 * Hardware initialization and resource allocation 2648 */ 2649 static int 2650 aac_common_attach(struct aac_softstate *softs) 2651 { 2652 uint32_t status; 2653 int i; 2654 2655 DBCALLED(softs, 1); 2656 2657 /* 2658 * Do a little check here to make sure there aren't any outstanding 2659 * FIBs in the message queue. At this point there should not be and 2660 * if there are they are probably left over from another instance of 2661 * the driver like when the system crashes and the crash dump driver 2662 * gets loaded. 2663 */ 2664 while (AAC_OUTB_GET(softs) != 0xfffffffful) 2665 ; 2666 2667 /* 2668 * Wait for the card to complete booting up before doing anything that 2669 * attempts to communicate with it.
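 * The poll below spins in AAC_BUSYWAIT() on AAC_FWSTATUS_GET() for up
 * to AAC_FWUP_TIMEOUT seconds; if the firmware never reports
 * AAC_KERNEL_UP_AND_RUNNING, an FMA ereport is posted and the attach
 * is failed.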
2670 */ 2671 status = AAC_FWSTATUS_GET(softs); 2672 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC) 2673 goto error; 2674 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */ 2675 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i); 2676 if (i == 0) { 2677 cmn_err(CE_CONT, "?Fatal error: controller not ready"); 2678 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2679 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2680 goto error; 2681 } 2682 2683 /* Read and set card supported options and settings */ 2684 if (aac_check_firmware(softs) == AACERR) { 2685 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2686 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2687 goto error; 2688 } 2689 2690 /* Clear out all interrupts */ 2691 AAC_STATUS_CLR(softs, ~0); 2692 2693 /* Setup communication space with the card */ 2694 if (softs->comm_space_dma_handle == NULL) { 2695 if (aac_alloc_comm_space(softs) != AACOK) 2696 goto error; 2697 } 2698 if (aac_setup_comm_space(softs) != AACOK) { 2699 cmn_err(CE_CONT, "?Setup communication space failed"); 2700 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2701 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2702 goto error; 2703 } 2704 2705 #ifdef DEBUG 2706 if (aac_get_fw_debug_buffer(softs) != AACOK) 2707 cmn_err(CE_CONT, "?firmware UART trace not supported"); 2708 #endif 2709 2710 /* Allocate slots */ 2711 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) { 2712 cmn_err(CE_CONT, "?Fatal error: slots allocate failed"); 2713 goto error; 2714 } 2715 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots); 2716 2717 /* Allocate FIBs */ 2718 if (softs->total_fibs < softs->total_slots) { 2719 aac_alloc_fibs(softs); 2720 if (softs->total_fibs == 0) 2721 goto error; 2722 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated", 2723 softs->total_fibs); 2724 } 2725 2726 /* Get adapter names */ 2727 if (CARD_IS_UNKNOWN(softs->card)) { 2728 struct aac_supplement_adapter_info sinf; 2729 2730 if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) { 2731 cmn_err(CE_CONT, "?Query adapter information failed"); 2732 } else { 2733 softs->feature_bits = sinf.FeatureBits; 2734 softs->support_opt2 = sinf.SupportedOptions2; 2735 2736 char *p, *p0, *p1; 2737 2738 /* 2739 * Now find the controller name in supp_adapter_info-> 2740 * AdapterTypeText. Use the first word as the vendor 2741 * and the other words as the product name. 
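 * For example (illustrative values only), an AdapterTypeText of
 * "Adaptec 2200S" would yield vendor_name "Adaptec" and product_name
 * "2200S".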
2742 */ 2743 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = " 2744 "\"%s\"", sinf.AdapterTypeText); 2745 p = sinf.AdapterTypeText; 2746 p0 = p1 = NULL; 2747 /* Skip heading spaces */ 2748 while (*p && (*p == ' ' || *p == '\t')) 2749 p++; 2750 p0 = p; 2751 while (*p && (*p != ' ' && *p != '\t')) 2752 p++; 2753 /* Remove middle spaces */ 2754 while (*p && (*p == ' ' || *p == '\t')) 2755 *p++ = 0; 2756 p1 = p; 2757 /* Remove trailing spaces */ 2758 p = p1 + strlen(p1) - 1; 2759 while (p > p1 && (*p == ' ' || *p == '\t')) 2760 *p-- = 0; 2761 if (*p0 && *p1) { 2762 (void *)strncpy(softs->vendor_name, p0, 2763 AAC_VENDOR_LEN); 2764 (void *)strncpy(softs->product_name, p1, 2765 AAC_PRODUCT_LEN); 2766 } else { 2767 cmn_err(CE_WARN, 2768 "?adapter name mis-formatted\n"); 2769 if (*p0) 2770 (void *)strncpy(softs->product_name, 2771 p0, AAC_PRODUCT_LEN); 2772 } 2773 } 2774 } 2775 2776 cmn_err(CE_NOTE, 2777 "!aac driver %d.%02d.%02d-%d, found card: " \ 2778 "%s %s(pci0x%x.%x.%x.%x) at 0x%x", 2779 AAC_DRIVER_MAJOR_VERSION, 2780 AAC_DRIVER_MINOR_VERSION, 2781 AAC_DRIVER_BUGFIX_LEVEL, 2782 AAC_DRIVER_BUILD, 2783 softs->vendor_name, softs->product_name, 2784 softs->vendid, softs->devid, softs->subvendid, softs->subsysid, 2785 softs->pci_mem_base_paddr); 2786 2787 /* Perform acceptance of adapter-detected config changes if possible */ 2788 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) { 2789 cmn_err(CE_CONT, "?Handle adapter config issues failed"); 2790 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2791 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2792 goto error; 2793 } 2794 2795 /* Setup containers (logical devices) */ 2796 if (aac_probe_containers(softs) != AACOK) { 2797 cmn_err(CE_CONT, "?Fatal error: get container info error"); 2798 goto error; 2799 } 2800 2801 /* Setup phys. 
devices */ 2802 if (softs->flags & AAC_FLAGS_NONDASD) { 2803 uint32_t bus_max, tgt_max; 2804 uint32_t bus, tgt; 2805 int index; 2806 2807 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) { 2808 cmn_err(CE_CONT, "?Fatal error: get bus info error"); 2809 goto error; 2810 } 2811 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d", 2812 bus_max, tgt_max); 2813 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) { 2814 if (softs->state & AAC_STATE_RESET) { 2815 cmn_err(CE_WARN, 2816 "?Fatal error: bus map changed"); 2817 goto error; 2818 } 2819 softs->bus_max = bus_max; 2820 softs->tgt_max = tgt_max; 2821 if (softs->nondasds) { 2822 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2823 sizeof (struct aac_nondasd)); 2824 } 2825 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \ 2826 sizeof (struct aac_nondasd), KM_SLEEP); 2827 2828 index = 0; 2829 for (bus = 0; bus < softs->bus_max; bus++) { 2830 for (tgt = 0; tgt < softs->tgt_max; tgt++) { 2831 struct aac_nondasd *dvp = 2832 &softs->nondasds[index++]; 2833 dvp->dev.type = AAC_DEV_PD; 2834 dvp->bus = bus; 2835 dvp->tid = tgt; 2836 } 2837 } 2838 } 2839 } 2840 2841 /* Check dma & acc handles allocated in attach */ 2842 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) { 2843 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2844 goto error; 2845 } 2846 2847 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 2848 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2849 goto error; 2850 } 2851 2852 for (i = 0; i < softs->total_slots; i++) { 2853 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) != 2854 DDI_SUCCESS) { 2855 ddi_fm_service_impact(softs->devinfo_p, 2856 DDI_SERVICE_LOST); 2857 goto error; 2858 } 2859 } 2860 2861 return (AACOK); 2862 error: 2863 if (softs->state & AAC_STATE_RESET) 2864 return (AACERR); 2865 if (softs->nondasds) { 2866 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2867 sizeof (struct aac_nondasd)); 2868 softs->nondasds = NULL; 2869 } 2870 if (softs->total_fibs > 0) 2871 aac_destroy_fibs(softs); 2872 if (softs->total_slots > 0) 2873 aac_destroy_slots(softs); 2874 if (softs->comm_space_dma_handle) 2875 aac_free_comm_space(softs); 2876 return (AACERR); 2877 } 2878 2879 /* 2880 * Hardware shutdown and resource release 2881 */ 2882 static void 2883 aac_common_detach(struct aac_softstate *softs) 2884 { 2885 DBCALLED(softs, 1); 2886 2887 (void) aac_shutdown(softs); 2888 2889 if (softs->nondasds) { 2890 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2891 sizeof (struct aac_nondasd)); 2892 softs->nondasds = NULL; 2893 } 2894 aac_destroy_fibs(softs); 2895 aac_destroy_slots(softs); 2896 aac_free_comm_space(softs); 2897 } 2898 2899 /* 2900 * Send a synchronous command to the controller and wait for a result. 2901 * Indicate if the controller completed the command with an error status. 
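 *
 * A minimal caller sketch (arguments are illustrative; see
 * aac_check_firmware() above for a real use):
 *
 *	uint32_t status;
 *
 *	if (aac_sync_mbcommand(softs, AAC_MONKER_GETINFO,
 *	    0, 0, 0, 0, &status) != AACOK)
 *		return (AACERR);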
2902 */ 2903 int 2904 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd, 2905 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, 2906 uint32_t *statusp) 2907 { 2908 int timeout; 2909 uint32_t status; 2910 2911 if (statusp != NULL) 2912 *statusp = SRB_STATUS_SUCCESS; 2913 2914 /* Fill in mailbox */ 2915 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3); 2916 2917 /* Ensure the sync command doorbell flag is cleared */ 2918 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 2919 2920 /* Then set it to signal the adapter */ 2921 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND); 2922 2923 /* Spin waiting for the command to complete */ 2924 timeout = AAC_IMMEDIATE_TIMEOUT * 1000; 2925 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout); 2926 if (!timeout) { 2927 AACDB_PRINT(softs, CE_WARN, 2928 "Sync command timed out after %d seconds (0x%x)!", 2929 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs)); 2930 return (AACERR); 2931 } 2932 2933 /* Clear the completion flag */ 2934 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 2935 2936 /* Get the command status */ 2937 status = AAC_MAILBOX_GET(softs, 0); 2938 if (statusp != NULL) 2939 *statusp = status; 2940 if (status != SRB_STATUS_SUCCESS) { 2941 AACDB_PRINT(softs, CE_WARN, 2942 "Sync command fail: status = 0x%x", status); 2943 return (AACERR); 2944 } 2945 2946 return (AACOK); 2947 } 2948 2949 /* 2950 * Send a synchronous FIB to the adapter and wait for its completion 2951 */ 2952 static int 2953 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize) 2954 { 2955 struct aac_slot *slotp = &softs->sync_slot; 2956 ddi_dma_handle_t dma = slotp->fib_dma_handle; 2957 uint32_t status; 2958 int rval; 2959 2960 /* Sync fib only supports 512 bytes */ 2961 if (fibsize > AAC_FIB_SIZE) 2962 return (AACERR); 2963 2964 /* 2965 * Setup sync fib 2966 * Need not reinitialize FIB header if it's already been filled 2967 * by others like aac_cmd_fib_scsi as aac_cmd. 2968 */ 2969 if (slotp->acp == NULL) 2970 aac_cmd_fib_header(softs, slotp, cmd, fibsize); 2971 2972 AACDB_PRINT_FIB(softs, &softs->sync_slot); 2973 2974 (void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib), 2975 fibsize, DDI_DMA_SYNC_FORDEV); 2976 2977 /* Give the FIB to the controller, wait for a response. 
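 * The FIB's physical address is handed to the firmware through the
 * synchronous mailbox (AAC_MONKER_SYNCFIB); the completion status comes
 * back in mailbox word 0 via aac_sync_mbcommand().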
*/ 2978 rval = aac_sync_mbcommand(softs, AAC_MONKER_SYNCFIB, 2979 slotp->fib_phyaddr, 0, 0, 0, &status); 2980 if (rval == AACERR) { 2981 AACDB_PRINT(softs, CE_WARN, 2982 "Send sync fib to controller failed"); 2983 return (AACERR); 2984 } 2985 2986 (void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib), 2987 AAC_FIB_SIZE, DDI_DMA_SYNC_FORCPU); 2988 2989 if ((aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) || 2990 (aac_check_dma_handle(dma) != DDI_SUCCESS)) { 2991 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 2992 return (AACERR); 2993 } 2994 2995 return (AACOK); 2996 } 2997 2998 static void 2999 aac_cmd_initq(struct aac_cmd_queue *q) 3000 { 3001 q->q_head = NULL; 3002 q->q_tail = (struct aac_cmd *)&q->q_head; 3003 } 3004 3005 /* 3006 * Remove a cmd from the head of q 3007 */ 3008 static struct aac_cmd * 3009 aac_cmd_dequeue(struct aac_cmd_queue *q) 3010 { 3011 struct aac_cmd *acp; 3012 3013 _NOTE(ASSUMING_PROTECTED(*q)) 3014 3015 if ((acp = q->q_head) != NULL) { 3016 if ((q->q_head = acp->next) != NULL) 3017 acp->next = NULL; 3018 else 3019 q->q_tail = (struct aac_cmd *)&q->q_head; 3020 acp->prev = NULL; 3021 } 3022 return (acp); 3023 } 3024 3025 /* 3026 * Add a cmd to the tail of q 3027 */ 3028 static void 3029 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp) 3030 { 3031 ASSERT(acp->next == NULL); 3032 acp->prev = q->q_tail; 3033 q->q_tail->next = acp; 3034 q->q_tail = acp; 3035 } 3036 3037 /* 3038 * Remove the cmd ac from q 3039 */ 3040 static void 3041 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp) 3042 { 3043 if (acp->prev) { 3044 if ((acp->prev->next = acp->next) != NULL) { 3045 acp->next->prev = acp->prev; 3046 acp->next = NULL; 3047 } else { 3048 q->q_tail = acp->prev; 3049 } 3050 acp->prev = NULL; 3051 } 3052 /* ac is not in the queue */ 3053 } 3054 3055 /* 3056 * Atomically insert an entry into the nominated queue, returns 0 on success or 3057 * AACERR if the queue is full. 3058 * 3059 * Note: it would be more efficient to defer notifying the controller in 3060 * the case where we may be inserting several entries in rapid succession, 3061 * but implementing this usefully may be difficult (it would involve a 3062 * separate queue/notify interface). 
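 *
 * The shared queue lives in comm_space: the host writes the entry at
 * its producer index, syncs it for the device, advances the producer
 * index, and finally rings the doorbell named by aac_qinfo[queue].notify
 * (if any); the adapter advances the consumer index as it drains
 * entries.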
3063 */ 3064 static int 3065 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr, 3066 uint32_t fib_size) 3067 { 3068 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3069 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3070 uint32_t pi, ci; 3071 3072 DBCALLED(softs, 2); 3073 3074 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q); 3075 3076 /* Get the producer/consumer indices */ 3077 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3078 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3079 DDI_DMA_SYNC_FORCPU); 3080 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3081 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3082 return (AACERR); 3083 } 3084 3085 pi = ddi_get32(acc, 3086 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3087 ci = ddi_get32(acc, 3088 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3089 3090 /* 3091 * Wrap the queue first before we check the queue to see 3092 * if it is full 3093 */ 3094 if (pi >= aac_qinfo[queue].size) 3095 pi = 0; 3096 3097 /* XXX queue full */ 3098 if ((pi + 1) == ci) 3099 return (AACERR); 3100 3101 /* Fill in queue entry */ 3102 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size); 3103 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr); 3104 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3105 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3106 DDI_DMA_SYNC_FORDEV); 3107 3108 /* Update producer index */ 3109 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX], 3110 pi + 1); 3111 (void) ddi_dma_sync(dma, 3112 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \ 3113 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3114 DDI_DMA_SYNC_FORDEV); 3115 3116 if (aac_qinfo[queue].notify != 0) 3117 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3118 return (AACOK); 3119 } 3120 3121 /* 3122 * Atomically remove one entry from the nominated queue, returns 0 on 3123 * success or AACERR if the queue is empty. 
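 *
 * Mirror of aac_fib_enqueue(): the host reads the entry at its consumer
 * index, advances that index, and notifies the adapter only when this
 * dequeue just took the queue out of the full state (the "unfull" case
 * below), so a blocked producer can make progress.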
3124 */ 3125 static int 3126 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp) 3127 { 3128 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3129 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3130 uint32_t pi, ci; 3131 int unfull = 0; 3132 3133 DBCALLED(softs, 2); 3134 3135 ASSERT(idxp); 3136 3137 /* Get the producer/consumer indices */ 3138 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3139 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3140 DDI_DMA_SYNC_FORCPU); 3141 pi = ddi_get32(acc, 3142 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3143 ci = ddi_get32(acc, 3144 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3145 3146 /* Check for queue empty */ 3147 if (ci == pi) 3148 return (AACERR); 3149 3150 if (pi >= aac_qinfo[queue].size) 3151 pi = 0; 3152 3153 /* Check for queue full */ 3154 if (ci == pi + 1) 3155 unfull = 1; 3156 3157 /* 3158 * The controller does not wrap the queue, 3159 * so we have to do it by ourselves 3160 */ 3161 if (ci >= aac_qinfo[queue].size) 3162 ci = 0; 3163 3164 /* Fetch the entry */ 3165 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3166 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3167 DDI_DMA_SYNC_FORCPU); 3168 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3169 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3170 return (AACERR); 3171 } 3172 3173 switch (queue) { 3174 case AAC_HOST_NORM_RESP_Q: 3175 case AAC_HOST_HIGH_RESP_Q: 3176 *idxp = ddi_get32(acc, 3177 &(softs->qentries[queue] + ci)->aq_fib_addr); 3178 break; 3179 3180 case AAC_HOST_NORM_CMD_Q: 3181 case AAC_HOST_HIGH_CMD_Q: 3182 *idxp = ddi_get32(acc, 3183 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE; 3184 break; 3185 3186 default: 3187 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()"); 3188 return (AACERR); 3189 } 3190 3191 /* Update consumer index */ 3192 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX], 3193 ci + 1); 3194 (void) ddi_dma_sync(dma, 3195 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \ 3196 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3197 DDI_DMA_SYNC_FORDEV); 3198 3199 if (unfull && aac_qinfo[queue].notify != 0) 3200 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3201 return (AACOK); 3202 } 3203 3204 /* 3205 * Request information of the container cid 3206 */ 3207 static struct aac_mntinforesp * 3208 aac_get_container_info(struct aac_softstate *softs, int cid) 3209 { 3210 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3211 struct aac_fib *fibp = softs->sync_slot.fibp; 3212 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0]; 3213 struct aac_mntinforesp *mir; 3214 3215 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */ 3216 (softs->flags & AAC_FLAGS_LBA_64BIT) ? 
3217 VM_NameServe64 : VM_NameServe); 3218 ddi_put32(acc, &mi->MntType, FT_FILESYS); 3219 ddi_put32(acc, &mi->MntCount, cid); 3220 3221 if (aac_sync_fib(softs, ContainerCommand, 3222 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) { 3223 AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid); 3224 return (NULL); 3225 } 3226 3227 mir = (struct aac_mntinforesp *)&fibp->data[0]; 3228 if (ddi_get32(acc, &mir->Status) == ST_OK) 3229 return (mir); 3230 return (NULL); 3231 } 3232 3233 static int 3234 aac_get_container_count(struct aac_softstate *softs, int *count) 3235 { 3236 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3237 struct aac_mntinforesp *mir; 3238 3239 if ((mir = aac_get_container_info(softs, 0)) == NULL) 3240 return (AACERR); 3241 *count = ddi_get32(acc, &mir->MntRespCount); 3242 if (*count > AAC_MAX_LD) { 3243 AACDB_PRINT(softs, CE_CONT, 3244 "container count(%d) > AAC_MAX_LD", *count); 3245 return (AACERR); 3246 } 3247 return (AACOK); 3248 } 3249 3250 static int 3251 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid) 3252 { 3253 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3254 struct aac_Container *ct = (struct aac_Container *) \ 3255 &softs->sync_slot.fibp->data[0]; 3256 3257 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE); 3258 ddi_put32(acc, &ct->Command, VM_ContainerConfig); 3259 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID); 3260 ddi_put32(acc, &ct->CTCommand.param[0], cid); 3261 3262 if (aac_sync_fib(softs, ContainerCommand, 3263 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR) 3264 return (AACERR); 3265 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK) 3266 return (AACERR); 3267 3268 *uid = ddi_get32(acc, &ct->CTCommand.param[1]); 3269 return (AACOK); 3270 } 3271 3272 static int 3273 aac_probe_container(struct aac_softstate *softs, uint32_t cid) 3274 { 3275 struct aac_container *dvp = &softs->containers[cid]; 3276 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3277 struct aac_mntinforesp *mir; 3278 uint64_t size; 3279 uint32_t uid; 3280 3281 /* Get container basic info */ 3282 if ((mir = aac_get_container_info(softs, cid)) == NULL) 3283 return (AACERR); 3284 3285 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) { 3286 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3287 AACDB_PRINT(softs, CE_NOTE, 3288 ">>> Container %d deleted", cid); 3289 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3290 (void) aac_dr_event(softs, dvp->cid, -1, 3291 AAC_EVT_OFFLINE); 3292 } 3293 } else { 3294 size = AAC_MIR_SIZE(softs, acc, mir); 3295 3296 /* Get container UID */ 3297 if (aac_get_container_uid(softs, cid, &uid) == AACERR) { 3298 AACDB_PRINT(softs, CE_CONT, 3299 "query container %d uid failed", cid); 3300 return (AACERR); 3301 } 3302 AACDB_PRINT(softs, CE_CONT, "uid=0x%08x", uid); 3303 3304 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3305 if (dvp->uid != uid) { 3306 AACDB_PRINT(softs, CE_WARN, 3307 ">>> Container %u uid changed to %d", 3308 cid, uid); 3309 dvp->uid = uid; 3310 } 3311 if (dvp->size != size) { 3312 AACDB_PRINT(softs, CE_NOTE, 3313 ">>> Container %u size changed to %"PRIu64, 3314 cid, size); 3315 dvp->size = size; 3316 } 3317 } else { /* Init new container */ 3318 AACDB_PRINT(softs, CE_NOTE, 3319 ">>> Container %d added: " \ 3320 "size=0x%x.%08x, type=%d, name=%s", 3321 cid, 3322 ddi_get32(acc, &mir->MntObj.CapacityHigh), 3323 ddi_get32(acc, &mir->MntObj.Capacity), 3324 ddi_get32(acc, &mir->MntObj.VolType), 3325 mir->MntObj.FileSystemName); 3326 dvp->dev.flags |= AAC_DFLAG_VALID; 3327 dvp->dev.type = AAC_DEV_LD; 3328 3329 
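			/*
			 * Record the identity and capacity the firmware
			 * reported for the newly discovered container.
			 */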
dvp->cid = cid; 3330 dvp->uid = uid; 3331 dvp->size = size; 3332 dvp->locked = 0; 3333 dvp->deleted = 0; 3334 (void) aac_dr_event(softs, dvp->cid, -1, 3335 AAC_EVT_ONLINE); 3336 } 3337 } 3338 return (AACOK); 3339 } 3340 3341 /* 3342 * Do a rescan of all the possible containers and update the container list 3343 * with newly online/offline containers, and prepare for autoconfiguration. 3344 */ 3345 static int 3346 aac_probe_containers(struct aac_softstate *softs) 3347 { 3348 int i, count, total; 3349 3350 /* Loop over possible containers */ 3351 count = softs->container_count; 3352 if (aac_get_container_count(softs, &count) == AACERR) 3353 return (AACERR); 3354 for (i = total = 0; i < count; i++) { 3355 if (aac_probe_container(softs, i) == AACOK) 3356 total++; 3357 } 3358 if (count < softs->container_count) { 3359 struct aac_container *dvp; 3360 3361 for (dvp = &softs->containers[count]; 3362 dvp < &softs->containers[softs->container_count]; dvp++) { 3363 if (!AAC_DEV_IS_VALID(&dvp->dev)) 3364 continue; 3365 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted", 3366 dvp->cid); 3367 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3368 (void) aac_dr_event(softs, dvp->cid, -1, 3369 AAC_EVT_OFFLINE); 3370 } 3371 } 3372 softs->container_count = count; 3373 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total); 3374 return (AACOK); 3375 } 3376 3377 static int 3378 aac_alloc_comm_space(struct aac_softstate *softs) 3379 { 3380 size_t rlen; 3381 ddi_dma_cookie_t cookie; 3382 uint_t cookien; 3383 3384 /* Allocate DMA for comm. space */ 3385 if (ddi_dma_alloc_handle( 3386 softs->devinfo_p, 3387 &softs->addr_dma_attr, 3388 DDI_DMA_SLEEP, 3389 NULL, 3390 &softs->comm_space_dma_handle) != DDI_SUCCESS) { 3391 AACDB_PRINT(softs, CE_WARN, 3392 "Cannot alloc dma handle for communication area"); 3393 goto error; 3394 } 3395 if (ddi_dma_mem_alloc( 3396 softs->comm_space_dma_handle, 3397 sizeof (struct aac_comm_space), 3398 &softs->acc_attr, 3399 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3400 DDI_DMA_SLEEP, 3401 NULL, 3402 (caddr_t *)&softs->comm_space, 3403 &rlen, 3404 &softs->comm_space_acc_handle) != DDI_SUCCESS) { 3405 AACDB_PRINT(softs, CE_WARN, 3406 "Cannot alloc mem for communication area"); 3407 goto error; 3408 } 3409 if (ddi_dma_addr_bind_handle( 3410 softs->comm_space_dma_handle, 3411 NULL, 3412 (caddr_t)softs->comm_space, 3413 sizeof (struct aac_comm_space), 3414 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3415 DDI_DMA_SLEEP, 3416 NULL, 3417 &cookie, 3418 &cookien) != DDI_DMA_MAPPED) { 3419 AACDB_PRINT(softs, CE_WARN, 3420 "DMA bind failed for communication area"); 3421 goto error; 3422 } 3423 softs->comm_space_phyaddr = cookie.dmac_address; 3424 3425 /* Setup sync FIB space */ 3426 softs->sync_slot.fibp = &softs->comm_space->sync_fib; 3427 softs->sync_slot.fib_phyaddr = softs->comm_space_phyaddr + \ 3428 offsetof(struct aac_comm_space, sync_fib); 3429 softs->sync_slot.fib_acc_handle = softs->comm_space_acc_handle; 3430 softs->sync_slot.fib_dma_handle = softs->comm_space_dma_handle; 3431 3432 return (AACOK); 3433 error: 3434 if (softs->comm_space_acc_handle) { 3435 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3436 softs->comm_space_acc_handle = NULL; 3437 } 3438 if (softs->comm_space_dma_handle) { 3439 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3440 softs->comm_space_dma_handle = NULL; 3441 } 3442 return (AACERR); 3443 } 3444 3445 static void 3446 aac_free_comm_space(struct aac_softstate *softs) 3447 { 3448 softs->sync_slot.fibp = NULL; 3449 softs->sync_slot.fib_phyaddr = NULL; 3450 
softs->sync_slot.fib_acc_handle = NULL; 3451 softs->sync_slot.fib_dma_handle = NULL; 3452 3453 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle); 3454 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3455 softs->comm_space_acc_handle = NULL; 3456 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3457 softs->comm_space_dma_handle = NULL; 3458 softs->comm_space_phyaddr = NULL; 3459 } 3460 3461 /* 3462 * Initialize the data structures that are required for the communication 3463 * interface to operate 3464 */ 3465 static int 3466 aac_setup_comm_space(struct aac_softstate *softs) 3467 { 3468 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3469 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3470 uint32_t comm_space_phyaddr; 3471 struct aac_adapter_init *initp; 3472 int qoffset; 3473 3474 comm_space_phyaddr = softs->comm_space_phyaddr; 3475 3476 /* Setup adapter init struct */ 3477 initp = &softs->comm_space->init_data; 3478 bzero(initp, sizeof (struct aac_adapter_init)); 3479 3480 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION); 3481 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time()); 3482 3483 /* Setup new/old comm. specific data */ 3484 if (softs->flags & AAC_FLAGS_RAW_IO) { 3485 uint32_t init_flags = 0; 3486 3487 if (softs->flags & AAC_FLAGS_NEW_COMM) 3488 init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED; 3489 /* AAC_SUPPORTED_POWER_MANAGEMENT */ 3490 init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM; 3491 init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME; 3492 3493 ddi_put32(acc, &initp->InitStructRevision, 3494 AAC_INIT_STRUCT_REVISION_4); 3495 ddi_put32(acc, &initp->InitFlags, init_flags); 3496 /* Setup the preferred settings */ 3497 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs); 3498 ddi_put32(acc, &initp->MaxIoSize, 3499 (softs->aac_max_sectors << 9)); 3500 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size); 3501 } else { 3502 /* 3503 * Tells the adapter about the physical location of various 3504 * important shared data structures 3505 */ 3506 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress, 3507 comm_space_phyaddr + \ 3508 offsetof(struct aac_comm_space, adapter_fibs)); 3509 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0); 3510 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE); 3511 ddi_put32(acc, &initp->AdapterFibsSize, 3512 AAC_ADAPTER_FIBS * AAC_FIB_SIZE); 3513 ddi_put32(acc, &initp->PrintfBufferAddress, 3514 comm_space_phyaddr + \ 3515 offsetof(struct aac_comm_space, adapter_print_buf)); 3516 ddi_put32(acc, &initp->PrintfBufferSize, 3517 AAC_ADAPTER_PRINT_BUFSIZE); 3518 ddi_put32(acc, &initp->MiniPortRevision, 3519 AAC_INIT_STRUCT_MINIPORT_REVISION); 3520 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN); 3521 3522 qoffset = (comm_space_phyaddr + \ 3523 offsetof(struct aac_comm_space, qtable)) % \ 3524 AAC_QUEUE_ALIGN; 3525 if (qoffset) 3526 qoffset = AAC_QUEUE_ALIGN - qoffset; 3527 softs->qtablep = (struct aac_queue_table *) \ 3528 ((char *)&softs->comm_space->qtable + qoffset); 3529 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \ 3530 offsetof(struct aac_comm_space, qtable) + qoffset); 3531 3532 /* Init queue table */ 3533 ddi_put32(acc, &softs->qtablep-> \ 3534 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3535 AAC_HOST_NORM_CMD_ENTRIES); 3536 ddi_put32(acc, &softs->qtablep-> \ 3537 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3538 AAC_HOST_NORM_CMD_ENTRIES); 3539 ddi_put32(acc, &softs->qtablep-> \ 3540 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3541 
AAC_HOST_HIGH_CMD_ENTRIES); 3542 ddi_put32(acc, &softs->qtablep-> \ 3543 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3544 AAC_HOST_HIGH_CMD_ENTRIES); 3545 ddi_put32(acc, &softs->qtablep-> \ 3546 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3547 AAC_ADAP_NORM_CMD_ENTRIES); 3548 ddi_put32(acc, &softs->qtablep-> \ 3549 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3550 AAC_ADAP_NORM_CMD_ENTRIES); 3551 ddi_put32(acc, &softs->qtablep-> \ 3552 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3553 AAC_ADAP_HIGH_CMD_ENTRIES); 3554 ddi_put32(acc, &softs->qtablep-> \ 3555 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3556 AAC_ADAP_HIGH_CMD_ENTRIES); 3557 ddi_put32(acc, &softs->qtablep-> \ 3558 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3559 AAC_HOST_NORM_RESP_ENTRIES); 3560 ddi_put32(acc, &softs->qtablep-> \ 3561 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3562 AAC_HOST_NORM_RESP_ENTRIES); 3563 ddi_put32(acc, &softs->qtablep-> \ 3564 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3565 AAC_HOST_HIGH_RESP_ENTRIES); 3566 ddi_put32(acc, &softs->qtablep-> \ 3567 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3568 AAC_HOST_HIGH_RESP_ENTRIES); 3569 ddi_put32(acc, &softs->qtablep-> \ 3570 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3571 AAC_ADAP_NORM_RESP_ENTRIES); 3572 ddi_put32(acc, &softs->qtablep-> \ 3573 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3574 AAC_ADAP_NORM_RESP_ENTRIES); 3575 ddi_put32(acc, &softs->qtablep-> \ 3576 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3577 AAC_ADAP_HIGH_RESP_ENTRIES); 3578 ddi_put32(acc, &softs->qtablep-> \ 3579 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3580 AAC_ADAP_HIGH_RESP_ENTRIES); 3581 3582 /* Init queue entries */ 3583 softs->qentries[AAC_HOST_NORM_CMD_Q] = 3584 &softs->qtablep->qt_HostNormCmdQueue[0]; 3585 softs->qentries[AAC_HOST_HIGH_CMD_Q] = 3586 &softs->qtablep->qt_HostHighCmdQueue[0]; 3587 softs->qentries[AAC_ADAP_NORM_CMD_Q] = 3588 &softs->qtablep->qt_AdapNormCmdQueue[0]; 3589 softs->qentries[AAC_ADAP_HIGH_CMD_Q] = 3590 &softs->qtablep->qt_AdapHighCmdQueue[0]; 3591 softs->qentries[AAC_HOST_NORM_RESP_Q] = 3592 &softs->qtablep->qt_HostNormRespQueue[0]; 3593 softs->qentries[AAC_HOST_HIGH_RESP_Q] = 3594 &softs->qtablep->qt_HostHighRespQueue[0]; 3595 softs->qentries[AAC_ADAP_NORM_RESP_Q] = 3596 &softs->qtablep->qt_AdapNormRespQueue[0]; 3597 softs->qentries[AAC_ADAP_HIGH_RESP_Q] = 3598 &softs->qtablep->qt_AdapHighRespQueue[0]; 3599 } 3600 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV); 3601 3602 /* Send init structure to the card */ 3603 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT, 3604 comm_space_phyaddr + \ 3605 offsetof(struct aac_comm_space, init_data), 3606 0, 0, 0, NULL) == AACERR) { 3607 AACDB_PRINT(softs, CE_WARN, 3608 "Cannot send init structure to adapter"); 3609 return (AACERR); 3610 } 3611 3612 return (AACOK); 3613 } 3614 3615 static uchar_t * 3616 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf) 3617 { 3618 (void) memset(buf, ' ', AAC_VENDOR_LEN); 3619 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name)); 3620 return (buf + AAC_VENDOR_LEN); 3621 } 3622 3623 static uchar_t * 3624 aac_product_id(struct aac_softstate *softs, uchar_t *buf) 3625 { 3626 (void) memset(buf, ' ', AAC_PRODUCT_LEN); 3627 bcopy(softs->product_name, buf, strlen(softs->product_name)); 3628 return (buf + AAC_PRODUCT_LEN); 3629 } 3630 3631 /* 3632 * Construct unit serial number from container uid 3633 */ 3634 static uchar_t * 3635 aac_lun_serialno(struct 
aac_softstate *softs, int tgt, uchar_t *buf) 3636 { 3637 int i, d; 3638 uint32_t uid; 3639 3640 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD); 3641 3642 uid = softs->containers[tgt].uid; 3643 for (i = 7; i >= 0; i--) { 3644 d = uid & 0xf; 3645 buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d; 3646 uid >>= 4; 3647 } 3648 return (buf + 8); 3649 } 3650 3651 /* 3652 * SPC-3 7.5 INQUIRY command implementation 3653 */ 3654 static void 3655 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt, 3656 union scsi_cdb *cdbp, struct buf *bp) 3657 { 3658 int tgt = pkt->pkt_address.a_target; 3659 char *b_addr = NULL; 3660 uchar_t page = cdbp->cdb_opaque[2]; 3661 3662 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) { 3663 /* Command Support Data is not supported */ 3664 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0); 3665 return; 3666 } 3667 3668 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3669 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3670 bp_mapin(bp); 3671 b_addr = bp->b_un.b_addr; 3672 } 3673 3674 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) { 3675 uchar_t *vpdp = (uchar_t *)b_addr; 3676 uchar_t *idp, *sp; 3677 3678 /* SPC-3 8.4 Vital product data parameters */ 3679 switch (page) { 3680 case 0x00: 3681 /* Supported VPD pages */ 3682 if (vpdp == NULL || 3683 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3)) 3684 return; 3685 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3686 vpdp[AAC_VPD_PAGE_CODE] = 0x00; 3687 vpdp[AAC_VPD_PAGE_LENGTH] = 3; 3688 3689 vpdp[AAC_VPD_PAGE_DATA] = 0x00; 3690 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80; 3691 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83; 3692 3693 pkt->pkt_state |= STATE_XFERRED_DATA; 3694 break; 3695 3696 case 0x80: 3697 /* Unit serial number page */ 3698 if (vpdp == NULL || 3699 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8)) 3700 return; 3701 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3702 vpdp[AAC_VPD_PAGE_CODE] = 0x80; 3703 vpdp[AAC_VPD_PAGE_LENGTH] = 8; 3704 3705 sp = &vpdp[AAC_VPD_PAGE_DATA]; 3706 (void) aac_lun_serialno(softs, tgt, sp); 3707 3708 pkt->pkt_state |= STATE_XFERRED_DATA; 3709 break; 3710 3711 case 0x83: 3712 /* Device identification page */ 3713 if (vpdp == NULL || 3714 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32)) 3715 return; 3716 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3717 vpdp[AAC_VPD_PAGE_CODE] = 0x83; 3718 3719 idp = &vpdp[AAC_VPD_PAGE_DATA]; 3720 bzero(idp, AAC_VPD_ID_LENGTH); 3721 idp[AAC_VPD_ID_CODESET] = 0x02; 3722 idp[AAC_VPD_ID_TYPE] = 0x01; 3723 3724 /* 3725 * SPC-3 Table 111 - Identifier type 3726 * One recommended method of constructing the remainder 3727 * of the identifier field is to concatenate the product 3728 * identification field from the standard INQUIRY data 3729 * field and the product serial number field from the 3730 * unit serial number page.
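 * (Accordingly, the identifier built below concatenates the vendor id, the product id and the 8-byte container serial number, and the identifier and page length fields are computed from the resulting end pointer.)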
3731 */ 3732 sp = &idp[AAC_VPD_ID_DATA]; 3733 sp = aac_vendor_id(softs, sp); 3734 sp = aac_product_id(softs, sp); 3735 sp = aac_lun_serialno(softs, tgt, sp); 3736 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \ 3737 (uintptr_t)&idp[AAC_VPD_ID_DATA]; 3738 3739 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \ 3740 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA]; 3741 pkt->pkt_state |= STATE_XFERRED_DATA; 3742 break; 3743 3744 default: 3745 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3746 0x24, 0x00, 0); 3747 break; 3748 } 3749 } else { 3750 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr; 3751 size_t len = sizeof (struct scsi_inquiry); 3752 3753 if (page != 0) { 3754 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3755 0x24, 0x00, 0); 3756 return; 3757 } 3758 if (inqp == NULL || bp->b_bcount < len) 3759 return; 3760 3761 bzero(inqp, len); 3762 inqp->inq_len = AAC_ADDITIONAL_LEN; 3763 inqp->inq_ansi = AAC_ANSI_VER; 3764 inqp->inq_rdf = AAC_RESP_DATA_FORMAT; 3765 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid); 3766 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid); 3767 bcopy("V1.0", inqp->inq_revision, 4); 3768 inqp->inq_cmdque = 1; /* enable tagged-queuing */ 3769 /* 3770 * For "sd-max-xfer-size" property which may impact performance 3771 * when IO threads increase. 3772 */ 3773 inqp->inq_wbus32 = 1; 3774 3775 pkt->pkt_state |= STATE_XFERRED_DATA; 3776 } 3777 } 3778 3779 /* 3780 * SPC-3 7.10 MODE SENSE command implementation 3781 */ 3782 static void 3783 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt, 3784 union scsi_cdb *cdbp, struct buf *bp, int capacity) 3785 { 3786 uchar_t pagecode; 3787 struct mode_header *headerp; 3788 struct mode_header_g1 *g1_headerp; 3789 unsigned int ncyl; 3790 caddr_t sense_data; 3791 caddr_t next_page; 3792 size_t sdata_size; 3793 size_t pages_size; 3794 int unsupport_page = 0; 3795 3796 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE || 3797 cdbp->scc_cmd == SCMD_MODE_SENSE_G1); 3798 3799 if (!(bp && bp->b_un.b_addr && bp->b_bcount)) 3800 return; 3801 3802 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3803 bp_mapin(bp); 3804 pkt->pkt_state |= STATE_XFERRED_DATA; 3805 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F; 3806 3807 /* calculate the size of needed buffer */ 3808 if (cdbp->scc_cmd == SCMD_MODE_SENSE) 3809 sdata_size = MODE_HEADER_LENGTH; 3810 else /* must be SCMD_MODE_SENSE_G1 */ 3811 sdata_size = MODE_HEADER_LENGTH_G1; 3812 3813 pages_size = 0; 3814 switch (pagecode) { 3815 case SD_MODE_SENSE_PAGE3_CODE: 3816 pages_size += sizeof (struct mode_format); 3817 break; 3818 3819 case SD_MODE_SENSE_PAGE4_CODE: 3820 pages_size += sizeof (struct mode_geometry); 3821 break; 3822 3823 case MODEPAGE_CTRL_MODE: 3824 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3825 pages_size += sizeof (struct mode_control_scsi3); 3826 } else { 3827 unsupport_page = 1; 3828 } 3829 break; 3830 3831 case MODEPAGE_ALLPAGES: 3832 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3833 pages_size += sizeof (struct mode_format) + 3834 sizeof (struct mode_geometry) + 3835 sizeof (struct mode_control_scsi3); 3836 } else { 3837 pages_size += sizeof (struct mode_format) + 3838 sizeof (struct mode_geometry); 3839 } 3840 break; 3841 3842 default: 3843 /* unsupported pages */ 3844 unsupport_page = 1; 3845 } 3846 3847 /* allocate buffer to fill the send data */ 3848 sdata_size += pages_size; 3849 sense_data = kmem_zalloc(sdata_size, KM_SLEEP); 3850 3851 if (cdbp->scc_cmd == SCMD_MODE_SENSE) { 3852 headerp = (struct mode_header *)sense_data; 3853 headerp->length = MODE_HEADER_LENGTH + pages_size - 3854 sizeof 
(headerp->length); 3855 headerp->bdesc_length = 0; 3856 next_page = sense_data + sizeof (struct mode_header); 3857 } else { 3858 g1_headerp = (void *)sense_data; 3859 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size - 3860 sizeof (g1_headerp->length)); 3861 g1_headerp->bdesc_length = 0; 3862 next_page = sense_data + sizeof (struct mode_header_g1); 3863 } 3864 3865 if (unsupport_page) 3866 goto finish; 3867 3868 if (pagecode == SD_MODE_SENSE_PAGE3_CODE || 3869 pagecode == MODEPAGE_ALLPAGES) { 3870 /* SBC-3 7.1.3.3 Format device page */ 3871 struct mode_format *page3p; 3872 3873 page3p = (void *)next_page; 3874 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE; 3875 page3p->mode_page.length = sizeof (struct mode_format); 3876 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE); 3877 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK); 3878 3879 next_page += sizeof (struct mode_format); 3880 } 3881 3882 if (pagecode == SD_MODE_SENSE_PAGE4_CODE || 3883 pagecode == MODEPAGE_ALLPAGES) { 3884 /* SBC-3 7.1.3.8 Rigid disk device geometry page */ 3885 struct mode_geometry *page4p; 3886 3887 page4p = (void *)next_page; 3888 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE; 3889 page4p->mode_page.length = sizeof (struct mode_geometry); 3890 page4p->heads = AAC_NUMBER_OF_HEADS; 3891 page4p->rpm = BE_16(AAC_ROTATION_SPEED); 3892 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK); 3893 page4p->cyl_lb = ncyl & 0xff; 3894 page4p->cyl_mb = (ncyl >> 8) & 0xff; 3895 page4p->cyl_ub = (ncyl >> 16) & 0xff; 3896 3897 next_page += sizeof (struct mode_geometry); 3898 } 3899 3900 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) && 3901 softs->flags & AAC_FLAGS_LBA_64BIT) { 3902 /* 64-bit LBA need large sense data */ 3903 struct mode_control_scsi3 *mctl; 3904 3905 mctl = (void *)next_page; 3906 mctl->mode_page.code = MODEPAGE_CTRL_MODE; 3907 mctl->mode_page.length = 3908 sizeof (struct mode_control_scsi3) - 3909 sizeof (struct mode_page); 3910 mctl->d_sense = 1; 3911 } 3912 3913 finish: 3914 /* copyout the valid data. */ 3915 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount)); 3916 kmem_free(sense_data, sdata_size); 3917 } 3918 3919 static int 3920 aac_name_node(dev_info_t *dip, char *name, int len) 3921 { 3922 int tgt, lun; 3923 3924 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3925 DDI_PROP_DONTPASS, "target", -1); 3926 if (tgt == -1) 3927 return (DDI_FAILURE); 3928 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3929 DDI_PROP_DONTPASS, "lun", -1); 3930 if (lun == -1) 3931 return (DDI_FAILURE); 3932 3933 (void) snprintf(name, len, "%x,%x", tgt, lun); 3934 return (DDI_SUCCESS); 3935 } 3936 3937 /*ARGSUSED*/ 3938 static int 3939 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 3940 scsi_hba_tran_t *tran, struct scsi_device *sd) 3941 { 3942 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 3943 #if defined(DEBUG) || defined(__lock_lint) 3944 int ctl = ddi_get_instance(softs->devinfo_p); 3945 #endif 3946 uint16_t tgt = sd->sd_address.a_target; 3947 uint8_t lun = sd->sd_address.a_lun; 3948 struct aac_device *dvp; 3949 3950 DBCALLED(softs, 2); 3951 3952 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 3953 /* 3954 * If no persistent node exist, we don't allow .conf node 3955 * to be created. 
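 * (When a matching child does exist, ndi_merge_node() below tries to fold the .conf node into it; the .conf node is only instantiated if that merge fails.)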
3956 */ 3957 if (aac_find_child(softs, tgt, lun) != NULL) { 3958 if (ndi_merge_node(tgt_dip, aac_name_node) != 3959 DDI_SUCCESS) 3960 /* Create this .conf node */ 3961 return (DDI_SUCCESS); 3962 } 3963 return (DDI_FAILURE); 3964 } 3965 3966 /* 3967 * Only support containers/physical devices that have been 3968 * detected and are valid 3969 */ 3970 mutex_enter(&softs->io_lock); 3971 if (tgt >= AAC_MAX_DEV(softs)) { 3972 AACDB_PRINT_TRAN(softs, 3973 "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun); 3974 mutex_exit(&softs->io_lock); 3975 return (DDI_FAILURE); 3976 } 3977 3978 if (tgt < AAC_MAX_LD) { 3979 dvp = (struct aac_device *)&softs->containers[tgt]; 3980 if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) { 3981 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d", 3982 ctl, tgt, lun); 3983 mutex_exit(&softs->io_lock); 3984 return (DDI_FAILURE); 3985 } 3986 /* 3987 * Save the tgt_dip for the given target if one doesn't exist 3988 * already. Dips for non-existent targets will be cleared in 3989 * tgt_free. 3990 */ 3991 if (softs->containers[tgt].dev.dip == NULL && 3992 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) 3993 softs->containers[tgt].dev.dip = tgt_dip; 3994 } else { 3995 dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)]; 3996 } 3997 3998 if (softs->flags & AAC_FLAGS_BRKUP) { 3999 if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip, 4000 "buf_break", 1) != DDI_PROP_SUCCESS) { 4001 cmn_err(CE_CONT, "unable to create " 4002 "property for t%dL%d (buf_break)", tgt, lun); 4003 } 4004 } 4005 4006 AACDB_PRINT(softs, CE_NOTE, 4007 "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun, 4008 (dvp->type == AAC_DEV_PD) ? "pd" : "ld"); 4009 mutex_exit(&softs->io_lock); 4010 return (DDI_SUCCESS); 4011 } 4012 4013 static void 4014 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 4015 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 4016 { 4017 #ifndef __lock_lint 4018 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran)) 4019 #endif 4020 4021 struct aac_softstate *softs = SD2AAC(sd); 4022 int tgt = sd->sd_address.a_target; 4023 4024 mutex_enter(&softs->io_lock); 4025 if (tgt < AAC_MAX_LD) { 4026 if (softs->containers[tgt].dev.dip == tgt_dip) 4027 softs->containers[tgt].dev.dip = NULL; 4028 } else { 4029 softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID; 4030 } 4031 mutex_exit(&softs->io_lock); 4032 } 4033 4034 /* 4035 * Check if the firmware is Up And Running. If it is in the Kernel Panic 4036 * state, (BlinkLED code + 1) is returned. 4037 * 0 -- firmware up and running 4038 * -1 -- firmware dead 4039 * >0 -- firmware kernel panic 4040 */ 4041 static int 4042 aac_check_adapter_health(struct aac_softstate *softs) 4043 { 4044 int rval; 4045 4046 rval = PCI_MEM_GET32(softs, AAC_OMR0); 4047 4048 if (rval & AAC_KERNEL_UP_AND_RUNNING) { 4049 rval = 0; 4050 } else if (rval & AAC_KERNEL_PANIC) { 4051 cmn_err(CE_WARN, "firmware panic"); 4052 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */ 4053 } else { 4054 cmn_err(CE_WARN, "firmware dead"); 4055 rval = -1; 4056 } 4057 return (rval); 4058 } 4059 4060 static void 4061 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp, 4062 uchar_t reason) 4063 { 4064 acp->flags |= AAC_CMD_ABORT; 4065 4066 if (acp->pkt) { 4067 /* 4068 * Each lun should generate a unit attention 4069 * condition when reset. 4070 * Phys. drives are treated as logical ones 4071 * during error recovery.
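 * (For commands still outstanding on the controller, i.e. with acp->slotp set, aac_set_arq_data_reset() below builds the corresponding sense data.)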
4072 */ 4073 if (acp->slotp) { /* outstanding cmd */ 4074 acp->pkt->pkt_state |= STATE_GOT_STATUS; 4075 aac_set_arq_data_reset(softs, acp); 4076 } 4077 4078 switch (reason) { 4079 case CMD_TIMEOUT: 4080 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p", 4081 acp); 4082 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 4083 STAT_TIMEOUT | STAT_BUS_RESET); 4084 break; 4085 case CMD_RESET: 4086 /* aac support only RESET_ALL */ 4087 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp); 4088 aac_set_pkt_reason(softs, acp, CMD_RESET, 4089 STAT_BUS_RESET); 4090 break; 4091 case CMD_ABORTED: 4092 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p", 4093 acp); 4094 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 4095 STAT_ABORTED); 4096 break; 4097 } 4098 } 4099 aac_end_io(softs, acp); 4100 } 4101 4102 /* 4103 * Abort all the pending commands of type iocmd or just the command pkt 4104 * corresponding to pkt 4105 */ 4106 static void 4107 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt, 4108 int reason) 4109 { 4110 struct aac_cmd *ac_arg, *acp; 4111 int i; 4112 4113 if (pkt == NULL) { 4114 ac_arg = NULL; 4115 } else { 4116 ac_arg = PKT2AC(pkt); 4117 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ? 4118 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC; 4119 } 4120 4121 /* 4122 * a) outstanding commands on the controller 4123 * Note: should abort outstanding commands only after one 4124 * IOP reset has been done. 4125 */ 4126 if (iocmd & AAC_IOCMD_OUTSTANDING) { 4127 struct aac_cmd *acp; 4128 4129 for (i = 0; i < AAC_MAX_LD; i++) { 4130 if (AAC_DEV_IS_VALID(&softs->containers[i].dev)) 4131 softs->containers[i].reset = 1; 4132 } 4133 while ((acp = softs->q_busy.q_head) != NULL) 4134 aac_abort_iocmd(softs, acp, reason); 4135 } 4136 4137 /* b) commands in the waiting queues */ 4138 for (i = 0; i < AAC_CMDQ_NUM; i++) { 4139 if (iocmd & (1 << i)) { 4140 if (ac_arg) { 4141 aac_abort_iocmd(softs, ac_arg, reason); 4142 } else { 4143 while ((acp = softs->q_wait[i].q_head) != NULL) 4144 aac_abort_iocmd(softs, acp, reason); 4145 } 4146 } 4147 } 4148 } 4149 4150 /* 4151 * The draining thread is shared among quiesce threads. It terminates 4152 * when the adapter is quiesced or stopped by aac_stop_drain(). 4153 */ 4154 static void 4155 aac_check_drain(void *arg) 4156 { 4157 struct aac_softstate *softs = arg; 4158 4159 mutex_enter(&softs->io_lock); 4160 if (softs->ndrains) { 4161 softs->drain_timeid = 0; 4162 /* 4163 * If both ASYNC and SYNC bus throttle are held, 4164 * wake up threads only when both are drained out. 4165 */ 4166 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 || 4167 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) && 4168 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 || 4169 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0)) 4170 cv_broadcast(&softs->drain_cv); 4171 else 4172 softs->drain_timeid = timeout(aac_check_drain, softs, 4173 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4174 } 4175 mutex_exit(&softs->io_lock); 4176 } 4177 4178 /* 4179 * If not draining the outstanding cmds, drain them. Otherwise, 4180 * only update ndrains. 4181 */ 4182 static void 4183 aac_start_drain(struct aac_softstate *softs) 4184 { 4185 if (softs->ndrains == 0) { 4186 ASSERT(softs->drain_timeid == 0); 4187 softs->drain_timeid = timeout(aac_check_drain, softs, 4188 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4189 } 4190 softs->ndrains++; 4191 } 4192 4193 /* 4194 * Stop the draining thread when no other threads use it any longer. 4195 * Side effect: io_lock may be released in the middle. 
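 * (io_lock is dropped around untimeout() because the drain timeout handler, aac_check_drain(), itself acquires io_lock; waiting for it to finish with the lock held could deadlock.)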
4196 */ 4197 static void 4198 aac_stop_drain(struct aac_softstate *softs) 4199 { 4200 softs->ndrains--; 4201 if (softs->ndrains == 0) { 4202 if (softs->drain_timeid != 0) { 4203 timeout_id_t tid = softs->drain_timeid; 4204 4205 softs->drain_timeid = 0; 4206 mutex_exit(&softs->io_lock); 4207 (void) untimeout(tid); 4208 mutex_enter(&softs->io_lock); 4209 } 4210 } 4211 } 4212 4213 /* 4214 * The following function comes from Adaptec: 4215 * 4216 * Once an IOP reset is done, the driver basically has to re-initialize the 4217 * card as if up from a cold boot, and the driver is responsible for any IO 4218 * that is outstanding to the adapter at the time of the IOP RESET. Prepare 4219 * for the IOP RESET by making the init code modular, with the ability to call 4220 * it from multiple places. 4221 */ 4222 static int 4223 aac_reset_adapter(struct aac_softstate *softs) 4224 { 4225 int health; 4226 uint32_t status; 4227 int rval = AAC_IOP_RESET_FAILED; 4228 4229 DBCALLED(softs, 1); 4230 4231 ASSERT(softs->state & AAC_STATE_RESET); 4232 4233 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0); 4234 /* Disable interrupt */ 4235 AAC_DISABLE_INTR(softs); 4236 4237 health = aac_check_adapter_health(softs); 4238 if (health == -1) { 4239 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 4240 goto finish; 4241 } 4242 if (health == 0) /* flush drives if possible */ 4243 (void) aac_shutdown(softs); 4244 4245 /* Execute IOP reset */ 4246 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0, 4247 &status)) != AACOK) { 4248 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 4249 struct aac_fib *fibp; 4250 struct aac_pause_command *pc; 4251 4252 if ((status & 0xf) == 0xf) { 4253 uint32_t wait_count; 4254 4255 /* 4256 * Sunrise Lake has dual cores and we must drag the 4257 * other core with us to reset simultaneously. There 4258 * are 2 bits in the Inbound Reset Control and Status 4259 * Register (offset 0x38) of the Sunrise Lake to reset 4260 * the chip without clearing out the PCI configuration 4261 * info (COMMAND & BARS). 4262 */ 4263 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST); 4264 4265 /* 4266 * We need to wait for 5 seconds before accessing the MU 4267 * again: 5 * 10000 * 100us = 5,000,000us = 5s 4268 */ 4269 wait_count = 5 * 10000; 4270 while (wait_count) { 4271 drv_usecwait(100); /* delay 100 microseconds */ 4272 wait_count--; 4273 } 4274 } else { 4275 if (status == SRB_STATUS_INVALID_REQUEST) 4276 cmn_err(CE_WARN, "!IOP_RESET not supported"); 4277 else /* probably timeout */ 4278 cmn_err(CE_WARN, "!IOP_RESET failed"); 4279 4280 /* Unwind aac_shutdown() */ 4281 fibp = softs->sync_slot.fibp; 4282 pc = (struct aac_pause_command *)&fibp->data[0]; 4283 4284 bzero(pc, sizeof (*pc)); 4285 ddi_put32(acc, &pc->Command, VM_ContainerConfig); 4286 ddi_put32(acc, &pc->Type, CT_PAUSE_IO); 4287 ddi_put32(acc, &pc->Timeout, 1); 4288 ddi_put32(acc, &pc->Min, 1); 4289 ddi_put32(acc, &pc->NoRescan, 1); 4290 4291 (void) aac_sync_fib(softs, ContainerCommand, 4292 AAC_FIB_SIZEOF(struct aac_pause_command)); 4293 4294 if (aac_check_adapter_health(softs) != 0) 4295 ddi_fm_service_impact(softs->devinfo_p, 4296 DDI_SERVICE_LOST); 4297 else 4298 /* 4299 * IOP reset not supported or the IOP was not reset 4300 */ 4301 rval = AAC_IOP_RESET_ABNORMAL; 4302 goto finish; 4303 } 4304 } 4305 4306 /* 4307 * Re-read and renegotiate the FIB parameters, as one of the actions 4308 * that can result from an IOP reset is the running of a new firmware 4309 * image.
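 * (This is done by simply calling aac_common_attach() again below.)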
4310 */ 4311 if (aac_common_attach(softs) != AACOK) 4312 goto finish; 4313 4314 rval = AAC_IOP_RESET_SUCCEED; 4315 4316 finish: 4317 AAC_ENABLE_INTR(softs); 4318 return (rval); 4319 } 4320 4321 static void 4322 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q, 4323 int throttle) 4324 { 4325 /* 4326 * If the bus is draining/quiesced, no changes to the throttles 4327 * are allowed. All throttles should have been set to 0. 4328 */ 4329 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains) 4330 return; 4331 dvp->throttle[q] = throttle; 4332 } 4333 4334 static void 4335 aac_hold_bus(struct aac_softstate *softs, int iocmds) 4336 { 4337 int i, q; 4338 4339 /* Hold bus by holding every device on the bus */ 4340 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4341 if (iocmds & (1 << q)) { 4342 softs->bus_throttle[q] = 0; 4343 for (i = 0; i < AAC_MAX_LD; i++) 4344 aac_set_throttle(softs, 4345 &softs->containers[i].dev, q, 0); 4346 for (i = 0; i < AAC_MAX_PD(softs); i++) 4347 aac_set_throttle(softs, 4348 &softs->nondasds[i].dev, q, 0); 4349 } 4350 } 4351 } 4352 4353 static void 4354 aac_unhold_bus(struct aac_softstate *softs, int iocmds) 4355 { 4356 int i, q; 4357 4358 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4359 if (iocmds & (1 << q)) { 4360 /* 4361 * Should not unhold AAC_IOCMD_ASYNC bus, if it has been 4362 * quiesced or being drained by possibly some quiesce 4363 * threads. 4364 */ 4365 if (q == AAC_CMDQ_ASYNC && ((softs->state & 4366 AAC_STATE_QUIESCED) || softs->ndrains)) 4367 continue; 4368 softs->bus_throttle[q] = softs->total_slots; 4369 for (i = 0; i < AAC_MAX_LD; i++) 4370 aac_set_throttle(softs, 4371 &softs->containers[i].dev, 4372 q, softs->total_slots); 4373 for (i = 0; i < AAC_MAX_PD(softs); i++) 4374 aac_set_throttle(softs, &softs->nondasds[i].dev, 4375 q, softs->total_slots); 4376 } 4377 } 4378 } 4379 4380 static int 4381 aac_do_reset(struct aac_softstate *softs) 4382 { 4383 int health; 4384 int rval; 4385 4386 softs->state |= AAC_STATE_RESET; 4387 health = aac_check_adapter_health(softs); 4388 4389 /* 4390 * Hold off new io commands and wait all outstanding io 4391 * commands to complete. 4392 */ 4393 if (health == 0) { 4394 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC]; 4395 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC]; 4396 4397 if (sync_cmds == 0 && async_cmds == 0) { 4398 rval = AAC_IOP_RESET_SUCCEED; 4399 goto finish; 4400 } 4401 /* 4402 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds 4403 * to complete the outstanding io commands 4404 */ 4405 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10; 4406 int (*intr_handler)(struct aac_softstate *); 4407 4408 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4409 /* 4410 * Poll the adapter by ourselves in case interrupt is disabled 4411 * and to avoid releasing the io_lock. 4412 */ 4413 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 
4414 aac_process_intr_new : aac_process_intr_old; 4415 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] || 4416 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) { 4417 drv_usecwait(100); 4418 (void) intr_handler(softs); 4419 timeout--; 4420 } 4421 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4422 4423 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 && 4424 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) { 4425 /* Cmds drained out */ 4426 rval = AAC_IOP_RESET_SUCCEED; 4427 goto finish; 4428 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds || 4429 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) { 4430 /* Cmds not drained out, adapter overloaded */ 4431 rval = AAC_IOP_RESET_ABNORMAL; 4432 goto finish; 4433 } 4434 } 4435 4436 /* 4437 * If a longer waiting time still can't drain any outstanding io 4438 * commands, do IOP reset. 4439 */ 4440 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED) 4441 softs->state |= AAC_STATE_DEAD; 4442 4443 finish: 4444 softs->state &= ~AAC_STATE_RESET; 4445 return (rval); 4446 } 4447 4448 static int 4449 aac_tran_reset(struct scsi_address *ap, int level) 4450 { 4451 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4452 int rval; 4453 4454 DBCALLED(softs, 1); 4455 4456 if (level != RESET_ALL) { 4457 cmn_err(CE_NOTE, "!reset target/lun not supported"); 4458 return (0); 4459 } 4460 4461 mutex_enter(&softs->io_lock); 4462 switch (rval = aac_do_reset(softs)) { 4463 case AAC_IOP_RESET_SUCCEED: 4464 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC, 4465 NULL, CMD_RESET); 4466 aac_start_waiting_io(softs); 4467 break; 4468 case AAC_IOP_RESET_FAILED: 4469 /* Abort IOCTL cmds when adapter is dead */ 4470 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET); 4471 break; 4472 case AAC_IOP_RESET_ABNORMAL: 4473 aac_start_waiting_io(softs); 4474 } 4475 mutex_exit(&softs->io_lock); 4476 4477 aac_drain_comp_q(softs); 4478 return (rval == 0); 4479 } 4480 4481 static int 4482 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 4483 { 4484 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4485 4486 DBCALLED(softs, 1); 4487 4488 mutex_enter(&softs->io_lock); 4489 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED); 4490 mutex_exit(&softs->io_lock); 4491 4492 aac_drain_comp_q(softs); 4493 return (1); 4494 } 4495 4496 void 4497 aac_free_dmamap(struct aac_cmd *acp) 4498 { 4499 /* Free dma mapping */ 4500 if (acp->flags & AAC_CMD_DMA_VALID) { 4501 ASSERT(acp->buf_dma_handle); 4502 (void) ddi_dma_unbind_handle(acp->buf_dma_handle); 4503 acp->flags &= ~AAC_CMD_DMA_VALID; 4504 } 4505 4506 if (acp->abp != NULL) { /* free non-aligned buf DMA */ 4507 ASSERT(acp->buf_dma_handle); 4508 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp) 4509 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr, 4510 (uint8_t *)acp->abp, acp->bp->b_bcount, 4511 DDI_DEV_AUTOINCR); 4512 ddi_dma_mem_free(&acp->abh); 4513 acp->abp = NULL; 4514 } 4515 4516 if (acp->buf_dma_handle) { 4517 ddi_dma_free_handle(&acp->buf_dma_handle); 4518 acp->buf_dma_handle = NULL; 4519 } 4520 } 4521 4522 static void 4523 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 4524 { 4525 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported", 4526 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd); 4527 aac_free_dmamap(acp); 4528 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0); 4529 aac_soft_callback(softs, acp); 4530 } 4531 4532 /* 4533 * Handle command to logical device 4534 */ 4535 static int 4536 aac_tran_start_ld(struct aac_softstate *softs, struct 
aac_cmd *acp) 4537 { 4538 struct aac_container *dvp; 4539 struct scsi_pkt *pkt; 4540 union scsi_cdb *cdbp; 4541 struct buf *bp; 4542 int rval; 4543 4544 dvp = (struct aac_container *)acp->dvp; 4545 pkt = acp->pkt; 4546 cdbp = (void *)pkt->pkt_cdbp; 4547 bp = acp->bp; 4548 4549 switch (cdbp->scc_cmd) { 4550 case SCMD_INQUIRY: /* inquiry */ 4551 aac_free_dmamap(acp); 4552 aac_inquiry(softs, pkt, cdbp, bp); 4553 aac_soft_callback(softs, acp); 4554 rval = TRAN_ACCEPT; 4555 break; 4556 4557 case SCMD_READ_CAPACITY: /* read capacity */ 4558 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4559 struct scsi_capacity cap; 4560 uint64_t last_lba; 4561 4562 /* check 64-bit LBA */ 4563 last_lba = dvp->size - 1; 4564 if (last_lba > 0xffffffffull) { 4565 cap.capacity = 0xfffffffful; 4566 } else { 4567 cap.capacity = BE_32(last_lba); 4568 } 4569 cap.lbasize = BE_32(AAC_SECTOR_SIZE); 4570 4571 aac_free_dmamap(acp); 4572 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4573 bp_mapin(bp); 4574 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8)); 4575 pkt->pkt_state |= STATE_XFERRED_DATA; 4576 } 4577 aac_soft_callback(softs, acp); 4578 rval = TRAN_ACCEPT; 4579 break; 4580 4581 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */ 4582 /* Check if containers need 64-bit LBA support */ 4583 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) { 4584 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4585 struct scsi_capacity_16 cap16; 4586 int cap_len = sizeof (struct scsi_capacity_16); 4587 4588 bzero(&cap16, cap_len); 4589 cap16.sc_capacity = BE_64(dvp->size - 1); 4590 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE); 4591 4592 aac_free_dmamap(acp); 4593 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4594 bp_mapin(bp); 4595 bcopy(&cap16, bp->b_un.b_addr, 4596 min(bp->b_bcount, cap_len)); 4597 pkt->pkt_state |= STATE_XFERRED_DATA; 4598 } 4599 aac_soft_callback(softs, acp); 4600 } else { 4601 aac_unknown_scmd(softs, acp); 4602 } 4603 rval = TRAN_ACCEPT; 4604 break; 4605 4606 case SCMD_READ_G4: /* read_16 */ 4607 case SCMD_WRITE_G4: /* write_16 */ 4608 if (softs->flags & AAC_FLAGS_RAW_IO) { 4609 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 4610 acp->blkno = ((uint64_t) \ 4611 GETG4ADDR(cdbp) << 32) | \ 4612 (uint32_t)GETG4ADDRTL(cdbp); 4613 goto do_io; 4614 } 4615 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported"); 4616 aac_unknown_scmd(softs, acp); 4617 rval = TRAN_ACCEPT; 4618 break; 4619 4620 case SCMD_READ: /* read_6 */ 4621 case SCMD_WRITE: /* write_6 */ 4622 acp->blkno = GETG0ADDR(cdbp); 4623 goto do_io; 4624 4625 case SCMD_READ_G5: /* read_12 */ 4626 case SCMD_WRITE_G5: /* write_12 */ 4627 acp->blkno = GETG5ADDR(cdbp); 4628 goto do_io; 4629 4630 case SCMD_READ_G1: /* read_10 */ 4631 case SCMD_WRITE_G1: /* write_10 */ 4632 acp->blkno = (uint32_t)GETG1ADDR(cdbp); 4633 do_io: 4634 if (acp->flags & AAC_CMD_DMA_VALID) { 4635 uint64_t cnt_size = dvp->size; 4636 4637 /* 4638 * If LBA > array size AND rawio, the 4639 * adapter may hang. So check it before 4640 * sending. 4641 * NOTE: (blkno + blkcnt) may overflow 4642 */ 4643 if ((acp->blkno < cnt_size) && 4644 ((acp->blkno + acp->bcount / 4645 AAC_BLK_SIZE) <= cnt_size)) { 4646 rval = aac_do_io(softs, acp); 4647 } else { 4648 /* 4649 * Request exceeds the capacity of disk, 4650 * set error block number to last LBA 4651 * + 1. 
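 * (cnt_size, i.e. the first block past the end of the container, is passed as the last argument to aac_set_arq_data() below.)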
4652 */ 4653 aac_set_arq_data(pkt, 4654 KEY_ILLEGAL_REQUEST, 0x21, 4655 0x00, cnt_size); 4656 aac_soft_callback(softs, acp); 4657 rval = TRAN_ACCEPT; 4658 } 4659 } else if (acp->bcount == 0) { 4660 /* For 0 length IO, just return ok */ 4661 aac_soft_callback(softs, acp); 4662 rval = TRAN_ACCEPT; 4663 } else { 4664 rval = TRAN_BADPKT; 4665 } 4666 break; 4667 4668 case SCMD_MODE_SENSE: /* mode_sense_6 */ 4669 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */ 4670 int capacity; 4671 4672 aac_free_dmamap(acp); 4673 if (dvp->size > 0xffffffffull) 4674 capacity = 0xfffffffful; /* 64-bit LBA */ 4675 else 4676 capacity = dvp->size; 4677 aac_mode_sense(softs, pkt, cdbp, bp, capacity); 4678 aac_soft_callback(softs, acp); 4679 rval = TRAN_ACCEPT; 4680 break; 4681 } 4682 4683 case SCMD_START_STOP: 4684 if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) { 4685 acp->aac_cmd_fib = aac_cmd_fib_startstop; 4686 acp->ac_comp = aac_startstop_complete; 4687 rval = aac_do_io(softs, acp); 4688 break; 4689 } 4690 /* FALLTHRU */ 4691 case SCMD_TEST_UNIT_READY: 4692 case SCMD_REQUEST_SENSE: 4693 case SCMD_FORMAT: 4694 aac_free_dmamap(acp); 4695 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4696 if (acp->flags & AAC_CMD_BUF_READ) { 4697 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4698 bp_mapin(bp); 4699 bzero(bp->b_un.b_addr, bp->b_bcount); 4700 } 4701 pkt->pkt_state |= STATE_XFERRED_DATA; 4702 } 4703 aac_soft_callback(softs, acp); 4704 rval = TRAN_ACCEPT; 4705 break; 4706 4707 case SCMD_SYNCHRONIZE_CACHE: 4708 acp->flags |= AAC_CMD_NTAG; 4709 acp->aac_cmd_fib = aac_cmd_fib_sync; 4710 acp->ac_comp = aac_synccache_complete; 4711 rval = aac_do_io(softs, acp); 4712 break; 4713 4714 case SCMD_DOORLOCK: 4715 aac_free_dmamap(acp); 4716 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0; 4717 aac_soft_callback(softs, acp); 4718 rval = TRAN_ACCEPT; 4719 break; 4720 4721 default: /* unknown command */ 4722 aac_unknown_scmd(softs, acp); 4723 rval = TRAN_ACCEPT; 4724 break; 4725 } 4726 4727 return (rval); 4728 } 4729 4730 static int 4731 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) 4732 { 4733 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4734 struct aac_cmd *acp = PKT2AC(pkt); 4735 struct aac_device *dvp = acp->dvp; 4736 int rval; 4737 4738 DBCALLED(softs, 2); 4739 4740 /* 4741 * Reinitialize some fields of ac and pkt; the packet may 4742 * have been resubmitted 4743 */ 4744 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \ 4745 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID; 4746 acp->timeout = acp->pkt->pkt_time; 4747 if (pkt->pkt_flags & FLAG_NOINTR) 4748 acp->flags |= AAC_CMD_NO_INTR; 4749 #ifdef DEBUG 4750 acp->fib_flags = AACDB_FLAGS_FIB_SCMD; 4751 #endif 4752 pkt->pkt_reason = CMD_CMPLT; 4753 pkt->pkt_state = 0; 4754 pkt->pkt_statistics = 0; 4755 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */ 4756 4757 if (acp->flags & AAC_CMD_DMA_VALID) { 4758 pkt->pkt_resid = acp->bcount; 4759 /* Consistent packets need to be sync'ed first */ 4760 if ((acp->flags & AAC_CMD_CONSISTENT) && 4761 (acp->flags & AAC_CMD_BUF_WRITE)) 4762 if (aac_dma_sync_ac(acp) != AACOK) { 4763 ddi_fm_service_impact(softs->devinfo_p, 4764 DDI_SERVICE_UNAFFECTED); 4765 return (TRAN_BADPKT); 4766 } 4767 } else { 4768 pkt->pkt_resid = 0; 4769 } 4770 4771 mutex_enter(&softs->io_lock); 4772 AACDB_PRINT_SCMD(softs, acp); 4773 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) && 4774 !(softs->state & AAC_STATE_DEAD)) { 4775 if (dvp->type == AAC_DEV_LD) { 4776 if (ap->a_lun == 0) 4777 rval = 
aac_tran_start_ld(softs, acp); 4778 else 4779 goto error; 4780 } else { 4781 rval = aac_do_io(softs, acp); 4782 } 4783 } else { 4784 error: 4785 #ifdef DEBUG 4786 if (!(softs->state & AAC_STATE_DEAD)) { 4787 AACDB_PRINT_TRAN(softs, 4788 "Cannot send cmd to target t%dL%d: %s", 4789 ap->a_target, ap->a_lun, 4790 "target invalid"); 4791 } else { 4792 AACDB_PRINT(softs, CE_WARN, 4793 "Cannot send cmd to target t%dL%d: %s", 4794 ap->a_target, ap->a_lun, 4795 "adapter dead"); 4796 } 4797 #endif 4798 rval = TRAN_FATAL_ERROR; 4799 } 4800 mutex_exit(&softs->io_lock); 4801 return (rval); 4802 } 4803 4804 static int 4805 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom) 4806 { 4807 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4808 struct aac_device *dvp; 4809 int rval; 4810 4811 DBCALLED(softs, 2); 4812 4813 /* We don't allow inquiring about capabilities for other targets */ 4814 if (cap == NULL || whom == 0) { 4815 AACDB_PRINT(softs, CE_WARN, 4816 "GetCap> %s not supported: whom=%d", cap, whom); 4817 return (-1); 4818 } 4819 4820 mutex_enter(&softs->io_lock); 4821 dvp = AAC_DEV(softs, ap->a_target); 4822 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 4823 mutex_exit(&softs->io_lock); 4824 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap", 4825 ap->a_target, ap->a_lun); 4826 return (-1); 4827 } 4828 4829 switch (scsi_hba_lookup_capstr(cap)) { 4830 case SCSI_CAP_ARQ: /* auto request sense */ 4831 rval = 1; 4832 break; 4833 case SCSI_CAP_UNTAGGED_QING: 4834 case SCSI_CAP_TAGGED_QING: 4835 rval = 1; 4836 break; 4837 case SCSI_CAP_DMA_MAX: 4838 rval = softs->dma_max; 4839 break; 4840 default: 4841 rval = -1; 4842 break; 4843 } 4844 mutex_exit(&softs->io_lock); 4845 4846 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d", 4847 cap, ap->a_target, ap->a_lun, rval); 4848 return (rval); 4849 } 4850 4851 /*ARGSUSED*/ 4852 static int 4853 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 4854 { 4855 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4856 struct aac_device *dvp; 4857 int rval; 4858 4859 DBCALLED(softs, 2); 4860 4861 /* We don't allow inquiring about capabilities for other targets */ 4862 if (cap == NULL || whom == 0) { 4863 AACDB_PRINT(softs, CE_WARN, 4864 "SetCap> %s not supported: whom=%d", cap, whom); 4865 return (-1); 4866 } 4867 4868 mutex_enter(&softs->io_lock); 4869 dvp = AAC_DEV(softs, ap->a_target); 4870 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 4871 mutex_exit(&softs->io_lock); 4872 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap", 4873 ap->a_target, ap->a_lun); 4874 return (-1); 4875 } 4876 4877 switch (scsi_hba_lookup_capstr(cap)) { 4878 case SCSI_CAP_ARQ: 4879 /* Force auto request sense */ 4880 rval = (value == 1) ? 1 : 0; 4881 break; 4882 case SCSI_CAP_UNTAGGED_QING: 4883 case SCSI_CAP_TAGGED_QING: 4884 rval = (value == 1) ? 
1 : 0; 4885 break; 4886 default: 4887 rval = -1; 4888 break; 4889 } 4890 mutex_exit(&softs->io_lock); 4891 4892 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d", 4893 cap, ap->a_target, ap->a_lun, value, rval); 4894 return (rval); 4895 } 4896 4897 static void 4898 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 4899 { 4900 struct aac_cmd *acp = PKT2AC(pkt); 4901 4902 DBCALLED(NULL, 2); 4903 4904 if (acp->sgt) { 4905 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 4906 acp->left_cookien); 4907 } 4908 aac_free_dmamap(acp); 4909 ASSERT(acp->slotp == NULL); 4910 scsi_hba_pkt_free(ap, pkt); 4911 } 4912 4913 int 4914 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp, 4915 struct buf *bp, int flags, int (*cb)(), caddr_t arg) 4916 { 4917 int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP; 4918 uint_t oldcookiec; 4919 int bioerr; 4920 int rval; 4921 4922 oldcookiec = acp->left_cookien; 4923 4924 /* Move window to build s/g map */ 4925 if (acp->total_nwin > 0) { 4926 if (++acp->cur_win < acp->total_nwin) { 4927 off_t off; 4928 size_t len; 4929 4930 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win, 4931 &off, &len, &acp->cookie, &acp->left_cookien); 4932 if (rval == DDI_SUCCESS) 4933 goto get_dma_cookies; 4934 AACDB_PRINT(softs, CE_WARN, 4935 "ddi_dma_getwin() fail %d", rval); 4936 return (AACERR); 4937 } 4938 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer"); 4939 return (AACERR); 4940 } 4941 4942 /* We need to transfer data, so we alloc DMA resources for this pkt */ 4943 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) { 4944 uint_t dma_flags = 0; 4945 struct aac_sge *sge; 4946 4947 /* 4948 * We will still use this point to fake some 4949 * infomation in tran_start 4950 */ 4951 acp->bp = bp; 4952 4953 /* Set dma flags */ 4954 if (BUF_IS_READ(bp)) { 4955 dma_flags |= DDI_DMA_READ; 4956 acp->flags |= AAC_CMD_BUF_READ; 4957 } else { 4958 dma_flags |= DDI_DMA_WRITE; 4959 acp->flags |= AAC_CMD_BUF_WRITE; 4960 } 4961 if (flags & PKT_CONSISTENT) 4962 dma_flags |= DDI_DMA_CONSISTENT; 4963 if (flags & PKT_DMA_PARTIAL) 4964 dma_flags |= DDI_DMA_PARTIAL; 4965 4966 /* Alloc buf dma handle */ 4967 if (!acp->buf_dma_handle) { 4968 rval = ddi_dma_alloc_handle(softs->devinfo_p, 4969 &softs->buf_dma_attr, cb, arg, 4970 &acp->buf_dma_handle); 4971 if (rval != DDI_SUCCESS) { 4972 AACDB_PRINT(softs, CE_WARN, 4973 "Can't allocate DMA handle, errno=%d", 4974 rval); 4975 goto error_out; 4976 } 4977 } 4978 4979 /* Bind buf */ 4980 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) { 4981 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle, 4982 bp, dma_flags, cb, arg, &acp->cookie, 4983 &acp->left_cookien); 4984 } else { 4985 size_t bufsz; 4986 4987 AACDB_PRINT_TRAN(softs, 4988 "non-aligned buffer: addr=0x%p, cnt=%lu", 4989 (void *)bp->b_un.b_addr, bp->b_bcount); 4990 if (bp->b_flags & (B_PAGEIO|B_PHYS)) 4991 bp_mapin(bp); 4992 4993 rval = ddi_dma_mem_alloc(acp->buf_dma_handle, 4994 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN), 4995 &softs->acc_attr, DDI_DMA_STREAMING, 4996 cb, arg, &acp->abp, &bufsz, &acp->abh); 4997 4998 if (rval != DDI_SUCCESS) { 4999 AACDB_PRINT(softs, CE_NOTE, 5000 "Cannot alloc DMA to non-aligned buf"); 5001 bioerr = 0; 5002 goto error_out; 5003 } 5004 5005 if (acp->flags & AAC_CMD_BUF_WRITE) 5006 ddi_rep_put8(acp->abh, 5007 (uint8_t *)bp->b_un.b_addr, 5008 (uint8_t *)acp->abp, bp->b_bcount, 5009 DDI_DEV_AUTOINCR); 5010 5011 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle, 5012 NULL, acp->abp, bufsz, dma_flags, cb, 
arg, 5013 &acp->cookie, &acp->left_cookien); 5014 } 5015 5016 switch (rval) { 5017 case DDI_DMA_PARTIAL_MAP: 5018 if (ddi_dma_numwin(acp->buf_dma_handle, 5019 &acp->total_nwin) == DDI_FAILURE) { 5020 AACDB_PRINT(softs, CE_WARN, 5021 "Cannot get number of DMA windows"); 5022 bioerr = 0; 5023 goto error_out; 5024 } 5025 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5026 acp->left_cookien); 5027 acp->cur_win = 0; 5028 break; 5029 5030 case DDI_DMA_MAPPED: 5031 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5032 acp->left_cookien); 5033 acp->cur_win = 0; 5034 acp->total_nwin = 1; 5035 break; 5036 5037 case DDI_DMA_NORESOURCES: 5038 bioerr = 0; 5039 AACDB_PRINT(softs, CE_WARN, 5040 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES"); 5041 goto error_out; 5042 case DDI_DMA_BADATTR: 5043 case DDI_DMA_NOMAPPING: 5044 bioerr = EFAULT; 5045 AACDB_PRINT(softs, CE_WARN, 5046 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING"); 5047 goto error_out; 5048 case DDI_DMA_TOOBIG: 5049 bioerr = EINVAL; 5050 AACDB_PRINT(softs, CE_WARN, 5051 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)", 5052 bp->b_bcount); 5053 goto error_out; 5054 default: 5055 bioerr = EINVAL; 5056 AACDB_PRINT(softs, CE_WARN, 5057 "Cannot bind buf for DMA: %d", rval); 5058 goto error_out; 5059 } 5060 acp->flags |= AAC_CMD_DMA_VALID; 5061 5062 get_dma_cookies: 5063 ASSERT(acp->left_cookien > 0); 5064 if (acp->left_cookien > softs->aac_sg_tablesize) { 5065 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d", 5066 acp->left_cookien); 5067 bioerr = EINVAL; 5068 goto error_out; 5069 } 5070 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) { 5071 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 5072 oldcookiec); 5073 acp->sgt = NULL; 5074 } 5075 if (acp->sgt == NULL) { 5076 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \ 5077 acp->left_cookien, kf); 5078 if (acp->sgt == NULL) { 5079 AACDB_PRINT(softs, CE_WARN, 5080 "sgt kmem_alloc fail"); 5081 bioerr = ENOMEM; 5082 goto error_out; 5083 } 5084 } 5085 5086 sge = &acp->sgt[0]; 5087 sge->bcount = acp->cookie.dmac_size; 5088 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5089 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5090 acp->bcount = acp->cookie.dmac_size; 5091 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) { 5092 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie); 5093 sge->bcount = acp->cookie.dmac_size; 5094 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5095 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5096 acp->bcount += acp->cookie.dmac_size; 5097 } 5098 5099 /* 5100 * Note: The old DMA engine does not correctly handle the 5101 * dma_attr_maxxfer attribute, so we have to enforce 5102 * it ourselves. 5103 */ 5104 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) { 5105 AACDB_PRINT(softs, CE_NOTE, 5106 "large xfer size received %d\n", acp->bcount); 5107 bioerr = EINVAL; 5108 goto error_out; 5109 } 5110 5111 acp->total_xfer += acp->bcount; 5112 5113 if (acp->pkt) { 5114 /* Return remaining byte count */ 5115 if (acp->total_xfer <= bp->b_bcount) { 5116 acp->pkt->pkt_resid = bp->b_bcount - \ 5117 acp->total_xfer; 5118 } else { 5119 /* 5120 * Allocated DMA size is greater than the buf 5121 * size of bp. This is caused by devices like 5122 * tape. We have extra bytes allocated, but 5123 * the packet residual has to stay correct.
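 * (That is why pkt_resid is computed from bp->b_bcount above rather than from the padded DMA allocation size.)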
5124 */ 5125 acp->pkt->pkt_resid = 0; 5126 } 5127 AACDB_PRINT_TRAN(softs, 5128 "bp=0x%p, xfered=%d/%d, resid=%d", 5129 (void *)bp->b_un.b_addr, (int)acp->total_xfer, 5130 (int)bp->b_bcount, (int)acp->pkt->pkt_resid); 5131 } 5132 } 5133 return (AACOK); 5134 5135 error_out: 5136 bioerror(bp, bioerr); 5137 return (AACERR); 5138 } 5139 5140 static struct scsi_pkt * 5141 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 5142 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags, 5143 int (*callback)(), caddr_t arg) 5144 { 5145 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5146 struct aac_cmd *acp, *new_acp; 5147 5148 DBCALLED(softs, 2); 5149 5150 /* Allocate pkt */ 5151 if (pkt == NULL) { 5152 int slen; 5153 5154 /* Force auto request sense */ 5155 slen = (statuslen > softs->slen) ? statuslen : softs->slen; 5156 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen, 5157 slen, tgtlen, sizeof (struct aac_cmd), callback, arg); 5158 if (pkt == NULL) { 5159 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed"); 5160 return (NULL); 5161 } 5162 acp = new_acp = PKT2AC(pkt); 5163 acp->pkt = pkt; 5164 acp->cmdlen = cmdlen; 5165 5166 if (ap->a_target < AAC_MAX_LD) { 5167 acp->dvp = &softs->containers[ap->a_target].dev; 5168 acp->aac_cmd_fib = softs->aac_cmd_fib; 5169 acp->ac_comp = aac_ld_complete; 5170 } else { 5171 _NOTE(ASSUMING_PROTECTED(softs->nondasds)) 5172 5173 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev; 5174 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi; 5175 acp->ac_comp = aac_pd_complete; 5176 } 5177 } else { 5178 acp = PKT2AC(pkt); 5179 new_acp = NULL; 5180 } 5181 5182 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK) 5183 return (pkt); 5184 5185 if (new_acp) 5186 aac_tran_destroy_pkt(ap, pkt); 5187 return (NULL); 5188 } 5189 5190 /* 5191 * tran_sync_pkt(9E) - explicit DMA synchronization 5192 */ 5193 /*ARGSUSED*/ 5194 static void 5195 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5196 { 5197 struct aac_cmd *acp = PKT2AC(pkt); 5198 5199 DBCALLED(NULL, 2); 5200 5201 if (aac_dma_sync_ac(acp) != AACOK) 5202 ddi_fm_service_impact( 5203 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p, 5204 DDI_SERVICE_UNAFFECTED); 5205 } 5206 5207 /* 5208 * tran_dmafree(9E) - deallocate DMA resources allocated for command 5209 */ 5210 /*ARGSUSED*/ 5211 static void 5212 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 5213 { 5214 struct aac_cmd *acp = PKT2AC(pkt); 5215 5216 DBCALLED(NULL, 2); 5217 5218 aac_free_dmamap(acp); 5219 } 5220 5221 static int 5222 aac_do_quiesce(struct aac_softstate *softs) 5223 { 5224 aac_hold_bus(softs, AAC_IOCMD_ASYNC); 5225 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 5226 aac_start_drain(softs); 5227 do { 5228 if (cv_wait_sig(&softs->drain_cv, 5229 &softs->io_lock) == 0) { 5230 /* Quiesce has been interrupted */ 5231 aac_stop_drain(softs); 5232 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5233 aac_start_waiting_io(softs); 5234 return (AACERR); 5235 } 5236 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]); 5237 aac_stop_drain(softs); 5238 } 5239 5240 softs->state |= AAC_STATE_QUIESCED; 5241 return (AACOK); 5242 } 5243 5244 static int 5245 aac_tran_quiesce(dev_info_t *dip) 5246 { 5247 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5248 int rval; 5249 5250 DBCALLED(softs, 1); 5251 5252 mutex_enter(&softs->io_lock); 5253 if (aac_do_quiesce(softs) == AACOK) 5254 rval = 0; 5255 else 5256 rval = 1; 5257 mutex_exit(&softs->io_lock); 5258 return (rval); 5259 } 5260 5261 static int 5262 aac_do_unquiesce(struct 
aac_softstate *softs) 5263 { 5264 softs->state &= ~AAC_STATE_QUIESCED; 5265 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5266 5267 aac_start_waiting_io(softs); 5268 return (AACOK); 5269 } 5270 5271 static int 5272 aac_tran_unquiesce(dev_info_t *dip) 5273 { 5274 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5275 int rval; 5276 5277 DBCALLED(softs, 1); 5278 5279 mutex_enter(&softs->io_lock); 5280 if (aac_do_unquiesce(softs) == AACOK) 5281 rval = 0; 5282 else 5283 rval = 1; 5284 mutex_exit(&softs->io_lock); 5285 return (rval); 5286 } 5287 5288 static int 5289 aac_hba_setup(struct aac_softstate *softs) 5290 { 5291 scsi_hba_tran_t *hba_tran; 5292 int rval; 5293 5294 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP); 5295 if (hba_tran == NULL) 5296 return (AACERR); 5297 hba_tran->tran_hba_private = softs; 5298 hba_tran->tran_tgt_init = aac_tran_tgt_init; 5299 hba_tran->tran_tgt_free = aac_tran_tgt_free; 5300 hba_tran->tran_tgt_probe = scsi_hba_probe; 5301 hba_tran->tran_start = aac_tran_start; 5302 hba_tran->tran_getcap = aac_tran_getcap; 5303 hba_tran->tran_setcap = aac_tran_setcap; 5304 hba_tran->tran_init_pkt = aac_tran_init_pkt; 5305 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt; 5306 hba_tran->tran_reset = aac_tran_reset; 5307 hba_tran->tran_abort = aac_tran_abort; 5308 hba_tran->tran_sync_pkt = aac_tran_sync_pkt; 5309 hba_tran->tran_dmafree = aac_tran_dmafree; 5310 hba_tran->tran_quiesce = aac_tran_quiesce; 5311 hba_tran->tran_unquiesce = aac_tran_unquiesce; 5312 hba_tran->tran_bus_config = aac_tran_bus_config; 5313 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr, 5314 hba_tran, 0); 5315 if (rval != DDI_SUCCESS) { 5316 scsi_hba_tran_free(hba_tran); 5317 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed"); 5318 return (AACERR); 5319 } 5320 5321 softs->hba_tran = hba_tran; 5322 return (AACOK); 5323 } 5324 5325 /* 5326 * FIB setup operations 5327 */ 5328 5329 /* 5330 * Init FIB header 5331 */ 5332 static void 5333 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_slot *slotp, 5334 uint16_t cmd, uint16_t fib_size) 5335 { 5336 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5337 struct aac_fib *fibp = slotp->fibp; 5338 uint32_t xfer_state; 5339 5340 xfer_state = 5341 AAC_FIBSTATE_HOSTOWNED | 5342 AAC_FIBSTATE_INITIALISED | 5343 AAC_FIBSTATE_EMPTY | 5344 AAC_FIBSTATE_FROMHOST | 5345 AAC_FIBSTATE_REXPECTED | 5346 AAC_FIBSTATE_NORM; 5347 if (slotp->acp && !(slotp->acp->flags & AAC_CMD_SYNC)) { 5348 xfer_state |= 5349 AAC_FIBSTATE_ASYNC | 5350 AAC_FIBSTATE_FAST_RESPONSE /* enable fast io */; 5351 ddi_put16(acc, &fibp->Header.SenderSize, 5352 softs->aac_max_fib_size); 5353 } else { 5354 ddi_put16(acc, &fibp->Header.SenderSize, AAC_FIB_SIZE); 5355 } 5356 5357 ddi_put32(acc, &fibp->Header.XferState, xfer_state); 5358 ddi_put16(acc, &fibp->Header.Command, cmd); 5359 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB); 5360 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */ 5361 ddi_put16(acc, &fibp->Header.Size, fib_size); 5362 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2)); 5363 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5364 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */ 5365 } 5366 5367 /* 5368 * Init FIB for raw IO command 5369 */ 5370 static void 5371 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp) 5372 { 5373 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5374 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0]; 5375 struct 
aac_sg_entryraw *sgp; 5376 struct aac_sge *sge; 5377 5378 /* Calculate FIB size */ 5379 acp->fib_size = sizeof (struct aac_fib_header) + \ 5380 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \ 5381 sizeof (struct aac_sg_entryraw); 5382 5383 aac_cmd_fib_header(softs, acp->slotp, RawIo, acp->fib_size); 5384 5385 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0); 5386 ddi_put16(acc, &io->BpTotal, 0); 5387 ddi_put16(acc, &io->BpComplete, 0); 5388 5389 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno)); 5390 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno)); 5391 ddi_put16(acc, &io->ContainerId, 5392 ((struct aac_container *)acp->dvp)->cid); 5393 5394 /* Fill SG table */ 5395 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien); 5396 ddi_put32(acc, &io->ByteCount, acp->bcount); 5397 5398 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0]; 5399 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5400 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5401 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5402 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5403 sgp->Next = 0; 5404 sgp->Prev = 0; 5405 sgp->Flags = 0; 5406 } 5407 } 5408 5409 /* Init FIB for 64-bit block IO command */ 5410 static void 5411 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp) 5412 { 5413 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5414 struct aac_blockread64 *br = (struct aac_blockread64 *) \ 5415 &acp->slotp->fibp->data[0]; 5416 struct aac_sg_entry64 *sgp; 5417 struct aac_sge *sge; 5418 5419 acp->fib_size = sizeof (struct aac_fib_header) + \ 5420 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \ 5421 sizeof (struct aac_sg_entry64); 5422 5423 aac_cmd_fib_header(softs, acp->slotp, ContainerCommand64, 5424 acp->fib_size); 5425 5426 /* 5427 * The definitions for aac_blockread64 and aac_blockwrite64 5428 * are the same. 5429 */ 5430 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5431 ddi_put16(acc, &br->ContainerId, 5432 ((struct aac_container *)acp->dvp)->cid); 5433 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ? 
5434 VM_CtHostRead64 : VM_CtHostWrite64); 5435 ddi_put16(acc, &br->Pad, 0); 5436 ddi_put16(acc, &br->Flags, 0); 5437 5438 /* Fill SG table */ 5439 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien); 5440 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE); 5441 5442 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0]; 5443 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5444 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5445 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5446 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5447 } 5448 } 5449 5450 /* Init FIB for block IO command */ 5451 static void 5452 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp) 5453 { 5454 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5455 struct aac_blockread *br = (struct aac_blockread *) \ 5456 &acp->slotp->fibp->data[0]; 5457 struct aac_sg_entry *sgp; 5458 struct aac_sge *sge = &acp->sgt[0]; 5459 5460 if (acp->flags & AAC_CMD_BUF_READ) { 5461 acp->fib_size = sizeof (struct aac_fib_header) + \ 5462 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \ 5463 sizeof (struct aac_sg_entry); 5464 5465 ddi_put32(acc, &br->Command, VM_CtBlockRead); 5466 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien); 5467 sgp = &br->SgMap.SgEntry[0]; 5468 } else { 5469 struct aac_blockwrite *bw = (struct aac_blockwrite *)br; 5470 5471 acp->fib_size = sizeof (struct aac_fib_header) + \ 5472 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \ 5473 sizeof (struct aac_sg_entry); 5474 5475 ddi_put32(acc, &bw->Command, VM_CtBlockWrite); 5476 ddi_put32(acc, &bw->Stable, CUNSTABLE); 5477 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien); 5478 sgp = &bw->SgMap.SgEntry[0]; 5479 } 5480 aac_cmd_fib_header(softs, acp->slotp, ContainerCommand, acp->fib_size); 5481 5482 /* 5483 * aac_blockread and aac_blockwrite have the similar 5484 * structure head, so use br for bw here 5485 */ 5486 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5487 ddi_put32(acc, &br->ContainerId, 5488 ((struct aac_container *)acp->dvp)->cid); 5489 ddi_put32(acc, &br->ByteCount, acp->bcount); 5490 5491 /* Fill SG table */ 5492 for (sge = &acp->sgt[0]; 5493 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5494 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5495 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5496 } 5497 } 5498 5499 /*ARGSUSED*/ 5500 void 5501 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp) 5502 { 5503 struct aac_slot *slotp = acp->slotp; 5504 struct aac_fib *fibp = slotp->fibp; 5505 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5506 5507 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp, 5508 acp->fib_size, /* only copy data of needed length */ 5509 DDI_DEV_AUTOINCR); 5510 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5511 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2); 5512 } 5513 5514 static void 5515 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp) 5516 { 5517 struct aac_slot *slotp = acp->slotp; 5518 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5519 struct aac_synchronize_command *sync = 5520 (struct aac_synchronize_command *)&slotp->fibp->data[0]; 5521 5522 acp->fib_size = sizeof (struct aac_fib_header) + \ 5523 sizeof (struct aac_synchronize_command); 5524 5525 aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size); 5526 ddi_put32(acc, &sync->Command, VM_ContainerConfig); 5527 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE); 5528 
ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid); 5529 ddi_put32(acc, &sync->Count, 5530 sizeof (((struct aac_synchronize_reply *)0)->Data)); 5531 } 5532 5533 /* 5534 * Start/Stop unit (Power Management) 5535 */ 5536 static void 5537 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp) 5538 { 5539 struct aac_slot *slotp = acp->slotp; 5540 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5541 struct aac_Container *cmd = 5542 (struct aac_Container *)&slotp->fibp->data[0]; 5543 union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp; 5544 5545 acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container); 5546 5547 aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size); 5548 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 5549 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 5550 ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT); 5551 ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \ 5552 AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT); 5553 ddi_put32(acc, &cmd->CTCommand.param[1], 5554 ((struct aac_container *)acp->dvp)->cid); 5555 ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1); 5556 } 5557 5558 /* 5559 * Init FIB for pass-through SCMD 5560 */ 5561 static void 5562 aac_cmd_fib_srb(struct aac_cmd *acp) 5563 { 5564 struct aac_slot *slotp = acp->slotp; 5565 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5566 struct aac_srb *srb = (struct aac_srb *)&slotp->fibp->data[0]; 5567 uint8_t *cdb; 5568 5569 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi); 5570 ddi_put32(acc, &srb->retry_limit, 0); 5571 ddi_put32(acc, &srb->cdb_size, acp->cmdlen); 5572 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */ 5573 if (acp->fibp == NULL) { 5574 if (acp->flags & AAC_CMD_BUF_READ) 5575 ddi_put32(acc, &srb->flags, SRB_DataIn); 5576 else if (acp->flags & AAC_CMD_BUF_WRITE) 5577 ddi_put32(acc, &srb->flags, SRB_DataOut); 5578 ddi_put32(acc, &srb->channel, 5579 ((struct aac_nondasd *)acp->dvp)->bus); 5580 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid); 5581 ddi_put32(acc, &srb->lun, 0); 5582 cdb = acp->pkt->pkt_cdbp; 5583 } else { 5584 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0]; 5585 5586 ddi_put32(acc, &srb->flags, srb0->flags); 5587 ddi_put32(acc, &srb->channel, srb0->channel); 5588 ddi_put32(acc, &srb->id, srb0->id); 5589 ddi_put32(acc, &srb->lun, srb0->lun); 5590 cdb = srb0->cdb; 5591 } 5592 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR); 5593 } 5594 5595 static void 5596 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp) 5597 { 5598 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5599 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5600 struct aac_sg_entry *sgp; 5601 struct aac_sge *sge; 5602 5603 acp->fib_size = sizeof (struct aac_fib_header) + \ 5604 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5605 acp->left_cookien * sizeof (struct aac_sg_entry); 5606 5607 /* Fill FIB and SRB headers, and copy cdb */ 5608 aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommand, acp->fib_size); 5609 aac_cmd_fib_srb(acp); 5610 5611 /* Fill SG table */ 5612 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5613 ddi_put32(acc, &srb->count, acp->bcount); 5614 5615 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0]; 5616 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5617 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5618 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5619 } 5620 } 5621 5622 static void 5623 
aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp) 5624 { 5625 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5626 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5627 struct aac_sg_entry64 *sgp; 5628 struct aac_sge *sge; 5629 5630 acp->fib_size = sizeof (struct aac_fib_header) + \ 5631 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5632 acp->left_cookien * sizeof (struct aac_sg_entry64); 5633 5634 /* Fill FIB and SRB headers, and copy cdb */ 5635 aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommandU64, 5636 acp->fib_size); 5637 aac_cmd_fib_srb(acp); 5638 5639 /* Fill SG table */ 5640 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5641 ddi_put32(acc, &srb->count, acp->bcount); 5642 5643 for (sge = &acp->sgt[0], 5644 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0]; 5645 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5646 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5647 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5648 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5649 } 5650 } 5651 5652 static int 5653 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 5654 { 5655 struct aac_slot *slotp; 5656 5657 if (slotp = aac_get_slot(softs)) { 5658 acp->slotp = slotp; 5659 slotp->acp = acp; 5660 acp->aac_cmd_fib(softs, acp); 5661 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, 5662 DDI_DMA_SYNC_FORDEV); 5663 return (AACOK); 5664 } 5665 return (AACERR); 5666 } 5667 5668 static int 5669 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp) 5670 { 5671 struct aac_device *dvp = acp->dvp; 5672 int q = AAC_CMDQ(acp); 5673 5674 if (dvp) { 5675 if (dvp->ncmds[q] < dvp->throttle[q]) { 5676 if (!(acp->flags & AAC_CMD_NTAG) || 5677 dvp->ncmds[q] == 0) { 5678 do_bind: 5679 return (aac_cmd_slot_bind(softs, acp)); 5680 } 5681 ASSERT(q == AAC_CMDQ_ASYNC); 5682 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC, 5683 AAC_THROTTLE_DRAIN); 5684 } 5685 } else { 5686 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) 5687 goto do_bind; 5688 } 5689 return (AACERR); 5690 } 5691 5692 static void 5693 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp) 5694 { 5695 struct aac_slot *slotp = acp->slotp; 5696 int q = AAC_CMDQ(acp); 5697 int rval; 5698 5699 /* Set ac and pkt */ 5700 if (acp->pkt) { /* ac from ioctl has no pkt */ 5701 acp->pkt->pkt_state |= 5702 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD; 5703 } 5704 if (acp->timeout) /* 0 indicates no timeout */ 5705 acp->timeout += aac_timebase + aac_tick; 5706 5707 if (acp->dvp) 5708 acp->dvp->ncmds[q]++; 5709 softs->bus_ncmds[q]++; 5710 aac_cmd_enqueue(&softs->q_busy, acp); 5711 5712 AACDB_PRINT_FIB(softs, slotp); 5713 5714 if (softs->flags & AAC_FLAGS_NEW_COMM) { 5715 rval = aac_send_command(softs, slotp); 5716 } else { 5717 /* 5718 * If fib can not be enqueued, the adapter is in an abnormal 5719 * state, there will be no interrupt to us. 5720 */ 5721 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q, 5722 slotp->fib_phyaddr, acp->fib_size); 5723 } 5724 5725 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) 5726 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 5727 5728 /* 5729 * NOTE: We send command only when slots availabe, so should never 5730 * reach here. 
5731 */ 5732 if (rval != AACOK) { 5733 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed"); 5734 if (acp->pkt) { 5735 acp->pkt->pkt_state &= ~STATE_SENT_CMD; 5736 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0); 5737 } 5738 aac_end_io(softs, acp); 5739 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB))) 5740 ddi_trigger_softintr(softs->softint_id); 5741 } 5742 } 5743 5744 static void 5745 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q) 5746 { 5747 struct aac_cmd *acp, *next_acp; 5748 5749 /* Serve as many waiting io's as possible */ 5750 for (acp = q->q_head; acp; acp = next_acp) { 5751 next_acp = acp->next; 5752 if (aac_bind_io(softs, acp) == AACOK) { 5753 aac_cmd_delete(q, acp); 5754 aac_start_io(softs, acp); 5755 } 5756 if (softs->free_io_slot_head == NULL) 5757 break; 5758 } 5759 } 5760 5761 static void 5762 aac_start_waiting_io(struct aac_softstate *softs) 5763 { 5764 /* 5765 * Sync FIB io is served before async FIB io so that io requests 5766 * sent by interactive userland commands get responded asap. 5767 */ 5768 if (softs->q_wait[AAC_CMDQ_SYNC].q_head) 5769 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]); 5770 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head) 5771 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]); 5772 } 5773 5774 static void 5775 aac_drain_comp_q(struct aac_softstate *softs) 5776 { 5777 struct aac_cmd *acp; 5778 struct scsi_pkt *pkt; 5779 5780 /*CONSTCOND*/ 5781 while (1) { 5782 mutex_enter(&softs->q_comp_mutex); 5783 acp = aac_cmd_dequeue(&softs->q_comp); 5784 mutex_exit(&softs->q_comp_mutex); 5785 if (acp != NULL) { 5786 ASSERT(acp->pkt != NULL); 5787 pkt = acp->pkt; 5788 5789 if (pkt->pkt_reason == CMD_CMPLT) { 5790 /* 5791 * Consistent packets need to be sync'ed first 5792 */ 5793 if ((acp->flags & AAC_CMD_CONSISTENT) && 5794 (acp->flags & AAC_CMD_BUF_READ)) { 5795 if (aac_dma_sync_ac(acp) != AACOK) { 5796 ddi_fm_service_impact( 5797 softs->devinfo_p, 5798 DDI_SERVICE_UNAFFECTED); 5799 pkt->pkt_reason = CMD_TRAN_ERR; 5800 pkt->pkt_statistics = 0; 5801 } 5802 } 5803 if ((aac_check_acc_handle(softs-> \ 5804 comm_space_acc_handle) != DDI_SUCCESS) || 5805 (aac_check_acc_handle(softs-> \ 5806 pci_mem_handle) != DDI_SUCCESS)) { 5807 ddi_fm_service_impact(softs->devinfo_p, 5808 DDI_SERVICE_UNAFFECTED); 5809 ddi_fm_acc_err_clear(softs-> \ 5810 pci_mem_handle, DDI_FME_VER0); 5811 pkt->pkt_reason = CMD_TRAN_ERR; 5812 pkt->pkt_statistics = 0; 5813 } 5814 if (aac_check_dma_handle(softs-> \ 5815 comm_space_dma_handle) != DDI_SUCCESS) { 5816 ddi_fm_service_impact(softs->devinfo_p, 5817 DDI_SERVICE_UNAFFECTED); 5818 pkt->pkt_reason = CMD_TRAN_ERR; 5819 pkt->pkt_statistics = 0; 5820 } 5821 } 5822 scsi_hba_pkt_comp(pkt); 5823 } else { 5824 break; 5825 } 5826 } 5827 } 5828 5829 static int 5830 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp) 5831 { 5832 size_t rlen; 5833 ddi_dma_cookie_t cookie; 5834 uint_t cookien; 5835 5836 /* Allocate FIB dma resource */ 5837 if (ddi_dma_alloc_handle( 5838 softs->devinfo_p, 5839 &softs->addr_dma_attr, 5840 DDI_DMA_SLEEP, 5841 NULL, 5842 &slotp->fib_dma_handle) != DDI_SUCCESS) { 5843 AACDB_PRINT(softs, CE_WARN, 5844 "Cannot alloc dma handle for slot fib area"); 5845 goto error; 5846 } 5847 if (ddi_dma_mem_alloc( 5848 slotp->fib_dma_handle, 5849 softs->aac_max_fib_size, 5850 &softs->acc_attr, 5851 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 5852 DDI_DMA_SLEEP, 5853 NULL, 5854 (caddr_t *)&slotp->fibp, 5855 &rlen, 5856 &slotp->fib_acc_handle) != DDI_SUCCESS) { 5857 AACDB_PRINT(softs, CE_WARN, 5858 "Cannot alloc 
mem for slot fib area"); 5859 goto error; 5860 } 5861 if (ddi_dma_addr_bind_handle( 5862 slotp->fib_dma_handle, 5863 NULL, 5864 (caddr_t)slotp->fibp, 5865 softs->aac_max_fib_size, 5866 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 5867 DDI_DMA_SLEEP, 5868 NULL, 5869 &cookie, 5870 &cookien) != DDI_DMA_MAPPED) { 5871 AACDB_PRINT(softs, CE_WARN, 5872 "dma bind failed for slot fib area"); 5873 goto error; 5874 } 5875 5876 /* Check dma handles allocated in fib attach */ 5877 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) { 5878 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 5879 goto error; 5880 } 5881 5882 /* Check acc handles allocated in fib attach */ 5883 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) { 5884 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 5885 goto error; 5886 } 5887 5888 slotp->fib_phyaddr = cookie.dmac_laddress; 5889 return (AACOK); 5890 5891 error: 5892 if (slotp->fib_acc_handle) { 5893 ddi_dma_mem_free(&slotp->fib_acc_handle); 5894 slotp->fib_acc_handle = NULL; 5895 } 5896 if (slotp->fib_dma_handle) { 5897 ddi_dma_free_handle(&slotp->fib_dma_handle); 5898 slotp->fib_dma_handle = NULL; 5899 } 5900 return (AACERR); 5901 } 5902 5903 static void 5904 aac_free_fib(struct aac_slot *slotp) 5905 { 5906 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle); 5907 ddi_dma_mem_free(&slotp->fib_acc_handle); 5908 slotp->fib_acc_handle = NULL; 5909 ddi_dma_free_handle(&slotp->fib_dma_handle); 5910 slotp->fib_dma_handle = NULL; 5911 slotp->fib_phyaddr = 0; 5912 } 5913 5914 static void 5915 aac_alloc_fibs(struct aac_softstate *softs) 5916 { 5917 int i; 5918 struct aac_slot *slotp; 5919 5920 for (i = 0; i < softs->total_slots && 5921 softs->total_fibs < softs->total_slots; i++) { 5922 slotp = &(softs->io_slot[i]); 5923 if (slotp->fib_phyaddr) 5924 continue; 5925 if (aac_alloc_fib(softs, slotp) != AACOK) 5926 break; 5927 5928 /* Insert the slot to the free slot list */ 5929 aac_release_slot(softs, slotp); 5930 softs->total_fibs++; 5931 } 5932 } 5933 5934 static void 5935 aac_destroy_fibs(struct aac_softstate *softs) 5936 { 5937 struct aac_slot *slotp; 5938 5939 while ((slotp = softs->free_io_slot_head) != NULL) { 5940 ASSERT(slotp->fib_phyaddr); 5941 softs->free_io_slot_head = slotp->next; 5942 aac_free_fib(slotp); 5943 ASSERT(slotp->index == (slotp - softs->io_slot)); 5944 softs->total_fibs--; 5945 } 5946 ASSERT(softs->total_fibs == 0); 5947 } 5948 5949 static int 5950 aac_create_slots(struct aac_softstate *softs) 5951 { 5952 int i; 5953 5954 softs->total_slots = softs->aac_max_fibs; 5955 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \ 5956 softs->total_slots, KM_SLEEP); 5957 if (softs->io_slot == NULL) { 5958 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot"); 5959 return (AACERR); 5960 } 5961 for (i = 0; i < softs->total_slots; i++) 5962 softs->io_slot[i].index = i; 5963 softs->free_io_slot_head = NULL; 5964 softs->total_fibs = 0; 5965 return (AACOK); 5966 } 5967 5968 static void 5969 aac_destroy_slots(struct aac_softstate *softs) 5970 { 5971 ASSERT(softs->free_io_slot_head == NULL); 5972 5973 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \ 5974 softs->total_slots); 5975 softs->io_slot = NULL; 5976 softs->total_slots = 0; 5977 } 5978 5979 struct aac_slot * 5980 aac_get_slot(struct aac_softstate *softs) 5981 { 5982 struct aac_slot *slotp; 5983 5984 if ((slotp = softs->free_io_slot_head) != NULL) { 5985 softs->free_io_slot_head = slotp->next; 5986 slotp->next = NULL; 5987 } 5988 return (slotp); 5989 } 5990 5991 static void 
5992 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp) 5993 { 5994 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots)); 5995 ASSERT(slotp == &softs->io_slot[slotp->index]); 5996 5997 slotp->acp = NULL; 5998 slotp->next = softs->free_io_slot_head; 5999 softs->free_io_slot_head = slotp; 6000 } 6001 6002 int 6003 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp) 6004 { 6005 if (aac_bind_io(softs, acp) == AACOK) 6006 aac_start_io(softs, acp); 6007 else 6008 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp); 6009 6010 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) 6011 return (TRAN_ACCEPT); 6012 /* 6013 * Because sync FIB is always 512 bytes and used for critical 6014 * functions, async FIB is used for poll IO. 6015 */ 6016 if (acp->flags & AAC_CMD_NO_INTR) { 6017 if (aac_do_poll_io(softs, acp) == AACOK) 6018 return (TRAN_ACCEPT); 6019 } else { 6020 if (aac_do_sync_io(softs, acp) == AACOK) 6021 return (TRAN_ACCEPT); 6022 } 6023 return (TRAN_BADPKT); 6024 } 6025 6026 static int 6027 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp) 6028 { 6029 int (*intr_handler)(struct aac_softstate *); 6030 6031 /* 6032 * Interrupt is disabled, we have to poll the adapter by ourselves. 6033 */ 6034 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 6035 aac_process_intr_new : aac_process_intr_old; 6036 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) { 6037 int i = AAC_POLL_TIME * 1000; 6038 6039 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i); 6040 if (i == 0) 6041 aac_cmd_timeout(softs, acp); 6042 } 6043 6044 ddi_trigger_softintr(softs->softint_id); 6045 6046 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR)) 6047 return (AACOK); 6048 return (AACERR); 6049 } 6050 6051 static int 6052 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp) 6053 { 6054 ASSERT(softs && acp); 6055 6056 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) 6057 cv_wait(&softs->event, &softs->io_lock); 6058 6059 if (acp->flags & AAC_CMD_CMPLT) 6060 return (AACOK); 6061 return (AACERR); 6062 } 6063 6064 static int 6065 aac_dma_sync_ac(struct aac_cmd *acp) 6066 { 6067 if (acp->buf_dma_handle) { 6068 if (acp->flags & AAC_CMD_BUF_WRITE) { 6069 if (acp->abp != NULL) 6070 ddi_rep_put8(acp->abh, 6071 (uint8_t *)acp->bp->b_un.b_addr, 6072 (uint8_t *)acp->abp, acp->bp->b_bcount, 6073 DDI_DEV_AUTOINCR); 6074 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6075 DDI_DMA_SYNC_FORDEV); 6076 } else { 6077 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6078 DDI_DMA_SYNC_FORCPU); 6079 if (aac_check_dma_handle(acp->buf_dma_handle) != 6080 DDI_SUCCESS) 6081 return (AACERR); 6082 if (acp->abp != NULL) 6083 ddi_rep_get8(acp->abh, 6084 (uint8_t *)acp->bp->b_un.b_addr, 6085 (uint8_t *)acp->abp, acp->bp->b_bcount, 6086 DDI_DEV_AUTOINCR); 6087 } 6088 } 6089 return (AACOK); 6090 } 6091 6092 /* 6093 * The following function comes from Adaptec: 6094 * 6095 * When driver sees a particular event that means containers are changed, it 6096 * will rescan containers. However a change may not be complete until some 6097 * other event is received. For example, creating or deleting an array will 6098 * incur as many as six AifEnConfigChange events which would generate six 6099 * container rescans. To diminish rescans, driver set a flag to wait for 6100 * another particular event. When sees that events come in, it will do rescan. 
6101 */ 6102 static int 6103 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp) 6104 { 6105 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 6106 uint16_t fib_command; 6107 struct aac_aif_command *aif; 6108 int en_type; 6109 int devcfg_needed; 6110 int current, next; 6111 6112 fib_command = LE_16(fibp->Header.Command); 6113 if (fib_command != AifRequest) { 6114 cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x", 6115 fib_command); 6116 return (AACERR); 6117 } 6118 6119 /* Update internal container state */ 6120 aif = (struct aac_aif_command *)&fibp->data[0]; 6121 6122 AACDB_PRINT_AIF(softs, aif); 6123 devcfg_needed = 0; 6124 en_type = LE_32((uint32_t)aif->data.EN.type); 6125 6126 switch (LE_32((uint32_t)aif->command)) { 6127 case AifCmdDriverNotify: { 6128 int cid = LE_32(aif->data.EN.data.ECC.container[0]); 6129 6130 switch (en_type) { 6131 case AifDenMorphComplete: 6132 case AifDenVolumeExtendComplete: 6133 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev)) 6134 softs->devcfg_wait_on = AifEnConfigChange; 6135 break; 6136 } 6137 if (softs->devcfg_wait_on == en_type) 6138 devcfg_needed = 1; 6139 break; 6140 } 6141 6142 case AifCmdEventNotify: 6143 switch (en_type) { 6144 case AifEnAddContainer: 6145 case AifEnDeleteContainer: 6146 softs->devcfg_wait_on = AifEnConfigChange; 6147 break; 6148 case AifEnContainerChange: 6149 if (!softs->devcfg_wait_on) 6150 softs->devcfg_wait_on = AifEnConfigChange; 6151 break; 6152 case AifEnContainerEvent: 6153 if (ddi_get32(acc, &aif-> \ 6154 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE) 6155 devcfg_needed = 1; 6156 break; 6157 } 6158 if (softs->devcfg_wait_on == en_type) 6159 devcfg_needed = 1; 6160 break; 6161 6162 case AifCmdJobProgress: 6163 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) { 6164 int pr_status; 6165 uint32_t pr_ftick, pr_ctick; 6166 6167 pr_status = LE_32((uint32_t)aif->data.PR[0].status); 6168 pr_ctick = LE_32(aif->data.PR[0].currentTick); 6169 pr_ftick = LE_32(aif->data.PR[0].finalTick); 6170 6171 if ((pr_ctick == pr_ftick) || 6172 (pr_status == AifJobStsSuccess)) 6173 softs->devcfg_wait_on = AifEnContainerChange; 6174 else if ((pr_ctick == 0) && 6175 (pr_status == AifJobStsRunning)) 6176 softs->devcfg_wait_on = AifEnContainerChange; 6177 } 6178 break; 6179 } 6180 6181 if (devcfg_needed) { 6182 softs->devcfg_wait_on = 0; 6183 (void) aac_probe_containers(softs); 6184 } 6185 6186 /* Modify AIF contexts */ 6187 current = softs->aifq_idx; 6188 next = (current + 1) % AAC_AIFQ_LENGTH; 6189 if (next == 0) { 6190 struct aac_fib_context *ctx; 6191 6192 softs->aifq_wrap = 1; 6193 for (ctx = softs->fibctx; ctx; ctx = ctx->next) { 6194 if (next == ctx->ctx_idx) { 6195 ctx->ctx_filled = 1; 6196 } else if (current == ctx->ctx_idx && ctx->ctx_filled) { 6197 ctx->ctx_idx = next; 6198 AACDB_PRINT(softs, CE_NOTE, 6199 "-- AIF queue(%x) overrun", ctx->unique); 6200 } 6201 } 6202 } 6203 softs->aifq_idx = next; 6204 6205 /* Wakeup applications */ 6206 cv_broadcast(&softs->aifv); 6207 return (AACOK); 6208 } 6209 6210 /* 6211 * Timeout recovery 6212 */ 6213 /*ARGSUSED*/ 6214 static void 6215 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp) 6216 { 6217 #ifdef DEBUG 6218 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT; 6219 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp); 6220 AACDB_PRINT_FIB(softs, acp->slotp); 6221 #endif 6222 6223 /* 6224 * Besides the firmware in unhealthy state, an overloaded 6225 * adapter may also incur pkt timeout. 
There is a chance for an adapter with a slower IOP to take 6227 * longer than 60 seconds to process the commands, such as when 6228 * the adapter is doing a build on a RAID-5 while being required 6229 * to perform IOs. So longer completion times should be 6230 * tolerated. 6231 */ 6232 switch (aac_do_reset(softs)) { 6233 case AAC_IOP_RESET_SUCCEED: 6234 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET); 6235 aac_start_waiting_io(softs); 6236 break; 6237 case AAC_IOP_RESET_FAILED: 6238 /* Abort all waiting cmds when adapter is dead */ 6239 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT); 6240 break; 6241 case AAC_IOP_RESET_ABNORMAL: 6242 aac_start_waiting_io(softs); 6243 } 6244 } 6245 6246 /* 6247 * The following function comes from Adaptec: 6248 * 6249 * Time sync. command added to synchronize time with firmware every 30 6250 * minutes (required for correct AIF timestamps etc.) 6251 */ 6252 static int 6253 aac_sync_tick(struct aac_softstate *softs) 6254 { 6255 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 6256 struct aac_fib *fibp = softs->sync_slot.fibp; 6257 6258 ddi_put32(acc, (void *)&fibp->data[0], ddi_get_time()); 6259 return (aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t))); 6260 } 6261 6262 static void 6263 aac_daemon(void *arg) 6264 { 6265 struct aac_softstate *softs = (struct aac_softstate *)arg; 6266 struct aac_cmd *acp; 6267 6268 DBCALLED(softs, 2); 6269 6270 mutex_enter(&softs->io_lock); 6271 /* Check slot for timeout pkts */ 6272 aac_timebase += aac_tick; 6273 for (acp = softs->q_busy.q_head; acp; acp = acp->next) { 6274 if (acp->timeout) { 6275 if (acp->timeout <= aac_timebase) { 6276 aac_cmd_timeout(softs, acp); 6277 ddi_trigger_softintr(softs->softint_id); 6278 } 6279 break; 6280 } 6281 } 6282 6283 /* Time sync.
with firmware every AAC_SYNC_TICK */ 6284 if (aac_sync_time <= aac_timebase) { 6285 aac_sync_time = aac_timebase; 6286 if (aac_sync_tick(softs) != AACOK) 6287 aac_sync_time += aac_tick << 1; /* retry shortly */ 6288 else 6289 aac_sync_time += AAC_SYNC_TICK; 6290 } 6291 6292 if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0)) 6293 softs->timeout_id = timeout(aac_daemon, (void *)softs, 6294 (aac_tick * drv_usectohz(1000000))); 6295 mutex_exit(&softs->io_lock); 6296 } 6297 6298 /* 6299 * Architecture dependent functions 6300 */ 6301 static int 6302 aac_rx_get_fwstatus(struct aac_softstate *softs) 6303 { 6304 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6305 } 6306 6307 static int 6308 aac_rx_get_mailbox(struct aac_softstate *softs, int mb) 6309 { 6310 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4)); 6311 } 6312 6313 static void 6314 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6315 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6316 { 6317 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd); 6318 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0); 6319 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1); 6320 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2); 6321 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3); 6322 } 6323 6324 static int 6325 aac_rkt_get_fwstatus(struct aac_softstate *softs) 6326 { 6327 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6328 } 6329 6330 static int 6331 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb) 6332 { 6333 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4)); 6334 } 6335 6336 static void 6337 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6338 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6339 { 6340 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd); 6341 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0); 6342 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1); 6343 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2); 6344 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3); 6345 } 6346 6347 /* 6348 * cb_ops functions 6349 */ 6350 static int 6351 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred) 6352 { 6353 struct aac_softstate *softs; 6354 int minor0, minor; 6355 int instance; 6356 6357 DBCALLED(NULL, 2); 6358 6359 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6360 return (EINVAL); 6361 6362 minor0 = getminor(*devp); 6363 minor = AAC_SCSA_MINOR(minor0); 6364 6365 if (AAC_IS_SCSA_NODE(minor)) 6366 return (scsi_hba_open(devp, flag, otyp, cred)); 6367 6368 instance = MINOR2INST(minor0); 6369 if (instance >= AAC_MAX_ADAPTERS) 6370 return (ENXIO); 6371 6372 softs = ddi_get_soft_state(aac_softstatep, instance); 6373 if (softs == NULL) 6374 return (ENXIO); 6375 6376 return (0); 6377 } 6378 6379 /*ARGSUSED*/ 6380 static int 6381 aac_close(dev_t dev, int flag, int otyp, cred_t *cred) 6382 { 6383 int minor0, minor; 6384 int instance; 6385 6386 DBCALLED(NULL, 2); 6387 6388 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6389 return (EINVAL); 6390 6391 minor0 = getminor(dev); 6392 minor = AAC_SCSA_MINOR(minor0); 6393 6394 if (AAC_IS_SCSA_NODE(minor)) 6395 return (scsi_hba_close(dev, flag, otyp, cred)); 6396 6397 instance = MINOR2INST(minor0); 6398 if (instance >= AAC_MAX_ADAPTERS) 6399 return (ENXIO); 6400 6401 return (0); 6402 } 6403 6404 static int 6405 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, 6406 int *rval_p) 6407 { 6408 struct aac_softstate *softs; 6409 int minor0, minor; 6410 int instance; 6411 6412 DBCALLED(NULL, 2); 6413 6414 if (drv_priv(cred_p) != 0) 6415 return (EPERM); 6416 6417 minor0 = 
getminor(dev); 6418 minor = AAC_SCSA_MINOR(minor0); 6419 6420 if (AAC_IS_SCSA_NODE(minor)) 6421 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p)); 6422 6423 instance = MINOR2INST(minor0); 6424 if (instance < AAC_MAX_ADAPTERS) { 6425 softs = ddi_get_soft_state(aac_softstatep, instance); 6426 return (aac_do_ioctl(softs, dev, cmd, arg, flag)); 6427 } 6428 return (ENXIO); 6429 } 6430 6431 /* 6432 * The IO fault service error handling callback function 6433 */ 6434 /*ARGSUSED*/ 6435 static int 6436 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6437 { 6438 /* 6439 * as the driver can always deal with an error in any dma or 6440 * access handle, we can just return the fme_status value. 6441 */ 6442 pci_ereport_post(dip, err, NULL); 6443 return (err->fme_status); 6444 } 6445 6446 /* 6447 * aac_fm_init - initialize fma capabilities and register with IO 6448 * fault services. 6449 */ 6450 static void 6451 aac_fm_init(struct aac_softstate *softs) 6452 { 6453 /* 6454 * Need to change iblock to priority for new MSI intr 6455 */ 6456 ddi_iblock_cookie_t fm_ibc; 6457 6458 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p, 6459 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 6460 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 6461 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 6462 6463 /* Only register with IO Fault Services if we have some capability */ 6464 if (softs->fm_capabilities) { 6465 /* Adjust access and dma attributes for FMA */ 6466 softs->reg_attr.devacc_attr_access = DDI_FLAGERR_ACC; 6467 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6468 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6469 6470 /* 6471 * Register capabilities with IO Fault Services. 6472 * fm_capabilities will be updated to indicate 6473 * capabilities actually supported (not requested.) 6474 */ 6475 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc); 6476 6477 /* 6478 * Initialize pci ereport capabilities if ereport 6479 * capable (should always be.) 6480 */ 6481 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6482 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6483 pci_ereport_setup(softs->devinfo_p); 6484 } 6485 6486 /* 6487 * Register error callback if error callback capable. 6488 */ 6489 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6490 ddi_fm_handler_register(softs->devinfo_p, 6491 aac_fm_error_cb, (void *) softs); 6492 } 6493 } 6494 } 6495 6496 /* 6497 * aac_fm_fini - Releases fma capabilities and un-registers with IO 6498 * fault services. 6499 */ 6500 static void 6501 aac_fm_fini(struct aac_softstate *softs) 6502 { 6503 /* Only unregister FMA capabilities if registered */ 6504 if (softs->fm_capabilities) { 6505 /* 6506 * Un-register error callback if error callback capable. 
6507 */ 6508 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6509 ddi_fm_handler_unregister(softs->devinfo_p); 6510 } 6511 6512 /* 6513 * Release any resources allocated by pci_ereport_setup() 6514 */ 6515 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6516 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6517 pci_ereport_teardown(softs->devinfo_p); 6518 } 6519 6520 /* Unregister from IO Fault Services */ 6521 ddi_fm_fini(softs->devinfo_p); 6522 6523 /* Adjust access and dma attributes for FMA */ 6524 softs->reg_attr.devacc_attr_access = DDI_DEFAULT_ACC; 6525 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 6526 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 6527 } 6528 } 6529 6530 int 6531 aac_check_acc_handle(ddi_acc_handle_t handle) 6532 { 6533 ddi_fm_error_t de; 6534 6535 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 6536 return (de.fme_status); 6537 } 6538 6539 int 6540 aac_check_dma_handle(ddi_dma_handle_t handle) 6541 { 6542 ddi_fm_error_t de; 6543 6544 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 6545 return (de.fme_status); 6546 } 6547 6548 void 6549 aac_fm_ereport(struct aac_softstate *softs, char *detail) 6550 { 6551 uint64_t ena; 6552 char buf[FM_MAX_CLASS]; 6553 6554 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6555 ena = fm_ena_generate(0, FM_ENA_FMT1); 6556 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) { 6557 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP, 6558 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 6559 } 6560 } 6561 6562 /* 6563 * Autoconfiguration support 6564 */ 6565 static int 6566 aac_parse_devname(char *devnm, int *tgt, int *lun) 6567 { 6568 char devbuf[SCSI_MAXNAMELEN]; 6569 char *addr; 6570 char *p, *tp, *lp; 6571 long num; 6572 6573 /* Parse dev name and address */ 6574 (void) strcpy(devbuf, devnm); 6575 addr = ""; 6576 for (p = devbuf; *p != '\0'; p++) { 6577 if (*p == '@') { 6578 addr = p + 1; 6579 *p = '\0'; 6580 } else if (*p == ':') { 6581 *p = '\0'; 6582 break; 6583 } 6584 } 6585 6586 /* Parse taget and lun */ 6587 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 6588 if (*p == ',') { 6589 lp = p + 1; 6590 *p = '\0'; 6591 break; 6592 } 6593 } 6594 if (tgt && tp) { 6595 if (ddi_strtol(tp, NULL, 0x10, &num)) 6596 return (AACERR); 6597 *tgt = (int)num; 6598 } 6599 if (lun && lp) { 6600 if (ddi_strtol(lp, NULL, 0x10, &num)) 6601 return (AACERR); 6602 *lun = (int)num; 6603 } 6604 return (AACOK); 6605 } 6606 6607 static dev_info_t * 6608 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun) 6609 { 6610 dev_info_t *child = NULL; 6611 char addr[SCSI_MAXNAMELEN]; 6612 char tmp[MAXNAMELEN]; 6613 6614 if (tgt < AAC_MAX_LD) { 6615 if (lun == 0) { 6616 struct aac_device *dvp = &softs->containers[tgt].dev; 6617 6618 child = dvp->dip; 6619 } 6620 } else { 6621 (void) sprintf(addr, "%x,%x", tgt, lun); 6622 for (child = ddi_get_child(softs->devinfo_p); 6623 child; child = ddi_get_next_sibling(child)) { 6624 /* We don't care about non-persistent node */ 6625 if (ndi_dev_is_persistent_node(child) == 0) 6626 continue; 6627 6628 if (aac_name_node(child, tmp, MAXNAMELEN) != 6629 DDI_SUCCESS) 6630 continue; 6631 if (strcmp(addr, tmp) == 0) 6632 break; 6633 } 6634 } 6635 return (child); 6636 } 6637 6638 static int 6639 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd, 6640 dev_info_t **dipp) 6641 { 6642 char *nodename = NULL; 6643 char **compatible = NULL; 6644 int ncompatible = 0; 6645 char *childname; 6646 dev_info_t *ldip = NULL; 6647 int tgt = 
sd->sd_address.a_target; 6648 int lun = sd->sd_address.a_lun; 6649 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 6650 int rval; 6651 6652 DBCALLED(softs, 2); 6653 6654 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 6655 NULL, &nodename, &compatible, &ncompatible); 6656 if (nodename == NULL) { 6657 AACDB_PRINT(softs, CE_WARN, 6658 "found no compatible driver for t%dL%d", tgt, lun); 6659 rval = NDI_FAILURE; 6660 goto finish; 6661 } 6662 childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename; 6663 6664 /* Create dev node */ 6665 rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID, 6666 &ldip); 6667 if (rval == NDI_SUCCESS) { 6668 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) 6669 != DDI_PROP_SUCCESS) { 6670 AACDB_PRINT(softs, CE_WARN, "unable to create " 6671 "property for t%dL%d (target)", tgt, lun); 6672 rval = NDI_FAILURE; 6673 goto finish; 6674 } 6675 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) 6676 != DDI_PROP_SUCCESS) { 6677 AACDB_PRINT(softs, CE_WARN, "unable to create " 6678 "property for t%dL%d (lun)", tgt, lun); 6679 rval = NDI_FAILURE; 6680 goto finish; 6681 } 6682 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 6683 "compatible", compatible, ncompatible) 6684 != DDI_PROP_SUCCESS) { 6685 AACDB_PRINT(softs, CE_WARN, "unable to create " 6686 "property for t%dL%d (compatible)", tgt, lun); 6687 rval = NDI_FAILURE; 6688 goto finish; 6689 } 6690 6691 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 6692 if (rval != NDI_SUCCESS) { 6693 AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d", 6694 tgt, lun); 6695 ndi_prop_remove_all(ldip); 6696 (void) ndi_devi_free(ldip); 6697 } 6698 } 6699 finish: 6700 if (dipp) 6701 *dipp = ldip; 6702 6703 scsi_hba_nodename_compatible_free(nodename, compatible); 6704 return (rval); 6705 } 6706 6707 /*ARGSUSED*/ 6708 static int 6709 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd) 6710 { 6711 int tgt = sd->sd_address.a_target; 6712 int lun = sd->sd_address.a_lun; 6713 6714 DBCALLED(softs, 2); 6715 6716 if (tgt < AAC_MAX_LD) { 6717 int rval; 6718 6719 if (lun == 0) { 6720 mutex_enter(&softs->io_lock); 6721 rval = aac_probe_container(softs, tgt); 6722 mutex_exit(&softs->io_lock); 6723 if (rval == AACOK) { 6724 if (scsi_hba_probe(sd, NULL) == 6725 SCSIPROBE_EXISTS) 6726 return (NDI_SUCCESS); 6727 } 6728 } 6729 return (NDI_FAILURE); 6730 } else { 6731 int dtype; 6732 6733 if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS) 6734 return (NDI_FAILURE); 6735 6736 dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 6737 6738 AACDB_PRINT(softs, CE_NOTE, 6739 "Phys.
device found: tgt %d dtype %d: %s", 6740 tgt, dtype, sd->sd_inq->inq_vid); 6741 6742 /* Only non-DASD exposed */ 6743 if (dtype != DTYPE_RODIRECT /* CDROM */ && 6744 dtype != DTYPE_SEQUENTIAL /* TAPE */ && 6745 dtype != DTYPE_ESI /* SES */) 6746 return (NDI_FAILURE); 6747 6748 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt); 6749 mutex_enter(&softs->io_lock); 6750 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID; 6751 mutex_exit(&softs->io_lock); 6752 return (NDI_SUCCESS); 6753 } 6754 } 6755 6756 static int 6757 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun, 6758 dev_info_t **ldip) 6759 { 6760 struct scsi_device sd; 6761 dev_info_t *child; 6762 int rval; 6763 6764 DBCALLED(softs, 2); 6765 6766 if ((child = aac_find_child(softs, tgt, lun)) != NULL) { 6767 if (ldip) 6768 *ldip = child; 6769 return (NDI_SUCCESS); 6770 } 6771 6772 bzero(&sd, sizeof (struct scsi_device)); 6773 sd.sd_address.a_hba_tran = softs->hba_tran; 6774 sd.sd_address.a_target = (uint16_t)tgt; 6775 sd.sd_address.a_lun = (uint8_t)lun; 6776 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS) 6777 rval = aac_config_child(softs, &sd, ldip); 6778 scsi_unprobe(&sd); 6779 return (rval); 6780 } 6781 6782 static int 6783 aac_config_tgt(struct aac_softstate *softs, int tgt) 6784 { 6785 struct scsi_address ap; 6786 struct buf *bp = NULL; 6787 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE; 6788 int list_len = 0; 6789 int lun_total = 0; 6790 dev_info_t *ldip; 6791 int i; 6792 6793 ap.a_hba_tran = softs->hba_tran; 6794 ap.a_target = (uint16_t)tgt; 6795 ap.a_lun = 0; 6796 6797 for (i = 0; i < 2; i++) { 6798 struct scsi_pkt *pkt; 6799 uchar_t *cdb; 6800 uchar_t *p; 6801 uint32_t data; 6802 6803 if (bp == NULL) { 6804 if ((bp = scsi_alloc_consistent_buf(&ap, NULL, 6805 buf_len, B_READ, NULL_FUNC, NULL)) == NULL) 6806 return (AACERR); 6807 } 6808 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5, 6809 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, 6810 NULL, NULL)) == NULL) { 6811 scsi_free_consistent_buf(bp); 6812 return (AACERR); 6813 } 6814 cdb = pkt->pkt_cdbp; 6815 bzero(cdb, CDB_GROUP5); 6816 cdb[0] = SCMD_REPORT_LUNS; 6817 6818 /* Convert buffer len from local to LE_32 */ 6819 data = buf_len; 6820 for (p = &cdb[9]; p > &cdb[5]; p--) { 6821 *p = data & 0xff; 6822 data >>= 8; 6823 } 6824 6825 if (scsi_poll(pkt) < 0 || 6826 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) { 6827 scsi_destroy_pkt(pkt); 6828 break; 6829 } 6830 6831 /* Convert list_len from LE_32 to local */ 6832 for (p = (uchar_t *)bp->b_un.b_addr; 6833 p < (uchar_t *)bp->b_un.b_addr + 4; p++) { 6834 data <<= 8; 6835 data |= *p; 6836 } 6837 list_len = data; 6838 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) { 6839 scsi_free_consistent_buf(bp); 6840 bp = NULL; 6841 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE; 6842 } 6843 scsi_destroy_pkt(pkt); 6844 } 6845 if (i >= 2) { 6846 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr + 6847 AAC_SCSI_RPTLUNS_HEAD_SIZE); 6848 6849 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) { 6850 uint16_t lun; 6851 6852 /* Determine report luns addressing type */ 6853 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) { 6854 /* 6855 * Vendors in the field have been found to be 6856 * concatenating bus/target/lun to equal the 6857 * complete lun value instead of switching to 6858 * flat space addressing 6859 */ 6860 case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL: 6861 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT: 6862 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE: 6863 lun = ((buf[0] & 
0x3f) << 8) | buf[1]; 6864 if (lun > UINT8_MAX) { 6865 AACDB_PRINT(softs, CE_WARN, 6866 "abnormal lun number: %d", lun); 6867 break; 6868 } 6869 if (aac_config_lun(softs, tgt, lun, &ldip) == 6870 NDI_SUCCESS) 6871 lun_total++; 6872 break; 6873 } 6874 6875 buf += AAC_SCSI_RPTLUNS_ADDR_SIZE; 6876 } 6877 } else { 6878 /* The target may not support SCMD_REPORT_LUNS. */ 6879 if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS) 6880 lun_total++; 6881 } 6882 scsi_free_consistent_buf(bp); 6883 return (lun_total); 6884 } 6885 6886 static void 6887 aac_devcfg(struct aac_softstate *softs, int tgt, int en) 6888 { 6889 struct aac_device *dvp; 6890 6891 mutex_enter(&softs->io_lock); 6892 dvp = AAC_DEV(softs, tgt); 6893 if (en) 6894 dvp->flags |= AAC_DFLAG_CONFIGURING; 6895 else 6896 dvp->flags &= ~AAC_DFLAG_CONFIGURING; 6897 mutex_exit(&softs->io_lock); 6898 } 6899 6900 static int 6901 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op, 6902 void *arg, dev_info_t **childp) 6903 { 6904 struct aac_softstate *softs; 6905 int circ = 0; 6906 int rval; 6907 6908 if ((softs = ddi_get_soft_state(aac_softstatep, 6909 ddi_get_instance(parent))) == NULL) 6910 return (NDI_FAILURE); 6911 6912 /* Commands for bus config should be blocked as the bus is quiesced */ 6913 mutex_enter(&softs->io_lock); 6914 if (softs->state & AAC_STATE_QUIESCED) { 6915 AACDB_PRINT(softs, CE_NOTE, 6916 "bus_config aborted because bus is quiesced"); 6917 mutex_exit(&softs->io_lock); 6918 return (NDI_FAILURE); 6919 } 6920 mutex_exit(&softs->io_lock); 6921 6922 DBCALLED(softs, 1); 6923 6924 /* Hold the nexus across the bus_config */ 6925 ndi_devi_enter(parent, &circ); 6926 switch (op) { 6927 case BUS_CONFIG_ONE: { 6928 int tgt, lun; 6929 6930 if (aac_parse_devname(arg, &tgt, &lun) != AACOK) { 6931 rval = NDI_FAILURE; 6932 break; 6933 } 6934 6935 AAC_DEVCFG_BEGIN(softs, tgt); 6936 rval = aac_config_lun(softs, tgt, lun, childp); 6937 AAC_DEVCFG_END(softs, tgt); 6938 break; 6939 } 6940 6941 case BUS_CONFIG_DRIVER: 6942 case BUS_CONFIG_ALL: { 6943 uint32_t bus, tgt; 6944 int index, total; 6945 6946 for (tgt = 0; tgt < AAC_MAX_LD; tgt++) { 6947 AAC_DEVCFG_BEGIN(softs, tgt); 6948 (void) aac_config_lun(softs, tgt, 0, NULL); 6949 AAC_DEVCFG_END(softs, tgt); 6950 } 6951 6952 /* Config the non-DASD devices connected to the card */ 6953 total = 0; 6954 index = AAC_MAX_LD; 6955 for (bus = 0; bus < softs->bus_max; bus++) { 6956 AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus); 6957 for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) { 6958 AAC_DEVCFG_BEGIN(softs, index); 6959 if (aac_config_tgt(softs, index)) 6960 total++; 6961 AAC_DEVCFG_END(softs, index); 6962 } 6963 } 6964 AACDB_PRINT(softs, CE_CONT, 6965 "?Total %d phys.
device(s) found", total); 6966 rval = NDI_SUCCESS; 6967 break; 6968 } 6969 } 6970 6971 if (rval == NDI_SUCCESS) 6972 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 6973 ndi_devi_exit(parent, circ); 6974 return (rval); 6975 } 6976 6977 static void 6978 aac_handle_dr(struct aac_drinfo *drp) 6979 { 6980 struct aac_softstate *softs = drp->softs; 6981 struct aac_device *dvp; 6982 dev_info_t *dip; 6983 int valid; 6984 int circ1 = 0; 6985 6986 DBCALLED(softs, 1); 6987 6988 /* Hold the nexus across the bus_config */ 6989 mutex_enter(&softs->io_lock); 6990 dvp = AAC_DEV(softs, drp->tgt); 6991 valid = AAC_DEV_IS_VALID(dvp); 6992 dip = dvp->dip; 6993 mutex_exit(&softs->io_lock); 6994 6995 switch (drp->event) { 6996 case AAC_EVT_ONLINE: 6997 case AAC_EVT_OFFLINE: 6998 /* Device onlined */ 6999 if (dip == NULL && valid) { 7000 ndi_devi_enter(softs->devinfo_p, &circ1); 7001 (void) aac_config_lun(softs, drp->tgt, 0, NULL); 7002 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined", 7003 softs->instance, drp->tgt, drp->lun); 7004 ndi_devi_exit(softs->devinfo_p, circ1); 7005 } 7006 /* Device offlined */ 7007 if (dip && !valid) { 7008 mutex_enter(&softs->io_lock); 7009 (void) aac_do_reset(softs); 7010 mutex_exit(&softs->io_lock); 7011 7012 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 7013 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined", 7014 softs->instance, drp->tgt, drp->lun); 7015 } 7016 break; 7017 } 7018 kmem_free(drp, sizeof (struct aac_drinfo)); 7019 } 7020 7021 static int 7022 aac_dr_event(struct aac_softstate *softs, int tgt, int lun, int event) 7023 { 7024 struct aac_drinfo *drp; 7025 7026 DBCALLED(softs, 1); 7027 7028 if (softs->taskq == NULL || 7029 (drp = kmem_zalloc(sizeof (struct aac_drinfo), KM_NOSLEEP)) == NULL) 7030 return (AACERR); 7031 7032 drp->softs = softs; 7033 drp->tgt = tgt; 7034 drp->lun = lun; 7035 drp->event = event; 7036 if ((ddi_taskq_dispatch(softs->taskq, (void (*)(void *))aac_handle_dr, 7037 drp, DDI_NOSLEEP)) != DDI_SUCCESS) { 7038 AACDB_PRINT(softs, CE_WARN, "DR task start failed"); 7039 kmem_free(drp, sizeof (struct aac_drinfo)); 7040 return (AACERR); 7041 } 7042 return (AACOK); 7043 } 7044 7045 #ifdef DEBUG 7046 7047 /* -------------------------debug aid functions-------------------------- */ 7048 7049 #define AAC_FIB_CMD_KEY_STRINGS \ 7050 TestCommandResponse, "TestCommandResponse", \ 7051 TestAdapterCommand, "TestAdapterCommand", \ 7052 LastTestCommand, "LastTestCommand", \ 7053 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \ 7054 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \ 7055 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \ 7056 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \ 7057 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \ 7058 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \ 7059 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \ 7060 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \ 7061 InterfaceShutdown, "InterfaceShutdown", \ 7062 DmaCommandFib, "DmaCommandFib", \ 7063 StartProfile, "StartProfile", \ 7064 TermProfile, "TermProfile", \ 7065 SpeedTest, "SpeedTest", \ 7066 TakeABreakPt, "TakeABreakPt", \ 7067 RequestPerfData, "RequestPerfData", \ 7068 SetInterruptDefTimer, "SetInterruptDefTimer", \ 7069 SetInterruptDefCount, "SetInterruptDefCount", \ 7070 GetInterruptDefStatus, "GetInterruptDefStatus", \ 7071 LastCommCommand, "LastCommCommand", \ 7072 NuFileSystem, "NuFileSystem", \ 7073 UFS, "UFS", \ 7074 HostFileSystem, "HostFileSystem", \ 7075 LastFileSystemCommand, 
"LastFileSystemCommand", \ 7076 ContainerCommand, "ContainerCommand", \ 7077 ContainerCommand64, "ContainerCommand64", \ 7078 ClusterCommand, "ClusterCommand", \ 7079 ScsiPortCommand, "ScsiPortCommand", \ 7080 ScsiPortCommandU64, "ScsiPortCommandU64", \ 7081 AifRequest, "AifRequest", \ 7082 CheckRevision, "CheckRevision", \ 7083 FsaHostShutdown, "FsaHostShutdown", \ 7084 RequestAdapterInfo, "RequestAdapterInfo", \ 7085 IsAdapterPaused, "IsAdapterPaused", \ 7086 SendHostTime, "SendHostTime", \ 7087 LastMiscCommand, "LastMiscCommand" 7088 7089 #define AAC_CTVM_SUBCMD_KEY_STRINGS \ 7090 VM_Null, "VM_Null", \ 7091 VM_NameServe, "VM_NameServe", \ 7092 VM_ContainerConfig, "VM_ContainerConfig", \ 7093 VM_Ioctl, "VM_Ioctl", \ 7094 VM_FilesystemIoctl, "VM_FilesystemIoctl", \ 7095 VM_CloseAll, "VM_CloseAll", \ 7096 VM_CtBlockRead, "VM_CtBlockRead", \ 7097 VM_CtBlockWrite, "VM_CtBlockWrite", \ 7098 VM_SliceBlockRead, "VM_SliceBlockRead", \ 7099 VM_SliceBlockWrite, "VM_SliceBlockWrite", \ 7100 VM_DriveBlockRead, "VM_DriveBlockRead", \ 7101 VM_DriveBlockWrite, "VM_DriveBlockWrite", \ 7102 VM_EnclosureMgt, "VM_EnclosureMgt", \ 7103 VM_Unused, "VM_Unused", \ 7104 VM_CtBlockVerify, "VM_CtBlockVerify", \ 7105 VM_CtPerf, "VM_CtPerf", \ 7106 VM_CtBlockRead64, "VM_CtBlockRead64", \ 7107 VM_CtBlockWrite64, "VM_CtBlockWrite64", \ 7108 VM_CtBlockVerify64, "VM_CtBlockVerify64", \ 7109 VM_CtHostRead64, "VM_CtHostRead64", \ 7110 VM_CtHostWrite64, "VM_CtHostWrite64", \ 7111 VM_NameServe64, "VM_NameServe64" 7112 7113 #define AAC_CT_SUBCMD_KEY_STRINGS \ 7114 CT_Null, "CT_Null", \ 7115 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \ 7116 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \ 7117 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \ 7118 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \ 7119 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \ 7120 CT_WRITE_MBR, "CT_WRITE_MBR", \ 7121 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \ 7122 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \ 7123 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \ 7124 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \ 7125 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \ 7126 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \ 7127 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \ 7128 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \ 7129 CT_READ_MBR, "CT_READ_MBR", \ 7130 CT_READ_PARTITION, "CT_READ_PARTITION", \ 7131 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \ 7132 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \ 7133 CT_SLICE_SIZE, "CT_SLICE_SIZE", \ 7134 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \ 7135 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \ 7136 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \ 7137 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \ 7138 CT_UNMIRROR, "CT_UNMIRROR", \ 7139 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \ 7140 CT_GEN_MIRROR, "CT_GEN_MIRROR", \ 7141 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \ 7142 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \ 7143 CT_MOVE2, "CT_MOVE2", \ 7144 CT_SPLIT, "CT_SPLIT", \ 7145 CT_SPLIT2, "CT_SPLIT2", \ 7146 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \ 7147 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \ 7148 CT_RECONFIG, "CT_RECONFIG", \ 7149 CT_BREAK2, "CT_BREAK2", \ 7150 CT_BREAK, "CT_BREAK", \ 7151 CT_MERGE2, "CT_MERGE2", \ 7152 CT_MERGE, "CT_MERGE", \ 7153 CT_FORCE_ERROR, "CT_FORCE_ERROR", \ 7154 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \ 7155 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \ 7156 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \ 7157 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \ 7158 CT_VOLUME_ADD, "CT_VOLUME_ADD", \ 7159 
CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \ 7160 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \ 7161 CT_COPY_STATUS, "CT_COPY_STATUS", \ 7162 CT_COPY, "CT_COPY", \ 7163 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \ 7164 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \ 7165 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \ 7166 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \ 7167 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \ 7168 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \ 7169 CT_SET, "CT_SET", \ 7170 CT_GET, "CT_GET", \ 7171 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \ 7172 CT_GET_DELAY, "CT_GET_DELAY", \ 7173 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \ 7174 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \ 7175 CT_SCRUB, "CT_SCRUB", \ 7176 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \ 7177 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \ 7178 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \ 7179 CT_PAUSE_IO, "CT_PAUSE_IO", \ 7180 CT_RELEASE_IO, "CT_RELEASE_IO", \ 7181 CT_SCRUB2, "CT_SCRUB2", \ 7182 CT_MCHECK, "CT_MCHECK", \ 7183 CT_CORRUPT, "CT_CORRUPT", \ 7184 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \ 7185 CT_PROMOTE, "CT_PROMOTE", \ 7186 CT_SET_DEAD, "CT_SET_DEAD", \ 7187 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \ 7188 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \ 7189 CT_GET_PARAM, "CT_GET_PARAM", \ 7190 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \ 7191 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \ 7192 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \ 7193 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \ 7194 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \ 7195 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \ 7196 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \ 7197 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \ 7198 CT_STOP_DATA, "CT_STOP_DATA", \ 7199 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \ 7200 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \ 7201 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \ 7202 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \ 7203 CT_GET_TIME, "CT_GET_TIME", \ 7204 CT_READ_DATA, "CT_READ_DATA", \ 7205 CT_CTR, "CT_CTR", \ 7206 CT_CTL, "CT_CTL", \ 7207 CT_DRAINIO, "CT_DRAINIO", \ 7208 CT_RELEASEIO, "CT_RELEASEIO", \ 7209 CT_GET_NVRAM, "CT_GET_NVRAM", \ 7210 CT_GET_MEMORY, "CT_GET_MEMORY", \ 7211 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \ 7212 CT_ADD_LEVEL, "CT_ADD_LEVEL", \ 7213 CT_NV_ZERO, "CT_NV_ZERO", \ 7214 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \ 7215 CT_THROTTLE_ON, "CT_THROTTLE_ON", \ 7216 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \ 7217 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \ 7218 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \ 7219 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \ 7220 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \ 7221 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \ 7222 CT_MONITOR, "CT_MONITOR", \ 7223 CT_GEN_MORPH, "CT_GEN_MORPH", \ 7224 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \ 7225 CT_CACHE_SET, "CT_CACHE_SET", \ 7226 CT_CACHE_STAT, "CT_CACHE_STAT", \ 7227 CT_TRACE_START, "CT_TRACE_START", \ 7228 CT_TRACE_STOP, "CT_TRACE_STOP", \ 7229 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \ 7230 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \ 7231 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \ 7232 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \ 7233 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \ 7234 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \ 7235 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \ 7236 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \ 7237 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \ 7238 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \ 7239 CT_STOP_DUMPS, "CT_STOP_DUMPS", \ 7240 CT_PANIC_ON_TAKE_A_BREAK, 
"CT_PANIC_ON_TAKE_A_BREAK", \ 7241 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \ 7242 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \ 7243 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \ 7244 CT_READ_NAME, "CT_READ_NAME", \ 7245 CT_WRITE_NAME, "CT_WRITE_NAME", \ 7246 CT_TOSS_CACHE, "CT_TOSS_CACHE", \ 7247 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \ 7248 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \ 7249 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \ 7250 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \ 7251 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \ 7252 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \ 7253 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \ 7254 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \ 7255 CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \ 7256 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \ 7257 CT_FLUSH, "CT_FLUSH", \ 7258 CT_REBUILD, "CT_REBUILD", \ 7259 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \ 7260 CT_RESTART, "CT_RESTART", \ 7261 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \ 7262 CT_TRACE_FLAG, "CT_TRACE_FLAG", \ 7263 CT_RESTART_MORPH, "CT_RESTART_MORPH", \ 7264 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \ 7265 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \ 7266 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \ 7267 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \ 7268 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \ 7269 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \ 7270 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \ 7271 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \ 7272 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \ 7273 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \ 7274 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \ 7275 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \ 7276 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \ 7277 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \ 7278 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \ 7279 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \ 7280 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \ 7281 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \ 7282 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \ 7283 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \ 7284 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \ 7285 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \ 7286 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \ 7287 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \ 7288 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \ 7289 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \ 7290 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \ 7291 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \ 7292 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \ 7293 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \ 7294 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \ 7295 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \ 7296 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \ 7297 CT_IS_CONTAINER_MEATADATA_STANDARD, \ 7298 "CT_IS_CONTAINER_MEATADATA_STANDARD", \ 7299 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \ 7300 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \ 7301 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \ 7302 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \ 7303 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \ 7304 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \ 7305 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \ 7306 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \ 7307 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \ 7308 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \ 7309 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \ 7310 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \ 7311 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", 
\ 7312 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \ 7313 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \ 7314 CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \ 7315 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \ 7316 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \ 7317 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE" 7318 7319 #define AAC_CL_SUBCMD_KEY_STRINGS \ 7320 CL_NULL, "CL_NULL", \ 7321 DS_INIT, "DS_INIT", \ 7322 DS_RESCAN, "DS_RESCAN", \ 7323 DS_CREATE, "DS_CREATE", \ 7324 DS_DELETE, "DS_DELETE", \ 7325 DS_ADD_DISK, "DS_ADD_DISK", \ 7326 DS_REMOVE_DISK, "DS_REMOVE_DISK", \ 7327 DS_MOVE_DISK, "DS_MOVE_DISK", \ 7328 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \ 7329 DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \ 7330 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \ 7331 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \ 7332 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \ 7333 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \ 7334 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \ 7335 DS_GET_DRIVES, "DS_GET_DRIVES", \ 7336 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \ 7337 DS_ONLINE, "DS_ONLINE", \ 7338 DS_OFFLINE, "DS_OFFLINE", \ 7339 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \ 7340 DS_FSAPRINT, "DS_FSAPRINT", \ 7341 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \ 7342 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \ 7343 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \ 7344 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \ 7345 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \ 7346 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \ 7347 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \ 7348 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \ 7349 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \ 7350 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \ 7351 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \ 7352 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \ 7353 CC_GET_BUSINFO, "CC_GET_BUSINFO", \ 7354 CC_GET_PORTINFO, "CC_GET_PORTINFO", \ 7355 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \ 7356 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \ 7357 CQ_QUORUM_OP, "CQ_QUORUM_OP" 7358 7359 #define AAC_AIF_SUBCMD_KEY_STRINGS \ 7360 AifCmdEventNotify, "AifCmdEventNotify", \ 7361 AifCmdJobProgress, "AifCmdJobProgress", \ 7362 AifCmdAPIReport, "AifCmdAPIReport", \ 7363 AifCmdDriverNotify, "AifCmdDriverNotify", \ 7364 AifReqJobList, "AifReqJobList", \ 7365 AifReqJobsForCtr, "AifReqJobsForCtr", \ 7366 AifReqJobsForScsi, "AifReqJobsForScsi", \ 7367 AifReqJobReport, "AifReqJobReport", \ 7368 AifReqTerminateJob, "AifReqTerminateJob", \ 7369 AifReqSuspendJob, "AifReqSuspendJob", \ 7370 AifReqResumeJob, "AifReqResumeJob", \ 7371 AifReqSendAPIReport, "AifReqSendAPIReport", \ 7372 AifReqAPIJobStart, "AifReqAPIJobStart", \ 7373 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \ 7374 AifReqAPIJobFinish, "AifReqAPIJobFinish" 7375 7376 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \ 7377 Reserved_IOCTL, "Reserved_IOCTL", \ 7378 GetDeviceHandle, "GetDeviceHandle", \ 7379 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \ 7380 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \ 7381 RescanBus, "RescanBus", \ 7382 GetDeviceProbeInfo, "GetDeviceProbeInfo", \ 7383 GetDeviceCapacity, "GetDeviceCapacity", \ 7384 GetContainerProbeInfo, "GetContainerProbeInfo", \ 7385 GetRequestedMemorySize, "GetRequestedMemorySize", \ 7386 GetBusInfo, "GetBusInfo", \ 7387 GetVendorSpecific, "GetVendorSpecific", \ 7388 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \ 7389 EnhancedGetBusInfo, 
"EnhancedGetBusInfo", \ 7390 SetupExtendedCounters, "SetupExtendedCounters", \ 7391 GetPerformanceCounters, "GetPerformanceCounters", \ 7392 ResetPerformanceCounters, "ResetPerformanceCounters", \ 7393 ReadModePage, "ReadModePage", \ 7394 WriteModePage, "WriteModePage", \ 7395 ReadDriveParameter, "ReadDriveParameter", \ 7396 WriteDriveParameter, "WriteDriveParameter", \ 7397 ResetAdapter, "ResetAdapter", \ 7398 ResetBus, "ResetBus", \ 7399 ResetBusDevice, "ResetBusDevice", \ 7400 ExecuteSrb, "ExecuteSrb", \ 7401 Create_IO_Task, "Create_IO_Task", \ 7402 Delete_IO_Task, "Delete_IO_Task", \ 7403 Get_IO_Task_Info, "Get_IO_Task_Info", \ 7404 Check_Task_Progress, "Check_Task_Progress", \ 7405 InjectError, "InjectError", \ 7406 GetDeviceDefectCounts, "GetDeviceDefectCounts", \ 7407 GetDeviceDefectInfo, "GetDeviceDefectInfo", \ 7408 GetDeviceStatus, "GetDeviceStatus", \ 7409 ClearDeviceStatus, "ClearDeviceStatus", \ 7410 DiskSpinControl, "DiskSpinControl", \ 7411 DiskSmartControl, "DiskSmartControl", \ 7412 WriteSame, "WriteSame", \ 7413 ReadWriteLong, "ReadWriteLong", \ 7414 FormatUnit, "FormatUnit", \ 7415 TargetDeviceControl, "TargetDeviceControl", \ 7416 TargetChannelControl, "TargetChannelControl", \ 7417 FlashNewCode, "FlashNewCode", \ 7418 DiskCheck, "DiskCheck", \ 7419 RequestSense, "RequestSense", \ 7420 DiskPERControl, "DiskPERControl", \ 7421 Read10, "Read10", \ 7422 Write10, "Write10" 7423 7424 #define AAC_AIFEN_KEY_STRINGS \ 7425 AifEnGeneric, "Generic", \ 7426 AifEnTaskComplete, "TaskComplete", \ 7427 AifEnConfigChange, "Config change", \ 7428 AifEnContainerChange, "Container change", \ 7429 AifEnDeviceFailure, "device failed", \ 7430 AifEnMirrorFailover, "Mirror failover", \ 7431 AifEnContainerEvent, "container event", \ 7432 AifEnFileSystemChange, "File system changed", \ 7433 AifEnConfigPause, "Container pause event", \ 7434 AifEnConfigResume, "Container resume event", \ 7435 AifEnFailoverChange, "Failover space assignment changed", \ 7436 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \ 7437 AifEnEnclosureManagement, "Enclosure management event", \ 7438 AifEnBatteryEvent, "battery event", \ 7439 AifEnAddContainer, "Add container", \ 7440 AifEnDeleteContainer, "Delete container", \ 7441 AifEnSMARTEvent, "SMART Event", \ 7442 AifEnBatteryNeedsRecond, "battery needs reconditioning", \ 7443 AifEnClusterEvent, "cluster event", \ 7444 AifEnDiskSetEvent, "disk set event occured", \ 7445 AifDenMorphComplete, "morph operation completed", \ 7446 AifDenVolumeExtendComplete, "VolumeExtendComplete" 7447 7448 struct aac_key_strings { 7449 int key; 7450 char *message; 7451 }; 7452 7453 extern struct scsi_key_strings scsi_cmds[]; 7454 7455 static struct aac_key_strings aac_fib_cmds[] = { 7456 AAC_FIB_CMD_KEY_STRINGS, 7457 -1, NULL 7458 }; 7459 7460 static struct aac_key_strings aac_ctvm_subcmds[] = { 7461 AAC_CTVM_SUBCMD_KEY_STRINGS, 7462 -1, NULL 7463 }; 7464 7465 static struct aac_key_strings aac_ct_subcmds[] = { 7466 AAC_CT_SUBCMD_KEY_STRINGS, 7467 -1, NULL 7468 }; 7469 7470 static struct aac_key_strings aac_cl_subcmds[] = { 7471 AAC_CL_SUBCMD_KEY_STRINGS, 7472 -1, NULL 7473 }; 7474 7475 static struct aac_key_strings aac_aif_subcmds[] = { 7476 AAC_AIF_SUBCMD_KEY_STRINGS, 7477 -1, NULL 7478 }; 7479 7480 static struct aac_key_strings aac_ioctl_subcmds[] = { 7481 AAC_IOCTL_SUBCMD_KEY_STRINGS, 7482 -1, NULL 7483 }; 7484 7485 static struct aac_key_strings aac_aifens[] = { 7486 AAC_AIFEN_KEY_STRINGS, 7487 -1, NULL 7488 }; 7489 7490 /* 7491 * The following function comes from Adaptec: 7492 * 7493 
/*
 * The following function comes from Adaptec:
 *
 * Get the firmware print buffer parameters from the firmware;
 * if the command succeeds, map in the address.
 */
static int
aac_get_fw_debug_buffer(struct aac_softstate *softs)
{
	if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
	    0, 0, 0, 0, NULL) == AACOK) {
		uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
		uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
		uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
		uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);

		if (mondrv_buf_size) {
			uint32_t offset = mondrv_buf_paddrl - \
			    softs->pci_mem_base_paddr;

			/*
			 * See if the address is already mapped in, and
			 * if so set it up from the base address
			 */
			if ((mondrv_buf_paddrh == 0) &&
			    (offset + mondrv_buf_size < softs->map_size)) {
				mutex_enter(&aac_prt_mutex);
				softs->debug_buf_offset = offset;
				softs->debug_header_size = mondrv_hdr_size;
				softs->debug_buf_size = mondrv_buf_size;
				softs->debug_fw_flags = 0;
				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
				mutex_exit(&aac_prt_mutex);

				return (AACOK);
			}
		}
	}
	return (AACERR);
}

int
aac_dbflag_on(struct aac_softstate *softs, int flag)
{
	int debug_flags = softs ? softs->debug_flags : aac_debug_flags;

	return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
	    AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
}

static void
aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
{
	if (noheader) {
		if (sl) {
			aac_fmt[0] = sl;
			cmn_err(lev, aac_fmt, aac_prt_buf);
		} else {
			cmn_err(lev, &aac_fmt[1], aac_prt_buf);
		}
	} else {
		if (sl) {
			aac_fmt_header[0] = sl;
			cmn_err(lev, aac_fmt_header,
			    softs->vendor_name, softs->instance,
			    aac_prt_buf);
		} else {
			cmn_err(lev, &aac_fmt_header[1],
			    softs->vendor_name, softs->instance,
			    aac_prt_buf);
		}
	}
}
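/*
 * Illustrative note (not part of the original source): aac_dbflag_on() can
 * serve as a cheap guard so that callers skip the formatting work in
 * aac_printf() entirely when neither kernel nor firmware printing is
 * enabled, e.g.:
 *
 *	if (aac_dbflag_on(softs, AACDB_FLAGS_KERNEL_PRINT))
 *		aac_printf(softs, CE_NOTE, "instance %d attached",
 *		    softs->instance);
 */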
/*
 * The following function comes from Adaptec:
 *
 * Format and print out the data passed in to UART or console
 * as specified by debug flags.
 */
void
aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
{
	va_list args;
	char sl; /* system log character */

	mutex_enter(&aac_prt_mutex);
	/* Set up parameters and call sprintf function to format the data */
	if (strchr("^!?", fmt[0]) == NULL) {
		sl = 0;
	} else {
		sl = fmt[0];
		fmt++;
	}
	va_start(args, fmt);
	(void) vsprintf(aac_prt_buf, fmt, args);
	va_end(args);

	/* Make sure the softs structure has been passed in for this section */
	if (softs) {
		if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
		    /* If we are set up for a Firmware print */
		    (softs->debug_buf_size)) {
			uint32_t count, i;

			/* Make sure the string size is within boundaries */
			count = strlen(aac_prt_buf);
			if (count > softs->debug_buf_size)
				count = (uint16_t)softs->debug_buf_size;

			/*
			 * Wait for no more than AAC_PRINT_TIMEOUT for the
			 * previous message length to clear (the handshake).
			 */
			for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
				if (!PCI_MEM_GET32(softs,
				    softs->debug_buf_offset + \
				    AAC_FW_DBG_STRLEN_OFFSET))
					break;

				drv_usecwait(1000);
			}

			/*
			 * If the length is clear, copy over the message, the
			 * flags, and the length. Make sure the length is the
			 * last because that is the signal for the Firmware to
			 * pick it up.
			 */
			if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \
			    AAC_FW_DBG_STRLEN_OFFSET)) {
				PCI_MEM_REP_PUT8(softs,
				    softs->debug_buf_offset + \
				    softs->debug_header_size,
				    aac_prt_buf, count);
				PCI_MEM_PUT32(softs,
				    softs->debug_buf_offset + \
				    AAC_FW_DBG_FLAGS_OFFSET,
				    softs->debug_fw_flags);
				PCI_MEM_PUT32(softs,
				    softs->debug_buf_offset + \
				    AAC_FW_DBG_STRLEN_OFFSET, count);
			} else {
				cmn_err(CE_WARN, "UART output fail");
				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
			}
		}

		/*
		 * If the Kernel Debug Print flag is set, send it off
		 * to the Kernel Debugger
		 */
		if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
			aac_cmn_err(softs, lev, sl,
			    (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
	} else {
		/* Driver not initialized yet, no firmware or header output */
		if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
			aac_cmn_err(softs, lev, sl, 1);
	}
	mutex_exit(&aac_prt_mutex);
}

/*
 * Translate command number to description string
 */
static char *
aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
{
	int i;

	for (i = 0; cmdlist[i].key != -1; i++) {
		if (cmd == cmdlist[i].key)
			return (cmdlist[i].message);
	}
	return (NULL);
}
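/*
 * Illustrative note (not part of the original source): aac_printf() forwards
 * a leading '^', '!' or '?' in the format string to cmn_err(9F), so the usual
 * destination modifiers still apply, e.g.:
 *
 *	aac_printf(softs, CE_WARN, "!sync command timed out");
 *
 * would go to the system log (and, when firmware printing is enabled, to the
 * firmware print buffer) without appearing on the console.
 */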
"(pd)" : ""); 7721 break; 7722 case SCMD_READ_G5: 7723 case SCMD_WRITE_G5: 7724 aac_printf(softs, CE_NOTE, 7725 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 7726 desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp), 7727 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 7728 ctl, tgt, lun, is_pd ? "(pd)" : ""); 7729 break; 7730 default: 7731 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s", 7732 desc, ctl, tgt, lun, is_pd ? "(pd)" : ""); 7733 } 7734 } 7735 7736 void 7737 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp) 7738 { 7739 struct aac_cmd *acp = slotp->acp; 7740 struct aac_fib *fibp = slotp->fibp; 7741 ddi_acc_handle_t acc = slotp->fib_acc_handle; 7742 uint16_t fib_size; 7743 uint32_t fib_cmd, sub_cmd; 7744 char *cmdstr, *subcmdstr; 7745 char *caller; 7746 int i; 7747 7748 if (acp) { 7749 if (!(softs->debug_fib_flags & acp->fib_flags)) 7750 return; 7751 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD) 7752 caller = "SCMD"; 7753 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL) 7754 caller = "IOCTL"; 7755 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB) 7756 caller = "SRB"; 7757 else 7758 return; 7759 } else { 7760 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC)) 7761 return; 7762 caller = "SYNC"; 7763 } 7764 7765 fib_cmd = ddi_get16(acc, &fibp->Header.Command); 7766 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds); 7767 sub_cmd = (uint32_t)-1; 7768 subcmdstr = NULL; 7769 7770 /* Print FIB header */ 7771 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) { 7772 aac_printf(softs, CE_NOTE, "FIB> from %s", caller); 7773 aac_printf(softs, CE_NOTE, " XferState %d", 7774 ddi_get32(acc, &fibp->Header.XferState)); 7775 aac_printf(softs, CE_NOTE, " Command %d", 7776 ddi_get16(acc, &fibp->Header.Command)); 7777 aac_printf(softs, CE_NOTE, " StructType %d", 7778 ddi_get8(acc, &fibp->Header.StructType)); 7779 aac_printf(softs, CE_NOTE, " Flags 0x%x", 7780 ddi_get8(acc, &fibp->Header.Flags)); 7781 aac_printf(softs, CE_NOTE, " Size %d", 7782 ddi_get16(acc, &fibp->Header.Size)); 7783 aac_printf(softs, CE_NOTE, " SenderSize %d", 7784 ddi_get16(acc, &fibp->Header.SenderSize)); 7785 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x", 7786 ddi_get32(acc, &fibp->Header.SenderFibAddress)); 7787 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x", 7788 ddi_get32(acc, &fibp->Header.ReceiverFibAddress)); 7789 aac_printf(softs, CE_NOTE, " SenderData 0x%x", 7790 ddi_get32(acc, &fibp->Header.SenderData)); 7791 } 7792 7793 /* Print FIB data */ 7794 switch (fib_cmd) { 7795 case ContainerCommand: 7796 sub_cmd = ddi_get32(acc, 7797 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0])); 7798 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds); 7799 if (subcmdstr == NULL) 7800 break; 7801 7802 switch (sub_cmd) { 7803 case VM_ContainerConfig: { 7804 struct aac_Container *pContainer = 7805 (struct aac_Container *)fibp->data; 7806 7807 fib_cmd = sub_cmd; 7808 cmdstr = subcmdstr; 7809 sub_cmd = (uint32_t)-1; 7810 subcmdstr = NULL; 7811 7812 sub_cmd = ddi_get32(acc, 7813 &pContainer->CTCommand.command); 7814 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds); 7815 if (subcmdstr == NULL) 7816 break; 7817 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)", 7818 subcmdstr, 7819 ddi_get32(acc, &pContainer->CTCommand.param[0]), 7820 ddi_get32(acc, &pContainer->CTCommand.param[1]), 7821 ddi_get32(acc, &pContainer->CTCommand.param[2])); 7822 return; 7823 } 7824 7825 case VM_Ioctl: 7826 fib_cmd = sub_cmd; 7827 cmdstr = subcmdstr; 7828 sub_cmd = (uint32_t)-1; 7829 subcmdstr = NULL; 7830 7831 sub_cmd = ddi_get32(acc, 7832 (void 
*)&(((uint32_t *)(void *)&fibp->data[0])[4])); 7833 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds); 7834 break; 7835 7836 case VM_CtBlockRead: 7837 case VM_CtBlockWrite: { 7838 struct aac_blockread *br = 7839 (struct aac_blockread *)fibp->data; 7840 struct aac_sg_table *sg = &br->SgMap; 7841 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 7842 7843 aac_printf(softs, CE_NOTE, 7844 "FIB> %s Container %d 0x%x/%d", subcmdstr, 7845 ddi_get32(acc, &br->ContainerId), 7846 ddi_get32(acc, &br->BlockNumber), 7847 ddi_get32(acc, &br->ByteCount)); 7848 for (i = 0; i < sgcount; i++) 7849 aac_printf(softs, CE_NOTE, 7850 " %d: 0x%08x/%d", i, 7851 ddi_get32(acc, &sg->SgEntry[i].SgAddress), 7852 ddi_get32(acc, &sg->SgEntry[i]. \ 7853 SgByteCount)); 7854 return; 7855 } 7856 } 7857 break; 7858 7859 case ContainerCommand64: { 7860 struct aac_blockread64 *br = 7861 (struct aac_blockread64 *)fibp->data; 7862 struct aac_sg_table64 *sg = &br->SgMap64; 7863 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 7864 uint64_t sgaddr; 7865 7866 sub_cmd = br->Command; 7867 subcmdstr = NULL; 7868 if (sub_cmd == VM_CtHostRead64) 7869 subcmdstr = "VM_CtHostRead64"; 7870 else if (sub_cmd == VM_CtHostWrite64) 7871 subcmdstr = "VM_CtHostWrite64"; 7872 else 7873 break; 7874 7875 aac_printf(softs, CE_NOTE, 7876 "FIB> %s Container %d 0x%x/%d", subcmdstr, 7877 ddi_get16(acc, &br->ContainerId), 7878 ddi_get32(acc, &br->BlockNumber), 7879 ddi_get16(acc, &br->SectorCount)); 7880 for (i = 0; i < sgcount; i++) { 7881 sgaddr = ddi_get64(acc, 7882 &sg->SgEntry64[i].SgAddress); 7883 aac_printf(softs, CE_NOTE, 7884 " %d: 0x%08x.%08x/%d", i, 7885 AAC_MS32(sgaddr), AAC_LS32(sgaddr), 7886 ddi_get32(acc, &sg->SgEntry64[i]. \ 7887 SgByteCount)); 7888 } 7889 return; 7890 } 7891 7892 case RawIo: { 7893 struct aac_raw_io *io = (struct aac_raw_io *)fibp->data; 7894 struct aac_sg_tableraw *sg = &io->SgMapRaw; 7895 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 7896 uint64_t sgaddr; 7897 7898 aac_printf(softs, CE_NOTE, 7899 "FIB> RawIo Container %d 0x%llx/%d 0x%x", 7900 ddi_get16(acc, &io->ContainerId), 7901 ddi_get64(acc, &io->BlockNumber), 7902 ddi_get32(acc, &io->ByteCount), 7903 ddi_get16(acc, &io->Flags)); 7904 for (i = 0; i < sgcount; i++) { 7905 sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress); 7906 aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i, 7907 AAC_MS32(sgaddr), AAC_LS32(sgaddr), 7908 ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount)); 7909 } 7910 return; 7911 } 7912 7913 case ClusterCommand: 7914 sub_cmd = ddi_get32(acc, 7915 (void *)&(((uint32_t *)(void *)fibp->data)[0])); 7916 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds); 7917 break; 7918 7919 case AifRequest: 7920 sub_cmd = ddi_get32(acc, 7921 (void *)&(((uint32_t *)(void *)fibp->data)[0])); 7922 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds); 7923 break; 7924 7925 default: 7926 break; 7927 } 7928 7929 fib_size = ddi_get16(acc, &(fibp->Header.Size)); 7930 if (subcmdstr) 7931 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 7932 subcmdstr, fib_size); 7933 else if (cmdstr && sub_cmd == (uint32_t)-1) 7934 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 7935 cmdstr, fib_size); 7936 else if (cmdstr) 7937 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d", 7938 cmdstr, sub_cmd, fib_size); 7939 else 7940 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d", 7941 fib_cmd, fib_size); 7942 } 7943 7944 static void 7945 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif) 7946 { 7947 int aif_command; 7948 uint32_t aif_seqnumber; 7949 int aif_en_type; 
static void
aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
{
	int aif_command;
	uint32_t aif_seqnumber;
	int aif_en_type;
	char *str;

	aif_command = LE_32(aif->command);
	aif_seqnumber = LE_32(aif->seqNumber);
	aif_en_type = LE_32(aif->data.EN.type);

	switch (aif_command) {
	case AifCmdEventNotify:
		str = aac_cmd_name(aif_en_type, aac_aifens);
		if (str)
			aac_printf(softs, CE_NOTE, "AIF! %s", str);
		else
			aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
			    aif_en_type);
		break;

	case AifCmdJobProgress:
		switch (LE_32(aif->data.PR[0].status)) {
		case AifJobStsSuccess:
			str = "success"; break;
		case AifJobStsFinished:
			str = "finished"; break;
		case AifJobStsAborted:
			str = "aborted"; break;
		case AifJobStsFailed:
			str = "failed"; break;
		case AifJobStsSuspended:
			str = "suspended"; break;
		case AifJobStsRunning:
			str = "running"; break;
		default:
			str = "unknown"; break;
		}
		aac_printf(softs, CE_NOTE,
		    "AIF! JobProgress (%d) - %s (%d, %d)",
		    aif_seqnumber, str,
		    LE_32(aif->data.PR[0].currentTick),
		    LE_32(aif->data.PR[0].finalTick));
		break;

	case AifCmdAPIReport:
		aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
		    aif_seqnumber);
		break;

	case AifCmdDriverNotify:
		aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
		    aif_seqnumber);
		break;

	default:
		aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
		    aif_command, aif_seqnumber);
		break;
	}
}

#endif /* DEBUG */
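/*
 * Illustrative note (not part of the original source): the debug helpers
 * above are compiled only when DEBUG is defined, so call sites elsewhere in
 * the driver would typically be wrapped the same way, e.g. (slotp being a
 * hypothetical local naming the slot about to be handed to the controller):
 *
 * #ifdef DEBUG
 *	aac_print_fib(softs, slotp);
 * #endif
 */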