/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2023 Oxide Computer Company
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2005-08 Adaptec, Inc.
 * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ksynch.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#include "aac_regs.h"
#include "aac.h"

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

/*
 * For minor nodes created by the SCSA framework, minor numbers are
 * formed by left-shifting the instance by INST_MINOR_SHIFT and ORing
 * in a number less than 64.
 *
 * To support cfgadm, we need to conform to the SCSA framework by
 * creating devctl/scsi and driver-specific minor nodes in the SCSA
 * format, and by calling the scsi_hba_xxx() functions accordingly.
 */

#define	AAC_MINOR		32
#define	INST2AAC(x)		(((x) << INST_MINOR_SHIFT) | AAC_MINOR)
#define	AAC_SCSA_MINOR(x)	((x) & TRAN_MINOR_MASK)
#define	AAC_IS_SCSA_NODE(x)	((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)

#define	SD2TRAN(sd)		((sd)->sd_address.a_hba_tran)
#define	AAC_TRAN2SOFTS(tran)	((struct aac_softstate *)(tran)->tran_hba_private)
#define	AAC_DIP2TRAN(dip)	((scsi_hba_tran_t *)ddi_get_driver_private(dip))
#define	AAC_DIP2SOFTS(dip)	(AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
#define	SD2AAC(sd)		(AAC_TRAN2SOFTS(SD2TRAN(sd)))
#define	AAC_PD(t)		((t) - AAC_MAX_LD)
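/*
 * Targets numbered below AAC_MAX_LD are logical containers; higher target
 * numbers are translated by AAC_PD() above into an index into the nondasds[]
 * array of physical (non-DASD) devices. AAC_DEV() below evaluates to NULL
 * for targets at or beyond AAC_MAX_DEV(softs).
 */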
#define	AAC_DEV(softs, t)	(((t) < AAC_MAX_LD) ? \
	&(softs)->containers[(t)].dev : \
	((t) < AAC_MAX_DEV(softs)) ? \
	&(softs)->nondasds[AAC_PD(t)].dev : NULL)
#define	AAC_DEVCFG_BEGIN(softs, tgt) \
	aac_devcfg((softs), (tgt), 1)
#define	AAC_DEVCFG_END(softs, tgt) \
	aac_devcfg((softs), (tgt), 0)
#define	PKT2AC(pkt)		((struct aac_cmd *)(pkt)->pkt_ha_private)
#define	AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \
	if (!(cond)) { \
		int count = (timeout) * 10; \
		while (count) { \
			drv_usecwait(100); \
			if (cond) \
				break; \
			count--; \
		} \
		(timeout) = (count + 9) / 10; \
	} \
}

#define	AAC_SENSE_DATA_DESCR_LEN \
	(sizeof (struct scsi_descr_sense_hdr) + \
	sizeof (struct scsi_information_sense_descr))
#define	AAC_ARQ64_LENGTH \
	(sizeof (struct scsi_arq_status) + \
	AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)

/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
#define	AAC_GETGXADDR(cmdlen, cdbp) \
	((cmdlen == 6) ? GETG0ADDR(cdbp) : \
	(cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
	((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))

#define	AAC_CDB_INQUIRY_CMDDT	0x02
#define	AAC_CDB_INQUIRY_EVPD	0x01
#define	AAC_VPD_PAGE_CODE	1
#define	AAC_VPD_PAGE_LENGTH	3
#define	AAC_VPD_PAGE_DATA	4
#define	AAC_VPD_ID_CODESET	0
#define	AAC_VPD_ID_TYPE		1
#define	AAC_VPD_ID_LENGTH	3
#define	AAC_VPD_ID_DATA		4

#define	AAC_SCSI_RPTLUNS_HEAD_SIZE	0x08
#define	AAC_SCSI_RPTLUNS_ADDR_SIZE	0x08
#define	AAC_SCSI_RPTLUNS_ADDR_MASK	0xC0
/* 00b - peripheral device addressing method */
#define	AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL	0x00
/* 01b - flat space addressing method */
#define	AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE	0x40
/* 10b - logical unit addressing method */
#define	AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT	0x80

/* Return the size of FIB with data part type data_type */
#define	AAC_FIB_SIZEOF(data_type) \
	(sizeof (struct aac_fib_header) + sizeof (data_type))
/* Return the container size defined in mir */
#define	AAC_MIR_SIZE(softs, acc, mir) \
	(((softs)->flags & AAC_FLAGS_LBA_64BIT) ?
\ 141 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \ 142 ((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \ 143 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity)) 144 145 /* The last entry of aac_cards[] is for unknown cards */ 146 #define AAC_UNKNOWN_CARD \ 147 (sizeof (aac_cards) / sizeof (struct aac_card_type) - 1) 148 #define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD) 149 #define BUF_IS_READ(bp) ((bp)->b_flags & B_READ) 150 #define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL) 151 #define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC)) 152 153 #define PCI_MEM_GET32(softs, off) \ 154 ddi_get32((softs)->pci_mem_handle, \ 155 (void *)((softs)->pci_mem_base_vaddr + (off))) 156 #define PCI_MEM_PUT32(softs, off, val) \ 157 ddi_put32((softs)->pci_mem_handle, \ 158 (void *)((softs)->pci_mem_base_vaddr + (off)), \ 159 (uint32_t)(val)) 160 #define PCI_MEM_GET16(softs, off) \ 161 ddi_get16((softs)->pci_mem_handle, \ 162 (void *)((softs)->pci_mem_base_vaddr + (off))) 163 #define PCI_MEM_PUT16(softs, off, val) \ 164 ddi_put16((softs)->pci_mem_handle, \ 165 (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val)) 166 /* Write host data at valp to device mem[off] repeatedly count times */ 167 #define PCI_MEM_REP_PUT8(softs, off, valp, count) \ 168 ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 169 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 170 count, DDI_DEV_AUTOINCR) 171 /* Read device data at mem[off] to host addr valp repeatedly count times */ 172 #define PCI_MEM_REP_GET8(softs, off, valp, count) \ 173 ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 174 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 175 count, DDI_DEV_AUTOINCR) 176 #define AAC_GET_FIELD8(acc, d, s, field) \ 177 (d)->field = ddi_get8(acc, (uint8_t *)&(s)->field) 178 #define AAC_GET_FIELD32(acc, d, s, field) \ 179 (d)->field = ddi_get32(acc, (uint32_t *)&(s)->field) 180 #define AAC_GET_FIELD64(acc, d, s, field) \ 181 (d)->field = ddi_get64(acc, (uint64_t *)&(s)->field) 182 #define AAC_REP_GET_FIELD8(acc, d, s, field, r) \ 183 ddi_rep_get8((acc), (uint8_t *)&(d)->field, \ 184 (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 185 #define AAC_REP_GET_FIELD32(acc, d, s, field, r) \ 186 ddi_rep_get32((acc), (uint32_t *)&(d)->field, \ 187 (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 188 189 #define AAC_ENABLE_INTR(softs) { \ 190 if (softs->flags & AAC_FLAGS_NEW_COMM) \ 191 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \ 192 else \ 193 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \ 194 softs->state |= AAC_STATE_INTR; \ 195 } 196 197 #define AAC_DISABLE_INTR(softs) { \ 198 PCI_MEM_PUT32(softs, AAC_OIMR, ~0); \ 199 softs->state &= ~AAC_STATE_INTR; \ 200 } 201 #define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask) 202 #define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR) 203 #define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val) 204 #define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE) 205 #define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val) 206 #define AAC_FWSTATUS_GET(softs) \ 207 ((softs)->aac_if.aif_get_fwstatus(softs)) 208 #define AAC_MAILBOX_GET(softs, mb) \ 209 ((softs)->aac_if.aif_get_mailbox((softs), (mb))) 210 #define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \ 211 ((softs)->aac_if.aif_set_mailbox((softs), (cmd), \ 212 (arg0), (arg1), (arg2), (arg3))) 213 214 #define AAC_MGT_SLOT_NUM 2 215 #define AAC_THROTTLE_DRAIN -1 216 217 #define AAC_QUIESCE_TICK 1 /* 1 second */ 218 #define 
AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */ 219 #define AAC_DEFAULT_TICK 10 /* 10 seconds */ 220 #define AAC_SYNC_TICK (30*60) /* 30 minutes */ 221 222 /* Poll time for aac_do_poll_io() */ 223 #define AAC_POLL_TIME 60 /* 60 seconds */ 224 225 /* IOP reset */ 226 #define AAC_IOP_RESET_SUCCEED 0 /* IOP reset succeed */ 227 #define AAC_IOP_RESET_FAILED -1 /* IOP reset failed */ 228 #define AAC_IOP_RESET_ABNORMAL -2 /* Reset operation abnormal */ 229 230 /* 231 * Hardware access functions 232 */ 233 static int aac_rx_get_fwstatus(struct aac_softstate *); 234 static int aac_rx_get_mailbox(struct aac_softstate *, int); 235 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 236 uint32_t, uint32_t, uint32_t); 237 static int aac_rkt_get_fwstatus(struct aac_softstate *); 238 static int aac_rkt_get_mailbox(struct aac_softstate *, int); 239 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 240 uint32_t, uint32_t, uint32_t); 241 242 /* 243 * SCSA function prototypes 244 */ 245 static int aac_attach(dev_info_t *, ddi_attach_cmd_t); 246 static int aac_detach(dev_info_t *, ddi_detach_cmd_t); 247 static int aac_reset(dev_info_t *, ddi_reset_cmd_t); 248 static int aac_quiesce(dev_info_t *); 249 static int aac_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **); 250 251 /* 252 * Interrupt handler functions 253 */ 254 static int aac_query_intrs(struct aac_softstate *, int); 255 static int aac_add_intrs(struct aac_softstate *); 256 static void aac_remove_intrs(struct aac_softstate *); 257 static int aac_enable_intrs(struct aac_softstate *); 258 static int aac_disable_intrs(struct aac_softstate *); 259 static uint_t aac_intr_old(caddr_t, caddr_t); 260 static uint_t aac_intr_new(caddr_t, caddr_t); 261 static uint_t aac_softintr(caddr_t); 262 263 /* 264 * Internal functions in attach 265 */ 266 static int aac_check_card_type(struct aac_softstate *); 267 static int aac_check_firmware(struct aac_softstate *); 268 static int aac_common_attach(struct aac_softstate *); 269 static void aac_common_detach(struct aac_softstate *); 270 static int aac_probe_containers(struct aac_softstate *); 271 static int aac_alloc_comm_space(struct aac_softstate *); 272 static int aac_setup_comm_space(struct aac_softstate *); 273 static void aac_free_comm_space(struct aac_softstate *); 274 static int aac_hba_setup(struct aac_softstate *); 275 276 /* 277 * Sync FIB operation functions 278 */ 279 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t, 280 uint32_t, uint32_t, uint32_t, uint32_t *); 281 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t); 282 283 /* 284 * Command queue operation functions 285 */ 286 static void aac_cmd_initq(struct aac_cmd_queue *); 287 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *); 288 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *); 289 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *); 290 291 /* 292 * FIB queue operation functions 293 */ 294 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t); 295 static int aac_fib_dequeue(struct aac_softstate *, int, int *); 296 297 /* 298 * Slot operation functions 299 */ 300 static int aac_create_slots(struct aac_softstate *); 301 static void aac_destroy_slots(struct aac_softstate *); 302 static void aac_alloc_fibs(struct aac_softstate *); 303 static void aac_destroy_fibs(struct aac_softstate *); 304 static struct aac_slot *aac_get_slot(struct aac_softstate *); 305 static void aac_release_slot(struct 
aac_softstate *, struct aac_slot *); 306 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *); 307 static void aac_free_fib(struct aac_slot *); 308 309 /* 310 * Internal functions 311 */ 312 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_cmd *, 313 uint16_t); 314 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *); 315 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *); 316 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *); 317 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *); 318 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *); 319 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *); 320 static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *); 321 static void aac_start_waiting_io(struct aac_softstate *); 322 static void aac_drain_comp_q(struct aac_softstate *); 323 int aac_do_io(struct aac_softstate *, struct aac_cmd *); 324 static int aac_sync_fib_slot_bind(struct aac_softstate *, struct aac_cmd *); 325 static void aac_sync_fib_slot_release(struct aac_softstate *, struct aac_cmd *); 326 static void aac_start_io(struct aac_softstate *, struct aac_cmd *); 327 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *); 328 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *); 329 static int aac_send_command(struct aac_softstate *, struct aac_slot *); 330 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *); 331 static int aac_dma_sync_ac(struct aac_cmd *); 332 static int aac_shutdown(struct aac_softstate *); 333 static int aac_reset_adapter(struct aac_softstate *); 334 static int aac_do_quiesce(struct aac_softstate *softs); 335 static int aac_do_unquiesce(struct aac_softstate *softs); 336 static void aac_unhold_bus(struct aac_softstate *, int); 337 static void aac_set_throttle(struct aac_softstate *, struct aac_device *, 338 int, int); 339 340 /* 341 * Adapter Initiated FIB handling function 342 */ 343 static void aac_save_aif(struct aac_softstate *, ddi_acc_handle_t, 344 struct aac_fib *, int); 345 static int aac_handle_aif(struct aac_softstate *, struct aac_aif_command *); 346 347 /* 348 * Event handling related functions 349 */ 350 static void aac_timer(void *); 351 static void aac_event_thread(struct aac_softstate *); 352 static void aac_event_disp(struct aac_softstate *, int); 353 354 /* 355 * IOCTL interface related functions 356 */ 357 static int aac_open(dev_t *, int, int, cred_t *); 358 static int aac_close(dev_t, int, int, cred_t *); 359 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); 360 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int); 361 362 /* 363 * FMA Prototypes 364 */ 365 static void aac_fm_init(struct aac_softstate *); 366 static void aac_fm_fini(struct aac_softstate *); 367 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *); 368 int aac_check_acc_handle(ddi_acc_handle_t); 369 int aac_check_dma_handle(ddi_dma_handle_t); 370 void aac_fm_ereport(struct aac_softstate *, char *); 371 372 /* 373 * Auto enumeration functions 374 */ 375 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t); 376 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t, 377 void *, dev_info_t **); 378 static int aac_handle_dr(struct aac_softstate *, int, int, int); 379 380 extern pri_t minclsyspri; 381 382 #ifdef DEBUG 383 /* 384 * UART debug output support 385 */ 386 387 
#define AAC_PRINT_BUFFER_SIZE 512 388 #define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. */ 389 390 #define AAC_FW_DBG_STRLEN_OFFSET 0x00 391 #define AAC_FW_DBG_FLAGS_OFFSET 0x04 392 #define AAC_FW_DBG_BLED_OFFSET 0x08 393 394 static int aac_get_fw_debug_buffer(struct aac_softstate *); 395 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *); 396 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *); 397 398 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE]; 399 static char aac_fmt[] = " %s"; 400 static char aac_fmt_header[] = " %s.%d: %s"; 401 static kmutex_t aac_prt_mutex; 402 403 /* 404 * Debug flags to be put into the softstate flags field 405 * when initialized 406 */ 407 uint32_t aac_debug_flags = 408 /* AACDB_FLAGS_KERNEL_PRINT | */ 409 /* AACDB_FLAGS_FW_PRINT | */ 410 /* AACDB_FLAGS_MISC | */ 411 /* AACDB_FLAGS_FUNC1 | */ 412 /* AACDB_FLAGS_FUNC2 | */ 413 /* AACDB_FLAGS_SCMD | */ 414 /* AACDB_FLAGS_AIF | */ 415 /* AACDB_FLAGS_FIB | */ 416 /* AACDB_FLAGS_IOCTL | */ 417 0; 418 uint32_t aac_debug_fib_flags = 419 /* AACDB_FLAGS_FIB_RW | */ 420 /* AACDB_FLAGS_FIB_IOCTL | */ 421 /* AACDB_FLAGS_FIB_SRB | */ 422 /* AACDB_FLAGS_FIB_SYNC | */ 423 /* AACDB_FLAGS_FIB_HEADER | */ 424 /* AACDB_FLAGS_FIB_TIMEOUT | */ 425 0; 426 427 #endif /* DEBUG */ 428 429 static struct cb_ops aac_cb_ops = { 430 aac_open, /* open */ 431 aac_close, /* close */ 432 nodev, /* strategy */ 433 nodev, /* print */ 434 nodev, /* dump */ 435 nodev, /* read */ 436 nodev, /* write */ 437 aac_ioctl, /* ioctl */ 438 nodev, /* devmap */ 439 nodev, /* mmap */ 440 nodev, /* segmap */ 441 nochpoll, /* poll */ 442 ddi_prop_op, /* cb_prop_op */ 443 NULL, /* streamtab */ 444 D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 445 CB_REV, /* cb_rev */ 446 nodev, /* async I/O read entry point */ 447 nodev /* async I/O write entry point */ 448 }; 449 450 static struct dev_ops aac_dev_ops = { 451 DEVO_REV, 452 0, 453 aac_getinfo, 454 nulldev, 455 nulldev, 456 aac_attach, 457 aac_detach, 458 aac_reset, 459 &aac_cb_ops, 460 NULL, 461 NULL, 462 aac_quiesce, 463 }; 464 465 static struct modldrv aac_modldrv = { 466 &mod_driverops, 467 "AAC Driver " AAC_DRIVER_VERSION, 468 &aac_dev_ops, 469 }; 470 471 static struct modlinkage aac_modlinkage = { 472 MODREV_1, 473 &aac_modldrv, 474 NULL 475 }; 476 477 static struct aac_softstate *aac_softstatep; 478 479 /* 480 * Supported card list 481 * ordered in vendor id, subvendor id, subdevice id, and device id 482 */ 483 static struct aac_card_type aac_cards[] = { 484 {0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX, 485 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 486 "Dell", "PERC 3/Di"}, 487 {0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX, 488 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 489 "Dell", "PERC 3/Di"}, 490 {0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX, 491 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 492 "Dell", "PERC 3/Si"}, 493 {0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX, 494 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 495 "Dell", "PERC 3/Di"}, 496 {0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX, 497 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 498 "Dell", "PERC 3/Si"}, 499 {0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX, 500 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 501 "Dell", "PERC 3/Di"}, 502 {0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX, 503 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 504 "Dell", "PERC 3/Di"}, 505 {0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX, 506 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 507 "Dell", "PERC 3/Di"}, 
508 {0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX, 509 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 510 "Dell", "PERC 3/Di"}, 511 {0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX, 512 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 513 "Dell", "PERC 3/Di"}, 514 {0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX, 515 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 516 "Dell", "PERC 320/DC"}, 517 {0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX, 518 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"}, 519 520 {0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX, 521 0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"}, 522 {0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX, 523 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"}, 524 {0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT, 525 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"}, 526 527 {0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX, 528 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 529 {0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX, 530 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 531 532 {0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX, 533 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 534 "Adaptec", "2200S"}, 535 {0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX, 536 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 537 "Adaptec", "2120S"}, 538 {0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX, 539 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 540 "Adaptec", "2200S"}, 541 {0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX, 542 0, AAC_TYPE_SCSI, "Adaptec", "3230S"}, 543 {0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX, 544 0, AAC_TYPE_SCSI, "Adaptec", "3240S"}, 545 {0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX, 546 0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"}, 547 {0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX, 548 0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"}, 549 {0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT, 550 0, AAC_TYPE_SCSI, "Adaptec", "2230S"}, 551 {0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT, 552 0, AAC_TYPE_SCSI, "Adaptec", "2130S"}, 553 {0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX, 554 0, AAC_TYPE_SATA, "Adaptec", "2020SA"}, 555 {0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX, 556 0, AAC_TYPE_SATA, "Adaptec", "2025SA"}, 557 {0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX, 558 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"}, 559 {0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX, 560 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"}, 561 {0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX, 562 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"}, 563 {0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX, 564 0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"}, 565 {0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX, 566 0, AAC_TYPE_SCSI, "Adaptec", "2240S"}, 567 {0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX, 568 0, AAC_TYPE_SAS, "Adaptec", "4005SAS"}, 569 {0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX, 570 0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"}, 571 {0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX, 572 0, AAC_TYPE_SAS, "Adaptec", "4800SAS"}, 573 {0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX, 574 0, AAC_TYPE_SAS, "Adaptec", "4805SAS"}, 575 {0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT, 576 0, AAC_TYPE_SATA, "Adaptec", "2820SA"}, 577 {0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT, 578 0, AAC_TYPE_SATA, "Adaptec", "2620SA"}, 579 {0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT, 580 0, AAC_TYPE_SATA, "Adaptec", "2420SA"}, 581 {0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT, 582 0, AAC_TYPE_SATA, "ICP", "9024RO"}, 583 {0x9005, 0x286, 0x9005, 
0x29f, AAC_HWIF_RKT, 584 0, AAC_TYPE_SATA, "ICP", "9014RO"}, 585 {0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT, 586 0, AAC_TYPE_SATA, "ICP", "9047MA"}, 587 {0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT, 588 0, AAC_TYPE_SATA, "ICP", "9087MA"}, 589 {0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX, 590 0, AAC_TYPE_SAS, "ICP", "9085LI"}, 591 {0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX, 592 0, AAC_TYPE_SAS, "ICP", "5085BR"}, 593 {0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT, 594 0, AAC_TYPE_SATA, "ICP", "9067MA"}, 595 {0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX, 596 0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"}, 597 {0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX, 598 0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"}, 599 {0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX, 600 0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"}, 601 {0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX, 602 0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"}, 603 {0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX, 604 0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"}, 605 {0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX, 606 0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"}, 607 608 {0, 0, 0, 0, AAC_HWIF_UNKNOWN, 609 0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"}, 610 }; 611 612 /* 613 * Hardware access functions for i960 based cards 614 */ 615 static struct aac_interface aac_rx_interface = { 616 aac_rx_get_fwstatus, 617 aac_rx_get_mailbox, 618 aac_rx_set_mailbox 619 }; 620 621 /* 622 * Hardware access functions for Rocket based cards 623 */ 624 static struct aac_interface aac_rkt_interface = { 625 aac_rkt_get_fwstatus, 626 aac_rkt_get_mailbox, 627 aac_rkt_set_mailbox 628 }; 629 630 ddi_device_acc_attr_t aac_acc_attr = { 631 DDI_DEVICE_ATTR_V1, 632 DDI_STRUCTURE_LE_ACC, 633 DDI_STRICTORDER_ACC, 634 DDI_DEFAULT_ACC 635 }; 636 637 static struct { 638 int size; 639 int notify; 640 } aac_qinfo[] = { 641 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 642 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 643 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 644 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 645 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 646 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 647 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 648 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 649 }; 650 651 /* 652 * Default aac dma attributes 653 */ 654 static ddi_dma_attr_t aac_dma_attr = { 655 DMA_ATTR_V0, 656 0, /* lowest usable address */ 657 0xffffffffull, /* high DMA address range */ 658 0xffffffffull, /* DMA counter register */ 659 AAC_DMA_ALIGN, /* DMA address alignment */ 660 1, /* DMA burstsizes */ 661 1, /* min effective DMA size */ 662 0xffffffffull, /* max DMA xfer size */ 663 0xffffffffull, /* segment boundary */ 664 1, /* s/g list length */ 665 AAC_BLK_SIZE, /* granularity of device */ 666 0 /* DMA transfer flags */ 667 }; 668 669 static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */ 670 static uint32_t aac_timebase = 0; /* internal timer in seconds */ 671 672 /* 673 * Warlock directives 674 * 675 * Different variables with the same types have to be protected by the 676 * same mutex; otherwise, warlock will complain with "variables don't 677 * seem to be protected consistently". For example, 678 * aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected 679 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to 680 * declare them as protected explictly at aac_cmd_dequeue(). 
681 */ 682 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \ 683 scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \ 684 mode_format mode_geometry mode_header aac_cmd)) 685 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \ 686 aac_sge)) 687 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \ 688 aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \ 689 aac_sg_table aac_srb)) 690 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry)) 691 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address)) 692 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf)) 693 694 int 695 _init(void) 696 { 697 int rval = 0; 698 699 #ifdef DEBUG 700 mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL); 701 #endif 702 DBCALLED(NULL, 1); 703 704 if ((rval = ddi_soft_state_init((void *)&aac_softstatep, 705 sizeof (struct aac_softstate), 0)) != 0) 706 goto error; 707 708 if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) { 709 ddi_soft_state_fini((void *)&aac_softstatep); 710 goto error; 711 } 712 713 if ((rval = mod_install(&aac_modlinkage)) != 0) { 714 ddi_soft_state_fini((void *)&aac_softstatep); 715 scsi_hba_fini(&aac_modlinkage); 716 goto error; 717 } 718 return (rval); 719 720 error: 721 AACDB_PRINT(NULL, CE_WARN, "Mod init error!"); 722 #ifdef DEBUG 723 mutex_destroy(&aac_prt_mutex); 724 #endif 725 return (rval); 726 } 727 728 int 729 _info(struct modinfo *modinfop) 730 { 731 DBCALLED(NULL, 1); 732 return (mod_info(&aac_modlinkage, modinfop)); 733 } 734 735 /* 736 * An HBA driver cannot be unload unless you reboot, 737 * so this function will be of no use. 738 */ 739 int 740 _fini(void) 741 { 742 int rval; 743 744 DBCALLED(NULL, 1); 745 746 if ((rval = mod_remove(&aac_modlinkage)) != 0) 747 goto error; 748 749 scsi_hba_fini(&aac_modlinkage); 750 ddi_soft_state_fini((void *)&aac_softstatep); 751 #ifdef DEBUG 752 mutex_destroy(&aac_prt_mutex); 753 #endif 754 return (0); 755 756 error: 757 AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!"); 758 return (rval); 759 } 760 761 static int 762 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 763 { 764 int instance, i; 765 struct aac_softstate *softs = NULL; 766 int attach_state = 0; 767 char *data; 768 769 DBCALLED(NULL, 1); 770 771 switch (cmd) { 772 case DDI_ATTACH: 773 break; 774 case DDI_RESUME: 775 return (DDI_FAILURE); 776 default: 777 return (DDI_FAILURE); 778 } 779 780 instance = ddi_get_instance(dip); 781 782 /* Get soft state */ 783 if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) { 784 AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state"); 785 goto error; 786 } 787 softs = ddi_get_soft_state(aac_softstatep, instance); 788 attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED; 789 790 softs->instance = instance; 791 softs->devinfo_p = dip; 792 softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr; 793 softs->addr_dma_attr.dma_attr_granular = 1; 794 softs->acc_attr = aac_acc_attr; 795 softs->reg_attr = aac_acc_attr; 796 softs->card = AAC_UNKNOWN_CARD; 797 #ifdef DEBUG 798 softs->debug_flags = aac_debug_flags; 799 softs->debug_fib_flags = aac_debug_fib_flags; 800 #endif 801 802 /* Initialize FMA */ 803 aac_fm_init(softs); 804 805 /* Check the card type */ 806 if (aac_check_card_type(softs) == AACERR) { 807 AACDB_PRINT(softs, CE_WARN, "Card not supported"); 808 goto error; 809 } 810 /* We have found the right card and everything is OK */ 811 attach_state |= AAC_ATTACH_CARD_DETECTED; 812 813 
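	/*
	 * Each setup step that follows records a bit in attach_state so that
	 * the error path at the bottom of aac_attach() can unwind exactly
	 * the steps that completed.
	 */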
/* Map PCI mem space */ 814 if (ddi_regs_map_setup(dip, 1, 815 (caddr_t *)&softs->pci_mem_base_vaddr, 0, 816 softs->map_size_min, &softs->reg_attr, 817 &softs->pci_mem_handle) != DDI_SUCCESS) 818 goto error; 819 820 softs->map_size = softs->map_size_min; 821 attach_state |= AAC_ATTACH_PCI_MEM_MAPPED; 822 823 AAC_DISABLE_INTR(softs); 824 825 /* Init mutexes and condvars */ 826 mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER, 827 DDI_INTR_PRI(softs->intr_pri)); 828 mutex_init(&softs->q_comp_mutex, NULL, MUTEX_DRIVER, 829 DDI_INTR_PRI(softs->intr_pri)); 830 mutex_init(&softs->time_mutex, NULL, MUTEX_DRIVER, 831 DDI_INTR_PRI(softs->intr_pri)); 832 mutex_init(&softs->ev_lock, NULL, MUTEX_DRIVER, 833 DDI_INTR_PRI(softs->intr_pri)); 834 mutex_init(&softs->aifq_mutex, NULL, 835 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri)); 836 cv_init(&softs->event, NULL, CV_DRIVER, NULL); 837 cv_init(&softs->sync_fib_cv, NULL, CV_DRIVER, NULL); 838 cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL); 839 cv_init(&softs->event_wait_cv, NULL, CV_DRIVER, NULL); 840 cv_init(&softs->event_disp_cv, NULL, CV_DRIVER, NULL); 841 cv_init(&softs->aifq_cv, NULL, CV_DRIVER, NULL); 842 attach_state |= AAC_ATTACH_KMUTEX_INITED; 843 844 /* Init the cmd queues */ 845 for (i = 0; i < AAC_CMDQ_NUM; i++) 846 aac_cmd_initq(&softs->q_wait[i]); 847 aac_cmd_initq(&softs->q_busy); 848 aac_cmd_initq(&softs->q_comp); 849 850 /* Check for legacy device naming support */ 851 softs->legacy = 1; /* default to use legacy name */ 852 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 853 "legacy-name-enable", &data) == DDI_SUCCESS)) { 854 if (strcmp(data, "no") == 0) { 855 AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled"); 856 softs->legacy = 0; 857 } 858 ddi_prop_free(data); 859 } 860 861 /* 862 * Everything has been set up till now, 863 * we will do some common attach. 864 */ 865 mutex_enter(&softs->io_lock); 866 if (aac_common_attach(softs) == AACERR) { 867 mutex_exit(&softs->io_lock); 868 goto error; 869 } 870 mutex_exit(&softs->io_lock); 871 attach_state |= AAC_ATTACH_COMM_SPACE_SETUP; 872 873 /* Check for buf breakup support */ 874 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 875 "breakup-enable", &data) == DDI_SUCCESS)) { 876 if (strcmp(data, "yes") == 0) { 877 AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled"); 878 softs->flags |= AAC_FLAGS_BRKUP; 879 } 880 ddi_prop_free(data); 881 } 882 softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer; 883 if (softs->flags & AAC_FLAGS_BRKUP) { 884 softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 885 DDI_PROP_DONTPASS, "dma-max", softs->dma_max); 886 } 887 888 if (aac_hba_setup(softs) != AACOK) 889 goto error; 890 attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP; 891 892 /* Create devctl/scsi nodes for cfgadm */ 893 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 894 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) { 895 AACDB_PRINT(softs, CE_WARN, "failed to create devctl node"); 896 goto error; 897 } 898 attach_state |= AAC_ATTACH_CREATE_DEVCTL; 899 900 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance), 901 DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 902 AACDB_PRINT(softs, CE_WARN, "failed to create scsi node"); 903 goto error; 904 } 905 attach_state |= AAC_ATTACH_CREATE_SCSI; 906 907 /* Create aac node for app. 
to issue ioctls */ 908 if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance), 909 DDI_PSEUDO, 0) != DDI_SUCCESS) { 910 AACDB_PRINT(softs, CE_WARN, "failed to create aac node"); 911 goto error; 912 } 913 914 /* Common attach is OK, so we are attached! */ 915 softs->state |= AAC_STATE_RUN; 916 917 /* Create event thread */ 918 softs->fibctx_p = &softs->aifctx; 919 if ((softs->event_thread = thread_create(NULL, 0, aac_event_thread, 920 softs, 0, &p0, TS_RUN, minclsyspri)) == NULL) { 921 AACDB_PRINT(softs, CE_WARN, "aif thread create failed"); 922 softs->state &= ~AAC_STATE_RUN; 923 goto error; 924 } 925 926 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 927 928 /* Create a thread for command timeout */ 929 softs->timeout_id = timeout(aac_timer, (void *)softs, 930 (aac_tick * drv_usectohz(1000000))); 931 932 /* Common attach is OK, so we are attached! */ 933 ddi_report_dev(dip); 934 AACDB_PRINT(softs, CE_NOTE, "aac attached ok"); 935 return (DDI_SUCCESS); 936 937 error: 938 if (attach_state & AAC_ATTACH_CREATE_SCSI) 939 ddi_remove_minor_node(dip, "scsi"); 940 if (attach_state & AAC_ATTACH_CREATE_DEVCTL) 941 ddi_remove_minor_node(dip, "devctl"); 942 if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP) 943 aac_common_detach(softs); 944 if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) { 945 (void) scsi_hba_detach(dip); 946 scsi_hba_tran_free(AAC_DIP2TRAN(dip)); 947 } 948 if (attach_state & AAC_ATTACH_KMUTEX_INITED) { 949 mutex_destroy(&softs->io_lock); 950 mutex_destroy(&softs->q_comp_mutex); 951 mutex_destroy(&softs->time_mutex); 952 mutex_destroy(&softs->ev_lock); 953 mutex_destroy(&softs->aifq_mutex); 954 cv_destroy(&softs->event); 955 cv_destroy(&softs->sync_fib_cv); 956 cv_destroy(&softs->drain_cv); 957 cv_destroy(&softs->event_wait_cv); 958 cv_destroy(&softs->event_disp_cv); 959 cv_destroy(&softs->aifq_cv); 960 } 961 if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED) 962 ddi_regs_map_free(&softs->pci_mem_handle); 963 aac_fm_fini(softs); 964 if (attach_state & AAC_ATTACH_CARD_DETECTED) 965 softs->card = AACERR; 966 if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED) 967 ddi_soft_state_free(aac_softstatep, instance); 968 return (DDI_FAILURE); 969 } 970 971 static int 972 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 973 { 974 scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip); 975 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 976 977 DBCALLED(softs, 1); 978 979 switch (cmd) { 980 case DDI_DETACH: 981 break; 982 case DDI_SUSPEND: 983 return (DDI_FAILURE); 984 default: 985 return (DDI_FAILURE); 986 } 987 988 mutex_enter(&softs->io_lock); 989 AAC_DISABLE_INTR(softs); 990 softs->state = AAC_STATE_STOPPED; 991 992 ddi_remove_minor_node(dip, "aac"); 993 ddi_remove_minor_node(dip, "scsi"); 994 ddi_remove_minor_node(dip, "devctl"); 995 mutex_exit(&softs->io_lock); 996 997 aac_common_detach(softs); 998 999 mutex_enter(&softs->io_lock); 1000 (void) scsi_hba_detach(dip); 1001 scsi_hba_tran_free(tran); 1002 mutex_exit(&softs->io_lock); 1003 1004 /* Stop timer */ 1005 mutex_enter(&softs->time_mutex); 1006 if (softs->timeout_id) { 1007 timeout_id_t tid = softs->timeout_id; 1008 softs->timeout_id = 0; 1009 1010 mutex_exit(&softs->time_mutex); 1011 (void) untimeout(tid); 1012 mutex_enter(&softs->time_mutex); 1013 } 1014 mutex_exit(&softs->time_mutex); 1015 1016 /* Destroy event thread */ 1017 mutex_enter(&softs->ev_lock); 1018 cv_signal(&softs->event_disp_cv); 1019 cv_wait(&softs->event_wait_cv, &softs->ev_lock); 1020 mutex_exit(&softs->ev_lock); 1021 1022 cv_destroy(&softs->aifq_cv); 1023 
cv_destroy(&softs->event_disp_cv); 1024 cv_destroy(&softs->event_wait_cv); 1025 cv_destroy(&softs->drain_cv); 1026 cv_destroy(&softs->sync_fib_cv); 1027 cv_destroy(&softs->event); 1028 mutex_destroy(&softs->aifq_mutex); 1029 mutex_destroy(&softs->ev_lock); 1030 mutex_destroy(&softs->time_mutex); 1031 mutex_destroy(&softs->q_comp_mutex); 1032 mutex_destroy(&softs->io_lock); 1033 1034 ddi_regs_map_free(&softs->pci_mem_handle); 1035 aac_fm_fini(softs); 1036 softs->hwif = AAC_HWIF_UNKNOWN; 1037 softs->card = AAC_UNKNOWN_CARD; 1038 ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip)); 1039 1040 return (DDI_SUCCESS); 1041 } 1042 1043 /*ARGSUSED*/ 1044 static int 1045 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1046 { 1047 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1048 1049 DBCALLED(softs, 1); 1050 1051 mutex_enter(&softs->io_lock); 1052 AAC_DISABLE_INTR(softs); 1053 (void) aac_shutdown(softs); 1054 mutex_exit(&softs->io_lock); 1055 1056 return (DDI_SUCCESS); 1057 } 1058 1059 /* 1060 * quiesce(9E) entry point. 1061 * 1062 * This function is called when the system is single-threaded at high 1063 * PIL with preemption disabled. Therefore, this function must not be 1064 * blocked. 1065 * 1066 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 1067 * DDI_FAILURE indicates an error condition and should almost never happen. 1068 */ 1069 static int 1070 aac_quiesce(dev_info_t *dip) 1071 { 1072 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1073 1074 if (softs == NULL) 1075 return (DDI_FAILURE); 1076 1077 _NOTE(ASSUMING_PROTECTED(softs->state)) 1078 AAC_DISABLE_INTR(softs); 1079 1080 return (DDI_SUCCESS); 1081 } 1082 1083 /* ARGSUSED */ 1084 static int 1085 aac_getinfo(dev_info_t *self, ddi_info_cmd_t infocmd, void *arg, 1086 void **result) 1087 { 1088 int error = DDI_SUCCESS; 1089 1090 switch (infocmd) { 1091 case DDI_INFO_DEVT2INSTANCE: 1092 *result = (void *)(intptr_t)(MINOR2INST(getminor((dev_t)arg))); 1093 break; 1094 default: 1095 error = DDI_FAILURE; 1096 } 1097 return (error); 1098 } 1099 1100 /* 1101 * Bring the controller down to a dormant state and detach all child devices. 1102 * This function is called before detach or system shutdown. 1103 * Note: we can assume that the q_wait on the controller is empty, as we 1104 * won't allow shutdown if any device is open. 1105 */ 1106 static int 1107 aac_shutdown(struct aac_softstate *softs) 1108 { 1109 ddi_acc_handle_t acc; 1110 struct aac_close_command *cc; 1111 int rval; 1112 1113 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 1114 acc = softs->sync_ac.slotp->fib_acc_handle; 1115 1116 cc = (struct aac_close_command *)&softs->sync_ac.slotp->fibp->data[0]; 1117 1118 ddi_put32(acc, &cc->Command, VM_CloseAll); 1119 ddi_put32(acc, &cc->ContainerId, 0xfffffffful); 1120 1121 /* Flush all caches, set FW to write through mode */ 1122 rval = aac_sync_fib(softs, ContainerCommand, 1123 AAC_FIB_SIZEOF(struct aac_close_command)); 1124 aac_sync_fib_slot_release(softs, &softs->sync_ac); 1125 1126 AACDB_PRINT(softs, CE_NOTE, 1127 "shutting down aac %s", (rval == AACOK) ? 
"ok" : "fail"); 1128 return (rval); 1129 } 1130 1131 static uint_t 1132 aac_softintr(caddr_t arg) 1133 { 1134 struct aac_softstate *softs = (void *)arg; 1135 1136 if (!AAC_IS_Q_EMPTY(&softs->q_comp)) { 1137 aac_drain_comp_q(softs); 1138 } 1139 return (DDI_INTR_CLAIMED); 1140 } 1141 1142 /* 1143 * Setup auto sense data for pkt 1144 */ 1145 static void 1146 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key, 1147 uchar_t add_code, uchar_t qual_code, uint64_t info) 1148 { 1149 struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp); 1150 1151 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */ 1152 pkt->pkt_state |= STATE_ARQ_DONE; 1153 1154 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1155 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1156 arqstat->sts_rqpkt_resid = 0; 1157 arqstat->sts_rqpkt_state = 1158 STATE_GOT_BUS | 1159 STATE_GOT_TARGET | 1160 STATE_SENT_CMD | 1161 STATE_XFERRED_DATA; 1162 arqstat->sts_rqpkt_statistics = 0; 1163 1164 if (info <= 0xfffffffful) { 1165 arqstat->sts_sensedata.es_valid = 1; 1166 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE; 1167 arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT; 1168 arqstat->sts_sensedata.es_key = key; 1169 arqstat->sts_sensedata.es_add_code = add_code; 1170 arqstat->sts_sensedata.es_qual_code = qual_code; 1171 1172 arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF; 1173 arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF; 1174 arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF; 1175 arqstat->sts_sensedata.es_info_4 = info & 0xFF; 1176 } else { /* 64-bit LBA */ 1177 struct scsi_descr_sense_hdr *dsp; 1178 struct scsi_information_sense_descr *isd; 1179 1180 dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata; 1181 dsp->ds_class = CLASS_EXTENDED_SENSE; 1182 dsp->ds_code = CODE_FMT_DESCR_CURRENT; 1183 dsp->ds_key = key; 1184 dsp->ds_add_code = add_code; 1185 dsp->ds_qual_code = qual_code; 1186 dsp->ds_addl_sense_length = 1187 sizeof (struct scsi_information_sense_descr); 1188 1189 isd = (struct scsi_information_sense_descr *)(dsp+1); 1190 isd->isd_descr_type = DESCR_INFORMATION; 1191 isd->isd_valid = 1; 1192 isd->isd_information[0] = (info >> 56) & 0xFF; 1193 isd->isd_information[1] = (info >> 48) & 0xFF; 1194 isd->isd_information[2] = (info >> 40) & 0xFF; 1195 isd->isd_information[3] = (info >> 32) & 0xFF; 1196 isd->isd_information[4] = (info >> 24) & 0xFF; 1197 isd->isd_information[5] = (info >> 16) & 0xFF; 1198 isd->isd_information[6] = (info >> 8) & 0xFF; 1199 isd->isd_information[7] = (info) & 0xFF; 1200 } 1201 } 1202 1203 /* 1204 * Setup auto sense data for HARDWARE ERROR 1205 */ 1206 static void 1207 aac_set_arq_data_hwerr(struct aac_cmd *acp) 1208 { 1209 union scsi_cdb *cdbp; 1210 uint64_t err_blkno; 1211 1212 cdbp = (void *)acp->pkt->pkt_cdbp; 1213 err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp); 1214 aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno); 1215 } 1216 1217 /* 1218 * Send a command to the adapter in New Comm. 
interface 1219 */ 1220 static int 1221 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp) 1222 { 1223 uint32_t index, device; 1224 1225 index = PCI_MEM_GET32(softs, AAC_IQUE); 1226 if (index == 0xffffffffUL) { 1227 index = PCI_MEM_GET32(softs, AAC_IQUE); 1228 if (index == 0xffffffffUL) 1229 return (AACERR); 1230 } 1231 1232 device = index; 1233 PCI_MEM_PUT32(softs, device, 1234 (uint32_t)(slotp->fib_phyaddr & 0xfffffffful)); 1235 device += 4; 1236 PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32)); 1237 device += 4; 1238 PCI_MEM_PUT32(softs, device, slotp->acp->fib_size); 1239 PCI_MEM_PUT32(softs, AAC_IQUE, index); 1240 return (AACOK); 1241 } 1242 1243 static void 1244 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp) 1245 { 1246 struct aac_device *dvp = acp->dvp; 1247 int q = AAC_CMDQ(acp); 1248 1249 if (acp->slotp) { /* outstanding cmd */ 1250 if (!(acp->flags & AAC_CMD_IN_SYNC_SLOT)) { 1251 aac_release_slot(softs, acp->slotp); 1252 acp->slotp = NULL; 1253 } 1254 if (dvp) { 1255 dvp->ncmds[q]--; 1256 if (dvp->throttle[q] == AAC_THROTTLE_DRAIN && 1257 dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC) 1258 aac_set_throttle(softs, dvp, q, 1259 softs->total_slots); 1260 /* 1261 * Setup auto sense data for UNIT ATTENTION 1262 * Each lun should generate a unit attention 1263 * condition when reset. 1264 * Phys. drives are treated as logical ones 1265 * during error recovery. 1266 */ 1267 if (dvp->type == AAC_DEV_LD) { 1268 struct aac_container *ctp = 1269 (struct aac_container *)dvp; 1270 if (ctp->reset == 0) 1271 goto noreset; 1272 1273 AACDB_PRINT(softs, CE_NOTE, 1274 "Unit attention: reset"); 1275 ctp->reset = 0; 1276 aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 1277 0x29, 0x02, 0); 1278 } 1279 } 1280 noreset: 1281 softs->bus_ncmds[q]--; 1282 aac_cmd_delete(&softs->q_busy, acp); 1283 } else { /* cmd in waiting queue */ 1284 aac_cmd_delete(&softs->q_wait[q], acp); 1285 } 1286 1287 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */ 1288 mutex_enter(&softs->q_comp_mutex); 1289 aac_cmd_enqueue(&softs->q_comp, acp); 1290 mutex_exit(&softs->q_comp_mutex); 1291 } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */ 1292 cv_broadcast(&softs->event); 1293 } 1294 } 1295 1296 static void 1297 aac_handle_io(struct aac_softstate *softs, int index) 1298 { 1299 struct aac_slot *slotp; 1300 struct aac_cmd *acp; 1301 uint32_t fast; 1302 1303 fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE; 1304 index >>= 2; 1305 1306 /* Make sure firmware reported index is valid */ 1307 ASSERT(index >= 0 && index < softs->total_slots); 1308 slotp = &softs->io_slot[index]; 1309 ASSERT(slotp->index == index); 1310 acp = slotp->acp; 1311 1312 if (acp == NULL || acp->slotp != slotp) { 1313 cmn_err(CE_WARN, 1314 "Firmware error: invalid slot index received from FW"); 1315 return; 1316 } 1317 1318 acp->flags |= AAC_CMD_CMPLT; 1319 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU); 1320 1321 if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) { 1322 /* 1323 * For fast response IO, the firmware do not return any FIB 1324 * data, so we need to fill in the FIB status and state so that 1325 * FIB users can handle it correctly. 
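		 * (The fast-response indication is carried in the index value
		 * posted by the adapter, via AAC_SENDERADDR_MASK_FAST_RESPONSE;
		 * in that case the driver marks the FIB's XferState with
		 * AAC_FIBSTATE_DONEADAP and stores ST_OK in its data area
		 * itself, as done just below, instead of re-reading the FIB
		 * from the adapter.)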
		 */
		if (fast) {
			uint32_t state;

			state = ddi_get32(slotp->fib_acc_handle,
			    &slotp->fibp->Header.XferState);
			/*
			 * Update state for CPU not for device, no DMA sync
			 * needed
			 */
			ddi_put32(slotp->fib_acc_handle,
			    &slotp->fibp->Header.XferState,
			    state | AAC_FIBSTATE_DONEADAP);
			ddi_put32(slotp->fib_acc_handle,
			    (void *)&slotp->fibp->data[0], ST_OK);
		}

		/* Handle completed ac */
		acp->ac_comp(softs, acp);
	} else {
		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
		acp->flags |= AAC_CMD_ERR;
		if (acp->pkt) {
			acp->pkt->pkt_reason = CMD_TRAN_ERR;
			acp->pkt->pkt_statistics = 0;
		}
	}
	aac_end_io(softs, acp);
}

/*
 * Interrupt handler for the New Comm. interface
 * The New Comm. interface uses a different mechanism for interrupts: there
 * are no explicit message queues, and the driver only needs to access the
 * mapped PCI memory space to find the completed FIB or AIF.
 */
static int
aac_process_intr_new(struct aac_softstate *softs)
{
	uint32_t index;

	index = AAC_OUTB_GET(softs);
	if (index == 0xfffffffful)
		index = AAC_OUTB_GET(softs);
	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
		return (0);
	}
	if (index != 0xfffffffful) {
		do {
			if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
				aac_handle_io(softs, index);
			} else if (index != 0xfffffffeul) {
				struct aac_fib *fibp;	/* FIB in AIF queue */
				uint16_t fib_size;

				/*
				 * 0xfffffffe means that the controller wants
				 * more work, ignore it for now. Otherwise,
				 * an AIF was received.
				 */
				index &= ~2;

				fibp = (struct aac_fib *)(softs->
				    pci_mem_base_vaddr + index);
				fib_size = PCI_MEM_GET16(softs, index +
				    offsetof(struct aac_fib, Header.Size));

				aac_save_aif(softs, softs->pci_mem_handle,
				    fibp, fib_size);

				/*
				 * AIF memory is owned by the adapter, so let it
				 * know that we are done with it.
				 */
				AAC_OUTB_SET(softs, index);
				AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
			}

			index = AAC_OUTB_GET(softs);
		} while (index != 0xfffffffful);

		/*
		 * Process waiting cmds before starting new ones to
		 * ensure the first IOs are serviced first.
		 */
		aac_start_waiting_io(softs);
		return (AAC_DB_COMMAND_READY);
	} else {
		return (0);
	}
}

static uint_t
aac_intr_new(caddr_t arg, caddr_t arg1 __unused)
{
	struct aac_softstate *softs = (void *)arg;
	uint_t rval;

	mutex_enter(&softs->io_lock);
	if (aac_process_intr_new(softs))
		rval = DDI_INTR_CLAIMED;
	else
		rval = DDI_INTR_UNCLAIMED;
	mutex_exit(&softs->io_lock);

	aac_drain_comp_q(softs);
	return (rval);
}

/*
 * Interrupt handler for the old interface
 * Explicit message queues are used to send FIBs to and get completed FIBs
 * from the adapter. The driver and the adapter maintain the queues in a
 * producer/consumer manner; the driver has to query the queues to find
 * completed FIBs.
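 * For example, in aac_process_intr_old() below, completed FIBs are consumed
 * from the host normal response queue with aac_fib_dequeue(softs,
 * AAC_HOST_NORM_RESP_Q, ...), and AIF acknowledgements are produced onto the
 * adapter normal response queue with aac_fib_enqueue(softs,
 * AAC_ADAP_NORM_RESP_Q, ...).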
1441 */ 1442 static int 1443 aac_process_intr_old(struct aac_softstate *softs) 1444 { 1445 uint16_t status; 1446 1447 status = AAC_STATUS_GET(softs); 1448 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 1449 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 1450 return (DDI_INTR_UNCLAIMED); 1451 } 1452 if (status & AAC_DB_RESPONSE_READY) { 1453 int slot_idx; 1454 1455 /* ACK the intr */ 1456 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY); 1457 (void) AAC_STATUS_GET(softs); 1458 while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q, 1459 &slot_idx) == AACOK) 1460 aac_handle_io(softs, slot_idx); 1461 1462 /* 1463 * Process waiting cmds before start new ones to 1464 * ensure first IOs are serviced first. 1465 */ 1466 aac_start_waiting_io(softs); 1467 return (AAC_DB_RESPONSE_READY); 1468 } else if (status & AAC_DB_COMMAND_READY) { 1469 int aif_idx; 1470 1471 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY); 1472 (void) AAC_STATUS_GET(softs); 1473 if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) == 1474 AACOK) { 1475 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 1476 struct aac_fib *fibp; /* FIB in communication space */ 1477 uint16_t fib_size; 1478 uint32_t fib_xfer_state; 1479 uint32_t addr, size; 1480 1481 ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS)); 1482 1483 #define AAC_SYNC_AIF(softs, aif_idx, type) \ 1484 { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \ 1485 offsetof(struct aac_comm_space, \ 1486 adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \ 1487 (type)); } 1488 1489 /* Copy AIF from adapter to the empty AIF slot */ 1490 AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU); 1491 fibp = &softs->comm_space->adapter_fibs[aif_idx]; 1492 fib_size = ddi_get16(acc, &fibp->Header.Size); 1493 1494 aac_save_aif(softs, acc, fibp, fib_size); 1495 1496 /* Complete AIF back to adapter with good status */ 1497 fib_xfer_state = LE_32(fibp->Header.XferState); 1498 if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) { 1499 ddi_put32(acc, &fibp->Header.XferState, 1500 fib_xfer_state | AAC_FIBSTATE_DONEHOST); 1501 ddi_put32(acc, (void *)&fibp->data[0], ST_OK); 1502 if (fib_size > AAC_FIB_SIZE) 1503 ddi_put16(acc, &fibp->Header.Size, 1504 AAC_FIB_SIZE); 1505 AAC_SYNC_AIF(softs, aif_idx, 1506 DDI_DMA_SYNC_FORDEV); 1507 } 1508 1509 /* Put the AIF response on the response queue */ 1510 addr = ddi_get32(acc, 1511 &softs->comm_space->adapter_fibs[aif_idx]. \ 1512 Header.SenderFibAddress); 1513 size = (uint32_t)ddi_get16(acc, 1514 &softs->comm_space->adapter_fibs[aif_idx]. \ 1515 Header.Size); 1516 ddi_put32(acc, 1517 &softs->comm_space->adapter_fibs[aif_idx]. 
\ 1518 Header.ReceiverFibAddress, addr); 1519 if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q, 1520 addr, size) == AACERR) 1521 cmn_err(CE_NOTE, "!AIF ack failed"); 1522 } 1523 return (AAC_DB_COMMAND_READY); 1524 } else if (status & AAC_DB_PRINTF_READY) { 1525 /* ACK the intr */ 1526 AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY); 1527 (void) AAC_STATUS_GET(softs); 1528 (void) ddi_dma_sync(softs->comm_space_dma_handle, 1529 offsetof(struct aac_comm_space, adapter_print_buf), 1530 AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU); 1531 if (aac_check_dma_handle(softs->comm_space_dma_handle) == 1532 DDI_SUCCESS) 1533 cmn_err(CE_NOTE, "MSG From Adapter: %s", 1534 softs->comm_space->adapter_print_buf); 1535 else 1536 ddi_fm_service_impact(softs->devinfo_p, 1537 DDI_SERVICE_UNAFFECTED); 1538 AAC_NOTIFY(softs, AAC_DB_PRINTF_READY); 1539 return (AAC_DB_PRINTF_READY); 1540 } else if (status & AAC_DB_COMMAND_NOT_FULL) { 1541 /* 1542 * Without these two condition statements, the OS could hang 1543 * after a while, especially if there are a lot of AIF's to 1544 * handle, for instance if a drive is pulled from an array 1545 * under heavy load. 1546 */ 1547 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1548 return (AAC_DB_COMMAND_NOT_FULL); 1549 } else if (status & AAC_DB_RESPONSE_NOT_FULL) { 1550 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1551 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL); 1552 return (AAC_DB_RESPONSE_NOT_FULL); 1553 } else { 1554 return (0); 1555 } 1556 } 1557 1558 static uint_t 1559 aac_intr_old(caddr_t arg, caddr_t arg1 __unused) 1560 { 1561 struct aac_softstate *softs = (void *)arg; 1562 int rval; 1563 1564 mutex_enter(&softs->io_lock); 1565 if (aac_process_intr_old(softs)) 1566 rval = DDI_INTR_CLAIMED; 1567 else 1568 rval = DDI_INTR_UNCLAIMED; 1569 mutex_exit(&softs->io_lock); 1570 1571 aac_drain_comp_q(softs); 1572 return (rval); 1573 } 1574 1575 /* 1576 * Query FIXED or MSI interrupts 1577 */ 1578 static int 1579 aac_query_intrs(struct aac_softstate *softs, int intr_type) 1580 { 1581 dev_info_t *dip = softs->devinfo_p; 1582 int avail, actual, count; 1583 int i, flag, ret; 1584 1585 AACDB_PRINT(softs, CE_NOTE, 1586 "aac_query_intrs:interrupt type 0x%x", intr_type); 1587 1588 /* Get number of interrupts */ 1589 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 1590 if ((ret != DDI_SUCCESS) || (count == 0)) { 1591 AACDB_PRINT(softs, CE_WARN, 1592 "ddi_intr_get_nintrs() failed, ret %d count %d", 1593 ret, count); 1594 return (DDI_FAILURE); 1595 } 1596 1597 /* Get number of available interrupts */ 1598 ret = ddi_intr_get_navail(dip, intr_type, &avail); 1599 if ((ret != DDI_SUCCESS) || (avail == 0)) { 1600 AACDB_PRINT(softs, CE_WARN, 1601 "ddi_intr_get_navail() failed, ret %d avail %d", 1602 ret, avail); 1603 return (DDI_FAILURE); 1604 } 1605 1606 AACDB_PRINT(softs, CE_NOTE, 1607 "ddi_intr_get_nvail returned %d, navail() returned %d", 1608 count, avail); 1609 1610 /* Allocate an array of interrupt handles */ 1611 softs->intr_size = count * sizeof (ddi_intr_handle_t); 1612 softs->htable = kmem_alloc(softs->intr_size, KM_SLEEP); 1613 1614 if (intr_type == DDI_INTR_TYPE_MSI) { 1615 count = 1; /* only one vector needed by now */ 1616 flag = DDI_INTR_ALLOC_STRICT; 1617 } else { /* must be DDI_INTR_TYPE_FIXED */ 1618 flag = DDI_INTR_ALLOC_NORMAL; 1619 } 1620 1621 /* Call ddi_intr_alloc() */ 1622 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0, 1623 count, &actual, flag); 1624 1625 if ((ret != DDI_SUCCESS) || (actual == 0)) { 1626 AACDB_PRINT(softs, CE_WARN, 1627 "ddi_intr_alloc() 
failed, ret = %d", ret); 1628 actual = 0; 1629 goto error; 1630 } 1631 1632 if (actual < count) { 1633 AACDB_PRINT(softs, CE_NOTE, 1634 "Requested: %d, Received: %d", count, actual); 1635 goto error; 1636 } 1637 1638 softs->intr_cnt = actual; 1639 1640 /* Get priority for first msi, assume remaining are all the same */ 1641 if ((ret = ddi_intr_get_pri(softs->htable[0], 1642 &softs->intr_pri)) != DDI_SUCCESS) { 1643 AACDB_PRINT(softs, CE_WARN, 1644 "ddi_intr_get_pri() failed, ret = %d", ret); 1645 goto error; 1646 } 1647 1648 /* Test for high level mutex */ 1649 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) { 1650 AACDB_PRINT(softs, CE_WARN, 1651 "aac_query_intrs: Hi level interrupt not supported"); 1652 goto error; 1653 } 1654 1655 return (DDI_SUCCESS); 1656 1657 error: 1658 /* Free already allocated intr */ 1659 for (i = 0; i < actual; i++) 1660 (void) ddi_intr_free(softs->htable[i]); 1661 1662 kmem_free(softs->htable, softs->intr_size); 1663 return (DDI_FAILURE); 1664 } 1665 1666 1667 /* 1668 * Register FIXED or MSI interrupts, and enable them 1669 */ 1670 static int 1671 aac_add_intrs(struct aac_softstate *softs) 1672 { 1673 int i, ret; 1674 int actual; 1675 ddi_intr_handler_t *aac_intr; 1676 1677 actual = softs->intr_cnt; 1678 aac_intr = ((softs->flags & AAC_FLAGS_NEW_COMM) ? 1679 aac_intr_new : aac_intr_old); 1680 1681 /* Call ddi_intr_add_handler() */ 1682 for (i = 0; i < actual; i++) { 1683 if ((ret = ddi_intr_add_handler(softs->htable[i], 1684 aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) { 1685 cmn_err(CE_WARN, 1686 "ddi_intr_add_handler() failed ret = %d", ret); 1687 1688 /* Free already allocated intr */ 1689 for (i = 0; i < actual; i++) 1690 (void) ddi_intr_free(softs->htable[i]); 1691 1692 kmem_free(softs->htable, softs->intr_size); 1693 return (DDI_FAILURE); 1694 } 1695 } 1696 1697 if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap)) 1698 != DDI_SUCCESS) { 1699 cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret); 1700 1701 /* Free already allocated intr */ 1702 for (i = 0; i < actual; i++) 1703 (void) ddi_intr_free(softs->htable[i]); 1704 1705 kmem_free(softs->htable, softs->intr_size); 1706 return (DDI_FAILURE); 1707 } 1708 1709 return (DDI_SUCCESS); 1710 } 1711 1712 /* 1713 * Unregister FIXED or MSI interrupts 1714 */ 1715 static void 1716 aac_remove_intrs(struct aac_softstate *softs) 1717 { 1718 int i; 1719 1720 /* Disable all interrupts */ 1721 (void) aac_disable_intrs(softs); 1722 /* Call ddi_intr_remove_handler() */ 1723 for (i = 0; i < softs->intr_cnt; i++) { 1724 (void) ddi_intr_remove_handler(softs->htable[i]); 1725 (void) ddi_intr_free(softs->htable[i]); 1726 } 1727 1728 kmem_free(softs->htable, softs->intr_size); 1729 } 1730 1731 static int 1732 aac_enable_intrs(struct aac_softstate *softs) 1733 { 1734 int rval = AACOK; 1735 1736 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1737 /* for MSI block enable */ 1738 if (ddi_intr_block_enable(softs->htable, softs->intr_cnt) != 1739 DDI_SUCCESS) 1740 rval = AACERR; 1741 } else { 1742 int i; 1743 1744 /* Call ddi_intr_enable() for legacy/MSI non block enable */ 1745 for (i = 0; i < softs->intr_cnt; i++) { 1746 if (ddi_intr_enable(softs->htable[i]) != DDI_SUCCESS) 1747 rval = AACERR; 1748 } 1749 } 1750 return (rval); 1751 } 1752 1753 static int 1754 aac_disable_intrs(struct aac_softstate *softs) 1755 { 1756 int rval = AACOK; 1757 1758 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1759 /* Call ddi_intr_block_disable() */ 1760 if (ddi_intr_block_disable(softs->htable, softs->intr_cnt) != 1761 
DDI_SUCCESS) 1762 rval = AACERR; 1763 } else { 1764 int i; 1765 1766 for (i = 0; i < softs->intr_cnt; i++) { 1767 if (ddi_intr_disable(softs->htable[i]) != DDI_SUCCESS) 1768 rval = AACERR; 1769 } 1770 } 1771 return (rval); 1772 } 1773 1774 /* 1775 * Set pkt_reason and OR in pkt_statistics flag 1776 */ 1777 static void 1778 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp, 1779 uchar_t reason, uint_t stat) 1780 { 1781 #ifndef __lock_lint 1782 _NOTE(ARGUNUSED(softs)) 1783 #endif 1784 if (acp->pkt->pkt_reason == CMD_CMPLT) 1785 acp->pkt->pkt_reason = reason; 1786 acp->pkt->pkt_statistics |= stat; 1787 } 1788 1789 /* 1790 * Handle a finished pkt of soft SCMD 1791 */ 1792 static void 1793 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp) 1794 { 1795 ASSERT(acp->pkt); 1796 1797 acp->flags |= AAC_CMD_CMPLT; 1798 1799 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \ 1800 STATE_SENT_CMD | STATE_GOT_STATUS; 1801 if (acp->pkt->pkt_state & STATE_XFERRED_DATA) 1802 acp->pkt->pkt_resid = 0; 1803 1804 /* AAC_CMD_NO_INTR means no complete callback */ 1805 if (!(acp->flags & AAC_CMD_NO_INTR)) { 1806 mutex_enter(&softs->q_comp_mutex); 1807 aac_cmd_enqueue(&softs->q_comp, acp); 1808 mutex_exit(&softs->q_comp_mutex); 1809 ddi_trigger_softintr(softs->softint_id); 1810 } 1811 } 1812 1813 /* 1814 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old() 1815 */ 1816 1817 /* 1818 * Handle completed logical device IO command 1819 */ 1820 /*ARGSUSED*/ 1821 static void 1822 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1823 { 1824 struct aac_slot *slotp = acp->slotp; 1825 struct aac_blockread_response *resp; 1826 uint32_t status; 1827 1828 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1829 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1830 1831 acp->pkt->pkt_state |= STATE_GOT_STATUS; 1832 1833 /* 1834 * block_read/write has a similar response header, use blockread 1835 * response for both. 1836 */ 1837 resp = (struct aac_blockread_response *)&slotp->fibp->data[0]; 1838 status = ddi_get32(slotp->fib_acc_handle, &resp->Status); 1839 if (status == ST_OK) { 1840 acp->pkt->pkt_resid = 0; 1841 acp->pkt->pkt_state |= STATE_XFERRED_DATA; 1842 } else { 1843 aac_set_arq_data_hwerr(acp); 1844 } 1845 } 1846 1847 /* 1848 * Handle completed phys. 
device IO command 1849 */ 1850 static void 1851 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1852 { 1853 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 1854 struct aac_fib *fibp = acp->slotp->fibp; 1855 struct scsi_pkt *pkt = acp->pkt; 1856 struct aac_srb_reply *resp; 1857 uint32_t resp_status; 1858 1859 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1860 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1861 1862 resp = (struct aac_srb_reply *)&fibp->data[0]; 1863 resp_status = ddi_get32(acc, &resp->status); 1864 1865 /* First check FIB status */ 1866 if (resp_status == ST_OK) { 1867 uint32_t scsi_status; 1868 uint32_t srb_status; 1869 uint32_t data_xfer_length; 1870 1871 scsi_status = ddi_get32(acc, &resp->scsi_status); 1872 srb_status = ddi_get32(acc, &resp->srb_status); 1873 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length); 1874 1875 *pkt->pkt_scbp = (uint8_t)scsi_status; 1876 pkt->pkt_state |= STATE_GOT_STATUS; 1877 if (scsi_status == STATUS_GOOD) { 1878 uchar_t cmd = ((union scsi_cdb *)(void *) 1879 (pkt->pkt_cdbp))->scc_cmd; 1880 1881 /* Next check SRB status */ 1882 switch (srb_status & 0x3f) { 1883 case SRB_STATUS_DATA_OVERRUN: 1884 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \ 1885 "scmd=%d, xfer=%d, buflen=%d", 1886 (uint32_t)cmd, data_xfer_length, 1887 acp->bcount); 1888 1889 switch (cmd) { 1890 case SCMD_READ: 1891 case SCMD_WRITE: 1892 case SCMD_READ_G1: 1893 case SCMD_WRITE_G1: 1894 case SCMD_READ_G4: 1895 case SCMD_WRITE_G4: 1896 case SCMD_READ_G5: 1897 case SCMD_WRITE_G5: 1898 aac_set_pkt_reason(softs, acp, 1899 CMD_DATA_OVR, 0); 1900 break; 1901 } 1902 /*FALLTHRU*/ 1903 case SRB_STATUS_ERROR_RECOVERY: 1904 case SRB_STATUS_PENDING: 1905 case SRB_STATUS_SUCCESS: 1906 /* 1907 * pkt_resid should only be calculated if the 1908 * status is ERROR_RECOVERY/PENDING/SUCCESS/ 1909 * OVERRUN/UNDERRUN 1910 */ 1911 if (data_xfer_length) { 1912 pkt->pkt_state |= STATE_XFERRED_DATA; 1913 pkt->pkt_resid = acp->bcount - \ 1914 data_xfer_length; 1915 ASSERT(pkt->pkt_resid >= 0); 1916 } 1917 break; 1918 case SRB_STATUS_ABORTED: 1919 AACDB_PRINT(softs, CE_NOTE, 1920 "SRB_STATUS_ABORTED, xfer=%d, resid=%d", 1921 data_xfer_length, pkt->pkt_resid); 1922 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 1923 STAT_ABORTED); 1924 break; 1925 case SRB_STATUS_ABORT_FAILED: 1926 AACDB_PRINT(softs, CE_NOTE, 1927 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \ 1928 "resid=%d", data_xfer_length, 1929 pkt->pkt_resid); 1930 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL, 1931 0); 1932 break; 1933 case SRB_STATUS_PARITY_ERROR: 1934 AACDB_PRINT(softs, CE_NOTE, 1935 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \ 1936 "resid=%d", data_xfer_length, 1937 pkt->pkt_resid); 1938 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0); 1939 break; 1940 case SRB_STATUS_NO_DEVICE: 1941 case SRB_STATUS_INVALID_PATH_ID: 1942 case SRB_STATUS_INVALID_TARGET_ID: 1943 case SRB_STATUS_INVALID_LUN: 1944 case SRB_STATUS_SELECTION_TIMEOUT: 1945 #ifdef DEBUG 1946 if (AAC_DEV_IS_VALID(acp->dvp)) { 1947 AACDB_PRINT(softs, CE_NOTE, 1948 "SRB_STATUS_NO_DEVICE(%d), " \ 1949 "xfer=%d, resid=%d ", 1950 srb_status & 0x3f, 1951 data_xfer_length, pkt->pkt_resid); 1952 } 1953 #endif 1954 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0); 1955 break; 1956 case SRB_STATUS_COMMAND_TIMEOUT: 1957 case SRB_STATUS_TIMEOUT: 1958 AACDB_PRINT(softs, CE_NOTE, 1959 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \ 1960 "resid=%d", data_xfer_length, 1961 pkt->pkt_resid); 1962 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 1963 STAT_TIMEOUT); 1964 break; 1965 case 
SRB_STATUS_BUS_RESET: 1966 AACDB_PRINT(softs, CE_NOTE, 1967 "SRB_STATUS_BUS_RESET, xfer=%d, " \ 1968 "resid=%d", data_xfer_length, 1969 pkt->pkt_resid); 1970 aac_set_pkt_reason(softs, acp, CMD_RESET, 1971 STAT_BUS_RESET); 1972 break; 1973 default: 1974 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \ 1975 "xfer=%d, resid=%d", srb_status & 0x3f, 1976 data_xfer_length, pkt->pkt_resid); 1977 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1978 break; 1979 } 1980 } else if (scsi_status == STATUS_CHECK) { 1981 /* CHECK CONDITION */ 1982 struct scsi_arq_status *arqstat = 1983 (void *)(pkt->pkt_scbp); 1984 uint32_t sense_data_size; 1985 1986 pkt->pkt_state |= STATE_ARQ_DONE; 1987 1988 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1989 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1990 arqstat->sts_rqpkt_resid = 0; 1991 arqstat->sts_rqpkt_state = 1992 STATE_GOT_BUS | 1993 STATE_GOT_TARGET | 1994 STATE_SENT_CMD | 1995 STATE_XFERRED_DATA; 1996 arqstat->sts_rqpkt_statistics = 0; 1997 1998 sense_data_size = ddi_get32(acc, 1999 &resp->sense_data_size); 2000 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE); 2001 AACDB_PRINT(softs, CE_NOTE, 2002 "CHECK CONDITION: sense len=%d, xfer len=%d", 2003 sense_data_size, data_xfer_length); 2004 2005 if (sense_data_size > SENSE_LENGTH) 2006 sense_data_size = SENSE_LENGTH; 2007 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata, 2008 (uint8_t *)resp->sense_data, sense_data_size, 2009 DDI_DEV_AUTOINCR); 2010 } else { 2011 AACDB_PRINT(softs, CE_WARN, "invaild scsi status: " \ 2012 "scsi_status=%d, srb_status=%d", 2013 scsi_status, srb_status); 2014 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 2015 } 2016 } else { 2017 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d", 2018 resp_status); 2019 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 2020 } 2021 } 2022 2023 /* 2024 * Handle completed IOCTL command 2025 */ 2026 /*ARGSUSED*/ 2027 void 2028 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2029 { 2030 struct aac_slot *slotp = acp->slotp; 2031 2032 /* 2033 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb() 2034 * may wait on softs->event, so use cv_broadcast() instead 2035 * of cv_signal(). 
2036 */ 2037 ASSERT(acp->flags & AAC_CMD_SYNC); 2038 ASSERT(acp->flags & AAC_CMD_NO_CB); 2039 2040 /* Get the size of the response FIB from its FIB.Header.Size field */ 2041 acp->fib_size = ddi_get16(slotp->fib_acc_handle, 2042 &slotp->fibp->Header.Size); 2043 2044 ASSERT(acp->fib_size <= softs->aac_max_fib_size); 2045 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp, 2046 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR); 2047 } 2048 2049 /* 2050 * Handle completed sync fib command 2051 */ 2052 /*ARGSUSED*/ 2053 void 2054 aac_sync_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2055 { 2056 } 2057 2058 /* 2059 * Handle completed Flush command 2060 */ 2061 /*ARGSUSED*/ 2062 static void 2063 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2064 { 2065 struct aac_slot *slotp = acp->slotp; 2066 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2067 struct aac_synchronize_reply *resp; 2068 uint32_t status; 2069 2070 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2071 2072 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2073 2074 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0]; 2075 status = ddi_get32(acc, &resp->Status); 2076 if (status != CT_OK) 2077 aac_set_arq_data_hwerr(acp); 2078 } 2079 2080 /*ARGSUSED*/ 2081 static void 2082 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2083 { 2084 struct aac_slot *slotp = acp->slotp; 2085 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2086 struct aac_Container_resp *resp; 2087 uint32_t status; 2088 2089 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2090 2091 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2092 2093 resp = (struct aac_Container_resp *)&slotp->fibp->data[0]; 2094 status = ddi_get32(acc, &resp->Status); 2095 if (status != 0) { 2096 AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit"); 2097 aac_set_arq_data_hwerr(acp); 2098 } 2099 } 2100 2101 /* 2102 * Access PCI space to see if the driver can support the card 2103 */ 2104 static int 2105 aac_check_card_type(struct aac_softstate *softs) 2106 { 2107 ddi_acc_handle_t pci_config_handle; 2108 int card_index; 2109 uint32_t pci_cmd; 2110 2111 /* Map pci configuration space */ 2112 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) != 2113 DDI_SUCCESS) { 2114 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space"); 2115 return (AACERR); 2116 } 2117 2118 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID); 2119 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID); 2120 softs->subvendid = pci_config_get16(pci_config_handle, 2121 PCI_CONF_SUBVENID); 2122 softs->subsysid = pci_config_get16(pci_config_handle, 2123 PCI_CONF_SUBSYSID); 2124 2125 card_index = 0; 2126 while (!CARD_IS_UNKNOWN(card_index)) { 2127 if ((aac_cards[card_index].vendor == softs->vendid) && 2128 (aac_cards[card_index].device == softs->devid) && 2129 (aac_cards[card_index].subvendor == softs->subvendid) && 2130 (aac_cards[card_index].subsys == softs->subsysid)) { 2131 break; 2132 } 2133 card_index++; 2134 } 2135 2136 softs->card = card_index; 2137 softs->hwif = aac_cards[card_index].hwif; 2138 2139 /* 2140 * Unknown aac card 2141 * do a generic match based on the VendorID and DeviceID to 2142 * support the new cards in the aac family 2143 */ 2144 if (CARD_IS_UNKNOWN(card_index)) { 2145 if (softs->vendid != 0x9005) { 2146 AACDB_PRINT(softs, CE_WARN, 2147 "Unknown vendor 0x%x", softs->vendid); 2148 goto error; 2149 } 2150 switch (softs->devid) { 2151 case 0x285: 2152 softs->hwif = AAC_HWIF_I960RX; 2153 break; 2154 case 0x286: 2155 
softs->hwif = AAC_HWIF_RKT; 2156 break; 2157 default: 2158 AACDB_PRINT(softs, CE_WARN, 2159 "Unknown device \"pci9005,%x\"", softs->devid); 2160 goto error; 2161 } 2162 } 2163 2164 /* Set hardware dependent interface */ 2165 switch (softs->hwif) { 2166 case AAC_HWIF_I960RX: 2167 softs->aac_if = aac_rx_interface; 2168 softs->map_size_min = AAC_MAP_SIZE_MIN_RX; 2169 break; 2170 case AAC_HWIF_RKT: 2171 softs->aac_if = aac_rkt_interface; 2172 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT; 2173 break; 2174 default: 2175 AACDB_PRINT(softs, CE_WARN, 2176 "Unknown hardware interface %d", softs->hwif); 2177 goto error; 2178 } 2179 2180 /* Set card names */ 2181 (void *)strncpy(softs->vendor_name, aac_cards[card_index].vid, 2182 AAC_VENDOR_LEN); 2183 (void *)strncpy(softs->product_name, aac_cards[card_index].desc, 2184 AAC_PRODUCT_LEN); 2185 2186 /* Set up quirks */ 2187 softs->flags = aac_cards[card_index].quirks; 2188 2189 /* Force the busmaster enable bit on */ 2190 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2191 if ((pci_cmd & PCI_COMM_ME) == 0) { 2192 pci_cmd |= PCI_COMM_ME; 2193 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd); 2194 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2195 if ((pci_cmd & PCI_COMM_ME) == 0) { 2196 cmn_err(CE_CONT, "?Cannot enable busmaster bit"); 2197 goto error; 2198 } 2199 } 2200 2201 /* Set memory base to map */ 2202 softs->pci_mem_base_paddr = 0xfffffff0UL & \ 2203 pci_config_get32(pci_config_handle, PCI_CONF_BASE0); 2204 2205 pci_config_teardown(&pci_config_handle); 2206 2207 return (AACOK); /* card type detected */ 2208 error: 2209 pci_config_teardown(&pci_config_handle); 2210 return (AACERR); /* no matched card found */ 2211 } 2212 2213 /* 2214 * Do the usual interrupt handler setup stuff. 2215 */ 2216 static int 2217 aac_register_intrs(struct aac_softstate *softs) 2218 { 2219 dev_info_t *dip; 2220 int intr_types; 2221 2222 ASSERT(softs->devinfo_p); 2223 dip = softs->devinfo_p; 2224 2225 /* Get the types of device interrupts supported */ 2226 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) { 2227 AACDB_PRINT(softs, CE_WARN, 2228 "ddi_intr_get_supported_types() failed"); 2229 return (AACERR); 2230 } 2231 AACDB_PRINT(softs, CE_NOTE, 2232 "ddi_intr_get_supported_types() ret: 0x%x", intr_types); 2233 2234 /* Query interrupts and alloc/init all needed structures */ 2235 if (intr_types & DDI_INTR_TYPE_MSI) { 2236 if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI) 2237 != DDI_SUCCESS) { 2238 AACDB_PRINT(softs, CE_WARN, 2239 "MSI interrupt query failed"); 2240 return (AACERR); 2241 } 2242 softs->intr_type = DDI_INTR_TYPE_MSI; 2243 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 2244 if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED) 2245 != DDI_SUCCESS) { 2246 AACDB_PRINT(softs, CE_WARN, 2247 "FIXED interrupt query failed"); 2248 return (AACERR); 2249 } 2250 softs->intr_type = DDI_INTR_TYPE_FIXED; 2251 } else { 2252 AACDB_PRINT(softs, CE_WARN, 2253 "Device supports neither FIXED nor MSI interrupts"); 2254 return (AACERR); 2255 } 2256 2257 /* Connect interrupt handlers */ 2258 if (aac_add_intrs(softs) != DDI_SUCCESS) { 2259 AACDB_PRINT(softs, CE_WARN, 2260 "Interrupt registration failed, intr type: %s", 2261 softs->intr_type == DDI_INTR_TYPE_MSI ?
"MSI" : "FIXED"); 2262 return (AACERR); 2263 } 2264 (void) aac_enable_intrs(softs); 2265 2266 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id, 2267 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) { 2268 AACDB_PRINT(softs, CE_WARN, 2269 "Can not setup soft interrupt handler!"); 2270 aac_remove_intrs(softs); 2271 return (AACERR); 2272 } 2273 2274 return (AACOK); 2275 } 2276 2277 static void 2278 aac_unregister_intrs(struct aac_softstate *softs) 2279 { 2280 aac_remove_intrs(softs); 2281 ddi_remove_softintr(softs->softint_id); 2282 } 2283 2284 /* 2285 * Check the firmware to determine the features to support and the FIB 2286 * parameters to use. 2287 */ 2288 static int 2289 aac_check_firmware(struct aac_softstate *softs) 2290 { 2291 uint32_t options; 2292 uint32_t atu_size; 2293 ddi_acc_handle_t pci_handle; 2294 uint8_t *data; 2295 uint32_t max_fibs; 2296 uint32_t max_fib_size; 2297 uint32_t sg_tablesize; 2298 uint32_t max_sectors; 2299 uint32_t status; 2300 2301 max_fibs = 0; 2302 max_sectors = 0; 2303 sg_tablesize = 0; 2304 2305 /* Get supported options */ 2306 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0, 2307 &status)) != AACOK) { 2308 if (status != SRB_STATUS_INVALID_REQUEST) { 2309 cmn_err(CE_CONT, 2310 "?Fatal error: request adapter info error"); 2311 return (AACERR); 2312 } 2313 options = 0; 2314 atu_size = 0; 2315 } else { 2316 options = AAC_MAILBOX_GET(softs, 1); 2317 atu_size = AAC_MAILBOX_GET(softs, 2); 2318 } 2319 2320 if (softs->state & AAC_STATE_RESET) { 2321 if ((softs->support_opt == options) && 2322 (softs->atu_size == atu_size)) 2323 return (AACOK); 2324 2325 cmn_err(CE_WARN, 2326 "?Fatal error: firmware changed, system needs reboot"); 2327 return (AACERR); 2328 } 2329 2330 /* 2331 * The following critical settings are initialized only once during 2332 * driver attachment. 2333 */ 2334 softs->support_opt = options; 2335 softs->atu_size = atu_size; 2336 2337 /* Process supported options */ 2338 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 2339 (softs->flags & AAC_FLAGS_NO4GB) == 0) { 2340 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window"); 2341 softs->flags |= AAC_FLAGS_4GB_WINDOW; 2342 } else { 2343 /* 2344 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space 2345 * only. IO is handled by the DMA engine which does not suffer 2346 * from the ATU window programming workarounds necessary for 2347 * CPU copy operations. 
2348 */ 2349 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull; 2350 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull; 2351 } 2352 2353 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) { 2354 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address"); 2355 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 2356 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull; 2357 softs->flags |= AAC_FLAGS_SG_64BIT; 2358 } 2359 2360 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) { 2361 softs->flags |= AAC_FLAGS_ARRAY_64BIT; 2362 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size"); 2363 } 2364 2365 if (options & AAC_SUPPORTED_NONDASD) { 2366 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0, 2367 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) { 2368 if (strcmp((char *)data, "yes") == 0) { 2369 AACDB_PRINT(softs, CE_NOTE, 2370 "!Enable Non-DASD access"); 2371 softs->flags |= AAC_FLAGS_NONDASD; 2372 } 2373 ddi_prop_free(data); 2374 } 2375 } 2376 2377 /* Read preferred settings */ 2378 max_fib_size = 0; 2379 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF, 2380 0, 0, 0, 0, NULL)) == AACOK) { 2381 options = AAC_MAILBOX_GET(softs, 1); 2382 max_fib_size = (options & 0xffff); 2383 max_sectors = (options >> 16) << 1; 2384 options = AAC_MAILBOX_GET(softs, 2); 2385 sg_tablesize = (options >> 16); 2386 options = AAC_MAILBOX_GET(softs, 3); 2387 max_fibs = (options & 0xffff); 2388 } 2389 2390 /* Enable new comm. and rawio at the same time */ 2391 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) && 2392 (max_fib_size != 0)) { 2393 /* read out and save PCI MBR */ 2394 if ((atu_size > softs->map_size) && 2395 (ddi_regs_map_setup(softs->devinfo_p, 1, 2396 (caddr_t *)&data, 0, atu_size, &softs->reg_attr, 2397 &pci_handle) == DDI_SUCCESS)) { 2398 ddi_regs_map_free(&softs->pci_mem_handle); 2399 softs->pci_mem_handle = pci_handle; 2400 softs->pci_mem_base_vaddr = data; 2401 softs->map_size = atu_size; 2402 } 2403 if (atu_size == softs->map_size) { 2404 softs->flags |= AAC_FLAGS_NEW_COMM; 2405 AACDB_PRINT(softs, CE_NOTE, 2406 "!Enable New Comm. interface"); 2407 } 2408 } 2409 2410 /* Set FIB parameters */ 2411 if (softs->flags & AAC_FLAGS_NEW_COMM) { 2412 softs->aac_max_fibs = max_fibs; 2413 softs->aac_max_fib_size = max_fib_size; 2414 softs->aac_max_sectors = max_sectors; 2415 softs->aac_sg_tablesize = sg_tablesize; 2416 2417 softs->flags |= AAC_FLAGS_RAW_IO; 2418 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO"); 2419 } else { 2420 softs->aac_max_fibs = 2421 (softs->flags & AAC_FLAGS_256FIBS) ? 
256 : 512; 2422 softs->aac_max_fib_size = AAC_FIB_SIZE; 2423 softs->aac_max_sectors = 128; /* 64K */ 2424 if (softs->flags & AAC_FLAGS_17SG) 2425 softs->aac_sg_tablesize = 17; 2426 else if (softs->flags & AAC_FLAGS_34SG) 2427 softs->aac_sg_tablesize = 34; 2428 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2429 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2430 sizeof (struct aac_blockwrite64) + 2431 sizeof (struct aac_sg_entry64)) / 2432 sizeof (struct aac_sg_entry64); 2433 else 2434 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2435 sizeof (struct aac_blockwrite) + 2436 sizeof (struct aac_sg_entry)) / 2437 sizeof (struct aac_sg_entry); 2438 } 2439 2440 if ((softs->flags & AAC_FLAGS_RAW_IO) && 2441 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) { 2442 softs->flags |= AAC_FLAGS_LBA_64BIT; 2443 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array"); 2444 } 2445 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize; 2446 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9; 2447 /* 2448 * 64K maximum segment size in scatter gather list is controlled by 2449 * the NEW_COMM bit in the adapter information. If not set, the card 2450 * can only accept a maximum of 64K. It is not recommended to permit 2451 * more than 128KB of total transfer size to the adapters because 2452 * performance is negatively impacted. 2453 * 2454 * For new comm, segment size equals max xfer size. For old comm, 2455 * we use 64K for both. 2456 */ 2457 softs->buf_dma_attr.dma_attr_count_max = 2458 softs->buf_dma_attr.dma_attr_maxxfer - 1; 2459 2460 /* Setup FIB operations */ 2461 if (softs->flags & AAC_FLAGS_RAW_IO) 2462 softs->aac_cmd_fib = aac_cmd_fib_rawio; 2463 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2464 softs->aac_cmd_fib = aac_cmd_fib_brw64; 2465 else 2466 softs->aac_cmd_fib = aac_cmd_fib_brw; 2467 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? 
\ 2468 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32; 2469 2470 /* 64-bit LBA needs descriptor format sense data */ 2471 softs->slen = sizeof (struct scsi_arq_status); 2472 if ((softs->flags & AAC_FLAGS_LBA_64BIT) && 2473 softs->slen < AAC_ARQ64_LENGTH) 2474 softs->slen = AAC_ARQ64_LENGTH; 2475 2476 AACDB_PRINT(softs, CE_NOTE, 2477 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d", 2478 softs->aac_max_fibs, softs->aac_max_fib_size, 2479 softs->aac_max_sectors, softs->aac_sg_tablesize); 2480 2481 return (AACOK); 2482 } 2483 2484 static void 2485 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0, 2486 struct FsaRev *fsarev1) 2487 { 2488 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 2489 2490 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash); 2491 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type); 2492 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor); 2493 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major); 2494 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber); 2495 } 2496 2497 /* 2498 * The following function comes from Adaptec: 2499 * 2500 * Query adapter information and supplement adapter information 2501 */ 2502 static int 2503 aac_get_adapter_info(struct aac_softstate *softs, 2504 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr) 2505 { 2506 struct aac_cmd *acp = &softs->sync_ac; 2507 ddi_acc_handle_t acc; 2508 struct aac_fib *fibp; 2509 struct aac_adapter_info *ainfp; 2510 struct aac_supplement_adapter_info *sinfp; 2511 int rval; 2512 2513 (void) aac_sync_fib_slot_bind(softs, acp); 2514 acc = acp->slotp->fib_acc_handle; 2515 fibp = acp->slotp->fibp; 2516 2517 ddi_put8(acc, &fibp->data[0], 0); 2518 if (aac_sync_fib(softs, RequestAdapterInfo, 2519 AAC_FIB_SIZEOF(struct aac_adapter_info)) != AACOK) { 2520 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed"); 2521 rval = AACERR; 2522 goto finish; 2523 } 2524 ainfp = (struct aac_adapter_info *)fibp->data; 2525 if (ainfr) { 2526 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2527 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase); 2528 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture); 2529 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant); 2530 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed); 2531 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem); 2532 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem); 2533 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem); 2534 aac_fsa_rev(softs, &ainfp->KernelRevision, 2535 &ainfr->KernelRevision); 2536 aac_fsa_rev(softs, &ainfp->MonitorRevision, 2537 &ainfr->MonitorRevision); 2538 aac_fsa_rev(softs, &ainfp->HardwareRevision, 2539 &ainfr->HardwareRevision); 2540 aac_fsa_rev(softs, &ainfp->BIOSRevision, 2541 &ainfr->BIOSRevision); 2542 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled); 2543 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask); 2544 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber); 2545 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform); 2546 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2547 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant); 2548 } 2549 if (sinfr) { 2550 if (!(softs->support_opt & 2551 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) { 2552 AACDB_PRINT(softs, CE_WARN, 2553 "SupplementAdapterInfo not supported"); 2554 rval = AACERR; 2555 goto finish; 2556 } 2557 ddi_put8(acc, &fibp->data[0], 0); 2558 if (aac_sync_fib(softs, RequestSupplementAdapterInfo, 2559 AAC_FIB_SIZEOF(struct aac_supplement_adapter_info)) 2560 != AACOK) { 2561 AACDB_PRINT(softs, CE_WARN, 2562 
"RequestSupplementAdapterInfo failed"); 2563 rval = AACERR; 2564 goto finish; 2565 } 2566 sinfp = (struct aac_supplement_adapter_info *)fibp->data; 2567 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1); 2568 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2); 2569 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize); 2570 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId); 2571 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts); 2572 AAC_GET_FIELD32(acc, sinfr, sinfp, Version); 2573 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits); 2574 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber); 2575 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3); 2576 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12); 2577 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts); 2578 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo, 2579 sizeof (struct vpd_info)); 2580 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision, 2581 &sinfr->FlashFirmwareRevision); 2582 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions); 2583 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision, 2584 &sinfr->FlashFirmwareBootRevision); 2585 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo, 2586 MFG_PCBA_SERIAL_NUMBER_WIDTH); 2587 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0], 2588 MFG_WWN_WIDTH); 2589 AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2); 2590 AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag); 2591 if (sinfr->ExpansionFlag == 1) { 2592 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3); 2593 AAC_GET_FIELD32(acc, sinfr, sinfp, 2594 SupportedPerformanceMode); 2595 AAC_REP_GET_FIELD32(acc, sinfr, sinfp, 2596 ReservedGrowth[0], 80); 2597 } 2598 } 2599 rval = AACOK; 2600 finish: 2601 aac_sync_fib_slot_release(softs, acp); 2602 return (rval); 2603 } 2604 2605 static int 2606 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max, 2607 uint32_t *tgt_max) 2608 { 2609 struct aac_cmd *acp = &softs->sync_ac; 2610 ddi_acc_handle_t acc; 2611 struct aac_fib *fibp; 2612 struct aac_ctcfg *c_cmd; 2613 struct aac_ctcfg_resp *c_resp; 2614 uint32_t scsi_method_id; 2615 struct aac_bus_info *cmd; 2616 struct aac_bus_info_response *resp; 2617 int rval; 2618 2619 (void) aac_sync_fib_slot_bind(softs, acp); 2620 acc = acp->slotp->fib_acc_handle; 2621 fibp = acp->slotp->fibp; 2622 2623 /* Detect MethodId */ 2624 c_cmd = (struct aac_ctcfg *)&fibp->data[0]; 2625 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig); 2626 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD); 2627 ddi_put32(acc, &c_cmd->param, 0); 2628 rval = aac_sync_fib(softs, ContainerCommand, 2629 AAC_FIB_SIZEOF(struct aac_ctcfg)); 2630 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0]; 2631 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) { 2632 AACDB_PRINT(softs, CE_WARN, 2633 "VM_ContainerConfig command fail"); 2634 rval = AACERR; 2635 goto finish; 2636 } 2637 scsi_method_id = ddi_get32(acc, &c_resp->param); 2638 2639 /* Detect phys. bus count and max. target id first */ 2640 cmd = (struct aac_bus_info *)&fibp->data[0]; 2641 ddi_put32(acc, &cmd->Command, VM_Ioctl); 2642 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */ 2643 ddi_put32(acc, &cmd->MethodId, scsi_method_id); 2644 ddi_put32(acc, &cmd->ObjectId, 0); 2645 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo); 2646 /* 2647 * For VM_Ioctl, the firmware uses the Header.Size filled from the 2648 * driver as the size to be returned. Therefore the driver has to use 2649 * sizeof (struct aac_bus_info_response) because it is greater than 2650 * sizeof (struct aac_bus_info). 
2651 */ 2652 rval = aac_sync_fib(softs, ContainerCommand, 2653 AAC_FIB_SIZEOF(struct aac_bus_info_response)); 2654 resp = (struct aac_bus_info_response *)cmd; 2655 2656 /* Check the GetBusInfo response status */ 2657 if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) { 2658 AACDB_PRINT(softs, CE_WARN, "GetBusInfo command failed"); 2659 rval = AACERR; 2660 goto finish; 2661 } 2662 *bus_max = ddi_get32(acc, &resp->BusCount); 2663 *tgt_max = ddi_get32(acc, &resp->TargetsPerBus); 2664 2665 finish: 2666 aac_sync_fib_slot_release(softs, acp); 2667 return (rval); 2668 } 2669 2670 /* 2671 * The following function comes from Adaptec: 2672 * 2673 * Routine to be called during initialization of communications with 2674 * the adapter to handle possible adapter configuration issues. When 2675 * the adapter first boots up, it examines attached drives, etc., and 2676 * potentially comes up with a new or revised configuration (relative to 2677 * what's stored in its NVRAM). Additionally it may discover problems 2678 * that make the current physical configuration unworkable (currently 2679 * applicable only to cluster configuration issues). 2680 * 2681 * If there are no configuration issues or the issues are considered 2682 * trivial by the adapter, it will set its configuration status to 2683 * "FSACT_CONTINUE" and execute the "commit configuration" action 2684 * automatically on its own. 2685 * 2686 * However, if there are non-trivial issues, the adapter will set its 2687 * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT" 2688 * and wait for some agent on the host to issue the "\ContainerCommand 2689 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the 2690 * adapter to commit the new/updated configuration and enable 2691 * un-inhibited operation. The host agent should first issue the 2692 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB 2693 * command to obtain information about config issues detected by 2694 * the adapter. 2695 * 2696 * Normally the adapter's PC BIOS will execute on the host following 2697 * adapter poweron and reset and will be responsible for querying the 2698 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG 2699 * command if appropriate. 2700 * 2701 * However, with the introduction of IOP reset support, the adapter may 2702 * boot up without the benefit of the adapter's PC BIOS host agent. 2703 * This routine is intended to take care of these issues in situations 2704 * where BIOS doesn't execute following adapter poweron or reset. The 2705 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so 2706 * there is no harm in doing this when it's already been done.
2707 */ 2708 static int 2709 aac_handle_adapter_config_issues(struct aac_softstate *softs) 2710 { 2711 struct aac_cmd *acp = &softs->sync_ac; 2712 ddi_acc_handle_t acc; 2713 struct aac_fib *fibp; 2714 struct aac_Container *cmd; 2715 struct aac_Container_resp *resp; 2716 struct aac_cf_status_header *cfg_sts_hdr; 2717 uint32_t resp_status; 2718 uint32_t ct_status; 2719 uint32_t cfg_stat_action; 2720 int rval; 2721 2722 (void) aac_sync_fib_slot_bind(softs, acp); 2723 acc = acp->slotp->fib_acc_handle; 2724 fibp = acp->slotp->fibp; 2725 2726 /* Get adapter config status */ 2727 cmd = (struct aac_Container *)&fibp->data[0]; 2728 2729 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2730 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2731 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS); 2732 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE], 2733 sizeof (struct aac_cf_status_header)); 2734 rval = aac_sync_fib(softs, ContainerCommand, 2735 AAC_FIB_SIZEOF(struct aac_Container)); 2736 resp = (struct aac_Container_resp *)cmd; 2737 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data; 2738 2739 resp_status = ddi_get32(acc, &resp->Status); 2740 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2741 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) { 2742 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action); 2743 2744 /* Commit configuration if it's reasonable to do so. */ 2745 if (cfg_stat_action <= CFACT_PAUSE) { 2746 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2747 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2748 ddi_put32(acc, &cmd->CTCommand.command, 2749 CT_COMMIT_CONFIG); 2750 rval = aac_sync_fib(softs, ContainerCommand, 2751 AAC_FIB_SIZEOF(struct aac_Container)); 2752 2753 resp_status = ddi_get32(acc, &resp->Status); 2754 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2755 if ((rval == AACOK) && (resp_status == 0) && 2756 (ct_status == CT_OK)) 2757 /* Successful completion */ 2758 rval = AACMPE_OK; 2759 else 2760 /* Auto-commit aborted due to error(s). */ 2761 rval = AACMPE_COMMIT_CONFIG; 2762 } else { 2763 /* 2764 * Auto-commit aborted due to adapter indicating 2765 * configuration issue(s) too dangerous to auto-commit. 2766 */ 2767 rval = AACMPE_CONFIG_STATUS; 2768 } 2769 } else { 2770 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted"); 2771 rval = AACMPE_CONFIG_STATUS; 2772 } 2773 2774 aac_sync_fib_slot_release(softs, acp); 2775 return (rval); 2776 } 2777 2778 /* 2779 * Hardware initialization and resource allocation 2780 */ 2781 static int 2782 aac_common_attach(struct aac_softstate *softs) 2783 { 2784 uint32_t status; 2785 int i; 2786 struct aac_supplement_adapter_info sinf; 2787 2788 DBCALLED(softs, 1); 2789 2790 /* 2791 * Do a little check here to make sure there aren't any outstanding 2792 * FIBs in the message queue. At this point there should not be and 2793 * if there are they are probably left over from another instance of 2794 * the driver like when the system crashes and the crash dump driver 2795 * gets loaded. 2796 */ 2797 while (AAC_OUTB_GET(softs) != 0xfffffffful) 2798 ; 2799 2800 /* 2801 * Wait the card to complete booting up before do anything that 2802 * attempts to communicate with it. 
2803 */ 2804 status = AAC_FWSTATUS_GET(softs); 2805 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC) 2806 goto error; 2807 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */ 2808 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i); 2809 if (i == 0) { 2810 cmn_err(CE_CONT, "?Fatal error: controller not ready"); 2811 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2812 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2813 goto error; 2814 } 2815 2816 /* Read and set card supported options and settings */ 2817 if (aac_check_firmware(softs) == AACERR) { 2818 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2819 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2820 goto error; 2821 } 2822 2823 /* Add interrupt handlers */ 2824 if (aac_register_intrs(softs) == AACERR) { 2825 cmn_err(CE_CONT, 2826 "?Fatal error: interrupts register failed"); 2827 goto error; 2828 } 2829 2830 /* Setup communication space with the card */ 2831 if (softs->comm_space_dma_handle == NULL) { 2832 if (aac_alloc_comm_space(softs) != AACOK) 2833 goto error; 2834 } 2835 if (aac_setup_comm_space(softs) != AACOK) { 2836 cmn_err(CE_CONT, "?Setup communication space failed"); 2837 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2838 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2839 goto error; 2840 } 2841 2842 #ifdef DEBUG 2843 if (aac_get_fw_debug_buffer(softs) != AACOK) 2844 cmn_err(CE_CONT, "?firmware UART trace not supported"); 2845 #endif 2846 2847 /* Allocate slots */ 2848 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) { 2849 cmn_err(CE_CONT, "?Fatal error: slots allocate failed"); 2850 goto error; 2851 } 2852 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots); 2853 2854 /* Allocate FIBs */ 2855 if (softs->total_fibs < softs->total_slots) { 2856 aac_alloc_fibs(softs); 2857 if (softs->total_fibs == 0) 2858 goto error; 2859 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated", 2860 softs->total_fibs); 2861 } 2862 2863 AAC_STATUS_CLR(softs, ~0); /* Clear out all interrupts */ 2864 AAC_ENABLE_INTR(softs); /* Enable the interrupts we can handle */ 2865 2866 if (aac_get_adapter_info(softs, NULL, &sinf) == AACOK) { 2867 softs->feature_bits = sinf.FeatureBits; 2868 softs->support_opt2 = sinf.SupportedOptions2; 2869 2870 /* Get adapter names */ 2871 if (CARD_IS_UNKNOWN(softs->card)) { 2872 char *p, *p0, *p1; 2873 2874 /* 2875 * Now find the controller name in supp_adapter_info-> 2876 * AdapterTypeText. Use the first word as the vendor 2877 * and the other words as the product name. 
2878 */ 2879 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = " 2880 "\"%s\"", sinf.AdapterTypeText); 2881 p = sinf.AdapterTypeText; 2882 p0 = p1 = NULL; 2883 /* Skip heading spaces */ 2884 while (*p && (*p == ' ' || *p == '\t')) 2885 p++; 2886 p0 = p; 2887 while (*p && (*p != ' ' && *p != '\t')) 2888 p++; 2889 /* Remove middle spaces */ 2890 while (*p && (*p == ' ' || *p == '\t')) 2891 *p++ = 0; 2892 p1 = p; 2893 /* Remove trailing spaces */ 2894 p = p1 + strlen(p1) - 1; 2895 while (p > p1 && (*p == ' ' || *p == '\t')) 2896 *p-- = 0; 2897 if (*p0 && *p1) { 2898 (void *)strncpy(softs->vendor_name, p0, 2899 AAC_VENDOR_LEN); 2900 (void *)strncpy(softs->product_name, p1, 2901 AAC_PRODUCT_LEN); 2902 } else { 2903 cmn_err(CE_WARN, 2904 "?adapter name mis-formatted\n"); 2905 if (*p0) 2906 (void *)strncpy(softs->product_name, 2907 p0, AAC_PRODUCT_LEN); 2908 } 2909 } 2910 } else { 2911 cmn_err(CE_CONT, "?Query adapter information failed"); 2912 } 2913 2914 2915 cmn_err(CE_NOTE, 2916 "!aac driver %d.%02d.%02d-%d, found card: " \ 2917 "%s %s(pci0x%x.%x.%x.%x) at 0x%x", 2918 AAC_DRIVER_MAJOR_VERSION, 2919 AAC_DRIVER_MINOR_VERSION, 2920 AAC_DRIVER_BUGFIX_LEVEL, 2921 AAC_DRIVER_BUILD, 2922 softs->vendor_name, softs->product_name, 2923 softs->vendid, softs->devid, softs->subvendid, softs->subsysid, 2924 softs->pci_mem_base_paddr); 2925 2926 /* Perform acceptance of adapter-detected config changes if possible */ 2927 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) { 2928 cmn_err(CE_CONT, "?Handle adapter config issues failed"); 2929 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2930 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2931 goto error; 2932 } 2933 2934 /* Setup containers (logical devices) */ 2935 if (aac_probe_containers(softs) != AACOK) { 2936 cmn_err(CE_CONT, "?Fatal error: get container info error"); 2937 goto error; 2938 } 2939 2940 /* Check for JBOD support. Default disable */ 2941 char *data; 2942 if (softs->feature_bits & AAC_FEATURE_SUPPORTED_JBOD) { 2943 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 2944 0, "jbod-enable", &data) == DDI_SUCCESS)) { 2945 if (strcmp(data, "yes") == 0) { 2946 AACDB_PRINT(softs, CE_NOTE, 2947 "Enable JBOD access"); 2948 softs->flags |= AAC_FLAGS_JBOD; 2949 } 2950 ddi_prop_free(data); 2951 } 2952 } 2953 2954 /* Setup phys. 
devices */ 2955 if (softs->flags & (AAC_FLAGS_NONDASD | AAC_FLAGS_JBOD)) { 2956 uint32_t bus_max, tgt_max; 2957 uint32_t bus, tgt; 2958 int index; 2959 2960 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) { 2961 cmn_err(CE_CONT, "?Fatal error: get bus info error"); 2962 goto error; 2963 } 2964 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d", 2965 bus_max, tgt_max); 2966 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) { 2967 if (softs->state & AAC_STATE_RESET) { 2968 cmn_err(CE_WARN, 2969 "?Fatal error: bus map changed"); 2970 goto error; 2971 } 2972 softs->bus_max = bus_max; 2973 softs->tgt_max = tgt_max; 2974 if (softs->nondasds) { 2975 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2976 sizeof (struct aac_nondasd)); 2977 } 2978 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \ 2979 sizeof (struct aac_nondasd), KM_SLEEP); 2980 2981 index = 0; 2982 for (bus = 0; bus < softs->bus_max; bus++) { 2983 for (tgt = 0; tgt < softs->tgt_max; tgt++) { 2984 struct aac_nondasd *dvp = 2985 &softs->nondasds[index++]; 2986 dvp->dev.type = AAC_DEV_PD; 2987 dvp->bus = bus; 2988 dvp->tid = tgt; 2989 } 2990 } 2991 } 2992 } 2993 2994 /* Check dma & acc handles allocated in attach */ 2995 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) { 2996 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2997 goto error; 2998 } 2999 3000 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 3001 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 3002 goto error; 3003 } 3004 3005 for (i = 0; i < softs->total_slots; i++) { 3006 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) != 3007 DDI_SUCCESS) { 3008 ddi_fm_service_impact(softs->devinfo_p, 3009 DDI_SERVICE_LOST); 3010 goto error; 3011 } 3012 } 3013 3014 return (AACOK); 3015 error: 3016 if (softs->state & AAC_STATE_RESET) 3017 return (AACERR); 3018 if (softs->nondasds) { 3019 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 3020 sizeof (struct aac_nondasd)); 3021 softs->nondasds = NULL; 3022 } 3023 if (softs->total_fibs > 0) 3024 aac_destroy_fibs(softs); 3025 if (softs->total_slots > 0) 3026 aac_destroy_slots(softs); 3027 if (softs->comm_space_dma_handle) 3028 aac_free_comm_space(softs); 3029 return (AACERR); 3030 } 3031 3032 /* 3033 * Hardware shutdown and resource release 3034 */ 3035 static void 3036 aac_common_detach(struct aac_softstate *softs) 3037 { 3038 DBCALLED(softs, 1); 3039 3040 aac_unregister_intrs(softs); 3041 3042 mutex_enter(&softs->io_lock); 3043 (void) aac_shutdown(softs); 3044 3045 if (softs->nondasds) { 3046 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 3047 sizeof (struct aac_nondasd)); 3048 softs->nondasds = NULL; 3049 } 3050 aac_destroy_fibs(softs); 3051 aac_destroy_slots(softs); 3052 aac_free_comm_space(softs); 3053 mutex_exit(&softs->io_lock); 3054 } 3055 3056 /* 3057 * Send a synchronous command to the controller and wait for a result. 3058 * Indicate if the controller completed the command with an error status. 
3059 */ 3060 int 3061 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd, 3062 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, 3063 uint32_t *statusp) 3064 { 3065 int timeout; 3066 uint32_t status; 3067 3068 if (statusp != NULL) 3069 *statusp = SRB_STATUS_SUCCESS; 3070 3071 /* Fill in mailbox */ 3072 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3); 3073 3074 /* Ensure the sync command doorbell flag is cleared */ 3075 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 3076 3077 /* Then set it to signal the adapter */ 3078 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND); 3079 3080 /* Spin waiting for the command to complete */ 3081 timeout = AAC_IMMEDIATE_TIMEOUT * 1000; 3082 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout); 3083 if (!timeout) { 3084 AACDB_PRINT(softs, CE_WARN, 3085 "Sync command timed out after %d seconds (0x%x)!", 3086 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs)); 3087 return (AACERR); 3088 } 3089 3090 /* Clear the completion flag */ 3091 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 3092 3093 /* Get the command status */ 3094 status = AAC_MAILBOX_GET(softs, 0); 3095 if (statusp != NULL) 3096 *statusp = status; 3097 if (status != SRB_STATUS_SUCCESS) { 3098 AACDB_PRINT(softs, CE_WARN, 3099 "Sync command fail: status = 0x%x", status); 3100 return (AACERR); 3101 } 3102 3103 return (AACOK); 3104 } 3105 3106 /* 3107 * Send a synchronous FIB to the adapter and wait for its completion 3108 */ 3109 static int 3110 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize) 3111 { 3112 struct aac_cmd *acp = &softs->sync_ac; 3113 3114 acp->flags = AAC_CMD_SYNC | AAC_CMD_IN_SYNC_SLOT; 3115 if (softs->state & AAC_STATE_INTR) 3116 acp->flags |= AAC_CMD_NO_CB; 3117 else 3118 acp->flags |= AAC_CMD_NO_INTR; 3119 3120 acp->ac_comp = aac_sync_complete; 3121 acp->timeout = AAC_SYNC_TIMEOUT; 3122 acp->fib_size = fibsize; 3123 3124 /* 3125 * Only need to setup sync fib header, caller should have init 3126 * fib data 3127 */ 3128 aac_cmd_fib_header(softs, acp, cmd); 3129 3130 (void) ddi_dma_sync(acp->slotp->fib_dma_handle, 0, fibsize, 3131 DDI_DMA_SYNC_FORDEV); 3132 3133 aac_start_io(softs, acp); 3134 3135 if (softs->state & AAC_STATE_INTR) 3136 return (aac_do_sync_io(softs, acp)); 3137 else 3138 return (aac_do_poll_io(softs, acp)); 3139 } 3140 3141 static void 3142 aac_cmd_initq(struct aac_cmd_queue *q) 3143 { 3144 q->q_head = NULL; 3145 q->q_tail = (struct aac_cmd *)&q->q_head; 3146 } 3147 3148 /* 3149 * Remove a cmd from the head of q 3150 */ 3151 static struct aac_cmd * 3152 aac_cmd_dequeue(struct aac_cmd_queue *q) 3153 { 3154 struct aac_cmd *acp; 3155 3156 _NOTE(ASSUMING_PROTECTED(*q)) 3157 3158 if ((acp = q->q_head) != NULL) { 3159 if ((q->q_head = acp->next) != NULL) 3160 acp->next = NULL; 3161 else 3162 q->q_tail = (struct aac_cmd *)&q->q_head; 3163 acp->prev = NULL; 3164 } 3165 return (acp); 3166 } 3167 3168 /* 3169 * Add a cmd to the tail of q 3170 */ 3171 static void 3172 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp) 3173 { 3174 ASSERT(acp->next == NULL); 3175 acp->prev = q->q_tail; 3176 q->q_tail->next = acp; 3177 q->q_tail = acp; 3178 } 3179 3180 /* 3181 * Remove the cmd ac from q 3182 */ 3183 static void 3184 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp) 3185 { 3186 if (acp->prev) { 3187 if ((acp->prev->next = acp->next) != NULL) { 3188 acp->next->prev = acp->prev; 3189 acp->next = NULL; 3190 } else { 3191 q->q_tail = acp->prev; 3192 } 3193 acp->prev = NULL; 3194 } 3195 /* ac is not in the queue */ 3196 } 
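/*
 * A note on the aac_cmd queue primitives above: aac_cmd_initq() points
 * q_tail at q_head itself (cast to struct aac_cmd *), so an empty queue
 * needs no special case in aac_cmd_enqueue() -- "q->q_tail->next = acp"
 * fills in q_head on the first insertion (this relies on the next
 * pointer being the first member of struct aac_cmd, which is assumed
 * here rather than shown).  aac_cmd_dequeue() removes from the head and
 * restores the sentinel q_tail when the queue drains, and
 * aac_cmd_delete() unlinks an arbitrary queued entry via its prev
 * pointer, doing nothing when prev is NULL (the entry is not queued).
 * None of these primitives lock; callers are expected to hold the
 * appropriate lock, e.g. q_comp_mutex around the completion queue as in
 * aac_soft_callback().  A rough usage sketch:
 *
 *	struct aac_cmd_queue q;
 *	aac_cmd_initq(&q);		empty: q_head == NULL
 *	aac_cmd_enqueue(&q, acp);	q_head == q_tail == acp
 *	acp = aac_cmd_dequeue(&q);	queue is empty again
 */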
3197 3198 /* 3199 * Atomically insert an entry into the nominated queue, returns 0 on success or 3200 * AACERR if the queue is full. 3201 * 3202 * Note: it would be more efficient to defer notifying the controller in 3203 * the case where we may be inserting several entries in rapid succession, 3204 * but implementing this usefully may be difficult (it would involve a 3205 * separate queue/notify interface). 3206 */ 3207 static int 3208 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr, 3209 uint32_t fib_size) 3210 { 3211 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3212 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3213 uint32_t pi, ci; 3214 3215 DBCALLED(softs, 2); 3216 3217 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q); 3218 3219 /* Get the producer/consumer indices */ 3220 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3221 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3222 DDI_DMA_SYNC_FORCPU); 3223 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3224 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3225 return (AACERR); 3226 } 3227 3228 pi = ddi_get32(acc, 3229 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3230 ci = ddi_get32(acc, 3231 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3232 3233 /* 3234 * Wrap the queue first before we check the queue to see 3235 * if it is full 3236 */ 3237 if (pi >= aac_qinfo[queue].size) 3238 pi = 0; 3239 3240 /* XXX queue full */ 3241 if ((pi + 1) == ci) 3242 return (AACERR); 3243 3244 /* Fill in queue entry */ 3245 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size); 3246 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr); 3247 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3248 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3249 DDI_DMA_SYNC_FORDEV); 3250 3251 /* Update producer index */ 3252 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX], 3253 pi + 1); 3254 (void) ddi_dma_sync(dma, 3255 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \ 3256 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3257 DDI_DMA_SYNC_FORDEV); 3258 3259 if (aac_qinfo[queue].notify != 0) 3260 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3261 return (AACOK); 3262 } 3263 3264 /* 3265 * Atomically remove one entry from the nominated queue, returns 0 on 3266 * success or AACERR if the queue is empty. 
3267 */ 3268 static int 3269 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp) 3270 { 3271 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3272 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3273 uint32_t pi, ci; 3274 int unfull = 0; 3275 3276 DBCALLED(softs, 2); 3277 3278 ASSERT(idxp); 3279 3280 /* Get the producer/consumer indices */ 3281 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3282 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3283 DDI_DMA_SYNC_FORCPU); 3284 pi = ddi_get32(acc, 3285 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3286 ci = ddi_get32(acc, 3287 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3288 3289 /* Check for queue empty */ 3290 if (ci == pi) 3291 return (AACERR); 3292 3293 if (pi >= aac_qinfo[queue].size) 3294 pi = 0; 3295 3296 /* Check for queue full */ 3297 if (ci == pi + 1) 3298 unfull = 1; 3299 3300 /* 3301 * The controller does not wrap the queue, 3302 * so we have to do it by ourselves 3303 */ 3304 if (ci >= aac_qinfo[queue].size) 3305 ci = 0; 3306 3307 /* Fetch the entry */ 3308 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3309 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3310 DDI_DMA_SYNC_FORCPU); 3311 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3312 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3313 return (AACERR); 3314 } 3315 3316 switch (queue) { 3317 case AAC_HOST_NORM_RESP_Q: 3318 case AAC_HOST_HIGH_RESP_Q: 3319 *idxp = ddi_get32(acc, 3320 &(softs->qentries[queue] + ci)->aq_fib_addr); 3321 break; 3322 3323 case AAC_HOST_NORM_CMD_Q: 3324 case AAC_HOST_HIGH_CMD_Q: 3325 *idxp = ddi_get32(acc, 3326 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE; 3327 break; 3328 3329 default: 3330 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()"); 3331 return (AACERR); 3332 } 3333 3334 /* Update consumer index */ 3335 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX], 3336 ci + 1); 3337 (void) ddi_dma_sync(dma, 3338 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \ 3339 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3340 DDI_DMA_SYNC_FORDEV); 3341 3342 if (unfull && aac_qinfo[queue].notify != 0) 3343 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3344 return (AACOK); 3345 } 3346 3347 static struct aac_mntinforesp * 3348 aac_get_mntinfo(struct aac_softstate *softs, int cid) 3349 { 3350 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3351 struct aac_fib *fibp = softs->sync_ac.slotp->fibp; 3352 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0]; 3353 struct aac_mntinforesp *mir; 3354 3355 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */ 3356 (softs->flags & AAC_FLAGS_LBA_64BIT) ? 
3357 VM_NameServe64 : VM_NameServe); 3358 ddi_put32(acc, &mi->MntType, FT_FILESYS); 3359 ddi_put32(acc, &mi->MntCount, cid); 3360 3361 if (aac_sync_fib(softs, ContainerCommand, 3362 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) { 3363 AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid); 3364 return (NULL); 3365 } 3366 3367 mir = (struct aac_mntinforesp *)&fibp->data[0]; 3368 if (ddi_get32(acc, &mir->Status) == ST_OK) 3369 return (mir); 3370 return (NULL); 3371 } 3372 3373 static int 3374 aac_get_container_count(struct aac_softstate *softs, int *count) 3375 { 3376 ddi_acc_handle_t acc; 3377 struct aac_mntinforesp *mir; 3378 int rval; 3379 3380 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 3381 acc = softs->sync_ac.slotp->fib_acc_handle; 3382 3383 if ((mir = aac_get_mntinfo(softs, 0)) == NULL) { 3384 rval = AACERR; 3385 goto finish; 3386 } 3387 *count = ddi_get32(acc, &mir->MntRespCount); 3388 if (*count > AAC_MAX_LD) { 3389 AACDB_PRINT(softs, CE_CONT, 3390 "container count(%d) > AAC_MAX_LD", *count); 3391 rval = AACERR; 3392 goto finish; 3393 } 3394 rval = AACOK; 3395 3396 finish: 3397 aac_sync_fib_slot_release(softs, &softs->sync_ac); 3398 return (rval); 3399 } 3400 3401 static int 3402 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid) 3403 { 3404 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3405 struct aac_Container *ct = (struct aac_Container *) \ 3406 &softs->sync_ac.slotp->fibp->data[0]; 3407 3408 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE); 3409 ddi_put32(acc, &ct->Command, VM_ContainerConfig); 3410 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID); 3411 ddi_put32(acc, &ct->CTCommand.param[0], cid); 3412 3413 if (aac_sync_fib(softs, ContainerCommand, 3414 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR) 3415 return (AACERR); 3416 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK) 3417 return (AACERR); 3418 3419 *uid = ddi_get32(acc, &ct->CTCommand.param[1]); 3420 return (AACOK); 3421 } 3422 3423 /* 3424 * Request information of the container cid 3425 */ 3426 static struct aac_mntinforesp * 3427 aac_get_container_info(struct aac_softstate *softs, int cid) 3428 { 3429 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3430 struct aac_mntinforesp *mir; 3431 int rval_uid; 3432 uint32_t uid; 3433 3434 /* Get container UID first so that it will not overwrite mntinfo */ 3435 rval_uid = aac_get_container_uid(softs, cid, &uid); 3436 3437 /* Get container basic info */ 3438 if ((mir = aac_get_mntinfo(softs, cid)) == NULL) { 3439 AACDB_PRINT(softs, CE_CONT, 3440 "query container %d info failed", cid); 3441 return (NULL); 3442 } 3443 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) 3444 return (mir); 3445 if (rval_uid != AACOK) { 3446 AACDB_PRINT(softs, CE_CONT, 3447 "query container %d uid failed", cid); 3448 return (NULL); 3449 } 3450 3451 ddi_put32(acc, &mir->Status, uid); 3452 return (mir); 3453 } 3454 3455 static enum aac_cfg_event 3456 aac_probe_container(struct aac_softstate *softs, uint32_t cid) 3457 { 3458 enum aac_cfg_event event = AAC_CFG_NULL_NOEXIST; 3459 struct aac_container *dvp = &softs->containers[cid]; 3460 struct aac_mntinforesp *mir; 3461 ddi_acc_handle_t acc; 3462 3463 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 3464 acc = softs->sync_ac.slotp->fib_acc_handle; 3465 3466 /* Get container basic info */ 3467 if ((mir = aac_get_container_info(softs, cid)) == NULL) { 3468 /* AAC_CFG_NULL_NOEXIST */ 3469 goto finish; 3470 } 3471 3472 if (ddi_get32(acc, &mir->MntObj.VolType) == 
CT_NONE) { 3473 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3474 AACDB_PRINT(softs, CE_NOTE, 3475 ">>> Container %d deleted", cid); 3476 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3477 event = AAC_CFG_DELETE; 3478 } 3479 /* AAC_CFG_NULL_NOEXIST */ 3480 } else { 3481 uint64_t size; 3482 uint32_t uid; 3483 3484 event = AAC_CFG_NULL_EXIST; 3485 3486 size = AAC_MIR_SIZE(softs, acc, mir); 3487 uid = ddi_get32(acc, &mir->Status); 3488 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3489 if (dvp->uid != uid) { 3490 AACDB_PRINT(softs, CE_WARN, 3491 ">>> Container %u uid changed to %d", 3492 cid, uid); 3493 dvp->uid = uid; 3494 event = AAC_CFG_CHANGE; 3495 } 3496 if (dvp->size != size) { 3497 AACDB_PRINT(softs, CE_NOTE, 3498 ">>> Container %u size changed to %"PRIu64, 3499 cid, size); 3500 dvp->size = size; 3501 event = AAC_CFG_CHANGE; 3502 } 3503 } else { /* Init new container */ 3504 AACDB_PRINT(softs, CE_NOTE, 3505 ">>> Container %d added: " \ 3506 "size=0x%x.%08x, type=%d, name=%s", 3507 cid, 3508 ddi_get32(acc, &mir->MntObj.CapacityHigh), 3509 ddi_get32(acc, &mir->MntObj.Capacity), 3510 ddi_get32(acc, &mir->MntObj.VolType), 3511 mir->MntObj.FileSystemName); 3512 dvp->dev.flags |= AAC_DFLAG_VALID; 3513 dvp->dev.type = AAC_DEV_LD; 3514 3515 dvp->cid = cid; 3516 dvp->uid = uid; 3517 dvp->size = size; 3518 dvp->locked = 0; 3519 dvp->deleted = 0; 3520 3521 event = AAC_CFG_ADD; 3522 } 3523 } 3524 3525 finish: 3526 aac_sync_fib_slot_release(softs, &softs->sync_ac); 3527 return (event); 3528 } 3529 3530 /* 3531 * Do a rescan of all the possible containers and update the container list 3532 * with newly online/offline containers, and prepare for autoconfiguration. 3533 */ 3534 static int 3535 aac_probe_containers(struct aac_softstate *softs) 3536 { 3537 int i, count, total; 3538 3539 /* Loop over possible containers */ 3540 count = softs->container_count; 3541 if (aac_get_container_count(softs, &count) == AACERR) 3542 return (AACERR); 3543 3544 for (i = total = 0; i < count; i++) { 3545 enum aac_cfg_event event = aac_probe_container(softs, i); 3546 if ((event != AAC_CFG_NULL_NOEXIST) && 3547 (event != AAC_CFG_NULL_EXIST)) { 3548 (void) aac_handle_dr(softs, i, -1, event); 3549 total++; 3550 } 3551 } 3552 3553 if (count < softs->container_count) { 3554 struct aac_container *dvp; 3555 3556 for (dvp = &softs->containers[count]; 3557 dvp < &softs->containers[softs->container_count]; dvp++) { 3558 if (!AAC_DEV_IS_VALID(&dvp->dev)) 3559 continue; 3560 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted", 3561 dvp->cid); 3562 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3563 (void) aac_handle_dr(softs, dvp->cid, -1, 3564 AAC_CFG_DELETE); 3565 } 3566 } 3567 3568 softs->container_count = count; 3569 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total); 3570 return (AACOK); 3571 } 3572 3573 static int 3574 aac_probe_jbod(struct aac_softstate *softs, int tgt, int event) 3575 { 3576 ASSERT(AAC_MAX_LD <= tgt); 3577 ASSERT(tgt < AAC_MAX_DEV(softs)); 3578 struct aac_device *dvp; 3579 dvp = AAC_DEV(softs, tgt); 3580 3581 switch (event) { 3582 case AAC_CFG_ADD: 3583 AACDB_PRINT(softs, CE_NOTE, 3584 ">>> Jbod %d added", tgt - AAC_MAX_LD); 3585 dvp->flags |= AAC_DFLAG_VALID; 3586 dvp->type = AAC_DEV_PD; 3587 break; 3588 case AAC_CFG_DELETE: 3589 AACDB_PRINT(softs, CE_NOTE, 3590 ">>> Jbod %d deleted", tgt - AAC_MAX_LD); 3591 dvp->flags &= ~AAC_DFLAG_VALID; 3592 break; 3593 default: 3594 return (AACERR); 3595 } 3596 (void) aac_handle_dr(softs, tgt, 0, event); 3597 return (AACOK); 3598 } 3599 3600 static int 3601 aac_alloc_comm_space(struct 
aac_softstate *softs) 3602 { 3603 size_t rlen; 3604 ddi_dma_cookie_t cookie; 3605 uint_t cookien; 3606 3607 /* Allocate DMA for comm. space */ 3608 if (ddi_dma_alloc_handle( 3609 softs->devinfo_p, 3610 &softs->addr_dma_attr, 3611 DDI_DMA_SLEEP, 3612 NULL, 3613 &softs->comm_space_dma_handle) != DDI_SUCCESS) { 3614 AACDB_PRINT(softs, CE_WARN, 3615 "Cannot alloc dma handle for communication area"); 3616 goto error; 3617 } 3618 if (ddi_dma_mem_alloc( 3619 softs->comm_space_dma_handle, 3620 sizeof (struct aac_comm_space), 3621 &softs->acc_attr, 3622 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3623 DDI_DMA_SLEEP, 3624 NULL, 3625 (caddr_t *)&softs->comm_space, 3626 &rlen, 3627 &softs->comm_space_acc_handle) != DDI_SUCCESS) { 3628 AACDB_PRINT(softs, CE_WARN, 3629 "Cannot alloc mem for communication area"); 3630 goto error; 3631 } 3632 if (ddi_dma_addr_bind_handle( 3633 softs->comm_space_dma_handle, 3634 NULL, 3635 (caddr_t)softs->comm_space, 3636 sizeof (struct aac_comm_space), 3637 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3638 DDI_DMA_SLEEP, 3639 NULL, 3640 &cookie, 3641 &cookien) != DDI_DMA_MAPPED) { 3642 AACDB_PRINT(softs, CE_WARN, 3643 "DMA bind failed for communication area"); 3644 goto error; 3645 } 3646 softs->comm_space_phyaddr = cookie.dmac_address; 3647 3648 return (AACOK); 3649 error: 3650 if (softs->comm_space_acc_handle) { 3651 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3652 softs->comm_space_acc_handle = NULL; 3653 } 3654 if (softs->comm_space_dma_handle) { 3655 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3656 softs->comm_space_dma_handle = NULL; 3657 } 3658 return (AACERR); 3659 } 3660 3661 static void 3662 aac_free_comm_space(struct aac_softstate *softs) 3663 { 3664 3665 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle); 3666 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3667 softs->comm_space_acc_handle = NULL; 3668 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3669 softs->comm_space_dma_handle = NULL; 3670 softs->comm_space_phyaddr = 0; 3671 } 3672 3673 /* 3674 * Initialize the data structures that are required for the communication 3675 * interface to operate 3676 */ 3677 static int 3678 aac_setup_comm_space(struct aac_softstate *softs) 3679 { 3680 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3681 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3682 uint32_t comm_space_phyaddr; 3683 struct aac_adapter_init *initp; 3684 int qoffset; 3685 3686 comm_space_phyaddr = softs->comm_space_phyaddr; 3687 3688 /* Setup adapter init struct */ 3689 initp = &softs->comm_space->init_data; 3690 bzero(initp, sizeof (struct aac_adapter_init)); 3691 3692 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION); 3693 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time()); 3694 3695 /* Setup new/old comm. 
specific data */ 3696 if (softs->flags & AAC_FLAGS_RAW_IO) { 3697 uint32_t init_flags = 0; 3698 3699 if (softs->flags & AAC_FLAGS_NEW_COMM) 3700 init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED; 3701 /* AAC_SUPPORTED_POWER_MANAGEMENT */ 3702 init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM; 3703 init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME; 3704 3705 ddi_put32(acc, &initp->InitStructRevision, 3706 AAC_INIT_STRUCT_REVISION_4); 3707 ddi_put32(acc, &initp->InitFlags, init_flags); 3708 /* Setup the preferred settings */ 3709 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs); 3710 ddi_put32(acc, &initp->MaxIoSize, 3711 (softs->aac_max_sectors << 9)); 3712 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size); 3713 } else { 3714 /* 3715 * Tells the adapter about the physical location of various 3716 * important shared data structures 3717 */ 3718 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress, 3719 comm_space_phyaddr + \ 3720 offsetof(struct aac_comm_space, adapter_fibs)); 3721 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0); 3722 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE); 3723 ddi_put32(acc, &initp->AdapterFibsSize, 3724 AAC_ADAPTER_FIBS * AAC_FIB_SIZE); 3725 ddi_put32(acc, &initp->PrintfBufferAddress, 3726 comm_space_phyaddr + \ 3727 offsetof(struct aac_comm_space, adapter_print_buf)); 3728 ddi_put32(acc, &initp->PrintfBufferSize, 3729 AAC_ADAPTER_PRINT_BUFSIZE); 3730 ddi_put32(acc, &initp->MiniPortRevision, 3731 AAC_INIT_STRUCT_MINIPORT_REVISION); 3732 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN); 3733 3734 qoffset = (comm_space_phyaddr + \ 3735 offsetof(struct aac_comm_space, qtable)) % \ 3736 AAC_QUEUE_ALIGN; 3737 if (qoffset) 3738 qoffset = AAC_QUEUE_ALIGN - qoffset; 3739 softs->qtablep = (struct aac_queue_table *) \ 3740 ((char *)&softs->comm_space->qtable + qoffset); 3741 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \ 3742 offsetof(struct aac_comm_space, qtable) + qoffset); 3743 3744 /* Init queue table */ 3745 ddi_put32(acc, &softs->qtablep-> \ 3746 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3747 AAC_HOST_NORM_CMD_ENTRIES); 3748 ddi_put32(acc, &softs->qtablep-> \ 3749 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3750 AAC_HOST_NORM_CMD_ENTRIES); 3751 ddi_put32(acc, &softs->qtablep-> \ 3752 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3753 AAC_HOST_HIGH_CMD_ENTRIES); 3754 ddi_put32(acc, &softs->qtablep-> \ 3755 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3756 AAC_HOST_HIGH_CMD_ENTRIES); 3757 ddi_put32(acc, &softs->qtablep-> \ 3758 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3759 AAC_ADAP_NORM_CMD_ENTRIES); 3760 ddi_put32(acc, &softs->qtablep-> \ 3761 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3762 AAC_ADAP_NORM_CMD_ENTRIES); 3763 ddi_put32(acc, &softs->qtablep-> \ 3764 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3765 AAC_ADAP_HIGH_CMD_ENTRIES); 3766 ddi_put32(acc, &softs->qtablep-> \ 3767 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3768 AAC_ADAP_HIGH_CMD_ENTRIES); 3769 ddi_put32(acc, &softs->qtablep-> \ 3770 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3771 AAC_HOST_NORM_RESP_ENTRIES); 3772 ddi_put32(acc, &softs->qtablep-> \ 3773 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3774 AAC_HOST_NORM_RESP_ENTRIES); 3775 ddi_put32(acc, &softs->qtablep-> \ 3776 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3777 AAC_HOST_HIGH_RESP_ENTRIES); 3778 ddi_put32(acc, &softs->qtablep-> \ 3779 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3780 
AAC_HOST_HIGH_RESP_ENTRIES); 3781 ddi_put32(acc, &softs->qtablep-> \ 3782 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3783 AAC_ADAP_NORM_RESP_ENTRIES); 3784 ddi_put32(acc, &softs->qtablep-> \ 3785 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3786 AAC_ADAP_NORM_RESP_ENTRIES); 3787 ddi_put32(acc, &softs->qtablep-> \ 3788 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3789 AAC_ADAP_HIGH_RESP_ENTRIES); 3790 ddi_put32(acc, &softs->qtablep-> \ 3791 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3792 AAC_ADAP_HIGH_RESP_ENTRIES); 3793 3794 /* Init queue entries */ 3795 softs->qentries[AAC_HOST_NORM_CMD_Q] = 3796 &softs->qtablep->qt_HostNormCmdQueue[0]; 3797 softs->qentries[AAC_HOST_HIGH_CMD_Q] = 3798 &softs->qtablep->qt_HostHighCmdQueue[0]; 3799 softs->qentries[AAC_ADAP_NORM_CMD_Q] = 3800 &softs->qtablep->qt_AdapNormCmdQueue[0]; 3801 softs->qentries[AAC_ADAP_HIGH_CMD_Q] = 3802 &softs->qtablep->qt_AdapHighCmdQueue[0]; 3803 softs->qentries[AAC_HOST_NORM_RESP_Q] = 3804 &softs->qtablep->qt_HostNormRespQueue[0]; 3805 softs->qentries[AAC_HOST_HIGH_RESP_Q] = 3806 &softs->qtablep->qt_HostHighRespQueue[0]; 3807 softs->qentries[AAC_ADAP_NORM_RESP_Q] = 3808 &softs->qtablep->qt_AdapNormRespQueue[0]; 3809 softs->qentries[AAC_ADAP_HIGH_RESP_Q] = 3810 &softs->qtablep->qt_AdapHighRespQueue[0]; 3811 } 3812 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV); 3813 3814 /* Send init structure to the card */ 3815 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT, 3816 comm_space_phyaddr + \ 3817 offsetof(struct aac_comm_space, init_data), 3818 0, 0, 0, NULL) == AACERR) { 3819 AACDB_PRINT(softs, CE_WARN, 3820 "Cannot send init structure to adapter"); 3821 return (AACERR); 3822 } 3823 3824 return (AACOK); 3825 } 3826 3827 static uchar_t * 3828 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf) 3829 { 3830 (void) memset(buf, ' ', AAC_VENDOR_LEN); 3831 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name)); 3832 return (buf + AAC_VENDOR_LEN); 3833 } 3834 3835 static uchar_t * 3836 aac_product_id(struct aac_softstate *softs, uchar_t *buf) 3837 { 3838 (void) memset(buf, ' ', AAC_PRODUCT_LEN); 3839 bcopy(softs->product_name, buf, strlen(softs->product_name)); 3840 return (buf + AAC_PRODUCT_LEN); 3841 } 3842 3843 /* 3844 * Construct unit serial number from container uid 3845 */ 3846 static uchar_t * 3847 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf) 3848 { 3849 int i, d; 3850 uint32_t uid; 3851 3852 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD); 3853 3854 uid = softs->containers[tgt].uid; 3855 for (i = 7; i >= 0; i--) { 3856 d = uid & 0xf; 3857 buf[i] = d > 9 ? 
'A' + (d - 0xa) : '0' + d; 3858 uid >>= 4; 3859 } 3860 return (buf + 8); 3861 } 3862 3863 /* 3864 * SPC-3 7.5 INQUIRY command implementation 3865 */ 3866 static void 3867 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt, 3868 union scsi_cdb *cdbp, struct buf *bp) 3869 { 3870 int tgt = pkt->pkt_address.a_target; 3871 char *b_addr = NULL; 3872 uchar_t page = cdbp->cdb_opaque[2]; 3873 3874 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) { 3875 /* Command Support Data is not supported */ 3876 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0); 3877 return; 3878 } 3879 3880 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3881 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3882 bp_mapin(bp); 3883 b_addr = bp->b_un.b_addr; 3884 } 3885 3886 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) { 3887 uchar_t *vpdp = (uchar_t *)b_addr; 3888 uchar_t *idp, *sp; 3889 3890 /* SPC-3 8.4 Vital product data parameters */ 3891 switch (page) { 3892 case 0x00: 3893 /* Supported VPD pages */ 3894 if (vpdp == NULL || 3895 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3)) 3896 return; 3897 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3898 vpdp[AAC_VPD_PAGE_CODE] = 0x00; 3899 vpdp[AAC_VPD_PAGE_LENGTH] = 3; 3900 3901 vpdp[AAC_VPD_PAGE_DATA] = 0x00; 3902 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80; 3903 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83; 3904 3905 pkt->pkt_state |= STATE_XFERRED_DATA; 3906 break; 3907 3908 case 0x80: 3909 /* Unit serial number page */ 3910 if (vpdp == NULL || 3911 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8)) 3912 return; 3913 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3914 vpdp[AAC_VPD_PAGE_CODE] = 0x80; 3915 vpdp[AAC_VPD_PAGE_LENGTH] = 8; 3916 3917 sp = &vpdp[AAC_VPD_PAGE_DATA]; 3918 (void) aac_lun_serialno(softs, tgt, sp); 3919 3920 pkt->pkt_state |= STATE_XFERRED_DATA; 3921 break; 3922 3923 case 0x83: 3924 /* Device identification page */ 3925 if (vpdp == NULL || 3926 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32)) 3927 return; 3928 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3929 vpdp[AAC_VPD_PAGE_CODE] = 0x83; 3930 3931 idp = &vpdp[AAC_VPD_PAGE_DATA]; 3932 bzero(idp, AAC_VPD_ID_LENGTH); 3933 idp[AAC_VPD_ID_CODESET] = 0x02; 3934 idp[AAC_VPD_ID_TYPE] = 0x01; 3935 3936 /* 3937 * SPC-3 Table 111 - Identifier type 3938 * One recommended method of constructing the remainder 3939 * of the identifier field is to concatenate the product 3940 * identification field from the standard INQUIRY data 3941 * field and the product serial number field from the 3942 * unit serial number page.
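 * In the code below the identifier is built by concatenating the
 * vendor id (AAC_VENDOR_LEN bytes), the product id (AAC_PRODUCT_LEN
 * bytes) and the 8-character hexadecimal serial number derived from
 * the container uid; the identifier and page length fields are then
 * computed from the final write pointer.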
3943 */ 3944 sp = &idp[AAC_VPD_ID_DATA]; 3945 sp = aac_vendor_id(softs, sp); 3946 sp = aac_product_id(softs, sp); 3947 sp = aac_lun_serialno(softs, tgt, sp); 3948 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \ 3949 (uintptr_t)&idp[AAC_VPD_ID_DATA]; 3950 3951 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \ 3952 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA]; 3953 pkt->pkt_state |= STATE_XFERRED_DATA; 3954 break; 3955 3956 default: 3957 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3958 0x24, 0x00, 0); 3959 break; 3960 } 3961 } else { 3962 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr; 3963 size_t len = sizeof (struct scsi_inquiry); 3964 3965 if (page != 0) { 3966 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3967 0x24, 0x00, 0); 3968 return; 3969 } 3970 if (inqp == NULL || bp->b_bcount < len) 3971 return; 3972 3973 bzero(inqp, len); 3974 inqp->inq_len = AAC_ADDITIONAL_LEN; 3975 inqp->inq_ansi = AAC_ANSI_VER; 3976 inqp->inq_rdf = AAC_RESP_DATA_FORMAT; 3977 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid); 3978 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid); 3979 bcopy("V1.0", inqp->inq_revision, 4); 3980 inqp->inq_cmdque = 1; /* enable tagged-queuing */ 3981 /* 3982 * For "sd-max-xfer-size" property which may impact performance 3983 * when IO threads increase. 3984 */ 3985 inqp->inq_wbus32 = 1; 3986 3987 pkt->pkt_state |= STATE_XFERRED_DATA; 3988 } 3989 } 3990 3991 /* 3992 * SPC-3 7.10 MODE SENSE command implementation 3993 */ 3994 static void 3995 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt, 3996 union scsi_cdb *cdbp, struct buf *bp, int capacity) 3997 { 3998 uchar_t pagecode; 3999 struct mode_header *headerp; 4000 struct mode_header_g1 *g1_headerp; 4001 unsigned int ncyl; 4002 caddr_t sense_data; 4003 caddr_t next_page; 4004 size_t sdata_size; 4005 size_t pages_size; 4006 int unsupport_page = 0; 4007 4008 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE || 4009 cdbp->scc_cmd == SCMD_MODE_SENSE_G1); 4010 4011 if (!(bp && bp->b_un.b_addr && bp->b_bcount)) 4012 return; 4013 4014 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4015 bp_mapin(bp); 4016 pkt->pkt_state |= STATE_XFERRED_DATA; 4017 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F; 4018 4019 /* calculate the size of needed buffer */ 4020 if (cdbp->scc_cmd == SCMD_MODE_SENSE) 4021 sdata_size = MODE_HEADER_LENGTH; 4022 else /* must be SCMD_MODE_SENSE_G1 */ 4023 sdata_size = MODE_HEADER_LENGTH_G1; 4024 4025 pages_size = 0; 4026 switch (pagecode) { 4027 case SD_MODE_SENSE_PAGE3_CODE: 4028 pages_size += sizeof (struct mode_format); 4029 break; 4030 4031 case SD_MODE_SENSE_PAGE4_CODE: 4032 pages_size += sizeof (struct mode_geometry); 4033 break; 4034 4035 case MODEPAGE_CTRL_MODE: 4036 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 4037 pages_size += sizeof (struct mode_control_scsi3); 4038 } else { 4039 unsupport_page = 1; 4040 } 4041 break; 4042 4043 case MODEPAGE_ALLPAGES: 4044 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 4045 pages_size += sizeof (struct mode_format) + 4046 sizeof (struct mode_geometry) + 4047 sizeof (struct mode_control_scsi3); 4048 } else { 4049 pages_size += sizeof (struct mode_format) + 4050 sizeof (struct mode_geometry); 4051 } 4052 break; 4053 4054 default: 4055 /* unsupported pages */ 4056 unsupport_page = 1; 4057 } 4058 4059 /* allocate buffer to fill the send data */ 4060 sdata_size += pages_size; 4061 sense_data = kmem_zalloc(sdata_size, KM_SLEEP); 4062 4063 if (cdbp->scc_cmd == SCMD_MODE_SENSE) { 4064 headerp = (struct mode_header *)sense_data; 4065 headerp->length = MODE_HEADER_LENGTH + pages_size - 4066 sizeof 
(headerp->length); 4067 headerp->bdesc_length = 0; 4068 next_page = sense_data + sizeof (struct mode_header); 4069 } else { 4070 g1_headerp = (void *)sense_data; 4071 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size - 4072 sizeof (g1_headerp->length)); 4073 g1_headerp->bdesc_length = 0; 4074 next_page = sense_data + sizeof (struct mode_header_g1); 4075 } 4076 4077 if (unsupport_page) 4078 goto finish; 4079 4080 if (pagecode == SD_MODE_SENSE_PAGE3_CODE || 4081 pagecode == MODEPAGE_ALLPAGES) { 4082 /* SBC-3 7.1.3.3 Format device page */ 4083 struct mode_format *page3p; 4084 4085 page3p = (void *)next_page; 4086 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE; 4087 page3p->mode_page.length = sizeof (struct mode_format); 4088 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE); 4089 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK); 4090 4091 next_page += sizeof (struct mode_format); 4092 } 4093 4094 if (pagecode == SD_MODE_SENSE_PAGE4_CODE || 4095 pagecode == MODEPAGE_ALLPAGES) { 4096 /* SBC-3 7.1.3.8 Rigid disk device geometry page */ 4097 struct mode_geometry *page4p; 4098 4099 page4p = (void *)next_page; 4100 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE; 4101 page4p->mode_page.length = sizeof (struct mode_geometry); 4102 page4p->heads = AAC_NUMBER_OF_HEADS; 4103 page4p->rpm = BE_16(AAC_ROTATION_SPEED); 4104 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK); 4105 page4p->cyl_lb = ncyl & 0xff; 4106 page4p->cyl_mb = (ncyl >> 8) & 0xff; 4107 page4p->cyl_ub = (ncyl >> 16) & 0xff; 4108 4109 next_page += sizeof (struct mode_geometry); 4110 } 4111 4112 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) && 4113 softs->flags & AAC_FLAGS_LBA_64BIT) { 4114 /* 64-bit LBA need large sense data */ 4115 struct mode_control_scsi3 *mctl; 4116 4117 mctl = (void *)next_page; 4118 mctl->mode_page.code = MODEPAGE_CTRL_MODE; 4119 mctl->mode_page.length = 4120 sizeof (struct mode_control_scsi3) - 4121 sizeof (struct mode_page); 4122 mctl->d_sense = 1; 4123 } 4124 4125 finish: 4126 /* copyout the valid data. */ 4127 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount)); 4128 kmem_free(sense_data, sdata_size); 4129 } 4130 4131 static int 4132 aac_name_node(dev_info_t *dip, char *name, int len) 4133 { 4134 int tgt, lun; 4135 4136 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4137 DDI_PROP_DONTPASS, "target", -1); 4138 if (tgt == -1) 4139 return (DDI_FAILURE); 4140 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4141 DDI_PROP_DONTPASS, "lun", -1); 4142 if (lun == -1) 4143 return (DDI_FAILURE); 4144 4145 (void) snprintf(name, len, "%x,%x", tgt, lun); 4146 return (DDI_SUCCESS); 4147 } 4148 4149 /*ARGSUSED*/ 4150 static int 4151 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 4152 scsi_hba_tran_t *tran, struct scsi_device *sd) 4153 { 4154 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 4155 #if defined(DEBUG) || defined(__lock_lint) 4156 int ctl = ddi_get_instance(softs->devinfo_p); 4157 #endif 4158 uint16_t tgt = sd->sd_address.a_target; 4159 uint8_t lun = sd->sd_address.a_lun; 4160 struct aac_device *dvp; 4161 4162 DBCALLED(softs, 2); 4163 4164 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 4165 /* 4166 * If no persistent node exist, we don't allow .conf node 4167 * to be created. 
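 * If a child node with the same target/lun already exists, try to
 * merge this .conf node into it with ndi_merge_node(); the .conf
 * node itself is kept only when that merge does not succeed.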
4168 */ 4169 if (aac_find_child(softs, tgt, lun) != NULL) { 4170 if (ndi_merge_node(tgt_dip, aac_name_node) != 4171 DDI_SUCCESS) 4172 /* Create this .conf node */ 4173 return (DDI_SUCCESS); 4174 } 4175 return (DDI_FAILURE); 4176 } 4177 4178 /* 4179 * Only support containers/physical devices that have been 4180 * detected and are valid 4181 */ 4182 mutex_enter(&softs->io_lock); 4183 if (tgt >= AAC_MAX_DEV(softs)) { 4184 AACDB_PRINT_TRAN(softs, 4185 "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun); 4186 mutex_exit(&softs->io_lock); 4187 return (DDI_FAILURE); 4188 } 4189 4190 if (tgt < AAC_MAX_LD) { 4191 dvp = (struct aac_device *)&softs->containers[tgt]; 4192 if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) { 4193 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d", 4194 ctl, tgt, lun); 4195 mutex_exit(&softs->io_lock); 4196 return (DDI_FAILURE); 4197 } 4198 /* 4199 * Save the tgt_dip for the given target if one doesn't exist 4200 * already. Dips for nonexistent targets will be cleared in 4201 * tgt_free. 4202 */ 4203 if (softs->containers[tgt].dev.dip == NULL && 4204 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) 4205 softs->containers[tgt].dev.dip = tgt_dip; 4206 } else { 4207 dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)]; 4208 /* 4209 * Save the tgt_dip for the given target if one doesn't exist 4210 * already. Dips for nonexistent targets will be cleared in 4211 * tgt_free. 4212 */ 4213 4214 if (softs->nondasds[AAC_PD(tgt)].dev.dip == NULL && 4215 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) 4216 softs->nondasds[AAC_PD(tgt)].dev.dip = tgt_dip; 4217 } 4218 4219 if (softs->flags & AAC_FLAGS_BRKUP) { 4220 if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip, 4221 "buf_break", 1) != DDI_PROP_SUCCESS) { 4222 cmn_err(CE_CONT, "unable to create " 4223 "property for t%dL%d (buf_break)", tgt, lun); 4224 } 4225 } 4226 4227 AACDB_PRINT(softs, CE_NOTE, 4228 "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun, 4229 (dvp->type == AAC_DEV_PD) ? "pd" : "ld"); 4230 mutex_exit(&softs->io_lock); 4231 return (DDI_SUCCESS); 4232 } 4233 4234 static void 4235 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 4236 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 4237 { 4238 #ifndef __lock_lint 4239 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran)) 4240 #endif 4241 4242 struct aac_softstate *softs = SD2AAC(sd); 4243 int tgt = sd->sd_address.a_target; 4244 4245 mutex_enter(&softs->io_lock); 4246 if (tgt < AAC_MAX_LD) { 4247 if (softs->containers[tgt].dev.dip == tgt_dip) 4248 softs->containers[tgt].dev.dip = NULL; 4249 } else { 4250 if (softs->nondasds[AAC_PD(tgt)].dev.dip == tgt_dip) 4251 softs->nondasds[AAC_PD(tgt)].dev.dip = NULL; 4252 softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID; 4253 } 4254 mutex_exit(&softs->io_lock); 4255 } 4256 4257 /* 4258 * Check if the firmware is Up And Running. If it is in the Kernel Panic 4259 * state, (BlinkLED code + 1) is returned.
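 * The health word is read from the AAC_OMR0 register. Return values: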
4260 * 0 -- firmware up and running 4261 * -1 -- firmware dead 4262 * >0 -- firmware kernel panic 4263 */ 4264 static int 4265 aac_check_adapter_health(struct aac_softstate *softs) 4266 { 4267 int rval; 4268 4269 rval = PCI_MEM_GET32(softs, AAC_OMR0); 4270 4271 if (rval & AAC_KERNEL_UP_AND_RUNNING) { 4272 rval = 0; 4273 } else if (rval & AAC_KERNEL_PANIC) { 4274 cmn_err(CE_WARN, "firmware panic"); 4275 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */ 4276 } else { 4277 cmn_err(CE_WARN, "firmware dead"); 4278 rval = -1; 4279 } 4280 return (rval); 4281 } 4282 4283 static void 4284 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp, 4285 uchar_t reason) 4286 { 4287 acp->flags |= AAC_CMD_ABORT; 4288 4289 if (acp->pkt) { 4290 if (acp->slotp) { /* outstanding cmd */ 4291 acp->pkt->pkt_state |= STATE_GOT_STATUS; 4292 } 4293 4294 switch (reason) { 4295 case CMD_TIMEOUT: 4296 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p", 4297 acp); 4298 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 4299 STAT_TIMEOUT | STAT_BUS_RESET); 4300 break; 4301 case CMD_RESET: 4302 /* aac support only RESET_ALL */ 4303 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp); 4304 aac_set_pkt_reason(softs, acp, CMD_RESET, 4305 STAT_BUS_RESET); 4306 break; 4307 case CMD_ABORTED: 4308 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p", 4309 acp); 4310 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 4311 STAT_ABORTED); 4312 break; 4313 } 4314 } 4315 aac_end_io(softs, acp); 4316 } 4317 4318 /* 4319 * Abort all the pending commands of type iocmd or just the command pkt 4320 * corresponding to pkt 4321 */ 4322 static void 4323 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt, 4324 int reason) 4325 { 4326 struct aac_cmd *ac_arg, *acp; 4327 int i; 4328 4329 if (pkt == NULL) { 4330 ac_arg = NULL; 4331 } else { 4332 ac_arg = PKT2AC(pkt); 4333 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ? 4334 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC; 4335 } 4336 4337 /* 4338 * a) outstanding commands on the controller 4339 * Note: should abort outstanding commands only after one 4340 * IOP reset has been done. 4341 */ 4342 if (iocmd & AAC_IOCMD_OUTSTANDING) { 4343 struct aac_cmd *acp; 4344 4345 for (i = 0; i < AAC_MAX_LD; i++) { 4346 if (AAC_DEV_IS_VALID(&softs->containers[i].dev)) 4347 softs->containers[i].reset = 1; 4348 } 4349 while ((acp = softs->q_busy.q_head) != NULL) 4350 aac_abort_iocmd(softs, acp, reason); 4351 } 4352 4353 /* b) commands in the waiting queues */ 4354 for (i = 0; i < AAC_CMDQ_NUM; i++) { 4355 if (iocmd & (1 << i)) { 4356 if (ac_arg) { 4357 aac_abort_iocmd(softs, ac_arg, reason); 4358 } else { 4359 while ((acp = softs->q_wait[i].q_head) != NULL) 4360 aac_abort_iocmd(softs, acp, reason); 4361 } 4362 } 4363 } 4364 } 4365 4366 /* 4367 * The draining thread is shared among quiesce threads. It terminates 4368 * when the adapter is quiesced or stopped by aac_stop_drain(). 4369 */ 4370 static void 4371 aac_check_drain(void *arg) 4372 { 4373 struct aac_softstate *softs = arg; 4374 4375 mutex_enter(&softs->io_lock); 4376 if (softs->ndrains) { 4377 softs->drain_timeid = 0; 4378 /* 4379 * If both ASYNC and SYNC bus throttle are held, 4380 * wake up threads only when both are drained out. 
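 * Otherwise the timeout is re-armed so that this check runs again
 * after AAC_QUIESCE_TICK seconds, until the busy queues drain.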
4381 */ 4382 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 || 4383 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) && 4384 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 || 4385 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0)) 4386 cv_broadcast(&softs->drain_cv); 4387 else 4388 softs->drain_timeid = timeout(aac_check_drain, softs, 4389 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4390 } 4391 mutex_exit(&softs->io_lock); 4392 } 4393 4394 /* 4395 * If the outstanding cmds are not already being drained, start draining 4396 * them. Otherwise, only update ndrains. 4397 */ 4398 static void 4399 aac_start_drain(struct aac_softstate *softs) 4400 { 4401 if (softs->ndrains == 0) { 4402 ASSERT(softs->drain_timeid == 0); 4403 softs->drain_timeid = timeout(aac_check_drain, softs, 4404 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4405 } 4406 softs->ndrains++; 4407 } 4408 4409 /* 4410 * Stop the draining thread when no other threads use it any longer. 4411 * Side effect: io_lock may be released in the middle. 4412 */ 4413 static void 4414 aac_stop_drain(struct aac_softstate *softs) 4415 { 4416 softs->ndrains--; 4417 if (softs->ndrains == 0) { 4418 if (softs->drain_timeid != 0) { 4419 timeout_id_t tid = softs->drain_timeid; 4420 4421 softs->drain_timeid = 0; 4422 mutex_exit(&softs->io_lock); 4423 (void) untimeout(tid); 4424 mutex_enter(&softs->io_lock); 4425 } 4426 } 4427 } 4428 4429 /* 4430 * The following function comes from Adaptec: 4431 * 4432 * Once an IOP reset is done, the driver basically has to re-initialize the 4433 * card as if it were coming up from a cold boot, and the driver is 4434 * responsible for any I/O that was outstanding to the adapter at the time of 4435 * the IOP RESET. Prepare for IOP RESET by making the init code modular, 4436 * with the ability to call it from multiple places. 4437 */ 4438 static int 4439 aac_reset_adapter(struct aac_softstate *softs) 4440 { 4441 int health; 4442 uint32_t status; 4443 int rval = AAC_IOP_RESET_FAILED; 4444 4445 DBCALLED(softs, 1); 4446 4447 ASSERT(softs->state & AAC_STATE_RESET); 4448 4449 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0); 4450 /* Disable interrupt */ 4451 AAC_DISABLE_INTR(softs); 4452 4453 health = aac_check_adapter_health(softs); 4454 if (health == -1) { 4455 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 4456 goto finish; 4457 } 4458 if (health == 0) /* flush drives if possible */ 4459 (void) aac_shutdown(softs); 4460 4461 /* Execute IOP reset */ 4462 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0, 4463 &status)) != AACOK) { 4464 ddi_acc_handle_t acc; 4465 struct aac_fib *fibp; 4466 struct aac_pause_command *pc; 4467 4468 if ((status & 0xf) == 0xf) { 4469 uint32_t wait_count; 4470 4471 /* 4472 * Sunrise Lake has dual cores and we must drag the 4473 * other core with us to reset simultaneously. There 4474 * are 2 bits in the Inbound Reset Control and Status 4475 * Register (offset 0x38) of the Sunrise Lake to reset 4476 * the chip without clearing out the PCI configuration 4477 * info (COMMAND & BARS).
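 * Writing AAC_IRCSR_CORES_RST to that register below asks the
 * hardware to reset both cores at once.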
4478 */ 4479 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST); 4480 4481 /* 4482 * We need to wait for 5 seconds before accessing the MU 4483 * again: 5 * 10000 * 100us = 5,000,000us = 5s 4484 */ 4485 wait_count = 5 * 10000; 4486 while (wait_count) { 4487 drv_usecwait(100); /* delay 100 microseconds */ 4488 wait_count--; 4489 } 4490 } else { 4491 if (status == SRB_STATUS_INVALID_REQUEST) 4492 cmn_err(CE_WARN, "!IOP_RESET not supported"); 4493 else /* probably timeout */ 4494 cmn_err(CE_WARN, "!IOP_RESET failed"); 4495 4496 /* Unwind aac_shutdown() */ 4497 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 4498 acc = softs->sync_ac.slotp->fib_acc_handle; 4499 4500 fibp = softs->sync_ac.slotp->fibp; 4501 pc = (struct aac_pause_command *)&fibp->data[0]; 4502 4503 bzero(pc, sizeof (*pc)); 4504 ddi_put32(acc, &pc->Command, VM_ContainerConfig); 4505 ddi_put32(acc, &pc->Type, CT_PAUSE_IO); 4506 ddi_put32(acc, &pc->Timeout, 1); 4507 ddi_put32(acc, &pc->Min, 1); 4508 ddi_put32(acc, &pc->NoRescan, 1); 4509 4510 (void) aac_sync_fib(softs, ContainerCommand, 4511 AAC_FIB_SIZEOF(struct aac_pause_command)); 4512 aac_sync_fib_slot_release(softs, &softs->sync_ac); 4513 4514 if (aac_check_adapter_health(softs) != 0) 4515 ddi_fm_service_impact(softs->devinfo_p, 4516 DDI_SERVICE_LOST); 4517 else 4518 /* 4519 * IOP reset not supported or IOP not reset 4520 */ 4521 rval = AAC_IOP_RESET_ABNORMAL; 4522 goto finish; 4523 } 4524 } 4525 4526 /* 4527 * Re-read and renegotiate the FIB parameters, as one of the actions 4528 * that can result from an IOP reset is the running of a new firmware 4529 * image. 4530 */ 4531 if (aac_common_attach(softs) != AACOK) 4532 goto finish; 4533 4534 rval = AAC_IOP_RESET_SUCCEED; 4535 4536 finish: 4537 AAC_ENABLE_INTR(softs); 4538 return (rval); 4539 } 4540 4541 static void 4542 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q, 4543 int throttle) 4544 { 4545 /* 4546 * If the bus is draining/quiesced, no changes to the throttles 4547 * are allowed. All throttles should have been set to 0. 4548 */ 4549 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains) 4550 return; 4551 dvp->throttle[q] = throttle; 4552 } 4553 4554 static void 4555 aac_hold_bus(struct aac_softstate *softs, int iocmds) 4556 { 4557 int i, q; 4558 4559 /* Hold bus by holding every device on the bus */ 4560 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4561 if (iocmds & (1 << q)) { 4562 softs->bus_throttle[q] = 0; 4563 for (i = 0; i < AAC_MAX_LD; i++) 4564 aac_set_throttle(softs, 4565 &softs->containers[i].dev, q, 0); 4566 for (i = 0; i < AAC_MAX_PD(softs); i++) 4567 aac_set_throttle(softs, 4568 &softs->nondasds[i].dev, q, 0); 4569 } 4570 } 4571 } 4572 4573 static void 4574 aac_unhold_bus(struct aac_softstate *softs, int iocmds) 4575 { 4576 int i, q, max_throttle; 4577 4578 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4579 if (iocmds & (1 << q)) { 4580 /* 4581 * Should not unhold the AAC_IOCMD_ASYNC bus if it has 4582 * been quiesced or is being drained by quiesce 4583 * threads.
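 * Below, the async queue throttle is restored to total_slots minus
 * AAC_MGT_SLOT_NUM (slots kept back for management FIBs) and the
 * sync queue throttle to total_slots - 1.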
4584 */ 4585 if (q == AAC_CMDQ_ASYNC && ((softs->state & 4586 AAC_STATE_QUIESCED) || softs->ndrains)) 4587 continue; 4588 if (q == AAC_CMDQ_ASYNC) 4589 max_throttle = softs->total_slots - 4590 AAC_MGT_SLOT_NUM; 4591 else 4592 max_throttle = softs->total_slots - 1; 4593 softs->bus_throttle[q] = max_throttle; 4594 for (i = 0; i < AAC_MAX_LD; i++) 4595 aac_set_throttle(softs, 4596 &softs->containers[i].dev, 4597 q, max_throttle); 4598 for (i = 0; i < AAC_MAX_PD(softs); i++) 4599 aac_set_throttle(softs, &softs->nondasds[i].dev, 4600 q, max_throttle); 4601 } 4602 } 4603 } 4604 4605 static int 4606 aac_do_reset(struct aac_softstate *softs) 4607 { 4608 int health; 4609 int rval; 4610 4611 softs->state |= AAC_STATE_RESET; 4612 health = aac_check_adapter_health(softs); 4613 4614 /* 4615 * Hold off new io commands and wait all outstanding io 4616 * commands to complete. 4617 */ 4618 if (health == 0) { 4619 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC]; 4620 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC]; 4621 4622 if (sync_cmds == 0 && async_cmds == 0) { 4623 rval = AAC_IOP_RESET_SUCCEED; 4624 goto finish; 4625 } 4626 /* 4627 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds 4628 * to complete the outstanding io commands 4629 */ 4630 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10; 4631 int (*intr_handler)(struct aac_softstate *); 4632 4633 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4634 /* 4635 * Poll the adapter by ourselves in case interrupt is disabled 4636 * and to avoid releasing the io_lock. 4637 */ 4638 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 4639 aac_process_intr_new : aac_process_intr_old; 4640 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] || 4641 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) { 4642 drv_usecwait(100); 4643 (void) intr_handler(softs); 4644 timeout--; 4645 } 4646 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4647 4648 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 && 4649 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) { 4650 /* Cmds drained out */ 4651 rval = AAC_IOP_RESET_SUCCEED; 4652 goto finish; 4653 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds || 4654 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) { 4655 /* Cmds not drained out, adapter overloaded */ 4656 rval = AAC_IOP_RESET_ABNORMAL; 4657 goto finish; 4658 } 4659 } 4660 4661 /* 4662 * If a longer waiting time still can't drain any outstanding io 4663 * commands, do IOP reset. 
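 * Should the reset itself fail, the adapter is marked AAC_STATE_DEAD
 * and no further commands will be accepted.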
4664 */ 4665 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED) 4666 softs->state |= AAC_STATE_DEAD; 4667 4668 finish: 4669 softs->state &= ~AAC_STATE_RESET; 4670 return (rval); 4671 } 4672 4673 static int 4674 aac_tran_reset(struct scsi_address *ap, int level) 4675 { 4676 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4677 int rval; 4678 4679 DBCALLED(softs, 1); 4680 4681 if (level != RESET_ALL) { 4682 cmn_err(CE_NOTE, "!reset target/lun not supported"); 4683 return (0); 4684 } 4685 4686 mutex_enter(&softs->io_lock); 4687 switch (rval = aac_do_reset(softs)) { 4688 case AAC_IOP_RESET_SUCCEED: 4689 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC, 4690 NULL, CMD_RESET); 4691 aac_start_waiting_io(softs); 4692 break; 4693 case AAC_IOP_RESET_FAILED: 4694 /* Abort IOCTL cmds when adapter is dead */ 4695 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET); 4696 break; 4697 case AAC_IOP_RESET_ABNORMAL: 4698 aac_start_waiting_io(softs); 4699 } 4700 mutex_exit(&softs->io_lock); 4701 4702 aac_drain_comp_q(softs); 4703 return (rval == 0); 4704 } 4705 4706 static int 4707 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 4708 { 4709 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4710 4711 DBCALLED(softs, 1); 4712 4713 mutex_enter(&softs->io_lock); 4714 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED); 4715 mutex_exit(&softs->io_lock); 4716 4717 aac_drain_comp_q(softs); 4718 return (1); 4719 } 4720 4721 void 4722 aac_free_dmamap(struct aac_cmd *acp) 4723 { 4724 /* Free dma mapping */ 4725 if (acp->flags & AAC_CMD_DMA_VALID) { 4726 ASSERT(acp->buf_dma_handle); 4727 (void) ddi_dma_unbind_handle(acp->buf_dma_handle); 4728 acp->flags &= ~AAC_CMD_DMA_VALID; 4729 } 4730 4731 if (acp->abp != NULL) { /* free non-aligned buf DMA */ 4732 ASSERT(acp->buf_dma_handle); 4733 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp) 4734 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr, 4735 (uint8_t *)acp->abp, acp->bp->b_bcount, 4736 DDI_DEV_AUTOINCR); 4737 ddi_dma_mem_free(&acp->abh); 4738 acp->abp = NULL; 4739 } 4740 4741 if (acp->buf_dma_handle) { 4742 ddi_dma_free_handle(&acp->buf_dma_handle); 4743 acp->buf_dma_handle = NULL; 4744 } 4745 } 4746 4747 static void 4748 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 4749 { 4750 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported", 4751 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd); 4752 aac_free_dmamap(acp); 4753 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0); 4754 aac_soft_callback(softs, acp); 4755 } 4756 4757 /* 4758 * Handle command to logical device 4759 */ 4760 static int 4761 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp) 4762 { 4763 struct aac_container *dvp; 4764 struct scsi_pkt *pkt; 4765 union scsi_cdb *cdbp; 4766 struct buf *bp; 4767 int rval; 4768 4769 dvp = (struct aac_container *)acp->dvp; 4770 pkt = acp->pkt; 4771 cdbp = (void *)pkt->pkt_cdbp; 4772 bp = acp->bp; 4773 4774 switch (cdbp->scc_cmd) { 4775 case SCMD_INQUIRY: /* inquiry */ 4776 aac_free_dmamap(acp); 4777 aac_inquiry(softs, pkt, cdbp, bp); 4778 aac_soft_callback(softs, acp); 4779 rval = TRAN_ACCEPT; 4780 break; 4781 4782 case SCMD_READ_CAPACITY: /* read capacity */ 4783 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4784 struct scsi_capacity cap; 4785 uint64_t last_lba; 4786 4787 /* check 64-bit LBA */ 4788 last_lba = dvp->size - 1; 4789 if (last_lba > 0xffffffffull) { 4790 cap.capacity = 0xfffffffful; 4791 } else { 4792 cap.capacity = BE_32(last_lba); 
4793 } 4794 cap.lbasize = BE_32(AAC_SECTOR_SIZE); 4795 4796 aac_free_dmamap(acp); 4797 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4798 bp_mapin(bp); 4799 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8)); 4800 pkt->pkt_state |= STATE_XFERRED_DATA; 4801 } 4802 aac_soft_callback(softs, acp); 4803 rval = TRAN_ACCEPT; 4804 break; 4805 4806 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */ 4807 /* Check if containers need 64-bit LBA support */ 4808 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) { 4809 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4810 struct scsi_capacity_16 cap16; 4811 int cap_len = sizeof (struct scsi_capacity_16); 4812 4813 bzero(&cap16, cap_len); 4814 cap16.sc_capacity = BE_64(dvp->size - 1); 4815 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE); 4816 4817 aac_free_dmamap(acp); 4818 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4819 bp_mapin(bp); 4820 bcopy(&cap16, bp->b_un.b_addr, 4821 min(bp->b_bcount, cap_len)); 4822 pkt->pkt_state |= STATE_XFERRED_DATA; 4823 } 4824 aac_soft_callback(softs, acp); 4825 } else { 4826 aac_unknown_scmd(softs, acp); 4827 } 4828 rval = TRAN_ACCEPT; 4829 break; 4830 4831 case SCMD_READ_G4: /* read_16 */ 4832 case SCMD_WRITE_G4: /* write_16 */ 4833 if (softs->flags & AAC_FLAGS_RAW_IO) { 4834 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 4835 acp->blkno = ((uint64_t) \ 4836 GETG4ADDR(cdbp) << 32) | \ 4837 (uint32_t)GETG4ADDRTL(cdbp); 4838 goto do_io; 4839 } 4840 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported"); 4841 aac_unknown_scmd(softs, acp); 4842 rval = TRAN_ACCEPT; 4843 break; 4844 4845 case SCMD_READ: /* read_6 */ 4846 case SCMD_WRITE: /* write_6 */ 4847 acp->blkno = GETG0ADDR(cdbp); 4848 goto do_io; 4849 4850 case SCMD_READ_G5: /* read_12 */ 4851 case SCMD_WRITE_G5: /* write_12 */ 4852 acp->blkno = GETG5ADDR(cdbp); 4853 goto do_io; 4854 4855 case SCMD_READ_G1: /* read_10 */ 4856 case SCMD_WRITE_G1: /* write_10 */ 4857 acp->blkno = (uint32_t)GETG1ADDR(cdbp); 4858 do_io: 4859 if (acp->flags & AAC_CMD_DMA_VALID) { 4860 uint64_t cnt_size = dvp->size; 4861 4862 /* 4863 * If LBA > array size AND rawio, the 4864 * adapter may hang. So check it before 4865 * sending. 4866 * NOTE: (blkno + blkcnt) may overflow 4867 */ 4868 if ((acp->blkno < cnt_size) && 4869 ((acp->blkno + acp->bcount / 4870 AAC_BLK_SIZE) <= cnt_size)) { 4871 rval = aac_do_io(softs, acp); 4872 } else { 4873 /* 4874 * Request exceeds the capacity of disk, 4875 * set error block number to last LBA 4876 * + 1. 
4877 */ 4878 aac_set_arq_data(pkt, 4879 KEY_ILLEGAL_REQUEST, 0x21, 4880 0x00, cnt_size); 4881 aac_soft_callback(softs, acp); 4882 rval = TRAN_ACCEPT; 4883 } 4884 } else if (acp->bcount == 0) { 4885 /* For 0 length IO, just return ok */ 4886 aac_soft_callback(softs, acp); 4887 rval = TRAN_ACCEPT; 4888 } else { 4889 rval = TRAN_BADPKT; 4890 } 4891 break; 4892 4893 case SCMD_MODE_SENSE: /* mode_sense_6 */ 4894 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */ 4895 int capacity; 4896 4897 aac_free_dmamap(acp); 4898 if (dvp->size > 0xffffffffull) 4899 capacity = 0xfffffffful; /* 64-bit LBA */ 4900 else 4901 capacity = dvp->size; 4902 aac_mode_sense(softs, pkt, cdbp, bp, capacity); 4903 aac_soft_callback(softs, acp); 4904 rval = TRAN_ACCEPT; 4905 break; 4906 } 4907 4908 case SCMD_START_STOP: 4909 if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) { 4910 acp->aac_cmd_fib = aac_cmd_fib_startstop; 4911 acp->ac_comp = aac_startstop_complete; 4912 rval = aac_do_io(softs, acp); 4913 break; 4914 } 4915 /* FALLTHRU */ 4916 case SCMD_TEST_UNIT_READY: 4917 case SCMD_REQUEST_SENSE: 4918 case SCMD_FORMAT: 4919 aac_free_dmamap(acp); 4920 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4921 if (acp->flags & AAC_CMD_BUF_READ) { 4922 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4923 bp_mapin(bp); 4924 bzero(bp->b_un.b_addr, bp->b_bcount); 4925 } 4926 pkt->pkt_state |= STATE_XFERRED_DATA; 4927 } 4928 aac_soft_callback(softs, acp); 4929 rval = TRAN_ACCEPT; 4930 break; 4931 4932 case SCMD_SYNCHRONIZE_CACHE: 4933 acp->flags |= AAC_CMD_NTAG; 4934 acp->aac_cmd_fib = aac_cmd_fib_sync; 4935 acp->ac_comp = aac_synccache_complete; 4936 rval = aac_do_io(softs, acp); 4937 break; 4938 4939 case SCMD_DOORLOCK: 4940 aac_free_dmamap(acp); 4941 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0; 4942 aac_soft_callback(softs, acp); 4943 rval = TRAN_ACCEPT; 4944 break; 4945 4946 default: /* unknown command */ 4947 aac_unknown_scmd(softs, acp); 4948 rval = TRAN_ACCEPT; 4949 break; 4950 } 4951 4952 return (rval); 4953 } 4954 4955 static int 4956 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) 4957 { 4958 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4959 struct aac_cmd *acp = PKT2AC(pkt); 4960 struct aac_device *dvp = acp->dvp; 4961 int rval; 4962 4963 DBCALLED(softs, 2); 4964 4965 /* 4966 * Reinitialize some fields of ac and pkt; the packet may 4967 * have been resubmitted 4968 */ 4969 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \ 4970 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID; 4971 acp->timeout = acp->pkt->pkt_time; 4972 if (pkt->pkt_flags & FLAG_NOINTR) 4973 acp->flags |= AAC_CMD_NO_INTR; 4974 #ifdef DEBUG 4975 acp->fib_flags = AACDB_FLAGS_FIB_SCMD; 4976 #endif 4977 pkt->pkt_reason = CMD_CMPLT; 4978 pkt->pkt_state = 0; 4979 pkt->pkt_statistics = 0; 4980 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */ 4981 4982 if (acp->flags & AAC_CMD_DMA_VALID) { 4983 pkt->pkt_resid = acp->bcount; 4984 /* Consistent packets need to be sync'ed first */ 4985 if ((acp->flags & AAC_CMD_CONSISTENT) && 4986 (acp->flags & AAC_CMD_BUF_WRITE)) 4987 if (aac_dma_sync_ac(acp) != AACOK) { 4988 ddi_fm_service_impact(softs->devinfo_p, 4989 DDI_SERVICE_UNAFFECTED); 4990 return (TRAN_BADPKT); 4991 } 4992 } else { 4993 pkt->pkt_resid = 0; 4994 } 4995 4996 mutex_enter(&softs->io_lock); 4997 AACDB_PRINT_SCMD(softs, acp); 4998 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) && 4999 !(softs->state & AAC_STATE_DEAD)) { 5000 if (dvp->type == AAC_DEV_LD) { 5001 if (ap->a_lun == 0) 5002 rval = 
aac_tran_start_ld(softs, acp); 5003 else 5004 goto error; 5005 } else { 5006 rval = aac_do_io(softs, acp); 5007 } 5008 } else { 5009 error: 5010 #ifdef DEBUG 5011 if (!(softs->state & AAC_STATE_DEAD)) { 5012 AACDB_PRINT_TRAN(softs, 5013 "Cannot send cmd to target t%dL%d: %s", 5014 ap->a_target, ap->a_lun, 5015 "target invalid"); 5016 } else { 5017 AACDB_PRINT(softs, CE_WARN, 5018 "Cannot send cmd to target t%dL%d: %s", 5019 ap->a_target, ap->a_lun, 5020 "adapter dead"); 5021 } 5022 #endif 5023 rval = TRAN_FATAL_ERROR; 5024 } 5025 mutex_exit(&softs->io_lock); 5026 return (rval); 5027 } 5028 5029 static int 5030 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom) 5031 { 5032 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5033 struct aac_device *dvp; 5034 int rval; 5035 5036 DBCALLED(softs, 2); 5037 5038 /* We don't allow inquiring about capabilities for other targets */ 5039 if (cap == NULL || whom == 0) { 5040 AACDB_PRINT(softs, CE_WARN, 5041 "GetCap> %s not supported: whom=%d", cap, whom); 5042 return (-1); 5043 } 5044 5045 mutex_enter(&softs->io_lock); 5046 dvp = AAC_DEV(softs, ap->a_target); 5047 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 5048 mutex_exit(&softs->io_lock); 5049 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap", 5050 ap->a_target, ap->a_lun); 5051 return (-1); 5052 } 5053 5054 switch (scsi_hba_lookup_capstr(cap)) { 5055 case SCSI_CAP_ARQ: /* auto request sense */ 5056 rval = 1; 5057 break; 5058 case SCSI_CAP_UNTAGGED_QING: 5059 case SCSI_CAP_TAGGED_QING: 5060 rval = 1; 5061 break; 5062 case SCSI_CAP_DMA_MAX: 5063 rval = softs->dma_max; 5064 break; 5065 default: 5066 rval = -1; 5067 break; 5068 } 5069 mutex_exit(&softs->io_lock); 5070 5071 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d", 5072 cap, ap->a_target, ap->a_lun, rval); 5073 return (rval); 5074 } 5075 5076 /*ARGSUSED*/ 5077 static int 5078 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 5079 { 5080 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5081 struct aac_device *dvp; 5082 int rval; 5083 5084 DBCALLED(softs, 2); 5085 5086 /* We don't allow inquiring about capabilities for other targets */ 5087 if (cap == NULL || whom == 0) { 5088 AACDB_PRINT(softs, CE_WARN, 5089 "SetCap> %s not supported: whom=%d", cap, whom); 5090 return (-1); 5091 } 5092 5093 mutex_enter(&softs->io_lock); 5094 dvp = AAC_DEV(softs, ap->a_target); 5095 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 5096 mutex_exit(&softs->io_lock); 5097 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap", 5098 ap->a_target, ap->a_lun); 5099 return (-1); 5100 } 5101 5102 switch (scsi_hba_lookup_capstr(cap)) { 5103 case SCSI_CAP_ARQ: 5104 /* Force auto request sense */ 5105 rval = (value == 1) ? 1 : 0; 5106 break; 5107 case SCSI_CAP_UNTAGGED_QING: 5108 case SCSI_CAP_TAGGED_QING: 5109 rval = (value == 1) ? 
1 : 0; 5110 break; 5111 default: 5112 rval = -1; 5113 break; 5114 } 5115 mutex_exit(&softs->io_lock); 5116 5117 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d", 5118 cap, ap->a_target, ap->a_lun, value, rval); 5119 return (rval); 5120 } 5121 5122 static void 5123 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5124 { 5125 struct aac_cmd *acp = PKT2AC(pkt); 5126 5127 DBCALLED(NULL, 2); 5128 5129 if (acp->sgt) { 5130 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 5131 acp->left_cookien); 5132 } 5133 aac_free_dmamap(acp); 5134 ASSERT(acp->slotp == NULL); 5135 scsi_hba_pkt_free(ap, pkt); 5136 } 5137 5138 int 5139 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp, 5140 struct buf *bp, int flags, int (*cb)(), caddr_t arg) 5141 { 5142 int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP; 5143 uint_t oldcookiec; 5144 int bioerr = 0; 5145 int rval; 5146 5147 oldcookiec = acp->left_cookien; 5148 5149 /* Move window to build s/g map */ 5150 if (acp->total_nwin > 0) { 5151 if (++acp->cur_win < acp->total_nwin) { 5152 off_t off; 5153 size_t len; 5154 5155 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win, 5156 &off, &len, &acp->cookie, &acp->left_cookien); 5157 if (rval == DDI_SUCCESS) 5158 goto get_dma_cookies; 5159 AACDB_PRINT(softs, CE_WARN, 5160 "ddi_dma_getwin() fail %d", rval); 5161 return (AACERR); 5162 } 5163 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer"); 5164 return (AACERR); 5165 } 5166 5167 /* We need to transfer data, so we alloc DMA resources for this pkt */ 5168 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) { 5169 uint_t dma_flags = 0; 5170 struct aac_sge *sge; 5171 5172 /* 5173 * We will still use this point to fake some 5174 * infomation in tran_start 5175 */ 5176 acp->bp = bp; 5177 5178 /* Set dma flags */ 5179 if (BUF_IS_READ(bp)) { 5180 dma_flags |= DDI_DMA_READ; 5181 acp->flags |= AAC_CMD_BUF_READ; 5182 } else { 5183 dma_flags |= DDI_DMA_WRITE; 5184 acp->flags |= AAC_CMD_BUF_WRITE; 5185 } 5186 if (flags & PKT_CONSISTENT) 5187 dma_flags |= DDI_DMA_CONSISTENT; 5188 if (flags & PKT_DMA_PARTIAL) 5189 dma_flags |= DDI_DMA_PARTIAL; 5190 5191 /* Alloc buf dma handle */ 5192 if (!acp->buf_dma_handle) { 5193 rval = ddi_dma_alloc_handle(softs->devinfo_p, 5194 &softs->buf_dma_attr, cb, arg, 5195 &acp->buf_dma_handle); 5196 if (rval != DDI_SUCCESS) { 5197 AACDB_PRINT(softs, CE_WARN, 5198 "Can't allocate DMA handle, errno=%d", 5199 rval); 5200 goto error_out; 5201 } 5202 } 5203 5204 /* Bind buf */ 5205 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) { 5206 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle, 5207 bp, dma_flags, cb, arg, &acp->cookie, 5208 &acp->left_cookien); 5209 } else { 5210 size_t bufsz; 5211 5212 AACDB_PRINT_TRAN(softs, 5213 "non-aligned buffer: addr=0x%p, cnt=%lu", 5214 (void *)bp->b_un.b_addr, bp->b_bcount); 5215 if (bp->b_flags & (B_PAGEIO|B_PHYS)) 5216 bp_mapin(bp); 5217 5218 rval = ddi_dma_mem_alloc(acp->buf_dma_handle, 5219 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN), 5220 &softs->acc_attr, DDI_DMA_STREAMING, 5221 cb, arg, &acp->abp, &bufsz, &acp->abh); 5222 5223 if (rval != DDI_SUCCESS) { 5224 AACDB_PRINT(softs, CE_NOTE, 5225 "Cannot alloc DMA to non-aligned buf"); 5226 bioerr = 0; 5227 goto error_out; 5228 } 5229 5230 if (acp->flags & AAC_CMD_BUF_WRITE) 5231 ddi_rep_put8(acp->abh, 5232 (uint8_t *)bp->b_un.b_addr, 5233 (uint8_t *)acp->abp, bp->b_bcount, 5234 DDI_DEV_AUTOINCR); 5235 5236 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle, 5237 NULL, acp->abp, bufsz, dma_flags, cb, 
arg, 5238 &acp->cookie, &acp->left_cookien); 5239 } 5240 5241 switch (rval) { 5242 case DDI_DMA_PARTIAL_MAP: 5243 if (ddi_dma_numwin(acp->buf_dma_handle, 5244 &acp->total_nwin) == DDI_FAILURE) { 5245 AACDB_PRINT(softs, CE_WARN, 5246 "Cannot get number of DMA windows"); 5247 bioerr = 0; 5248 goto error_out; 5249 } 5250 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5251 acp->left_cookien); 5252 acp->cur_win = 0; 5253 break; 5254 5255 case DDI_DMA_MAPPED: 5256 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5257 acp->left_cookien); 5258 acp->cur_win = 0; 5259 acp->total_nwin = 1; 5260 break; 5261 5262 case DDI_DMA_NORESOURCES: 5263 bioerr = 0; 5264 AACDB_PRINT(softs, CE_WARN, 5265 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES"); 5266 goto error_out; 5267 case DDI_DMA_BADATTR: 5268 case DDI_DMA_NOMAPPING: 5269 bioerr = EFAULT; 5270 AACDB_PRINT(softs, CE_WARN, 5271 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING"); 5272 goto error_out; 5273 case DDI_DMA_TOOBIG: 5274 bioerr = EINVAL; 5275 AACDB_PRINT(softs, CE_WARN, 5276 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)", 5277 bp->b_bcount); 5278 goto error_out; 5279 default: 5280 bioerr = EINVAL; 5281 AACDB_PRINT(softs, CE_WARN, 5282 "Cannot bind buf for DMA: %d", rval); 5283 goto error_out; 5284 } 5285 acp->flags |= AAC_CMD_DMA_VALID; 5286 5287 get_dma_cookies: 5288 ASSERT(acp->left_cookien > 0); 5289 if (acp->left_cookien > softs->aac_sg_tablesize) { 5290 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d", 5291 acp->left_cookien); 5292 bioerr = EINVAL; 5293 goto error_out; 5294 } 5295 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) { 5296 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 5297 oldcookiec); 5298 acp->sgt = NULL; 5299 } 5300 if (acp->sgt == NULL) { 5301 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \ 5302 acp->left_cookien, kf); 5303 if (acp->sgt == NULL) { 5304 AACDB_PRINT(softs, CE_WARN, 5305 "sgt kmem_alloc fail"); 5306 bioerr = ENOMEM; 5307 goto error_out; 5308 } 5309 } 5310 5311 sge = &acp->sgt[0]; 5312 sge->bcount = acp->cookie.dmac_size; 5313 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5314 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5315 acp->bcount = acp->cookie.dmac_size; 5316 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) { 5317 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie); 5318 sge->bcount = acp->cookie.dmac_size; 5319 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5320 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5321 acp->bcount += acp->cookie.dmac_size; 5322 } 5323 5324 /* 5325 * Note: The old DMA engine does not correctly handle the 5326 * dma_attr_maxxfer attribute, so we have to enforce 5327 * it ourselves. 5328 */ 5329 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) { 5330 AACDB_PRINT(softs, CE_NOTE, 5331 "large xfer size received %d\n", acp->bcount); 5332 bioerr = EINVAL; 5333 goto error_out; 5334 } 5335 5336 acp->total_xfer += acp->bcount; 5337 5338 if (acp->pkt) { 5339 /* Return remaining byte count */ 5340 if (acp->total_xfer <= bp->b_bcount) { 5341 acp->pkt->pkt_resid = bp->b_bcount - \ 5342 acp->total_xfer; 5343 } else { 5344 /* 5345 * Allocated DMA size is greater than the buf 5346 * size of bp. This is caused by devices like 5347 * tape. We have extra bytes allocated, but 5348 * the packet residual has to stay correct.
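 * Clamp the residual to zero instead of letting it go negative.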
5349 */ 5350 acp->pkt->pkt_resid = 0; 5351 } 5352 AACDB_PRINT_TRAN(softs, 5353 "bp=0x%p, xfered=%d/%d, resid=%d", 5354 (void *)bp->b_un.b_addr, (int)acp->total_xfer, 5355 (int)bp->b_bcount, (int)acp->pkt->pkt_resid); 5356 } 5357 } 5358 return (AACOK); 5359 5360 error_out: 5361 bioerror(bp, bioerr); 5362 return (AACERR); 5363 } 5364 5365 static struct scsi_pkt * 5366 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 5367 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags, 5368 int (*callback)(), caddr_t arg) 5369 { 5370 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5371 struct aac_cmd *acp, *new_acp; 5372 5373 DBCALLED(softs, 2); 5374 5375 /* Allocate pkt */ 5376 if (pkt == NULL) { 5377 int slen; 5378 5379 /* Force auto request sense */ 5380 slen = (statuslen > softs->slen) ? statuslen : softs->slen; 5381 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen, 5382 slen, tgtlen, sizeof (struct aac_cmd), callback, arg); 5383 if (pkt == NULL) { 5384 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed"); 5385 return (NULL); 5386 } 5387 acp = new_acp = PKT2AC(pkt); 5388 acp->pkt = pkt; 5389 acp->cmdlen = cmdlen; 5390 5391 if (ap->a_target < AAC_MAX_LD) { 5392 acp->dvp = &softs->containers[ap->a_target].dev; 5393 acp->aac_cmd_fib = softs->aac_cmd_fib; 5394 acp->ac_comp = aac_ld_complete; 5395 } else { 5396 _NOTE(ASSUMING_PROTECTED(softs->nondasds)) 5397 5398 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev; 5399 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi; 5400 acp->ac_comp = aac_pd_complete; 5401 } 5402 } else { 5403 acp = PKT2AC(pkt); 5404 new_acp = NULL; 5405 } 5406 5407 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK) 5408 return (pkt); 5409 5410 if (new_acp) 5411 aac_tran_destroy_pkt(ap, pkt); 5412 return (NULL); 5413 } 5414 5415 /* 5416 * tran_sync_pkt(9E) - explicit DMA synchronization 5417 */ 5418 /*ARGSUSED*/ 5419 static void 5420 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5421 { 5422 struct aac_cmd *acp = PKT2AC(pkt); 5423 5424 DBCALLED(NULL, 2); 5425 5426 if (aac_dma_sync_ac(acp) != AACOK) 5427 ddi_fm_service_impact( 5428 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p, 5429 DDI_SERVICE_UNAFFECTED); 5430 } 5431 5432 /* 5433 * tran_dmafree(9E) - deallocate DMA resources allocated for command 5434 */ 5435 /*ARGSUSED*/ 5436 static void 5437 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 5438 { 5439 struct aac_cmd *acp = PKT2AC(pkt); 5440 5441 DBCALLED(NULL, 2); 5442 5443 aac_free_dmamap(acp); 5444 } 5445 5446 static int 5447 aac_do_quiesce(struct aac_softstate *softs) 5448 { 5449 aac_hold_bus(softs, AAC_IOCMD_ASYNC); 5450 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 5451 aac_start_drain(softs); 5452 do { 5453 if (cv_wait_sig(&softs->drain_cv, 5454 &softs->io_lock) == 0) { 5455 /* Quiesce has been interrupted */ 5456 aac_stop_drain(softs); 5457 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5458 aac_start_waiting_io(softs); 5459 return (AACERR); 5460 } 5461 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]); 5462 aac_stop_drain(softs); 5463 } 5464 5465 softs->state |= AAC_STATE_QUIESCED; 5466 return (AACOK); 5467 } 5468 5469 static int 5470 aac_tran_quiesce(dev_info_t *dip) 5471 { 5472 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5473 int rval; 5474 5475 DBCALLED(softs, 1); 5476 5477 mutex_enter(&softs->io_lock); 5478 if (aac_do_quiesce(softs) == AACOK) 5479 rval = 0; 5480 else 5481 rval = 1; 5482 mutex_exit(&softs->io_lock); 5483 return (rval); 5484 } 5485 5486 static int 5487 aac_do_unquiesce(struct 
aac_softstate *softs) 5488 { 5489 softs->state &= ~AAC_STATE_QUIESCED; 5490 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5491 5492 aac_start_waiting_io(softs); 5493 return (AACOK); 5494 } 5495 5496 static int 5497 aac_tran_unquiesce(dev_info_t *dip) 5498 { 5499 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5500 int rval; 5501 5502 DBCALLED(softs, 1); 5503 5504 mutex_enter(&softs->io_lock); 5505 if (aac_do_unquiesce(softs) == AACOK) 5506 rval = 0; 5507 else 5508 rval = 1; 5509 mutex_exit(&softs->io_lock); 5510 return (rval); 5511 } 5512 5513 static int 5514 aac_hba_setup(struct aac_softstate *softs) 5515 { 5516 scsi_hba_tran_t *hba_tran; 5517 int rval; 5518 5519 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP); 5520 if (hba_tran == NULL) 5521 return (AACERR); 5522 hba_tran->tran_hba_private = softs; 5523 hba_tran->tran_tgt_init = aac_tran_tgt_init; 5524 hba_tran->tran_tgt_free = aac_tran_tgt_free; 5525 hba_tran->tran_tgt_probe = scsi_hba_probe; 5526 hba_tran->tran_start = aac_tran_start; 5527 hba_tran->tran_getcap = aac_tran_getcap; 5528 hba_tran->tran_setcap = aac_tran_setcap; 5529 hba_tran->tran_init_pkt = aac_tran_init_pkt; 5530 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt; 5531 hba_tran->tran_reset = aac_tran_reset; 5532 hba_tran->tran_abort = aac_tran_abort; 5533 hba_tran->tran_sync_pkt = aac_tran_sync_pkt; 5534 hba_tran->tran_dmafree = aac_tran_dmafree; 5535 hba_tran->tran_quiesce = aac_tran_quiesce; 5536 hba_tran->tran_unquiesce = aac_tran_unquiesce; 5537 hba_tran->tran_bus_config = aac_tran_bus_config; 5538 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr, 5539 hba_tran, 0); 5540 if (rval != DDI_SUCCESS) { 5541 scsi_hba_tran_free(hba_tran); 5542 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed"); 5543 return (AACERR); 5544 } 5545 5546 softs->hba_tran = hba_tran; 5547 return (AACOK); 5548 } 5549 5550 /* 5551 * FIB setup operations 5552 */ 5553 5554 /* 5555 * Init FIB header 5556 */ 5557 static void 5558 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_cmd *acp, 5559 uint16_t cmd) 5560 { 5561 struct aac_slot *slotp = acp->slotp; 5562 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5563 struct aac_fib *fibp = slotp->fibp; 5564 uint32_t xfer_state; 5565 5566 xfer_state = 5567 AAC_FIBSTATE_HOSTOWNED | 5568 AAC_FIBSTATE_INITIALISED | 5569 AAC_FIBSTATE_EMPTY | 5570 AAC_FIBSTATE_FAST_RESPONSE | /* enable fast io */ 5571 AAC_FIBSTATE_FROMHOST | 5572 AAC_FIBSTATE_REXPECTED | 5573 AAC_FIBSTATE_NORM; 5574 5575 if (!(acp->flags & AAC_CMD_SYNC)) 5576 xfer_state |= AAC_FIBSTATE_ASYNC; 5577 5578 ddi_put32(acc, &fibp->Header.XferState, xfer_state); 5579 ddi_put16(acc, &fibp->Header.Command, cmd); 5580 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB); 5581 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */ 5582 ddi_put16(acc, &fibp->Header.Size, acp->fib_size); 5583 ddi_put16(acc, &fibp->Header.SenderSize, softs->aac_max_fib_size); 5584 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2)); 5585 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5586 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */ 5587 } 5588 5589 /* 5590 * Init FIB for raw IO command 5591 */ 5592 static void 5593 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp) 5594 { 5595 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5596 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0]; 5597 struct aac_sg_entryraw *sgp; 5598 struct aac_sge *sge; 5599 5600 /* Calculate FIB size */ 5601 
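/*
 * The raw I/O FIB consists of the FIB header, the aac_raw_io
 * structure and one aac_sg_entryraw per DMA cookie; the first SG
 * entry appears to be carried inside struct aac_raw_io itself,
 * hence the (left_cookien - 1) term in the size calculation below.
 */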
acp->fib_size = sizeof (struct aac_fib_header) + \ 5602 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \ 5603 sizeof (struct aac_sg_entryraw); 5604 5605 aac_cmd_fib_header(softs, acp, RawIo); 5606 5607 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0); 5608 ddi_put16(acc, &io->BpTotal, 0); 5609 ddi_put16(acc, &io->BpComplete, 0); 5610 5611 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno)); 5612 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno)); 5613 ddi_put16(acc, &io->ContainerId, 5614 ((struct aac_container *)acp->dvp)->cid); 5615 5616 /* Fill SG table */ 5617 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien); 5618 ddi_put32(acc, &io->ByteCount, acp->bcount); 5619 5620 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0]; 5621 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5622 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5623 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5624 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5625 sgp->Next = 0; 5626 sgp->Prev = 0; 5627 sgp->Flags = 0; 5628 } 5629 } 5630 5631 /* Init FIB for 64-bit block IO command */ 5632 static void 5633 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp) 5634 { 5635 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5636 struct aac_blockread64 *br = (struct aac_blockread64 *) \ 5637 &acp->slotp->fibp->data[0]; 5638 struct aac_sg_entry64 *sgp; 5639 struct aac_sge *sge; 5640 5641 acp->fib_size = sizeof (struct aac_fib_header) + \ 5642 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \ 5643 sizeof (struct aac_sg_entry64); 5644 5645 aac_cmd_fib_header(softs, acp, ContainerCommand64); 5646 5647 /* 5648 * The definitions for aac_blockread64 and aac_blockwrite64 5649 * are the same. 5650 */ 5651 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5652 ddi_put16(acc, &br->ContainerId, 5653 ((struct aac_container *)acp->dvp)->cid); 5654 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ? 
5655 VM_CtHostRead64 : VM_CtHostWrite64); 5656 ddi_put16(acc, &br->Pad, 0); 5657 ddi_put16(acc, &br->Flags, 0); 5658 5659 /* Fill SG table */ 5660 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien); 5661 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE); 5662 5663 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0]; 5664 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5665 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5666 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5667 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5668 } 5669 } 5670 5671 /* Init FIB for block IO command */ 5672 static void 5673 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp) 5674 { 5675 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5676 struct aac_blockread *br = (struct aac_blockread *) \ 5677 &acp->slotp->fibp->data[0]; 5678 struct aac_sg_entry *sgp; 5679 struct aac_sge *sge = &acp->sgt[0]; 5680 5681 if (acp->flags & AAC_CMD_BUF_READ) { 5682 acp->fib_size = sizeof (struct aac_fib_header) + \ 5683 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \ 5684 sizeof (struct aac_sg_entry); 5685 5686 ddi_put32(acc, &br->Command, VM_CtBlockRead); 5687 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien); 5688 sgp = &br->SgMap.SgEntry[0]; 5689 } else { 5690 struct aac_blockwrite *bw = (struct aac_blockwrite *)br; 5691 5692 acp->fib_size = sizeof (struct aac_fib_header) + \ 5693 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \ 5694 sizeof (struct aac_sg_entry); 5695 5696 ddi_put32(acc, &bw->Command, VM_CtBlockWrite); 5697 ddi_put32(acc, &bw->Stable, CUNSTABLE); 5698 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien); 5699 sgp = &bw->SgMap.SgEntry[0]; 5700 } 5701 aac_cmd_fib_header(softs, acp, ContainerCommand); 5702 5703 /* 5704 * aac_blockread and aac_blockwrite have the similar 5705 * structure head, so use br for bw here 5706 */ 5707 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5708 ddi_put32(acc, &br->ContainerId, 5709 ((struct aac_container *)acp->dvp)->cid); 5710 ddi_put32(acc, &br->ByteCount, acp->bcount); 5711 5712 /* Fill SG table */ 5713 for (sge = &acp->sgt[0]; 5714 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5715 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5716 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5717 } 5718 } 5719 5720 /*ARGSUSED*/ 5721 void 5722 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp) 5723 { 5724 struct aac_slot *slotp = acp->slotp; 5725 struct aac_fib *fibp = slotp->fibp; 5726 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5727 5728 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp, 5729 acp->fib_size, /* only copy data of needed length */ 5730 DDI_DEV_AUTOINCR); 5731 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5732 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2); 5733 } 5734 5735 static void 5736 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp) 5737 { 5738 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5739 struct aac_synchronize_command *sync = 5740 (struct aac_synchronize_command *)&acp->slotp->fibp->data[0]; 5741 5742 acp->fib_size = AAC_FIB_SIZEOF(struct aac_synchronize_command); 5743 5744 aac_cmd_fib_header(softs, acp, ContainerCommand); 5745 ddi_put32(acc, &sync->Command, VM_ContainerConfig); 5746 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE); 5747 ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid); 5748 ddi_put32(acc, &sync->Count, 5749 sizeof 
(((struct aac_synchronize_reply *)0)->Data)); 5750 } 5751 5752 /* 5753 * Start/Stop unit (Power Management) 5754 */ 5755 static void 5756 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp) 5757 { 5758 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5759 struct aac_Container *cmd = 5760 (struct aac_Container *)&acp->slotp->fibp->data[0]; 5761 union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp; 5762 5763 acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container); 5764 5765 aac_cmd_fib_header(softs, acp, ContainerCommand); 5766 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 5767 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 5768 ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT); 5769 ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \ 5770 AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT); 5771 ddi_put32(acc, &cmd->CTCommand.param[1], 5772 ((struct aac_container *)acp->dvp)->cid); 5773 ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1); 5774 } 5775 5776 /* 5777 * Init FIB for pass-through SCMD 5778 */ 5779 static void 5780 aac_cmd_fib_srb(struct aac_cmd *acp) 5781 { 5782 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5783 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5784 uint8_t *cdb; 5785 5786 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi); 5787 ddi_put32(acc, &srb->retry_limit, 0); 5788 ddi_put32(acc, &srb->cdb_size, acp->cmdlen); 5789 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */ 5790 if (acp->fibp == NULL) { 5791 if (acp->flags & AAC_CMD_BUF_READ) 5792 ddi_put32(acc, &srb->flags, SRB_DataIn); 5793 else if (acp->flags & AAC_CMD_BUF_WRITE) 5794 ddi_put32(acc, &srb->flags, SRB_DataOut); 5795 ddi_put32(acc, &srb->channel, 5796 ((struct aac_nondasd *)acp->dvp)->bus); 5797 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid); 5798 ddi_put32(acc, &srb->lun, 0); 5799 cdb = acp->pkt->pkt_cdbp; 5800 } else { 5801 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0]; 5802 5803 ddi_put32(acc, &srb->flags, srb0->flags); 5804 ddi_put32(acc, &srb->channel, srb0->channel); 5805 ddi_put32(acc, &srb->id, srb0->id); 5806 ddi_put32(acc, &srb->lun, srb0->lun); 5807 cdb = srb0->cdb; 5808 } 5809 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR); 5810 } 5811 5812 static void 5813 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp) 5814 { 5815 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5816 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5817 struct aac_sg_entry *sgp; 5818 struct aac_sge *sge; 5819 5820 acp->fib_size = sizeof (struct aac_fib_header) + \ 5821 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5822 acp->left_cookien * sizeof (struct aac_sg_entry); 5823 5824 /* Fill FIB and SRB headers, and copy cdb */ 5825 aac_cmd_fib_header(softs, acp, ScsiPortCommand); 5826 aac_cmd_fib_srb(acp); 5827 5828 /* Fill SG table */ 5829 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5830 ddi_put32(acc, &srb->count, acp->bcount); 5831 5832 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0]; 5833 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5834 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5835 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5836 } 5837 } 5838 5839 static void 5840 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp) 5841 { 5842 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5843 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5844 struct aac_sg_entry64 
*sgp; 5845 struct aac_sge *sge; 5846 5847 acp->fib_size = sizeof (struct aac_fib_header) + \ 5848 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5849 acp->left_cookien * sizeof (struct aac_sg_entry64); 5850 5851 /* Fill FIB and SRB headers, and copy cdb */ 5852 aac_cmd_fib_header(softs, acp, ScsiPortCommandU64); 5853 aac_cmd_fib_srb(acp); 5854 5855 /* Fill SG table */ 5856 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5857 ddi_put32(acc, &srb->count, acp->bcount); 5858 5859 for (sge = &acp->sgt[0], 5860 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0]; 5861 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5862 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5863 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5864 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5865 } 5866 } 5867 5868 static int 5869 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 5870 { 5871 struct aac_slot *slotp; 5872 5873 if (slotp = aac_get_slot(softs)) { 5874 acp->slotp = slotp; 5875 slotp->acp = acp; 5876 acp->aac_cmd_fib(softs, acp); 5877 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, 5878 DDI_DMA_SYNC_FORDEV); 5879 return (AACOK); 5880 } 5881 return (AACERR); 5882 } 5883 5884 static int 5885 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp) 5886 { 5887 struct aac_device *dvp = acp->dvp; 5888 int q = AAC_CMDQ(acp); 5889 5890 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) { 5891 if (dvp) { 5892 if (dvp->ncmds[q] < dvp->throttle[q]) { 5893 if (!(acp->flags & AAC_CMD_NTAG) || 5894 dvp->ncmds[q] == 0) { 5895 return (aac_cmd_slot_bind(softs, acp)); 5896 } 5897 ASSERT(q == AAC_CMDQ_ASYNC); 5898 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC, 5899 AAC_THROTTLE_DRAIN); 5900 } 5901 } else { 5902 return (aac_cmd_slot_bind(softs, acp)); 5903 } 5904 } 5905 return (AACERR); 5906 } 5907 5908 static int 5909 aac_sync_fib_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 5910 { 5911 struct aac_slot *slotp; 5912 5913 while (softs->sync_ac.slotp) 5914 cv_wait(&softs->sync_fib_cv, &softs->io_lock); 5915 5916 if (slotp = aac_get_slot(softs)) { 5917 ASSERT(acp->slotp == NULL); 5918 5919 acp->slotp = slotp; 5920 slotp->acp = acp; 5921 return (AACOK); 5922 } 5923 return (AACERR); 5924 } 5925 5926 static void 5927 aac_sync_fib_slot_release(struct aac_softstate *softs, struct aac_cmd *acp) 5928 { 5929 ASSERT(acp->slotp); 5930 5931 aac_release_slot(softs, acp->slotp); 5932 acp->slotp->acp = NULL; 5933 acp->slotp = NULL; 5934 5935 cv_signal(&softs->sync_fib_cv); 5936 } 5937 5938 static void 5939 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp) 5940 { 5941 struct aac_slot *slotp = acp->slotp; 5942 int q = AAC_CMDQ(acp); 5943 int rval; 5944 5945 /* Set ac and pkt */ 5946 if (acp->pkt) { /* ac from ioctl has no pkt */ 5947 acp->pkt->pkt_state |= 5948 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD; 5949 } 5950 if (acp->timeout) /* 0 indicates no timeout */ 5951 acp->timeout += aac_timebase + aac_tick; 5952 5953 if (acp->dvp) 5954 acp->dvp->ncmds[q]++; 5955 softs->bus_ncmds[q]++; 5956 aac_cmd_enqueue(&softs->q_busy, acp); 5957 5958 AACDB_PRINT_FIB(softs, slotp); 5959 5960 if (softs->flags & AAC_FLAGS_NEW_COMM) { 5961 rval = aac_send_command(softs, slotp); 5962 } else { 5963 /* 5964 * If fib can not be enqueued, the adapter is in an abnormal 5965 * state, there will be no interrupt to us. 
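		 * The failure is handled below: the command is completed
		 * with CMD_INCOMPLETE and the soft interrupt is triggered
		 * manually so that the completion still gets delivered.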
5966 */ 5967 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q, 5968 slotp->fib_phyaddr, acp->fib_size); 5969 } 5970 5971 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) 5972 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 5973 5974 /* 5975 * NOTE: We send command only when slots availabe, so should never 5976 * reach here. 5977 */ 5978 if (rval != AACOK) { 5979 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed"); 5980 if (acp->pkt) { 5981 acp->pkt->pkt_state &= ~STATE_SENT_CMD; 5982 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0); 5983 } 5984 aac_end_io(softs, acp); 5985 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB))) 5986 ddi_trigger_softintr(softs->softint_id); 5987 } 5988 } 5989 5990 static void 5991 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q) 5992 { 5993 struct aac_cmd *acp, *next_acp; 5994 5995 /* Serve as many waiting io's as possible */ 5996 for (acp = q->q_head; acp; acp = next_acp) { 5997 next_acp = acp->next; 5998 if (aac_bind_io(softs, acp) == AACOK) { 5999 aac_cmd_delete(q, acp); 6000 aac_start_io(softs, acp); 6001 } 6002 if (softs->free_io_slot_head == NULL) 6003 break; 6004 } 6005 } 6006 6007 static void 6008 aac_start_waiting_io(struct aac_softstate *softs) 6009 { 6010 /* 6011 * Sync FIB io is served before async FIB io so that io requests 6012 * sent by interactive userland commands get responded asap. 6013 */ 6014 if (softs->q_wait[AAC_CMDQ_SYNC].q_head) 6015 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]); 6016 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head) 6017 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]); 6018 } 6019 6020 static void 6021 aac_drain_comp_q(struct aac_softstate *softs) 6022 { 6023 struct aac_cmd *acp; 6024 struct scsi_pkt *pkt; 6025 6026 /*CONSTCOND*/ 6027 while (1) { 6028 mutex_enter(&softs->q_comp_mutex); 6029 acp = aac_cmd_dequeue(&softs->q_comp); 6030 mutex_exit(&softs->q_comp_mutex); 6031 if (acp != NULL) { 6032 ASSERT(acp->pkt != NULL); 6033 pkt = acp->pkt; 6034 6035 if (pkt->pkt_reason == CMD_CMPLT) { 6036 /* 6037 * Consistent packets need to be sync'ed first 6038 */ 6039 if ((acp->flags & AAC_CMD_CONSISTENT) && 6040 (acp->flags & AAC_CMD_BUF_READ)) { 6041 if (aac_dma_sync_ac(acp) != AACOK) { 6042 ddi_fm_service_impact( 6043 softs->devinfo_p, 6044 DDI_SERVICE_UNAFFECTED); 6045 pkt->pkt_reason = CMD_TRAN_ERR; 6046 pkt->pkt_statistics = 0; 6047 } 6048 } 6049 if ((aac_check_acc_handle(softs-> \ 6050 comm_space_acc_handle) != DDI_SUCCESS) || 6051 (aac_check_acc_handle(softs-> \ 6052 pci_mem_handle) != DDI_SUCCESS)) { 6053 ddi_fm_service_impact(softs->devinfo_p, 6054 DDI_SERVICE_UNAFFECTED); 6055 ddi_fm_acc_err_clear(softs-> \ 6056 pci_mem_handle, DDI_FME_VER0); 6057 pkt->pkt_reason = CMD_TRAN_ERR; 6058 pkt->pkt_statistics = 0; 6059 } 6060 if (aac_check_dma_handle(softs-> \ 6061 comm_space_dma_handle) != DDI_SUCCESS) { 6062 ddi_fm_service_impact(softs->devinfo_p, 6063 DDI_SERVICE_UNAFFECTED); 6064 pkt->pkt_reason = CMD_TRAN_ERR; 6065 pkt->pkt_statistics = 0; 6066 } 6067 } 6068 scsi_hba_pkt_comp(pkt); 6069 } else { 6070 break; 6071 } 6072 } 6073 } 6074 6075 static int 6076 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp) 6077 { 6078 size_t rlen; 6079 ddi_dma_cookie_t cookie; 6080 uint_t cookien; 6081 6082 /* Allocate FIB dma resource */ 6083 if (ddi_dma_alloc_handle( 6084 softs->devinfo_p, 6085 &softs->addr_dma_attr, 6086 DDI_DMA_SLEEP, 6087 NULL, 6088 &slotp->fib_dma_handle) != DDI_SUCCESS) { 6089 AACDB_PRINT(softs, CE_WARN, 6090 "Cannot alloc dma handle for 
slot fib area"); 6091 goto error; 6092 } 6093 if (ddi_dma_mem_alloc( 6094 slotp->fib_dma_handle, 6095 softs->aac_max_fib_size, 6096 &softs->acc_attr, 6097 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 6098 DDI_DMA_SLEEP, 6099 NULL, 6100 (caddr_t *)&slotp->fibp, 6101 &rlen, 6102 &slotp->fib_acc_handle) != DDI_SUCCESS) { 6103 AACDB_PRINT(softs, CE_WARN, 6104 "Cannot alloc mem for slot fib area"); 6105 goto error; 6106 } 6107 if (ddi_dma_addr_bind_handle( 6108 slotp->fib_dma_handle, 6109 NULL, 6110 (caddr_t)slotp->fibp, 6111 softs->aac_max_fib_size, 6112 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 6113 DDI_DMA_SLEEP, 6114 NULL, 6115 &cookie, 6116 &cookien) != DDI_DMA_MAPPED) { 6117 AACDB_PRINT(softs, CE_WARN, 6118 "dma bind failed for slot fib area"); 6119 goto error; 6120 } 6121 6122 /* Check dma handles allocated in fib attach */ 6123 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) { 6124 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 6125 goto error; 6126 } 6127 6128 /* Check acc handles allocated in fib attach */ 6129 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) { 6130 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 6131 goto error; 6132 } 6133 6134 slotp->fib_phyaddr = cookie.dmac_laddress; 6135 return (AACOK); 6136 6137 error: 6138 if (slotp->fib_acc_handle) { 6139 ddi_dma_mem_free(&slotp->fib_acc_handle); 6140 slotp->fib_acc_handle = NULL; 6141 } 6142 if (slotp->fib_dma_handle) { 6143 ddi_dma_free_handle(&slotp->fib_dma_handle); 6144 slotp->fib_dma_handle = NULL; 6145 } 6146 return (AACERR); 6147 } 6148 6149 static void 6150 aac_free_fib(struct aac_slot *slotp) 6151 { 6152 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle); 6153 ddi_dma_mem_free(&slotp->fib_acc_handle); 6154 slotp->fib_acc_handle = NULL; 6155 ddi_dma_free_handle(&slotp->fib_dma_handle); 6156 slotp->fib_dma_handle = NULL; 6157 slotp->fib_phyaddr = 0; 6158 } 6159 6160 static void 6161 aac_alloc_fibs(struct aac_softstate *softs) 6162 { 6163 int i; 6164 struct aac_slot *slotp; 6165 6166 for (i = 0; i < softs->total_slots && 6167 softs->total_fibs < softs->total_slots; i++) { 6168 slotp = &(softs->io_slot[i]); 6169 if (slotp->fib_phyaddr) 6170 continue; 6171 if (aac_alloc_fib(softs, slotp) != AACOK) 6172 break; 6173 6174 /* Insert the slot to the free slot list */ 6175 aac_release_slot(softs, slotp); 6176 softs->total_fibs++; 6177 } 6178 } 6179 6180 static void 6181 aac_destroy_fibs(struct aac_softstate *softs) 6182 { 6183 struct aac_slot *slotp; 6184 6185 while ((slotp = softs->free_io_slot_head) != NULL) { 6186 ASSERT(slotp->fib_phyaddr); 6187 softs->free_io_slot_head = slotp->next; 6188 aac_free_fib(slotp); 6189 ASSERT(slotp->index == (slotp - softs->io_slot)); 6190 softs->total_fibs--; 6191 } 6192 ASSERT(softs->total_fibs == 0); 6193 } 6194 6195 static int 6196 aac_create_slots(struct aac_softstate *softs) 6197 { 6198 int i; 6199 6200 softs->total_slots = softs->aac_max_fibs; 6201 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \ 6202 softs->total_slots, KM_SLEEP); 6203 if (softs->io_slot == NULL) { 6204 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot"); 6205 return (AACERR); 6206 } 6207 for (i = 0; i < softs->total_slots; i++) 6208 softs->io_slot[i].index = i; 6209 softs->free_io_slot_head = NULL; 6210 softs->total_fibs = 0; 6211 return (AACOK); 6212 } 6213 6214 static void 6215 aac_destroy_slots(struct aac_softstate *softs) 6216 { 6217 ASSERT(softs->free_io_slot_head == NULL); 6218 6219 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \ 6220 softs->total_slots); 
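	/* Clear the slot bookkeeping so nothing references the freed array */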
6221 softs->io_slot = NULL; 6222 softs->total_slots = 0; 6223 } 6224 6225 struct aac_slot * 6226 aac_get_slot(struct aac_softstate *softs) 6227 { 6228 struct aac_slot *slotp; 6229 6230 if ((slotp = softs->free_io_slot_head) != NULL) { 6231 softs->free_io_slot_head = slotp->next; 6232 slotp->next = NULL; 6233 } 6234 return (slotp); 6235 } 6236 6237 static void 6238 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp) 6239 { 6240 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots)); 6241 ASSERT(slotp == &softs->io_slot[slotp->index]); 6242 6243 slotp->acp = NULL; 6244 slotp->next = softs->free_io_slot_head; 6245 softs->free_io_slot_head = slotp; 6246 } 6247 6248 int 6249 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp) 6250 { 6251 if (aac_bind_io(softs, acp) == AACOK) 6252 aac_start_io(softs, acp); 6253 else 6254 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp); 6255 6256 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) 6257 return (TRAN_ACCEPT); 6258 /* 6259 * Because sync FIB is always 512 bytes and used for critical 6260 * functions, async FIB is used for poll IO. 6261 */ 6262 if (acp->flags & AAC_CMD_NO_INTR) { 6263 if (aac_do_poll_io(softs, acp) == AACOK) 6264 return (TRAN_ACCEPT); 6265 } else { 6266 if (aac_do_sync_io(softs, acp) == AACOK) 6267 return (TRAN_ACCEPT); 6268 } 6269 return (TRAN_BADPKT); 6270 } 6271 6272 static int 6273 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp) 6274 { 6275 int (*intr_handler)(struct aac_softstate *); 6276 6277 /* 6278 * Interrupt is disabled, we have to poll the adapter by ourselves. 6279 */ 6280 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 6281 aac_process_intr_new : aac_process_intr_old; 6282 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) { 6283 int i = AAC_POLL_TIME * 1000; 6284 6285 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i); 6286 if (i == 0) 6287 aac_cmd_timeout(softs, acp); 6288 } 6289 6290 ddi_trigger_softintr(softs->softint_id); 6291 6292 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR)) 6293 return (AACOK); 6294 return (AACERR); 6295 } 6296 6297 static int 6298 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp) 6299 { 6300 ASSERT(softs && acp); 6301 6302 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) 6303 cv_wait(&softs->event, &softs->io_lock); 6304 6305 if (acp->flags & AAC_CMD_CMPLT) 6306 return (AACOK); 6307 return (AACERR); 6308 } 6309 6310 static int 6311 aac_dma_sync_ac(struct aac_cmd *acp) 6312 { 6313 if (acp->buf_dma_handle) { 6314 if (acp->flags & AAC_CMD_BUF_WRITE) { 6315 if (acp->abp != NULL) 6316 ddi_rep_put8(acp->abh, 6317 (uint8_t *)acp->bp->b_un.b_addr, 6318 (uint8_t *)acp->abp, acp->bp->b_bcount, 6319 DDI_DEV_AUTOINCR); 6320 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6321 DDI_DMA_SYNC_FORDEV); 6322 } else { 6323 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6324 DDI_DMA_SYNC_FORCPU); 6325 if (aac_check_dma_handle(acp->buf_dma_handle) != 6326 DDI_SUCCESS) 6327 return (AACERR); 6328 if (acp->abp != NULL) 6329 ddi_rep_get8(acp->abh, 6330 (uint8_t *)acp->bp->b_un.b_addr, 6331 (uint8_t *)acp->abp, acp->bp->b_bcount, 6332 DDI_DEV_AUTOINCR); 6333 } 6334 } 6335 return (AACOK); 6336 } 6337 6338 /* 6339 * Copy AIF from adapter to the empty AIF slot and inform AIF threads 6340 */ 6341 static void 6342 aac_save_aif(struct aac_softstate *softs, ddi_acc_handle_t acc, 6343 struct aac_fib *fibp0, int fib_size0) 6344 { 6345 struct aac_fib *fibp; /* FIB in AIF queue */ 6346 int fib_size; 6347 
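	/*
	 * softs->aifq is a fixed ring of AAC_AIFQ_LENGTH FIBs guarded by
	 * aifq_mutex; aifq_idx points at the entry to be filled next.
	 */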
uint16_t fib_command; 6348 int current, next; 6349 6350 /* Ignore non AIF messages */ 6351 fib_command = ddi_get16(acc, &fibp0->Header.Command); 6352 if (fib_command != AifRequest) { 6353 cmn_err(CE_WARN, "!Unknown command from controller"); 6354 return; 6355 } 6356 6357 mutex_enter(&softs->aifq_mutex); 6358 6359 /* Save AIF */ 6360 fibp = &softs->aifq[softs->aifq_idx].d; 6361 fib_size = (fib_size0 > AAC_FIB_SIZE) ? AAC_FIB_SIZE : fib_size0; 6362 ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0, fib_size, 6363 DDI_DEV_AUTOINCR); 6364 6365 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 6366 ddi_fm_service_impact(softs->devinfo_p, 6367 DDI_SERVICE_UNAFFECTED); 6368 mutex_exit(&softs->aifq_mutex); 6369 return; 6370 } 6371 6372 AACDB_PRINT_AIF(softs, (struct aac_aif_command *)&fibp->data[0]); 6373 6374 /* Modify AIF contexts */ 6375 current = softs->aifq_idx; 6376 next = (current + 1) % AAC_AIFQ_LENGTH; 6377 if (next == 0) { 6378 struct aac_fib_context *ctx_p; 6379 6380 softs->aifq_wrap = 1; 6381 for (ctx_p = softs->fibctx_p; ctx_p; ctx_p = ctx_p->next) { 6382 if (next == ctx_p->ctx_idx) { 6383 ctx_p->ctx_flags |= AAC_CTXFLAG_FILLED; 6384 } else if (current == ctx_p->ctx_idx && 6385 (ctx_p->ctx_flags & AAC_CTXFLAG_FILLED)) { 6386 ctx_p->ctx_idx = next; 6387 ctx_p->ctx_overrun++; 6388 } 6389 } 6390 } 6391 softs->aifq_idx = next; 6392 6393 /* Wakeup AIF threads */ 6394 cv_broadcast(&softs->aifq_cv); 6395 mutex_exit(&softs->aifq_mutex); 6396 6397 /* Wakeup event thread to handle aif */ 6398 aac_event_disp(softs, AAC_EVENT_AIF); 6399 } 6400 6401 static int 6402 aac_return_aif_common(struct aac_softstate *softs, struct aac_fib_context *ctx, 6403 struct aac_fib **fibpp) 6404 { 6405 int current; 6406 6407 current = ctx->ctx_idx; 6408 if (current == softs->aifq_idx && 6409 !(ctx->ctx_flags & AAC_CTXFLAG_FILLED)) 6410 return (EAGAIN); /* Empty */ 6411 6412 *fibpp = &softs->aifq[current].d; 6413 6414 ctx->ctx_flags &= ~AAC_CTXFLAG_FILLED; 6415 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 6416 return (0); 6417 } 6418 6419 int 6420 aac_return_aif(struct aac_softstate *softs, struct aac_fib_context *ctx, 6421 struct aac_fib **fibpp) 6422 { 6423 int rval; 6424 6425 mutex_enter(&softs->aifq_mutex); 6426 rval = aac_return_aif_common(softs, ctx, fibpp); 6427 mutex_exit(&softs->aifq_mutex); 6428 return (rval); 6429 } 6430 6431 int 6432 aac_return_aif_wait(struct aac_softstate *softs, struct aac_fib_context *ctx, 6433 struct aac_fib **fibpp) 6434 { 6435 int rval; 6436 6437 mutex_enter(&softs->aifq_mutex); 6438 rval = aac_return_aif_common(softs, ctx, fibpp); 6439 if (rval == EAGAIN) { 6440 AACDB_PRINT(softs, CE_NOTE, "Waiting for AIF"); 6441 rval = cv_wait_sig(&softs->aifq_cv, &softs->aifq_mutex); 6442 } 6443 mutex_exit(&softs->aifq_mutex); 6444 return ((rval > 0) ? 0 : EINTR); 6445 } 6446 6447 /* 6448 * The following function comes from Adaptec: 6449 * 6450 * When driver sees a particular event that means containers are changed, it 6451 * will rescan containers. However a change may not be complete until some 6452 * other event is received. For example, creating or deleting an array will 6453 * incur as many as six AifEnConfigChange events which would generate six 6454 * container rescans. To diminish rescans, driver set a flag to wait for 6455 * another particular event. When sees that events come in, it will do rescan. 
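 * The awaited event type is kept in softs->devcfg_wait_on and checked by
 * aac_handle_aif() below before the rescan is done via aac_probe_containers().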
6456 */ 6457 static int 6458 aac_handle_aif(struct aac_softstate *softs, struct aac_aif_command *aif) 6459 { 6460 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 6461 int en_type; 6462 int devcfg_needed; 6463 int cid; 6464 uint32_t bus_id, tgt_id; 6465 enum aac_cfg_event event = AAC_CFG_NULL_EXIST; 6466 6467 devcfg_needed = 0; 6468 en_type = LE_32((uint32_t)aif->data.EN.type); 6469 6470 switch (LE_32((uint32_t)aif->command)) { 6471 case AifCmdDriverNotify: { 6472 cid = LE_32(aif->data.EN.data.ECC.container[0]); 6473 6474 switch (en_type) { 6475 case AifDenMorphComplete: 6476 case AifDenVolumeExtendComplete: 6477 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev)) 6478 softs->devcfg_wait_on = AifEnConfigChange; 6479 break; 6480 } 6481 if (softs->devcfg_wait_on == en_type) 6482 devcfg_needed = 1; 6483 break; 6484 } 6485 6486 case AifCmdEventNotify: 6487 cid = LE_32(aif->data.EN.data.ECC.container[0]); 6488 switch (en_type) { 6489 case AifEnAddContainer: 6490 case AifEnDeleteContainer: 6491 softs->devcfg_wait_on = AifEnConfigChange; 6492 break; 6493 case AifEnContainerChange: 6494 if (!softs->devcfg_wait_on) 6495 softs->devcfg_wait_on = AifEnConfigChange; 6496 break; 6497 case AifEnContainerEvent: 6498 if (ddi_get32(acc, &aif-> \ 6499 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE) 6500 devcfg_needed = 1; 6501 break; 6502 case AifEnAddJBOD: 6503 if (!(softs->flags & AAC_FLAGS_JBOD)) 6504 return (AACERR); 6505 event = AAC_CFG_ADD; 6506 bus_id = (cid >> 24) & 0xf; 6507 tgt_id = cid & 0xffff; 6508 break; 6509 case AifEnDeleteJBOD: 6510 if (!(softs->flags & AAC_FLAGS_JBOD)) 6511 return (AACERR); 6512 event = AAC_CFG_DELETE; 6513 bus_id = (cid >> 24) & 0xf; 6514 tgt_id = cid & 0xffff; 6515 break; 6516 } 6517 if (softs->devcfg_wait_on == en_type) 6518 devcfg_needed = 1; 6519 break; 6520 6521 case AifCmdJobProgress: 6522 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) { 6523 int pr_status; 6524 uint32_t pr_ftick, pr_ctick; 6525 6526 pr_status = LE_32((uint32_t)aif->data.PR[0].status); 6527 pr_ctick = LE_32(aif->data.PR[0].currentTick); 6528 pr_ftick = LE_32(aif->data.PR[0].finalTick); 6529 6530 if ((pr_ctick == pr_ftick) || 6531 (pr_status == AifJobStsSuccess)) 6532 softs->devcfg_wait_on = AifEnContainerChange; 6533 else if ((pr_ctick == 0) && 6534 (pr_status == AifJobStsRunning)) 6535 softs->devcfg_wait_on = AifEnContainerChange; 6536 } 6537 break; 6538 } 6539 6540 if (devcfg_needed) { 6541 softs->devcfg_wait_on = 0; 6542 (void) aac_probe_containers(softs); 6543 } 6544 6545 if (event != AAC_CFG_NULL_EXIST) { 6546 ASSERT(en_type == AifEnAddJBOD || en_type == AifEnDeleteJBOD); 6547 (void) aac_probe_jbod(softs, 6548 AAC_P2VTGT(softs, bus_id, tgt_id), event); 6549 } 6550 return (AACOK); 6551 } 6552 6553 6554 /* 6555 * Check and handle AIF events 6556 */ 6557 static void 6558 aac_aif_event(struct aac_softstate *softs) 6559 { 6560 struct aac_fib *fibp; 6561 6562 /*CONSTCOND*/ 6563 while (1) { 6564 if (aac_return_aif(softs, &softs->aifctx, &fibp) != 0) 6565 break; /* No more AIFs to handle, end loop */ 6566 6567 /* AIF overrun, array create/delete may missed. 
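		 * Nothing is recovered here; the overrun counter is simply
		 * cleared and normal AIF handling continues.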
*/ 6568 if (softs->aifctx.ctx_overrun) { 6569 softs->aifctx.ctx_overrun = 0; 6570 } 6571 6572 /* AIF received, handle it */ 6573 struct aac_aif_command *aifp = 6574 (struct aac_aif_command *)&fibp->data[0]; 6575 uint32_t aif_command = LE_32((uint32_t)aifp->command); 6576 6577 if (aif_command == AifCmdDriverNotify || 6578 aif_command == AifCmdEventNotify || 6579 aif_command == AifCmdJobProgress) 6580 (void) aac_handle_aif(softs, aifp); 6581 } 6582 } 6583 6584 /* 6585 * Timeout recovery 6586 */ 6587 /*ARGSUSED*/ 6588 static void 6589 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp) 6590 { 6591 #ifdef DEBUG 6592 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT; 6593 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp); 6594 AACDB_PRINT_FIB(softs, acp->slotp); 6595 #endif 6596 6597 /* 6598 * Besides firmware in an unhealthy state, an overloaded 6599 * adapter may also incur pkt timeouts. 6600 * An adapter with a slower IOP may take longer than 60 seconds 6601 * to process commands, for example while it is performing a 6602 * build on a RAID-5 volume, so longer completion 6603 * times should be 6604 * tolerated. 6605 */ 6606 switch (aac_do_reset(softs)) { 6607 case AAC_IOP_RESET_SUCCEED: 6608 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET); 6609 aac_start_waiting_io(softs); 6610 break; 6611 case AAC_IOP_RESET_FAILED: 6612 /* Abort all waiting cmds when adapter is dead */ 6613 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT); 6614 break; 6615 case AAC_IOP_RESET_ABNORMAL: 6616 aac_start_waiting_io(softs); 6617 } 6618 } 6619 6620 /* 6621 * The following function comes from Adaptec: 6622 * 6623 * Time sync. command added to synchronize time with firmware every 30 6624 * minutes (required for correct AIF timestamps etc.) 6625 */ 6626 static void 6627 aac_sync_tick(struct aac_softstate *softs) 6628 { 6629 ddi_acc_handle_t acc; 6630 int rval; 6631 6632 mutex_enter(&softs->time_mutex); 6633 ASSERT(softs->time_sync <= softs->timebase); 6634 softs->time_sync = 0; 6635 mutex_exit(&softs->time_mutex); 6636 6637 /* Time sync. with firmware every AAC_SYNC_TICK */ 6638 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 6639 acc = softs->sync_ac.slotp->fib_acc_handle; 6640 6641 ddi_put32(acc, (void *)&softs->sync_ac.slotp->fibp->data[0], 6642 ddi_get_time()); 6643 rval = aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t)); 6644 aac_sync_fib_slot_release(softs, &softs->sync_ac); 6645 6646 mutex_enter(&softs->time_mutex); 6647 softs->time_sync = softs->timebase; 6648 if (rval != AACOK) 6649 /* retry shortly */ 6650 softs->time_sync += aac_tick << 1; 6651 else 6652 softs->time_sync += AAC_SYNC_TICK; 6653 mutex_exit(&softs->time_mutex); 6654 } 6655 6656 /* 6657 * Timeout checking and handling 6658 */ 6659 static void 6660 aac_daemon(struct aac_softstate *softs) 6661 { 6662 int time_out; /* set if timeout happened */ 6663 int time_adjust; 6664 uint32_t softs_timebase; 6665 6666 mutex_enter(&softs->time_mutex); 6667 ASSERT(softs->time_out <= softs->timebase); 6668 softs->time_out = 0; 6669 softs_timebase = softs->timebase; 6670 mutex_exit(&softs->time_mutex); 6671 6672 /* Check slots for timeout pkts */ 6673 time_adjust = 0; 6674 do { 6675 struct aac_cmd *acp; 6676 6677 time_out = 0; 6678 for (acp = softs->q_busy.q_head; acp; acp = acp->next) { 6679 if (acp->timeout == 0) 6680 continue; 6681 6682 /* 6683 * If a timeout happened, push out the deadlines of the 6684 * remaining outstanding cmds so they are checked again later.
6685 */ 6686 if (time_adjust) { 6687 acp->timeout += time_adjust; 6688 continue; 6689 } 6690 6691 if (acp->timeout <= softs_timebase) { 6692 aac_cmd_timeout(softs, acp); 6693 time_out = 1; 6694 time_adjust = aac_tick * drv_usectohz(1000000); 6695 break; /* timeout happened */ 6696 } else { 6697 break; /* no timeout */ 6698 } 6699 } 6700 } while (time_out); 6701 6702 mutex_enter(&softs->time_mutex); 6703 softs->time_out = softs->timebase + aac_tick; 6704 mutex_exit(&softs->time_mutex); 6705 } 6706 6707 /* 6708 * The event thread handles various tasks serially on behalf of the other 6709 * parts of the driver, so that they can run fast. 6710 */ 6711 static void 6712 aac_event_thread(struct aac_softstate *softs) 6713 { 6714 int run = 1; 6715 6716 DBCALLED(softs, 1); 6717 6718 mutex_enter(&softs->ev_lock); 6719 while (run) { 6720 int events; 6721 6722 if ((events = softs->events) == 0) { 6723 cv_wait(&softs->event_disp_cv, &softs->ev_lock); 6724 events = softs->events; 6725 } 6726 softs->events = 0; 6727 mutex_exit(&softs->ev_lock); 6728 6729 mutex_enter(&softs->io_lock); 6730 if ((softs->state & AAC_STATE_RUN) && 6731 (softs->state & AAC_STATE_DEAD) == 0) { 6732 if (events & AAC_EVENT_TIMEOUT) 6733 aac_daemon(softs); 6734 if (events & AAC_EVENT_SYNCTICK) 6735 aac_sync_tick(softs); 6736 if (events & AAC_EVENT_AIF) 6737 aac_aif_event(softs); 6738 } else { 6739 run = 0; 6740 } 6741 mutex_exit(&softs->io_lock); 6742 6743 mutex_enter(&softs->ev_lock); 6744 } 6745 6746 cv_signal(&softs->event_wait_cv); 6747 mutex_exit(&softs->ev_lock); 6748 } 6749 6750 /* 6751 * Internal timer. It is only responsible for counting time and reporting 6752 * time-related events. Event handling is done by aac_event_thread(), so that 6753 * the timer itself can be as precise as possible.
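 * The timer re-arms itself via timeout(9F) every aac_tick seconds, unless it
 * is being stopped, and merely raises event bits with aac_event_disp().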
6754 */ 6755 static void 6756 aac_timer(void *arg) 6757 { 6758 struct aac_softstate *softs = arg; 6759 int events = 0; 6760 6761 mutex_enter(&softs->time_mutex); 6762 6763 /* If timer is being stopped, exit */ 6764 if (softs->timeout_id) { 6765 softs->timeout_id = timeout(aac_timer, (void *)softs, 6766 (aac_tick * drv_usectohz(1000000))); 6767 } else { 6768 mutex_exit(&softs->time_mutex); 6769 return; 6770 } 6771 6772 /* Time counting */ 6773 softs->timebase += aac_tick; 6774 6775 /* Check time related events */ 6776 if (softs->time_out && softs->time_out <= softs->timebase) 6777 events |= AAC_EVENT_TIMEOUT; 6778 if (softs->time_sync && softs->time_sync <= softs->timebase) 6779 events |= AAC_EVENT_SYNCTICK; 6780 6781 mutex_exit(&softs->time_mutex); 6782 6783 if (events) 6784 aac_event_disp(softs, events); 6785 } 6786 6787 /* 6788 * Dispatch events to daemon thread for handling 6789 */ 6790 static void 6791 aac_event_disp(struct aac_softstate *softs, int events) 6792 { 6793 mutex_enter(&softs->ev_lock); 6794 softs->events |= events; 6795 cv_broadcast(&softs->event_disp_cv); 6796 mutex_exit(&softs->ev_lock); 6797 } 6798 6799 /* 6800 * Architecture dependent functions 6801 */ 6802 static int 6803 aac_rx_get_fwstatus(struct aac_softstate *softs) 6804 { 6805 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6806 } 6807 6808 static int 6809 aac_rx_get_mailbox(struct aac_softstate *softs, int mb) 6810 { 6811 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4)); 6812 } 6813 6814 static void 6815 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6816 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6817 { 6818 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd); 6819 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0); 6820 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1); 6821 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2); 6822 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3); 6823 } 6824 6825 static int 6826 aac_rkt_get_fwstatus(struct aac_softstate *softs) 6827 { 6828 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6829 } 6830 6831 static int 6832 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb) 6833 { 6834 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4)); 6835 } 6836 6837 static void 6838 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6839 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6840 { 6841 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd); 6842 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0); 6843 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1); 6844 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2); 6845 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3); 6846 } 6847 6848 /* 6849 * cb_ops functions 6850 */ 6851 static int 6852 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred) 6853 { 6854 struct aac_softstate *softs; 6855 int minor0, minor; 6856 int instance; 6857 6858 DBCALLED(NULL, 2); 6859 6860 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6861 return (EINVAL); 6862 6863 minor0 = getminor(*devp); 6864 minor = AAC_SCSA_MINOR(minor0); 6865 6866 if (AAC_IS_SCSA_NODE(minor)) 6867 return (scsi_hba_open(devp, flag, otyp, cred)); 6868 6869 instance = MINOR2INST(minor0); 6870 if (instance >= AAC_MAX_ADAPTERS) 6871 return (ENXIO); 6872 6873 softs = ddi_get_soft_state(aac_softstatep, instance); 6874 if (softs == NULL) 6875 return (ENXIO); 6876 6877 return (0); 6878 } 6879 6880 /*ARGSUSED*/ 6881 static int 6882 aac_close(dev_t dev, int flag, int otyp, cred_t *cred) 6883 { 6884 int minor0, minor; 6885 int instance; 6886 6887 DBCALLED(NULL, 2); 6888 6889 if (otyp != 
OTYP_BLK && otyp != OTYP_CHR) 6890 return (EINVAL); 6891 6892 minor0 = getminor(dev); 6893 minor = AAC_SCSA_MINOR(minor0); 6894 6895 if (AAC_IS_SCSA_NODE(minor)) 6896 return (scsi_hba_close(dev, flag, otyp, cred)); 6897 6898 instance = MINOR2INST(minor0); 6899 if (instance >= AAC_MAX_ADAPTERS) 6900 return (ENXIO); 6901 6902 return (0); 6903 } 6904 6905 static int 6906 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, 6907 int *rval_p) 6908 { 6909 struct aac_softstate *softs; 6910 int minor0, minor; 6911 int instance; 6912 6913 DBCALLED(NULL, 2); 6914 6915 if (drv_priv(cred_p) != 0) 6916 return (EPERM); 6917 6918 minor0 = getminor(dev); 6919 minor = AAC_SCSA_MINOR(minor0); 6920 6921 if (AAC_IS_SCSA_NODE(minor)) 6922 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p)); 6923 6924 instance = MINOR2INST(minor0); 6925 if (instance < AAC_MAX_ADAPTERS) { 6926 softs = ddi_get_soft_state(aac_softstatep, instance); 6927 return (aac_do_ioctl(softs, dev, cmd, arg, flag)); 6928 } 6929 return (ENXIO); 6930 } 6931 6932 /* 6933 * The IO fault service error handling callback function 6934 */ 6935 /*ARGSUSED*/ 6936 static int 6937 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6938 { 6939 /* 6940 * as the driver can always deal with an error in any dma or 6941 * access handle, we can just return the fme_status value. 6942 */ 6943 pci_ereport_post(dip, err, NULL); 6944 return (err->fme_status); 6945 } 6946 6947 /* 6948 * aac_fm_init - initialize fma capabilities and register with IO 6949 * fault services. 6950 */ 6951 static void 6952 aac_fm_init(struct aac_softstate *softs) 6953 { 6954 /* 6955 * Need to change iblock to priority for new MSI intr 6956 */ 6957 ddi_iblock_cookie_t fm_ibc; 6958 6959 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p, 6960 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 6961 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 6962 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 6963 6964 /* Only register with IO Fault Services if we have some capability */ 6965 if (softs->fm_capabilities) { 6966 /* Adjust access and dma attributes for FMA */ 6967 softs->reg_attr.devacc_attr_access = DDI_FLAGERR_ACC; 6968 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6969 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6970 6971 /* 6972 * Register capabilities with IO Fault Services. 6973 * fm_capabilities will be updated to indicate 6974 * capabilities actually supported (not requested.) 6975 */ 6976 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc); 6977 6978 /* 6979 * Initialize pci ereport capabilities if ereport 6980 * capable (should always be.) 6981 */ 6982 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6983 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6984 pci_ereport_setup(softs->devinfo_p); 6985 } 6986 6987 /* 6988 * Register error callback if error callback capable. 6989 */ 6990 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6991 ddi_fm_handler_register(softs->devinfo_p, 6992 aac_fm_error_cb, (void *) softs); 6993 } 6994 } 6995 } 6996 6997 /* 6998 * aac_fm_fini - Releases fma capabilities and un-registers with IO 6999 * fault services. 7000 */ 7001 static void 7002 aac_fm_fini(struct aac_softstate *softs) 7003 { 7004 /* Only unregister FMA capabilities if registered */ 7005 if (softs->fm_capabilities) { 7006 /* 7007 * Un-register error callback if error callback capable. 
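		 * The handler being removed is the one installed with
		 * ddi_fm_handler_register() in aac_fm_init().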
7008 */ 7009 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 7010 ddi_fm_handler_unregister(softs->devinfo_p); 7011 } 7012 7013 /* 7014 * Release any resources allocated by pci_ereport_setup() 7015 */ 7016 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 7017 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 7018 pci_ereport_teardown(softs->devinfo_p); 7019 } 7020 7021 /* Unregister from IO Fault Services */ 7022 ddi_fm_fini(softs->devinfo_p); 7023 7024 /* Adjust access and dma attributes for FMA */ 7025 softs->reg_attr.devacc_attr_access = DDI_DEFAULT_ACC; 7026 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 7027 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 7028 } 7029 } 7030 7031 int 7032 aac_check_acc_handle(ddi_acc_handle_t handle) 7033 { 7034 ddi_fm_error_t de; 7035 7036 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 7037 return (de.fme_status); 7038 } 7039 7040 int 7041 aac_check_dma_handle(ddi_dma_handle_t handle) 7042 { 7043 ddi_fm_error_t de; 7044 7045 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 7046 return (de.fme_status); 7047 } 7048 7049 void 7050 aac_fm_ereport(struct aac_softstate *softs, char *detail) 7051 { 7052 uint64_t ena; 7053 char buf[FM_MAX_CLASS]; 7054 7055 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 7056 ena = fm_ena_generate(0, FM_ENA_FMT1); 7057 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) { 7058 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP, 7059 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 7060 } 7061 } 7062 7063 /* 7064 * Autoconfiguration support 7065 */ 7066 static int 7067 aac_parse_devname(char *devnm, int *tgt, int *lun) 7068 { 7069 char devbuf[SCSI_MAXNAMELEN]; 7070 char *addr; 7071 char *p, *tp, *lp; 7072 long num; 7073 7074 /* Parse dev name and address */ 7075 (void) strcpy(devbuf, devnm); 7076 addr = ""; 7077 for (p = devbuf; *p != '\0'; p++) { 7078 if (*p == '@') { 7079 addr = p + 1; 7080 *p = '\0'; 7081 } else if (*p == ':') { 7082 *p = '\0'; 7083 break; 7084 } 7085 } 7086 7087 /* Parse taget and lun */ 7088 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 7089 if (*p == ',') { 7090 lp = p + 1; 7091 *p = '\0'; 7092 break; 7093 } 7094 } 7095 if (tgt && tp) { 7096 if (ddi_strtol(tp, NULL, 0x10, &num)) 7097 return (AACERR); 7098 *tgt = (int)num; 7099 } 7100 if (lun && lp) { 7101 if (ddi_strtol(lp, NULL, 0x10, &num)) 7102 return (AACERR); 7103 *lun = (int)num; 7104 } 7105 return (AACOK); 7106 } 7107 7108 static dev_info_t * 7109 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun) 7110 { 7111 dev_info_t *child = NULL; 7112 char addr[SCSI_MAXNAMELEN]; 7113 char tmp[MAXNAMELEN]; 7114 7115 if (tgt < AAC_MAX_LD) { 7116 if (lun == 0) { 7117 struct aac_device *dvp = &softs->containers[tgt].dev; 7118 7119 child = dvp->dip; 7120 } 7121 } else { 7122 (void) sprintf(addr, "%x,%x", tgt, lun); 7123 for (child = ddi_get_child(softs->devinfo_p); 7124 child; child = ddi_get_next_sibling(child)) { 7125 /* We don't care about non-persistent node */ 7126 if (ndi_dev_is_persistent_node(child) == 0) 7127 continue; 7128 7129 if (aac_name_node(child, tmp, MAXNAMELEN) != 7130 DDI_SUCCESS) 7131 continue; 7132 if (strcmp(addr, tmp) == 0) 7133 break; 7134 } 7135 } 7136 return (child); 7137 } 7138 7139 static int 7140 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd, 7141 dev_info_t **dipp) 7142 { 7143 char *nodename = NULL; 7144 char **compatible = NULL; 7145 int ncompatible = 0; 7146 char *childname; 7147 dev_info_t *ldip = NULL; 7148 int tgt = 
sd->sd_address.a_target; 7149 int lun = sd->sd_address.a_lun; 7150 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 7151 int rval; 7152 7153 DBCALLED(softs, 2); 7154 7155 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 7156 NULL, &nodename, &compatible, &ncompatible); 7157 if (nodename == NULL) { 7158 AACDB_PRINT(softs, CE_WARN, 7159 "found no comptible driver for t%dL%d", tgt, lun); 7160 rval = NDI_FAILURE; 7161 goto finish; 7162 } 7163 childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename; 7164 7165 /* Create dev node */ 7166 rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID, 7167 &ldip); 7168 if (rval == NDI_SUCCESS) { 7169 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) 7170 != DDI_PROP_SUCCESS) { 7171 AACDB_PRINT(softs, CE_WARN, "unable to create " 7172 "property for t%dL%d (target)", tgt, lun); 7173 rval = NDI_FAILURE; 7174 goto finish; 7175 } 7176 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) 7177 != DDI_PROP_SUCCESS) { 7178 AACDB_PRINT(softs, CE_WARN, "unable to create " 7179 "property for t%dL%d (lun)", tgt, lun); 7180 rval = NDI_FAILURE; 7181 goto finish; 7182 } 7183 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 7184 "compatible", compatible, ncompatible) 7185 != DDI_PROP_SUCCESS) { 7186 AACDB_PRINT(softs, CE_WARN, "unable to create " 7187 "property for t%dL%d (compatible)", tgt, lun); 7188 rval = NDI_FAILURE; 7189 goto finish; 7190 } 7191 7192 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 7193 if (rval != NDI_SUCCESS) { 7194 AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d", 7195 tgt, lun); 7196 ndi_prop_remove_all(ldip); 7197 (void) ndi_devi_free(ldip); 7198 } 7199 } 7200 finish: 7201 if (dipp) 7202 *dipp = ldip; 7203 7204 scsi_hba_nodename_compatible_free(nodename, compatible); 7205 return (rval); 7206 } 7207 7208 /*ARGSUSED*/ 7209 static int 7210 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd) 7211 { 7212 int tgt = sd->sd_address.a_target; 7213 int lun = sd->sd_address.a_lun; 7214 7215 DBCALLED(softs, 2); 7216 7217 if (tgt < AAC_MAX_LD) { 7218 enum aac_cfg_event event; 7219 7220 if (lun == 0) { 7221 mutex_enter(&softs->io_lock); 7222 event = aac_probe_container(softs, tgt); 7223 mutex_exit(&softs->io_lock); 7224 if ((event != AAC_CFG_NULL_NOEXIST) && 7225 (event != AAC_CFG_DELETE)) { 7226 if (scsi_hba_probe(sd, NULL) == 7227 SCSIPROBE_EXISTS) 7228 return (NDI_SUCCESS); 7229 } 7230 } 7231 return (NDI_FAILURE); 7232 } else { 7233 int dtype; 7234 int qual; /* device qualifier */ 7235 7236 if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS) 7237 return (NDI_FAILURE); 7238 7239 dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 7240 qual = dtype >> 5; 7241 7242 AACDB_PRINT(softs, CE_NOTE, 7243 "Phys. 
device found: tgt %d dtype %d: %s", 7244 tgt, dtype, sd->sd_inq->inq_vid); 7245 7246 /* Only non-DASD and JBOD mode DASD are allowed exposed */ 7247 if (dtype == DTYPE_RODIRECT /* CDROM */ || 7248 dtype == DTYPE_SEQUENTIAL /* TAPE */ || 7249 dtype == DTYPE_ESI /* SES */) { 7250 if (!(softs->flags & AAC_FLAGS_NONDASD)) 7251 return (NDI_FAILURE); 7252 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt); 7253 7254 } else if (dtype == DTYPE_DIRECT) { 7255 if (!(softs->flags & AAC_FLAGS_JBOD) || qual != 0) 7256 return (NDI_FAILURE); 7257 AACDB_PRINT(softs, CE_NOTE, "JBOD DASD %d found", tgt); 7258 } 7259 7260 mutex_enter(&softs->io_lock); 7261 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID; 7262 mutex_exit(&softs->io_lock); 7263 return (NDI_SUCCESS); 7264 } 7265 } 7266 7267 static int 7268 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun, 7269 dev_info_t **ldip) 7270 { 7271 struct scsi_device sd; 7272 dev_info_t *child; 7273 int rval; 7274 7275 DBCALLED(softs, 2); 7276 7277 if ((child = aac_find_child(softs, tgt, lun)) != NULL) { 7278 if (ldip) 7279 *ldip = child; 7280 return (NDI_SUCCESS); 7281 } 7282 7283 bzero(&sd, sizeof (struct scsi_device)); 7284 sd.sd_address.a_hba_tran = softs->hba_tran; 7285 sd.sd_address.a_target = (uint16_t)tgt; 7286 sd.sd_address.a_lun = (uint8_t)lun; 7287 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS) 7288 rval = aac_config_child(softs, &sd, ldip); 7289 /* scsi_unprobe is blank now. Free buffer manually */ 7290 if (sd.sd_inq) { 7291 kmem_free(sd.sd_inq, SUN_INQSIZE); 7292 sd.sd_inq = (struct scsi_inquiry *)NULL; 7293 } 7294 return (rval); 7295 } 7296 7297 static int 7298 aac_config_tgt(struct aac_softstate *softs, int tgt) 7299 { 7300 struct scsi_address ap; 7301 struct buf *bp = NULL; 7302 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE; 7303 int list_len = 0; 7304 int lun_total = 0; 7305 dev_info_t *ldip; 7306 int i; 7307 7308 ap.a_hba_tran = softs->hba_tran; 7309 ap.a_target = (uint16_t)tgt; 7310 ap.a_lun = 0; 7311 7312 for (i = 0; i < 2; i++) { 7313 struct scsi_pkt *pkt; 7314 uchar_t *cdb; 7315 uchar_t *p; 7316 uint32_t data; 7317 7318 if (bp == NULL) { 7319 if ((bp = scsi_alloc_consistent_buf(&ap, NULL, 7320 buf_len, B_READ, NULL_FUNC, NULL)) == NULL) 7321 return (AACERR); 7322 } 7323 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5, 7324 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, 7325 NULL, NULL)) == NULL) { 7326 scsi_free_consistent_buf(bp); 7327 return (AACERR); 7328 } 7329 cdb = pkt->pkt_cdbp; 7330 bzero(cdb, CDB_GROUP5); 7331 cdb[0] = SCMD_REPORT_LUNS; 7332 7333 /* Convert buffer len from local to LE_32 */ 7334 data = buf_len; 7335 for (p = &cdb[9]; p > &cdb[5]; p--) { 7336 *p = data & 0xff; 7337 data >>= 8; 7338 } 7339 7340 if (scsi_poll(pkt) < 0 || 7341 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) { 7342 scsi_destroy_pkt(pkt); 7343 break; 7344 } 7345 7346 /* Convert list_len from LE_32 to local */ 7347 for (p = (uchar_t *)bp->b_un.b_addr; 7348 p < (uchar_t *)bp->b_un.b_addr + 4; p++) { 7349 data <<= 8; 7350 data |= *p; 7351 } 7352 list_len = data; 7353 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) { 7354 scsi_free_consistent_buf(bp); 7355 bp = NULL; 7356 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE; 7357 } 7358 scsi_destroy_pkt(pkt); 7359 } 7360 if (i >= 2) { 7361 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr + 7362 AAC_SCSI_RPTLUNS_HEAD_SIZE); 7363 7364 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) { 7365 uint16_t lun; 7366 7367 /* Determine report luns 
addressing type */ 7368 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) { 7369 /* 7370 * Vendors in the field have been found to be 7371 * concatenating bus/target/lun to equal the 7372 * complete lun value instead of switching to 7373 * flat space addressing 7374 */ 7375 case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL: 7376 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT: 7377 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE: 7378 lun = ((buf[0] & 0x3f) << 8) | buf[1]; 7379 if (lun > UINT8_MAX) { 7380 AACDB_PRINT(softs, CE_WARN, 7381 "abnormal lun number: %d", lun); 7382 break; 7383 } 7384 if (aac_config_lun(softs, tgt, lun, &ldip) == 7385 NDI_SUCCESS) 7386 lun_total++; 7387 break; 7388 } 7389 7390 buf += AAC_SCSI_RPTLUNS_ADDR_SIZE; 7391 } 7392 } else { 7393 /* The target may do not support SCMD_REPORT_LUNS. */ 7394 if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS) 7395 lun_total++; 7396 } 7397 scsi_free_consistent_buf(bp); 7398 return (lun_total); 7399 } 7400 7401 static void 7402 aac_devcfg(struct aac_softstate *softs, int tgt, int en) 7403 { 7404 struct aac_device *dvp; 7405 7406 mutex_enter(&softs->io_lock); 7407 dvp = AAC_DEV(softs, tgt); 7408 if (en) 7409 dvp->flags |= AAC_DFLAG_CONFIGURING; 7410 else 7411 dvp->flags &= ~AAC_DFLAG_CONFIGURING; 7412 mutex_exit(&softs->io_lock); 7413 } 7414 7415 static int 7416 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op, 7417 void *arg, dev_info_t **childp) 7418 { 7419 struct aac_softstate *softs; 7420 int rval = NDI_FAILURE; 7421 7422 if ((softs = ddi_get_soft_state(aac_softstatep, 7423 ddi_get_instance(parent))) == NULL) 7424 return (NDI_FAILURE); 7425 7426 /* Commands for bus config should be blocked as the bus is quiesced */ 7427 mutex_enter(&softs->io_lock); 7428 if (softs->state & AAC_STATE_QUIESCED) { 7429 AACDB_PRINT(softs, CE_NOTE, 7430 "bus_config aborted because bus is quiesced"); 7431 mutex_exit(&softs->io_lock); 7432 return (NDI_FAILURE); 7433 } 7434 mutex_exit(&softs->io_lock); 7435 7436 DBCALLED(softs, 1); 7437 7438 /* Hold the nexus across the bus_config */ 7439 ndi_devi_enter(parent); 7440 switch (op) { 7441 case BUS_CONFIG_ONE: { 7442 int tgt, lun; 7443 7444 if (aac_parse_devname(arg, &tgt, &lun) != AACOK) { 7445 rval = NDI_FAILURE; 7446 break; 7447 } 7448 if (tgt >= AAC_MAX_LD) { 7449 if (tgt >= AAC_MAX_DEV(softs)) { 7450 rval = NDI_FAILURE; 7451 break; 7452 } 7453 } 7454 7455 AAC_DEVCFG_BEGIN(softs, tgt); 7456 rval = aac_config_lun(softs, tgt, lun, childp); 7457 AAC_DEVCFG_END(softs, tgt); 7458 break; 7459 } 7460 7461 case BUS_CONFIG_DRIVER: 7462 case BUS_CONFIG_ALL: { 7463 uint32_t bus, tgt; 7464 int index, total; 7465 7466 for (tgt = 0; tgt < AAC_MAX_LD; tgt++) { 7467 AAC_DEVCFG_BEGIN(softs, tgt); 7468 (void) aac_config_lun(softs, tgt, 0, NULL); 7469 AAC_DEVCFG_END(softs, tgt); 7470 } 7471 7472 /* Config the non-DASD devices connected to the card */ 7473 total = 0; 7474 index = AAC_MAX_LD; 7475 for (bus = 0; bus < softs->bus_max; bus++) { 7476 AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus); 7477 for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) { 7478 AAC_DEVCFG_BEGIN(softs, index); 7479 if (aac_config_tgt(softs, index)) 7480 total++; 7481 AAC_DEVCFG_END(softs, index); 7482 } 7483 } 7484 AACDB_PRINT(softs, CE_CONT, 7485 "?Total %d phys. 
device(s) found", total); 7486 rval = NDI_SUCCESS; 7487 break; 7488 } 7489 } 7490 7491 if (rval == NDI_SUCCESS) 7492 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 7493 ndi_devi_exit(parent); 7494 return (rval); 7495 } 7496 7497 /*ARGSUSED*/ 7498 static int 7499 aac_handle_dr(struct aac_softstate *softs, int tgt, int lun, int event) 7500 { 7501 struct aac_device *dvp; 7502 dev_info_t *dip; 7503 int valid; 7504 7505 DBCALLED(softs, 1); 7506 7507 /* Hold the nexus across the bus_config */ 7508 dvp = AAC_DEV(softs, tgt); 7509 valid = AAC_DEV_IS_VALID(dvp); 7510 dip = dvp->dip; 7511 if (!(softs->state & AAC_STATE_RUN)) 7512 return (AACERR); 7513 mutex_exit(&softs->io_lock); 7514 7515 switch (event) { 7516 case AAC_CFG_ADD: 7517 case AAC_CFG_DELETE: 7518 /* Device onlined */ 7519 if (dip == NULL && valid) { 7520 ndi_devi_enter(softs->devinfo_p); 7521 (void) aac_config_lun(softs, tgt, 0, NULL); 7522 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined", 7523 softs->instance, tgt, lun); 7524 ndi_devi_exit(softs->devinfo_p); 7525 } 7526 /* Device offlined */ 7527 if (dip && !valid) { 7528 mutex_enter(&softs->io_lock); 7529 (void) aac_do_reset(softs); 7530 mutex_exit(&softs->io_lock); 7531 7532 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 7533 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined", 7534 softs->instance, tgt, lun); 7535 } 7536 break; 7537 } 7538 7539 mutex_enter(&softs->io_lock); 7540 return (AACOK); 7541 } 7542 7543 #ifdef DEBUG 7544 7545 /* -------------------------debug aid functions-------------------------- */ 7546 7547 #define AAC_FIB_CMD_KEY_STRINGS \ 7548 TestCommandResponse, "TestCommandResponse", \ 7549 TestAdapterCommand, "TestAdapterCommand", \ 7550 LastTestCommand, "LastTestCommand", \ 7551 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \ 7552 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \ 7553 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \ 7554 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \ 7555 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \ 7556 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \ 7557 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \ 7558 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \ 7559 InterfaceShutdown, "InterfaceShutdown", \ 7560 DmaCommandFib, "DmaCommandFib", \ 7561 StartProfile, "StartProfile", \ 7562 TermProfile, "TermProfile", \ 7563 SpeedTest, "SpeedTest", \ 7564 TakeABreakPt, "TakeABreakPt", \ 7565 RequestPerfData, "RequestPerfData", \ 7566 SetInterruptDefTimer, "SetInterruptDefTimer", \ 7567 SetInterruptDefCount, "SetInterruptDefCount", \ 7568 GetInterruptDefStatus, "GetInterruptDefStatus", \ 7569 LastCommCommand, "LastCommCommand", \ 7570 NuFileSystem, "NuFileSystem", \ 7571 UFS, "UFS", \ 7572 HostFileSystem, "HostFileSystem", \ 7573 LastFileSystemCommand, "LastFileSystemCommand", \ 7574 ContainerCommand, "ContainerCommand", \ 7575 ContainerCommand64, "ContainerCommand64", \ 7576 ClusterCommand, "ClusterCommand", \ 7577 ScsiPortCommand, "ScsiPortCommand", \ 7578 ScsiPortCommandU64, "ScsiPortCommandU64", \ 7579 AifRequest, "AifRequest", \ 7580 CheckRevision, "CheckRevision", \ 7581 FsaHostShutdown, "FsaHostShutdown", \ 7582 RequestAdapterInfo, "RequestAdapterInfo", \ 7583 IsAdapterPaused, "IsAdapterPaused", \ 7584 SendHostTime, "SendHostTime", \ 7585 LastMiscCommand, "LastMiscCommand" 7586 7587 #define AAC_CTVM_SUBCMD_KEY_STRINGS \ 7588 VM_Null, "VM_Null", \ 7589 VM_NameServe, "VM_NameServe", \ 7590 VM_ContainerConfig, "VM_ContainerConfig", \ 7591 VM_Ioctl, 
"VM_Ioctl", \ 7592 VM_FilesystemIoctl, "VM_FilesystemIoctl", \ 7593 VM_CloseAll, "VM_CloseAll", \ 7594 VM_CtBlockRead, "VM_CtBlockRead", \ 7595 VM_CtBlockWrite, "VM_CtBlockWrite", \ 7596 VM_SliceBlockRead, "VM_SliceBlockRead", \ 7597 VM_SliceBlockWrite, "VM_SliceBlockWrite", \ 7598 VM_DriveBlockRead, "VM_DriveBlockRead", \ 7599 VM_DriveBlockWrite, "VM_DriveBlockWrite", \ 7600 VM_EnclosureMgt, "VM_EnclosureMgt", \ 7601 VM_Unused, "VM_Unused", \ 7602 VM_CtBlockVerify, "VM_CtBlockVerify", \ 7603 VM_CtPerf, "VM_CtPerf", \ 7604 VM_CtBlockRead64, "VM_CtBlockRead64", \ 7605 VM_CtBlockWrite64, "VM_CtBlockWrite64", \ 7606 VM_CtBlockVerify64, "VM_CtBlockVerify64", \ 7607 VM_CtHostRead64, "VM_CtHostRead64", \ 7608 VM_CtHostWrite64, "VM_CtHostWrite64", \ 7609 VM_NameServe64, "VM_NameServe64" 7610 7611 #define AAC_CT_SUBCMD_KEY_STRINGS \ 7612 CT_Null, "CT_Null", \ 7613 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \ 7614 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \ 7615 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \ 7616 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \ 7617 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \ 7618 CT_WRITE_MBR, "CT_WRITE_MBR", \ 7619 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \ 7620 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \ 7621 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \ 7622 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \ 7623 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \ 7624 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \ 7625 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \ 7626 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \ 7627 CT_READ_MBR, "CT_READ_MBR", \ 7628 CT_READ_PARTITION, "CT_READ_PARTITION", \ 7629 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \ 7630 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \ 7631 CT_SLICE_SIZE, "CT_SLICE_SIZE", \ 7632 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \ 7633 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \ 7634 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \ 7635 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \ 7636 CT_UNMIRROR, "CT_UNMIRROR", \ 7637 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \ 7638 CT_GEN_MIRROR, "CT_GEN_MIRROR", \ 7639 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \ 7640 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \ 7641 CT_MOVE2, "CT_MOVE2", \ 7642 CT_SPLIT, "CT_SPLIT", \ 7643 CT_SPLIT2, "CT_SPLIT2", \ 7644 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \ 7645 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \ 7646 CT_RECONFIG, "CT_RECONFIG", \ 7647 CT_BREAK2, "CT_BREAK2", \ 7648 CT_BREAK, "CT_BREAK", \ 7649 CT_MERGE2, "CT_MERGE2", \ 7650 CT_MERGE, "CT_MERGE", \ 7651 CT_FORCE_ERROR, "CT_FORCE_ERROR", \ 7652 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \ 7653 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \ 7654 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \ 7655 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \ 7656 CT_VOLUME_ADD, "CT_VOLUME_ADD", \ 7657 CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \ 7658 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \ 7659 CT_COPY_STATUS, "CT_COPY_STATUS", \ 7660 CT_COPY, "CT_COPY", \ 7661 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \ 7662 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \ 7663 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \ 7664 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \ 7665 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \ 7666 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \ 7667 CT_SET, "CT_SET", \ 7668 CT_GET, "CT_GET", \ 7669 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \ 7670 CT_GET_DELAY, "CT_GET_DELAY", \ 7671 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \ 7672 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \ 7673 CT_SCRUB, "CT_SCRUB", \ 7674 
	CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \
	CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \
	CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \
	CT_PAUSE_IO, "CT_PAUSE_IO", \
	CT_RELEASE_IO, "CT_RELEASE_IO", \
	CT_SCRUB2, "CT_SCRUB2", \
	CT_MCHECK, "CT_MCHECK", \
	CT_CORRUPT, "CT_CORRUPT", \
	CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \
	CT_PROMOTE, "CT_PROMOTE", \
	CT_SET_DEAD, "CT_SET_DEAD", \
	CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \
	CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \
	CT_GET_PARAM, "CT_GET_PARAM", \
	CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \
	CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \
	CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \
	CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \
	CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \
	CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \
	CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \
	CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \
	CT_STOP_DATA, "CT_STOP_DATA", \
	CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \
	CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \
	CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \
	CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \
	CT_GET_TIME, "CT_GET_TIME", \
	CT_READ_DATA, "CT_READ_DATA", \
	CT_CTR, "CT_CTR", \
	CT_CTL, "CT_CTL", \
	CT_DRAINIO, "CT_DRAINIO", \
	CT_RELEASEIO, "CT_RELEASEIO", \
	CT_GET_NVRAM, "CT_GET_NVRAM", \
	CT_GET_MEMORY, "CT_GET_MEMORY", \
	CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \
	CT_ADD_LEVEL, "CT_ADD_LEVEL", \
	CT_NV_ZERO, "CT_NV_ZERO", \
	CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \
	CT_THROTTLE_ON, "CT_THROTTLE_ON", \
	CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \
	CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \
	CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \
	CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \
	CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \
	CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \
	CT_MONITOR, "CT_MONITOR", \
	CT_GEN_MORPH, "CT_GEN_MORPH", \
	CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \
	CT_CACHE_SET, "CT_CACHE_SET", \
	CT_CACHE_STAT, "CT_CACHE_STAT", \
	CT_TRACE_START, "CT_TRACE_START", \
	CT_TRACE_STOP, "CT_TRACE_STOP", \
	CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \
	CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \
	CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \
	CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \
	CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \
	CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \
	CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \
	CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \
	CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \
	CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \
	CT_STOP_DUMPS, "CT_STOP_DUMPS", \
	CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \
	CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \
	CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \
	CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \
	CT_READ_NAME, "CT_READ_NAME", \
	CT_WRITE_NAME, "CT_WRITE_NAME", \
	CT_TOSS_CACHE, "CT_TOSS_CACHE", \
	CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \
	CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \
	CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \
	CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \
	CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \
	CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \
	CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \
	CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \
	CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \
	CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \
	CT_FLUSH, "CT_FLUSH", \
	CT_REBUILD, "CT_REBUILD", \
	CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \
	CT_RESTART, "CT_RESTART", \
	CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \
	CT_TRACE_FLAG, "CT_TRACE_FLAG", \
	CT_RESTART_MORPH, "CT_RESTART_MORPH", \
	CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \
	CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \
	CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \
	CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \
	CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \
	CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \
	CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \
	CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \
	CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \
	CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \
	CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \
	CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \
	CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \
	CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \
	CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \
	CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \
	CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \
	CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \
	CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \
	CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \
	CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \
	CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \
	CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \
	CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \
	CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \
	CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \
	CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \
	CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \
	CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \
	CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \
	CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \
	CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \
	CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \
	CT_IS_CONTAINER_MEATADATA_STANDARD, \
	    "CT_IS_CONTAINER_MEATADATA_STANDARD", \
	CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \
	CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \
	CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \
	CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \
	CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \
	CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \
	CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \
	CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \
	CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \
	CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \
	CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \
	CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \
	CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \
	CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \
	CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \
	CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \
	CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \
	CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \
	CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"

#define	AAC_CL_SUBCMD_KEY_STRINGS \
	CL_NULL, "CL_NULL", \
	DS_INIT, "DS_INIT", \
	DS_RESCAN, "DS_RESCAN", \
	DS_CREATE, "DS_CREATE", \
	DS_DELETE, "DS_DELETE", \
	DS_ADD_DISK, "DS_ADD_DISK", \
	DS_REMOVE_DISK, "DS_REMOVE_DISK", \
	DS_MOVE_DISK, "DS_MOVE_DISK", \
	DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \
	DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \
	DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \
	DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \
	DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \
	DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \
	DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \
	DS_GET_DRIVES, "DS_GET_DRIVES", \
	DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \
	DS_ONLINE, "DS_ONLINE", \
	DS_OFFLINE, "DS_OFFLINE", \
	DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \
	DS_FSAPRINT, "DS_FSAPRINT", \
	CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \
	CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \
	CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \
	CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \
	CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \
	CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \
	CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \
	CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \
	CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \
	CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \
	CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \
	CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \
	CC_GET_BUSINFO, "CC_GET_BUSINFO", \
	CC_GET_PORTINFO, "CC_GET_PORTINFO", \
	CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \
	CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \
	CQ_QUORUM_OP, "CQ_QUORUM_OP"

#define	AAC_AIF_SUBCMD_KEY_STRINGS \
	AifCmdEventNotify, "AifCmdEventNotify", \
	AifCmdJobProgress, "AifCmdJobProgress", \
	AifCmdAPIReport, "AifCmdAPIReport", \
	AifCmdDriverNotify, "AifCmdDriverNotify", \
	AifReqJobList, "AifReqJobList", \
	AifReqJobsForCtr, "AifReqJobsForCtr", \
	AifReqJobsForScsi, "AifReqJobsForScsi", \
	AifReqJobReport, "AifReqJobReport", \
	AifReqTerminateJob, "AifReqTerminateJob", \
	AifReqSuspendJob, "AifReqSuspendJob", \
	AifReqResumeJob, "AifReqResumeJob", \
	AifReqSendAPIReport, "AifReqSendAPIReport", \
	AifReqAPIJobStart, "AifReqAPIJobStart", \
	AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \
	AifReqAPIJobFinish, "AifReqAPIJobFinish"

#define	AAC_IOCTL_SUBCMD_KEY_STRINGS \
	Reserved_IOCTL, "Reserved_IOCTL", \
	GetDeviceHandle, "GetDeviceHandle", \
	BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \
	DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \
	RescanBus, "RescanBus", \
	GetDeviceProbeInfo, "GetDeviceProbeInfo", \
	GetDeviceCapacity, "GetDeviceCapacity", \
	GetContainerProbeInfo, "GetContainerProbeInfo", \
	GetRequestedMemorySize, "GetRequestedMemorySize", \
	GetBusInfo, "GetBusInfo", \
	GetVendorSpecific, "GetVendorSpecific", \
	EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \
	EnhancedGetBusInfo, "EnhancedGetBusInfo", \
	SetupExtendedCounters, "SetupExtendedCounters", \
	GetPerformanceCounters, "GetPerformanceCounters", \
	ResetPerformanceCounters, "ResetPerformanceCounters", \
	ReadModePage, "ReadModePage", \
	WriteModePage, "WriteModePage", \
	ReadDriveParameter, "ReadDriveParameter", \
	WriteDriveParameter, "WriteDriveParameter", \
	ResetAdapter, "ResetAdapter", \
	ResetBus, "ResetBus", \
	ResetBusDevice, "ResetBusDevice", \
	ExecuteSrb, "ExecuteSrb", \
	Create_IO_Task, "Create_IO_Task", \
	Delete_IO_Task, "Delete_IO_Task", \
	Get_IO_Task_Info, "Get_IO_Task_Info", \
	Check_Task_Progress, "Check_Task_Progress", \
	InjectError, "InjectError", \
	GetDeviceDefectCounts, "GetDeviceDefectCounts", \
	GetDeviceDefectInfo, "GetDeviceDefectInfo", \
	GetDeviceStatus, "GetDeviceStatus", \
	ClearDeviceStatus, "ClearDeviceStatus", \
	DiskSpinControl, "DiskSpinControl", \
	DiskSmartControl, "DiskSmartControl", \
	WriteSame, "WriteSame", \
	ReadWriteLong, "ReadWriteLong", \
	FormatUnit, "FormatUnit", \
	TargetDeviceControl, "TargetDeviceControl", \
	TargetChannelControl, "TargetChannelControl", \
	FlashNewCode, "FlashNewCode", \
	DiskCheck, "DiskCheck", \
	RequestSense, "RequestSense", \
	DiskPERControl, "DiskPERControl", \
	Read10, "Read10", \
	Write10, "Write10"

#define	AAC_AIFEN_KEY_STRINGS \
	AifEnGeneric, "Generic", \
	AifEnTaskComplete, "TaskComplete", \
	AifEnConfigChange, "Config change", \
	AifEnContainerChange, "Container change", \
	AifEnDeviceFailure, "device failed", \
	AifEnMirrorFailover, "Mirror failover", \
	AifEnContainerEvent, "container event", \
	AifEnFileSystemChange, "File system changed", \
	AifEnConfigPause, "Container pause event", \
	AifEnConfigResume, "Container resume event", \
	AifEnFailoverChange, "Failover space assignment changed", \
	AifEnRAID5RebuildDone, "RAID5 rebuild finished", \
	AifEnEnclosureManagement, "Enclosure management event", \
	AifEnBatteryEvent, "battery event", \
	AifEnAddContainer, "Add container", \
	AifEnDeleteContainer, "Delete container", \
	AifEnSMARTEvent, "SMART Event", \
	AifEnBatteryNeedsRecond, "battery needs reconditioning", \
	AifEnClusterEvent, "cluster event", \
	AifEnDiskSetEvent, "disk set event occurred", \
	AifDenMorphComplete, "morph operation completed", \
	AifDenVolumeExtendComplete, "VolumeExtendComplete"

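/*
 * A key/message pair for the debug name tables.  Each table below is built
 * from one of the AAC_*_KEY_STRINGS macros above and is terminated by a
 * { -1, NULL } entry, which is the sentinel aac_cmd_name() stops on.
 */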
struct aac_key_strings {
	int key;
	char *message;
};

extern struct scsi_key_strings scsi_cmds[];

static struct aac_key_strings aac_fib_cmds[] = {
	AAC_FIB_CMD_KEY_STRINGS,
	-1, NULL
};

static struct aac_key_strings aac_ctvm_subcmds[] = {
	AAC_CTVM_SUBCMD_KEY_STRINGS,
	-1, NULL
};

static struct aac_key_strings aac_ct_subcmds[] = {
	AAC_CT_SUBCMD_KEY_STRINGS,
	-1, NULL
};

static struct aac_key_strings aac_cl_subcmds[] = {
	AAC_CL_SUBCMD_KEY_STRINGS,
	-1, NULL
};

static struct aac_key_strings aac_aif_subcmds[] = {
	AAC_AIF_SUBCMD_KEY_STRINGS,
	-1, NULL
};

static struct aac_key_strings aac_ioctl_subcmds[] = {
	AAC_IOCTL_SUBCMD_KEY_STRINGS,
	-1, NULL
};

static struct aac_key_strings aac_aifens[] = {
	AAC_AIFEN_KEY_STRINGS,
	-1, NULL
};

/*
 * The following function comes from Adaptec:
 *
 * Get the firmware print buffer parameters from the firmware; if the
 * command was successful, map in the address.
 */
static int
aac_get_fw_debug_buffer(struct aac_softstate *softs)
{
	if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
	    0, 0, 0, 0, NULL) == AACOK) {
		uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
		uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
		uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
		uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);

		if (mondrv_buf_size) {
			uint32_t offset = mondrv_buf_paddrl -
			    softs->pci_mem_base_paddr;

			/*
			 * See if the address is already mapped in, and
			 * if so set it up from the base address
			 */
			if ((mondrv_buf_paddrh == 0) &&
			    (offset + mondrv_buf_size < softs->map_size)) {
				mutex_enter(&aac_prt_mutex);
				softs->debug_buf_offset = offset;
				softs->debug_header_size = mondrv_hdr_size;
				softs->debug_buf_size = mondrv_buf_size;
				softs->debug_fw_flags = 0;
				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
				mutex_exit(&aac_prt_mutex);

				return (AACOK);
			}
		}
	}
	return (AACERR);
}

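/*
 * Return non-zero when the given debug flag is set and at least one print
 * sink (firmware print buffer or kernel console) is enabled; with a NULL
 * softs the global aac_debug_flags is consulted instead.
 */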
7993 */ 7994 static int 7995 aac_get_fw_debug_buffer(struct aac_softstate *softs) 7996 { 7997 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP, 7998 0, 0, 0, 0, NULL) == AACOK) { 7999 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1); 8000 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2); 8001 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3); 8002 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4); 8003 8004 if (mondrv_buf_size) { 8005 uint32_t offset = mondrv_buf_paddrl - \ 8006 softs->pci_mem_base_paddr; 8007 8008 /* 8009 * See if the address is already mapped in, and 8010 * if so set it up from the base address 8011 */ 8012 if ((mondrv_buf_paddrh == 0) && 8013 (offset + mondrv_buf_size < softs->map_size)) { 8014 mutex_enter(&aac_prt_mutex); 8015 softs->debug_buf_offset = offset; 8016 softs->debug_header_size = mondrv_hdr_size; 8017 softs->debug_buf_size = mondrv_buf_size; 8018 softs->debug_fw_flags = 0; 8019 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 8020 mutex_exit(&aac_prt_mutex); 8021 8022 return (AACOK); 8023 } 8024 } 8025 } 8026 return (AACERR); 8027 } 8028 8029 int 8030 aac_dbflag_on(struct aac_softstate *softs, int flag) 8031 { 8032 int debug_flags = softs ? softs->debug_flags : aac_debug_flags; 8033 8034 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \ 8035 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag)); 8036 } 8037 8038 static void 8039 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader) 8040 { 8041 if (noheader) { 8042 if (sl) { 8043 aac_fmt[0] = sl; 8044 cmn_err(lev, aac_fmt, aac_prt_buf); 8045 } else { 8046 cmn_err(lev, &aac_fmt[1], aac_prt_buf); 8047 } 8048 } else { 8049 if (sl) { 8050 aac_fmt_header[0] = sl; 8051 cmn_err(lev, aac_fmt_header, 8052 softs->vendor_name, softs->instance, 8053 aac_prt_buf); 8054 } else { 8055 cmn_err(lev, &aac_fmt_header[1], 8056 softs->vendor_name, softs->instance, 8057 aac_prt_buf); 8058 } 8059 } 8060 } 8061 8062 /* 8063 * The following function comes from Adaptec: 8064 * 8065 * Format and print out the data passed in to UART or console 8066 * as specified by debug flags. 8067 */ 8068 void 8069 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...) 8070 { 8071 va_list args; 8072 char sl; /* system log character */ 8073 8074 mutex_enter(&aac_prt_mutex); 8075 /* Set up parameters and call sprintf function to format the data */ 8076 if (strchr("^!?", fmt[0]) == NULL) { 8077 sl = 0; 8078 } else { 8079 sl = fmt[0]; 8080 fmt++; 8081 } 8082 va_start(args, fmt); 8083 (void) vsprintf(aac_prt_buf, fmt, args); 8084 va_end(args); 8085 8086 /* Make sure the softs structure has been passed in for this section */ 8087 if (softs) { 8088 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) && 8089 /* If we are set up for a Firmware print */ 8090 (softs->debug_buf_size)) { 8091 uint32_t count, i; 8092 8093 /* Make sure the string size is within boundaries */ 8094 count = strlen(aac_prt_buf); 8095 if (count > softs->debug_buf_size) 8096 count = (uint16_t)softs->debug_buf_size; 8097 8098 /* 8099 * Wait for no more than AAC_PRINT_TIMEOUT for the 8100 * previous message length to clear (the handshake). 8101 */ 8102 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) { 8103 if (!PCI_MEM_GET32(softs, 8104 softs->debug_buf_offset + \ 8105 AAC_FW_DBG_STRLEN_OFFSET)) 8106 break; 8107 8108 drv_usecwait(1000); 8109 } 8110 8111 /* 8112 * If the length is clear, copy over the message, the 8113 * flags, and the length. Make sure the length is the 8114 * last because that is the signal for the Firmware to 8115 * pick it up. 
void
aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
{
	va_list args;
	char sl; /* system log character */

	mutex_enter(&aac_prt_mutex);
	/* Set up parameters and call sprintf function to format the data */
	if (strchr("^!?", fmt[0]) == NULL) {
		sl = 0;
	} else {
		sl = fmt[0];
		fmt++;
	}
	va_start(args, fmt);
	(void) vsprintf(aac_prt_buf, fmt, args);
	va_end(args);

	/* Make sure the softs structure has been passed in for this section */
	if (softs) {
		if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
		    /* If we are set up for a Firmware print */
		    (softs->debug_buf_size)) {
			uint32_t count, i;

			/* Make sure the string size is within boundaries */
			count = strlen(aac_prt_buf);
			if (count > softs->debug_buf_size)
				count = (uint16_t)softs->debug_buf_size;

			/*
			 * Wait for no more than AAC_PRINT_TIMEOUT for the
			 * previous message length to clear (the handshake).
			 */
			for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
				if (!PCI_MEM_GET32(softs,
				    softs->debug_buf_offset +
				    AAC_FW_DBG_STRLEN_OFFSET))
					break;

				drv_usecwait(1000);
			}

			/*
			 * If the length is clear, copy over the message, the
			 * flags, and the length. Make sure the length is the
			 * last because that is the signal for the Firmware to
			 * pick it up.
			 */
			if (!PCI_MEM_GET32(softs, softs->debug_buf_offset +
			    AAC_FW_DBG_STRLEN_OFFSET)) {
				PCI_MEM_REP_PUT8(softs,
				    softs->debug_buf_offset +
				    softs->debug_header_size,
				    aac_prt_buf, count);
				PCI_MEM_PUT32(softs,
				    softs->debug_buf_offset +
				    AAC_FW_DBG_FLAGS_OFFSET,
				    softs->debug_fw_flags);
				PCI_MEM_PUT32(softs,
				    softs->debug_buf_offset +
				    AAC_FW_DBG_STRLEN_OFFSET, count);
			} else {
				cmn_err(CE_WARN, "UART output fail");
				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
			}
		}

		/*
		 * If the Kernel Debug Print flag is set, send it off
		 * to the Kernel Debugger
		 */
		if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
			aac_cmn_err(softs, lev, sl,
			    (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
	} else {
		/* Driver not initialized yet, no firmware or header output */
		if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
			aac_cmn_err(softs, lev, sl, 1);
	}
	mutex_exit(&aac_prt_mutex);
}

/*
 * Translate command number to description string
 */
static char *
aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
{
	int i;

	for (i = 0; cmdlist[i].key != -1; i++) {
		if (cmd == cmdlist[i].key)
			return (cmdlist[i].message);
	}
	return (NULL);
}

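/*
 * Log a SCSI command for debugging: decode the CDB opcode via scsi_cmds[],
 * print the start block and block count for the READ/WRITE variants, and
 * tag the c/t/L address with "(pd)" for physical (non-container) devices.
 */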
"(pd)" : ""); 8231 } 8232 } 8233 8234 void 8235 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp) 8236 { 8237 struct aac_cmd *acp = slotp->acp; 8238 struct aac_fib *fibp = slotp->fibp; 8239 ddi_acc_handle_t acc = slotp->fib_acc_handle; 8240 uint16_t fib_size; 8241 uint32_t fib_cmd, sub_cmd; 8242 char *cmdstr, *subcmdstr; 8243 char *caller; 8244 int i; 8245 8246 if (acp) { 8247 if (!(softs->debug_fib_flags & acp->fib_flags)) 8248 return; 8249 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD) 8250 caller = "SCMD"; 8251 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL) 8252 caller = "IOCTL"; 8253 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB) 8254 caller = "SRB"; 8255 else 8256 return; 8257 } else { 8258 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC)) 8259 return; 8260 caller = "SYNC"; 8261 } 8262 8263 fib_cmd = ddi_get16(acc, &fibp->Header.Command); 8264 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds); 8265 sub_cmd = (uint32_t)-1; 8266 subcmdstr = NULL; 8267 8268 /* Print FIB header */ 8269 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) { 8270 aac_printf(softs, CE_NOTE, "FIB> from %s", caller); 8271 aac_printf(softs, CE_NOTE, " XferState %d", 8272 ddi_get32(acc, &fibp->Header.XferState)); 8273 aac_printf(softs, CE_NOTE, " Command %d", 8274 ddi_get16(acc, &fibp->Header.Command)); 8275 aac_printf(softs, CE_NOTE, " StructType %d", 8276 ddi_get8(acc, &fibp->Header.StructType)); 8277 aac_printf(softs, CE_NOTE, " Flags 0x%x", 8278 ddi_get8(acc, &fibp->Header.Flags)); 8279 aac_printf(softs, CE_NOTE, " Size %d", 8280 ddi_get16(acc, &fibp->Header.Size)); 8281 aac_printf(softs, CE_NOTE, " SenderSize %d", 8282 ddi_get16(acc, &fibp->Header.SenderSize)); 8283 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x", 8284 ddi_get32(acc, &fibp->Header.SenderFibAddress)); 8285 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x", 8286 ddi_get32(acc, &fibp->Header.ReceiverFibAddress)); 8287 aac_printf(softs, CE_NOTE, " SenderData 0x%x", 8288 ddi_get32(acc, &fibp->Header.SenderData)); 8289 } 8290 8291 /* Print FIB data */ 8292 switch (fib_cmd) { 8293 case ContainerCommand: 8294 sub_cmd = ddi_get32(acc, 8295 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0])); 8296 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds); 8297 if (subcmdstr == NULL) 8298 break; 8299 8300 switch (sub_cmd) { 8301 case VM_ContainerConfig: { 8302 struct aac_Container *pContainer = 8303 (struct aac_Container *)fibp->data; 8304 8305 fib_cmd = sub_cmd; 8306 cmdstr = subcmdstr; 8307 sub_cmd = (uint32_t)-1; 8308 subcmdstr = NULL; 8309 8310 sub_cmd = ddi_get32(acc, 8311 &pContainer->CTCommand.command); 8312 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds); 8313 if (subcmdstr == NULL) 8314 break; 8315 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)", 8316 subcmdstr, 8317 ddi_get32(acc, &pContainer->CTCommand.param[0]), 8318 ddi_get32(acc, &pContainer->CTCommand.param[1]), 8319 ddi_get32(acc, &pContainer->CTCommand.param[2])); 8320 return; 8321 } 8322 8323 case VM_Ioctl: 8324 fib_cmd = sub_cmd; 8325 cmdstr = subcmdstr; 8326 sub_cmd = (uint32_t)-1; 8327 subcmdstr = NULL; 8328 8329 sub_cmd = ddi_get32(acc, 8330 (void *)&(((uint32_t *)(void *)&fibp->data[0])[4])); 8331 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds); 8332 break; 8333 8334 case VM_CtBlockRead: 8335 case VM_CtBlockWrite: { 8336 struct aac_blockread *br = 8337 (struct aac_blockread *)fibp->data; 8338 struct aac_sg_table *sg = &br->SgMap; 8339 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 8340 8341 aac_printf(softs, CE_NOTE, 8342 "FIB> %s Container %d 0x%x/%d", 
void
aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp)
{
	struct aac_cmd *acp = slotp->acp;
	struct aac_fib *fibp = slotp->fibp;
	ddi_acc_handle_t acc = slotp->fib_acc_handle;
	uint16_t fib_size;
	uint32_t fib_cmd, sub_cmd;
	char *cmdstr, *subcmdstr;
	char *caller;
	int i;

	if (acp) {
		if (!(softs->debug_fib_flags & acp->fib_flags))
			return;
		if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD)
			caller = "SCMD";
		else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL)
			caller = "IOCTL";
		else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB)
			caller = "SRB";
		else
			return;
	} else {
		if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC))
			return;
		caller = "SYNC";
	}

	fib_cmd = ddi_get16(acc, &fibp->Header.Command);
	cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds);
	sub_cmd = (uint32_t)-1;
	subcmdstr = NULL;

	/* Print FIB header */
	if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) {
		aac_printf(softs, CE_NOTE, "FIB> from %s", caller);
		aac_printf(softs, CE_NOTE, " XferState %d",
		    ddi_get32(acc, &fibp->Header.XferState));
		aac_printf(softs, CE_NOTE, " Command %d",
		    ddi_get16(acc, &fibp->Header.Command));
		aac_printf(softs, CE_NOTE, " StructType %d",
		    ddi_get8(acc, &fibp->Header.StructType));
		aac_printf(softs, CE_NOTE, " Flags 0x%x",
		    ddi_get8(acc, &fibp->Header.Flags));
		aac_printf(softs, CE_NOTE, " Size %d",
		    ddi_get16(acc, &fibp->Header.Size));
		aac_printf(softs, CE_NOTE, " SenderSize %d",
		    ddi_get16(acc, &fibp->Header.SenderSize));
		aac_printf(softs, CE_NOTE, " SenderAddr 0x%x",
		    ddi_get32(acc, &fibp->Header.SenderFibAddress));
		aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x",
		    ddi_get32(acc, &fibp->Header.ReceiverFibAddress));
		aac_printf(softs, CE_NOTE, " SenderData 0x%x",
		    ddi_get32(acc, &fibp->Header.SenderData));
	}

	/* Print FIB data */
	switch (fib_cmd) {
	case ContainerCommand:
		sub_cmd = ddi_get32(acc,
		    (void *)&(((uint32_t *)(void *)&fibp->data[0])[0]));
		subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds);
		if (subcmdstr == NULL)
			break;

		switch (sub_cmd) {
		case VM_ContainerConfig: {
			struct aac_Container *pContainer =
			    (struct aac_Container *)fibp->data;

			fib_cmd = sub_cmd;
			cmdstr = subcmdstr;
			sub_cmd = (uint32_t)-1;
			subcmdstr = NULL;

			sub_cmd = ddi_get32(acc,
			    &pContainer->CTCommand.command);
			subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds);
			if (subcmdstr == NULL)
				break;
			aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)",
			    subcmdstr,
			    ddi_get32(acc, &pContainer->CTCommand.param[0]),
			    ddi_get32(acc, &pContainer->CTCommand.param[1]),
			    ddi_get32(acc, &pContainer->CTCommand.param[2]));
			return;
		}

		case VM_Ioctl:
			fib_cmd = sub_cmd;
			cmdstr = subcmdstr;
			sub_cmd = (uint32_t)-1;
			subcmdstr = NULL;

			sub_cmd = ddi_get32(acc,
			    (void *)&(((uint32_t *)(void *)&fibp->data[0])[4]));
			subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
			break;

		case VM_CtBlockRead:
		case VM_CtBlockWrite: {
			struct aac_blockread *br =
			    (struct aac_blockread *)fibp->data;
			struct aac_sg_table *sg = &br->SgMap;
			uint32_t sgcount = ddi_get32(acc, &sg->SgCount);

			aac_printf(softs, CE_NOTE,
			    "FIB> %s Container %d 0x%x/%d", subcmdstr,
			    ddi_get32(acc, &br->ContainerId),
			    ddi_get32(acc, &br->BlockNumber),
			    ddi_get32(acc, &br->ByteCount));
			for (i = 0; i < sgcount; i++)
				aac_printf(softs, CE_NOTE,
				    " %d: 0x%08x/%d", i,
				    ddi_get32(acc, &sg->SgEntry[i].SgAddress),
				    ddi_get32(acc,
				    &sg->SgEntry[i].SgByteCount));
			return;
		}
		}
		break;

	case ContainerCommand64: {
		struct aac_blockread64 *br =
		    (struct aac_blockread64 *)fibp->data;
		struct aac_sg_table64 *sg = &br->SgMap64;
		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
		uint64_t sgaddr;

		sub_cmd = br->Command;
		subcmdstr = NULL;
		if (sub_cmd == VM_CtHostRead64)
			subcmdstr = "VM_CtHostRead64";
		else if (sub_cmd == VM_CtHostWrite64)
			subcmdstr = "VM_CtHostWrite64";
		else
			break;

		aac_printf(softs, CE_NOTE,
		    "FIB> %s Container %d 0x%x/%d", subcmdstr,
		    ddi_get16(acc, &br->ContainerId),
		    ddi_get32(acc, &br->BlockNumber),
		    ddi_get16(acc, &br->SectorCount));
		for (i = 0; i < sgcount; i++) {
			sgaddr = ddi_get64(acc,
			    &sg->SgEntry64[i].SgAddress);
			aac_printf(softs, CE_NOTE,
			    " %d: 0x%08x.%08x/%d", i,
			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
			    ddi_get32(acc, &sg->SgEntry64[i].SgByteCount));
		}
		return;
	}

	case RawIo: {
		struct aac_raw_io *io = (struct aac_raw_io *)fibp->data;
		struct aac_sg_tableraw *sg = &io->SgMapRaw;
		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
		uint64_t sgaddr;

		aac_printf(softs, CE_NOTE,
		    "FIB> RawIo Container %d 0x%llx/%d 0x%x",
		    ddi_get16(acc, &io->ContainerId),
		    ddi_get64(acc, &io->BlockNumber),
		    ddi_get32(acc, &io->ByteCount),
		    ddi_get16(acc, &io->Flags));
		for (i = 0; i < sgcount; i++) {
			sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress);
			aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i,
			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
			    ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount));
		}
		return;
	}

	case ClusterCommand:
		sub_cmd = ddi_get32(acc,
		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
		subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
		break;

	case AifRequest:
		sub_cmd = ddi_get32(acc,
		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
		subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
		break;

	default:
		break;
	}

	fib_size = ddi_get16(acc, &(fibp->Header.Size));
	if (subcmdstr)
		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
		    subcmdstr, fib_size);
	else if (cmdstr && sub_cmd == (uint32_t)-1)
		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
		    cmdstr, fib_size);
	else if (cmdstr)
		aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
		    cmdstr, sub_cmd, fib_size);
	else
		aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
		    fib_cmd, fib_size);
}

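/*
 * Log an adapter-initiated FIB (AIF): translate event-notification types
 * via aac_aifens[] and summarize job-progress, API-report and driver-notify
 * messages together with their sequence numbers.
 */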
Unknown(0x%x)", 8461 aif_en_type); 8462 break; 8463 8464 case AifCmdJobProgress: 8465 switch (LE_32(aif->data.PR[0].status)) { 8466 case AifJobStsSuccess: 8467 str = "success"; break; 8468 case AifJobStsFinished: 8469 str = "finished"; break; 8470 case AifJobStsAborted: 8471 str = "aborted"; break; 8472 case AifJobStsFailed: 8473 str = "failed"; break; 8474 case AifJobStsSuspended: 8475 str = "suspended"; break; 8476 case AifJobStsRunning: 8477 str = "running"; break; 8478 default: 8479 str = "unknown"; break; 8480 } 8481 aac_printf(softs, CE_NOTE, 8482 "AIF! JobProgress (%d) - %s (%d, %d)", 8483 aif_seqnumber, str, 8484 LE_32(aif->data.PR[0].currentTick), 8485 LE_32(aif->data.PR[0].finalTick)); 8486 break; 8487 8488 case AifCmdAPIReport: 8489 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)", 8490 aif_seqnumber); 8491 break; 8492 8493 case AifCmdDriverNotify: 8494 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)", 8495 aif_seqnumber); 8496 break; 8497 8498 default: 8499 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)", 8500 aif_command, aif_seqnumber); 8501 break; 8502 } 8503 } 8504 8505 #endif /* DEBUG */ 8506