1 /*
2  * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright 2005-08 Adaptec, Inc.
8  * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
9  * Copyright (c) 2000 Michael Smith
10  * Copyright (c) 2001 Scott Long
11  * Copyright (c) 2000 BSDi
12  * All rights reserved.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/modctl.h>
36 #include <sys/conf.h>
37 #include <sys/cmn_err.h>
38 #include <sys/ddi.h>
39 #include <sys/devops.h>
40 #include <sys/pci.h>
41 #include <sys/types.h>
42 #include <sys/ddidmareq.h>
43 #include <sys/scsi/scsi.h>
44 #include <sys/ksynch.h>
45 #include <sys/sunddi.h>
46 #include <sys/byteorder.h>
47 #include "aac_regs.h"
48 #include "aac.h"
49 
50 /*
51  * FMA header files
52  */
53 #include <sys/ddifm.h>
54 #include <sys/fm/protocol.h>
55 #include <sys/fm/util.h>
56 #include <sys/fm/io/ddi.h>
57 
58 /*
59  * For minor nodes created by the SCSA framework, minor numbers are
60  * formed by left-shifting instance by INST_MINOR_SHIFT and ORing in a
61  * number less than 64.
62  *
63  * To support cfgadm, we need to conform to the SCSA framework by creating
64  * devctl/scsi and driver-specific minor nodes in the SCSA format,
65  * and calling scsi_hba_xxx() functions accordingly.
66  */
67 
68 #define AAC_MINOR 32
69 #define INST2AAC(x) (((x) << INST_MINOR_SHIFT) | AAC_MINOR)
70 #define AAC_SCSA_MINOR(x) ((x) & TRAN_MINOR_MASK)
71 #define AAC_IS_SCSA_NODE(x) ((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
72 
73 #define SD2TRAN(sd) ((sd)->sd_address.a_hba_tran)
74 #define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
75 #define AAC_DIP2TRAN(dip) ((scsi_hba_tran_t *)ddi_get_driver_private(dip))
76 #define AAC_DIP2SOFTS(dip) (AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
77 #define SD2AAC(sd) (AAC_TRAN2SOFTS(SD2TRAN(sd)))
78 #define AAC_PD(t) ((t) - AAC_MAX_LD)
79 #define AAC_DEV(softs, t) (((t) < AAC_MAX_LD) ? \
80     &(softs)->containers[(t)].dev : \
81     ((t) < AAC_MAX_DEV(softs)) ?
\ 82 &(softs)->nondasds[AAC_PD(t)].dev : NULL) 83 #define AAC_DEVCFG_BEGIN(softs, tgt) \ 84 aac_devcfg((softs), (tgt), 1) 85 #define AAC_DEVCFG_END(softs, tgt) \ 86 aac_devcfg((softs), (tgt), 0) 87 #define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private) 88 #define AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \ 89 if (!(cond)) { \ 90 int count = (timeout) * 10; \ 91 while (count) { \ 92 drv_usecwait(100); \ 93 if (cond) \ 94 break; \ 95 count--; \ 96 } \ 97 (timeout) = (count + 9) / 10; \ 98 } \ 99 } 100 101 #define AAC_SENSE_DATA_DESCR_LEN \ 102 (sizeof (struct scsi_descr_sense_hdr) + \ 103 sizeof (struct scsi_information_sense_descr)) 104 #define AAC_ARQ64_LENGTH \ 105 (sizeof (struct scsi_arq_status) + \ 106 AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH) 107 108 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 109 #define AAC_GETGXADDR(cmdlen, cdbp) \ 110 ((cmdlen == 6) ? GETG0ADDR(cdbp) : \ 111 (cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \ 112 ((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp)) 113 114 #define AAC_CDB_INQUIRY_CMDDT 0x02 115 #define AAC_CDB_INQUIRY_EVPD 0x01 116 #define AAC_VPD_PAGE_CODE 1 117 #define AAC_VPD_PAGE_LENGTH 3 118 #define AAC_VPD_PAGE_DATA 4 119 #define AAC_VPD_ID_CODESET 0 120 #define AAC_VPD_ID_TYPE 1 121 #define AAC_VPD_ID_LENGTH 3 122 #define AAC_VPD_ID_DATA 4 123 124 #define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08 125 #define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08 126 #define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0 127 /* 00b - peripheral device addressing method */ 128 #define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00 129 /* 01b - flat space addressing method */ 130 #define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40 131 /* 10b - logical unit addressing method */ 132 #define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80 133 134 /* Return the size of FIB with data part type data_type */ 135 #define AAC_FIB_SIZEOF(data_type) \ 136 (sizeof (struct aac_fib_header) + sizeof (data_type)) 137 /* Return the container size defined in mir */ 138 #define AAC_MIR_SIZE(softs, acc, mir) \ 139 (((softs)->flags & AAC_FLAGS_LBA_64BIT) ? 
\ 140 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \ 141 ((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \ 142 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity)) 143 144 /* The last entry of aac_cards[] is for unknown cards */ 145 #define AAC_UNKNOWN_CARD \ 146 (sizeof (aac_cards) / sizeof (struct aac_card_type) - 1) 147 #define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD) 148 #define BUF_IS_READ(bp) ((bp)->b_flags & B_READ) 149 #define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL) 150 #define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC)) 151 152 #define PCI_MEM_GET32(softs, off) \ 153 ddi_get32((softs)->pci_mem_handle, \ 154 (void *)((softs)->pci_mem_base_vaddr + (off))) 155 #define PCI_MEM_PUT32(softs, off, val) \ 156 ddi_put32((softs)->pci_mem_handle, \ 157 (void *)((softs)->pci_mem_base_vaddr + (off)), \ 158 (uint32_t)(val)) 159 #define PCI_MEM_GET16(softs, off) \ 160 ddi_get16((softs)->pci_mem_handle, \ 161 (void *)((softs)->pci_mem_base_vaddr + (off))) 162 #define PCI_MEM_PUT16(softs, off, val) \ 163 ddi_put16((softs)->pci_mem_handle, \ 164 (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val)) 165 /* Write host data at valp to device mem[off] repeatedly count times */ 166 #define PCI_MEM_REP_PUT8(softs, off, valp, count) \ 167 ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 168 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 169 count, DDI_DEV_AUTOINCR) 170 /* Read device data at mem[off] to host addr valp repeatedly count times */ 171 #define PCI_MEM_REP_GET8(softs, off, valp, count) \ 172 ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 173 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 174 count, DDI_DEV_AUTOINCR) 175 #define AAC_GET_FIELD8(acc, d, s, field) \ 176 (d)->field = ddi_get8(acc, (uint8_t *)&(s)->field) 177 #define AAC_GET_FIELD32(acc, d, s, field) \ 178 (d)->field = ddi_get32(acc, (uint32_t *)&(s)->field) 179 #define AAC_GET_FIELD64(acc, d, s, field) \ 180 (d)->field = ddi_get64(acc, (uint64_t *)&(s)->field) 181 #define AAC_REP_GET_FIELD8(acc, d, s, field, r) \ 182 ddi_rep_get8((acc), (uint8_t *)&(d)->field, \ 183 (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 184 #define AAC_REP_GET_FIELD32(acc, d, s, field, r) \ 185 ddi_rep_get32((acc), (uint32_t *)&(d)->field, \ 186 (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 187 188 #define AAC_ENABLE_INTR(softs) { \ 189 if (softs->flags & AAC_FLAGS_NEW_COMM) \ 190 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \ 191 else \ 192 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \ 193 } 194 195 #define AAC_DISABLE_INTR(softs) PCI_MEM_PUT32(softs, AAC_OIMR, ~0) 196 #define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask) 197 #define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR) 198 #define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val) 199 #define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE) 200 #define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val) 201 #define AAC_FWSTATUS_GET(softs) \ 202 ((softs)->aac_if.aif_get_fwstatus(softs)) 203 #define AAC_MAILBOX_GET(softs, mb) \ 204 ((softs)->aac_if.aif_get_mailbox((softs), (mb))) 205 #define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \ 206 ((softs)->aac_if.aif_set_mailbox((softs), (cmd), \ 207 (arg0), (arg1), (arg2), (arg3))) 208 209 #define AAC_THROTTLE_DRAIN -1 210 211 #define AAC_QUIESCE_TICK 1 /* 1 second */ 212 #define AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */ 213 #define AAC_DEFAULT_TICK 10 /* 10 seconds */ 214 #define AAC_SYNC_TICK (30*60) /* 30 
minutes */ 215 216 /* Poll time for aac_do_poll_io() */ 217 #define AAC_POLL_TIME 60 /* 60 seconds */ 218 219 /* IOP reset */ 220 #define AAC_IOP_RESET_SUCCEED 0 /* IOP reset succeed */ 221 #define AAC_IOP_RESET_FAILED -1 /* IOP reset failed */ 222 #define AAC_IOP_RESET_ABNORMAL -2 /* Reset operation abnormal */ 223 224 /* 225 * Hardware access functions 226 */ 227 static int aac_rx_get_fwstatus(struct aac_softstate *); 228 static int aac_rx_get_mailbox(struct aac_softstate *, int); 229 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 230 uint32_t, uint32_t, uint32_t); 231 static int aac_rkt_get_fwstatus(struct aac_softstate *); 232 static int aac_rkt_get_mailbox(struct aac_softstate *, int); 233 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 234 uint32_t, uint32_t, uint32_t); 235 236 /* 237 * SCSA function prototypes 238 */ 239 static int aac_attach(dev_info_t *, ddi_attach_cmd_t); 240 static int aac_detach(dev_info_t *, ddi_detach_cmd_t); 241 static int aac_reset(dev_info_t *, ddi_reset_cmd_t); 242 static int aac_quiesce(dev_info_t *); 243 244 /* 245 * Interrupt handler functions 246 */ 247 static int aac_query_intrs(struct aac_softstate *, int); 248 static int aac_add_intrs(struct aac_softstate *); 249 static void aac_remove_intrs(struct aac_softstate *); 250 static uint_t aac_intr_old(caddr_t); 251 static uint_t aac_intr_new(caddr_t); 252 static uint_t aac_softintr(caddr_t); 253 254 /* 255 * Internal functions in attach 256 */ 257 static int aac_check_card_type(struct aac_softstate *); 258 static int aac_check_firmware(struct aac_softstate *); 259 static int aac_common_attach(struct aac_softstate *); 260 static void aac_common_detach(struct aac_softstate *); 261 static int aac_probe_containers(struct aac_softstate *); 262 static int aac_alloc_comm_space(struct aac_softstate *); 263 static int aac_setup_comm_space(struct aac_softstate *); 264 static void aac_free_comm_space(struct aac_softstate *); 265 static int aac_hba_setup(struct aac_softstate *); 266 267 /* 268 * Sync FIB operation functions 269 */ 270 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t, 271 uint32_t, uint32_t, uint32_t, uint32_t *); 272 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t); 273 274 /* 275 * Command queue operation functions 276 */ 277 static void aac_cmd_initq(struct aac_cmd_queue *); 278 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *); 279 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *); 280 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *); 281 282 /* 283 * FIB queue operation functions 284 */ 285 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t); 286 static int aac_fib_dequeue(struct aac_softstate *, int, int *); 287 288 /* 289 * Slot operation functions 290 */ 291 static int aac_create_slots(struct aac_softstate *); 292 static void aac_destroy_slots(struct aac_softstate *); 293 static void aac_alloc_fibs(struct aac_softstate *); 294 static void aac_destroy_fibs(struct aac_softstate *); 295 static struct aac_slot *aac_get_slot(struct aac_softstate *); 296 static void aac_release_slot(struct aac_softstate *, struct aac_slot *); 297 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *); 298 static void aac_free_fib(struct aac_slot *); 299 300 /* 301 * Internal functions 302 */ 303 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_slot *, 304 uint16_t, uint16_t); 305 static void aac_cmd_fib_rawio(struct 
aac_softstate *, struct aac_cmd *); 306 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *); 307 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *); 308 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *); 309 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *); 310 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *); 311 static void aac_start_waiting_io(struct aac_softstate *); 312 static void aac_drain_comp_q(struct aac_softstate *); 313 int aac_do_io(struct aac_softstate *, struct aac_cmd *); 314 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *); 315 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *); 316 static int aac_send_command(struct aac_softstate *, struct aac_slot *); 317 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *); 318 static int aac_dma_sync_ac(struct aac_cmd *); 319 static int aac_shutdown(struct aac_softstate *); 320 static int aac_reset_adapter(struct aac_softstate *); 321 static int aac_do_quiesce(struct aac_softstate *softs); 322 static int aac_do_unquiesce(struct aac_softstate *softs); 323 static void aac_unhold_bus(struct aac_softstate *, int); 324 static void aac_set_throttle(struct aac_softstate *, struct aac_device *, 325 int, int); 326 327 /* 328 * Adapter Initiated FIB handling function 329 */ 330 static int aac_handle_aif(struct aac_softstate *, struct aac_fib *); 331 332 /* 333 * Timeout handling thread function 334 */ 335 static void aac_daemon(void *); 336 337 /* 338 * IOCTL interface related functions 339 */ 340 static int aac_open(dev_t *, int, int, cred_t *); 341 static int aac_close(dev_t, int, int, cred_t *); 342 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); 343 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int); 344 345 /* 346 * FMA Prototypes 347 */ 348 static void aac_fm_init(struct aac_softstate *); 349 static void aac_fm_fini(struct aac_softstate *); 350 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *); 351 int aac_check_acc_handle(ddi_acc_handle_t); 352 int aac_check_dma_handle(ddi_dma_handle_t); 353 void aac_fm_ereport(struct aac_softstate *, char *); 354 355 /* 356 * Auto enumeration functions 357 */ 358 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t); 359 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t, 360 void *, dev_info_t **); 361 static int aac_dr_event(struct aac_softstate *, int, int, int); 362 363 #ifdef DEBUG 364 /* 365 * UART debug output support 366 */ 367 368 #define AAC_PRINT_BUFFER_SIZE 512 369 #define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. 
*/ 370 371 #define AAC_FW_DBG_STRLEN_OFFSET 0x00 372 #define AAC_FW_DBG_FLAGS_OFFSET 0x04 373 #define AAC_FW_DBG_BLED_OFFSET 0x08 374 375 static int aac_get_fw_debug_buffer(struct aac_softstate *); 376 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *); 377 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *); 378 379 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE]; 380 static char aac_fmt[] = " %s"; 381 static char aac_fmt_header[] = " %s.%d: %s"; 382 static kmutex_t aac_prt_mutex; 383 384 /* 385 * Debug flags to be put into the softstate flags field 386 * when initialized 387 */ 388 uint32_t aac_debug_flags = 389 /* AACDB_FLAGS_KERNEL_PRINT | */ 390 /* AACDB_FLAGS_FW_PRINT | */ 391 /* AACDB_FLAGS_MISC | */ 392 /* AACDB_FLAGS_FUNC1 | */ 393 /* AACDB_FLAGS_FUNC2 | */ 394 /* AACDB_FLAGS_SCMD | */ 395 /* AACDB_FLAGS_AIF | */ 396 /* AACDB_FLAGS_FIB | */ 397 /* AACDB_FLAGS_IOCTL | */ 398 0; 399 uint32_t aac_debug_fib_flags = 400 /* AACDB_FLAGS_FIB_RW | */ 401 /* AACDB_FLAGS_FIB_IOCTL | */ 402 /* AACDB_FLAGS_FIB_SRB | */ 403 /* AACDB_FLAGS_FIB_SYNC | */ 404 /* AACDB_FLAGS_FIB_HEADER | */ 405 /* AACDB_FLAGS_FIB_TIMEOUT | */ 406 0; 407 408 #endif /* DEBUG */ 409 410 static struct cb_ops aac_cb_ops = { 411 aac_open, /* open */ 412 aac_close, /* close */ 413 nodev, /* strategy */ 414 nodev, /* print */ 415 nodev, /* dump */ 416 nodev, /* read */ 417 nodev, /* write */ 418 aac_ioctl, /* ioctl */ 419 nodev, /* devmap */ 420 nodev, /* mmap */ 421 nodev, /* segmap */ 422 nochpoll, /* poll */ 423 ddi_prop_op, /* cb_prop_op */ 424 NULL, /* streamtab */ 425 D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 426 CB_REV, /* cb_rev */ 427 nodev, /* async I/O read entry point */ 428 nodev /* async I/O write entry point */ 429 }; 430 431 static struct dev_ops aac_dev_ops = { 432 DEVO_REV, 433 0, 434 nodev, 435 nulldev, 436 nulldev, 437 aac_attach, 438 aac_detach, 439 aac_reset, 440 &aac_cb_ops, 441 NULL, 442 NULL, 443 aac_quiesce, 444 }; 445 446 static struct modldrv aac_modldrv = { 447 &mod_driverops, 448 "AAC Driver " AAC_DRIVER_VERSION, 449 &aac_dev_ops, 450 }; 451 452 static struct modlinkage aac_modlinkage = { 453 MODREV_1, 454 &aac_modldrv, 455 NULL 456 }; 457 458 static struct aac_softstate *aac_softstatep; 459 460 /* 461 * Supported card list 462 * ordered in vendor id, subvendor id, subdevice id, and device id 463 */ 464 static struct aac_card_type aac_cards[] = { 465 {0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX, 466 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 467 "Dell", "PERC 3/Di"}, 468 {0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX, 469 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 470 "Dell", "PERC 3/Di"}, 471 {0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX, 472 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 473 "Dell", "PERC 3/Si"}, 474 {0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX, 475 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 476 "Dell", "PERC 3/Di"}, 477 {0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX, 478 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 479 "Dell", "PERC 3/Si"}, 480 {0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX, 481 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 482 "Dell", "PERC 3/Di"}, 483 {0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX, 484 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 485 "Dell", "PERC 3/Di"}, 486 {0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX, 487 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 488 "Dell", "PERC 3/Di"}, 489 {0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX, 490 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, 
AAC_TYPE_SCSI, 491 "Dell", "PERC 3/Di"}, 492 {0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX, 493 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 494 "Dell", "PERC 3/Di"}, 495 {0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX, 496 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 497 "Dell", "PERC 320/DC"}, 498 {0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX, 499 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"}, 500 501 {0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX, 502 0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"}, 503 {0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX, 504 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"}, 505 {0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT, 506 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"}, 507 508 {0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX, 509 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 510 {0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX, 511 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 512 513 {0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX, 514 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 515 "Adaptec", "2200S"}, 516 {0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX, 517 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 518 "Adaptec", "2120S"}, 519 {0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX, 520 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 521 "Adaptec", "2200S"}, 522 {0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX, 523 0, AAC_TYPE_SCSI, "Adaptec", "3230S"}, 524 {0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX, 525 0, AAC_TYPE_SCSI, "Adaptec", "3240S"}, 526 {0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX, 527 0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"}, 528 {0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX, 529 0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"}, 530 {0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT, 531 0, AAC_TYPE_SCSI, "Adaptec", "2230S"}, 532 {0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT, 533 0, AAC_TYPE_SCSI, "Adaptec", "2130S"}, 534 {0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX, 535 0, AAC_TYPE_SATA, "Adaptec", "2020SA"}, 536 {0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX, 537 0, AAC_TYPE_SATA, "Adaptec", "2025SA"}, 538 {0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX, 539 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"}, 540 {0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX, 541 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"}, 542 {0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX, 543 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"}, 544 {0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX, 545 0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"}, 546 {0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX, 547 0, AAC_TYPE_SCSI, "Adaptec", "2240S"}, 548 {0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX, 549 0, AAC_TYPE_SAS, "Adaptec", "4005SAS"}, 550 {0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX, 551 0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"}, 552 {0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX, 553 0, AAC_TYPE_SAS, "Adaptec", "4800SAS"}, 554 {0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX, 555 0, AAC_TYPE_SAS, "Adaptec", "4805SAS"}, 556 {0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT, 557 0, AAC_TYPE_SATA, "Adaptec", "2820SA"}, 558 {0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT, 559 0, AAC_TYPE_SATA, "Adaptec", "2620SA"}, 560 {0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT, 561 0, AAC_TYPE_SATA, "Adaptec", "2420SA"}, 562 {0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT, 563 0, AAC_TYPE_SATA, "ICP", "9024RO"}, 564 {0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT, 565 0, AAC_TYPE_SATA, "ICP", "9014RO"}, 566 {0x9005, 0x286, 0x9005, 
0x2a0, AAC_HWIF_RKT, 567 0, AAC_TYPE_SATA, "ICP", "9047MA"}, 568 {0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT, 569 0, AAC_TYPE_SATA, "ICP", "9087MA"}, 570 {0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX, 571 0, AAC_TYPE_SAS, "ICP", "9085LI"}, 572 {0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX, 573 0, AAC_TYPE_SAS, "ICP", "5085BR"}, 574 {0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT, 575 0, AAC_TYPE_SATA, "ICP", "9067MA"}, 576 {0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX, 577 0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"}, 578 {0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX, 579 0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"}, 580 {0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX, 581 0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"}, 582 {0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX, 583 0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"}, 584 {0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX, 585 0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"}, 586 {0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX, 587 0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"}, 588 589 {0, 0, 0, 0, AAC_HWIF_UNKNOWN, 590 0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"}, 591 }; 592 593 /* 594 * Hardware access functions for i960 based cards 595 */ 596 static struct aac_interface aac_rx_interface = { 597 aac_rx_get_fwstatus, 598 aac_rx_get_mailbox, 599 aac_rx_set_mailbox 600 }; 601 602 /* 603 * Hardware access functions for Rocket based cards 604 */ 605 static struct aac_interface aac_rkt_interface = { 606 aac_rkt_get_fwstatus, 607 aac_rkt_get_mailbox, 608 aac_rkt_set_mailbox 609 }; 610 611 ddi_device_acc_attr_t aac_acc_attr = { 612 DDI_DEVICE_ATTR_V0, 613 DDI_STRUCTURE_LE_ACC, 614 DDI_STRICTORDER_ACC 615 }; 616 617 static struct { 618 int size; 619 int notify; 620 } aac_qinfo[] = { 621 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 622 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 623 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 624 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 625 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 626 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 627 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 628 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 629 }; 630 631 /* 632 * Default aac dma attributes 633 */ 634 static ddi_dma_attr_t aac_dma_attr = { 635 DMA_ATTR_V0, 636 0, /* lowest usable address */ 637 0xffffffffull, /* high DMA address range */ 638 0xffffffffull, /* DMA counter register */ 639 AAC_DMA_ALIGN, /* DMA address alignment */ 640 1, /* DMA burstsizes */ 641 1, /* min effective DMA size */ 642 0xffffffffull, /* max DMA xfer size */ 643 0xffffffffull, /* segment boundary */ 644 1, /* s/g list length */ 645 AAC_BLK_SIZE, /* granularity of device */ 646 0 /* DMA transfer flags */ 647 }; 648 649 struct aac_drinfo { 650 struct aac_softstate *softs; 651 int tgt; 652 int lun; 653 int event; 654 }; 655 656 static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */ 657 static uint32_t aac_timebase = 0; /* internal timer in seconds */ 658 static uint32_t aac_sync_time = 0; /* next time to sync. with firmware */ 659 660 /* 661 * Warlock directives 662 * 663 * Different variables with the same types have to be protected by the 664 * same mutex; otherwise, warlock will complain with "variables don't 665 * seem to be protected consistently". For example, 666 * aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected 667 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to 668 * declare them as protected explictly at aac_cmd_dequeue(). 
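 * The _NOTE(SCHEME_PROTECTS_DATA()) directives below name the structures that
 * are serialized by other means (per scsi_pkt, per aac_cmd, per FIB, and so
 * on), so that warlock does not flag them as inconsistently protected.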
669 */ 670 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \ 671 scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \ 672 mode_format mode_geometry mode_header aac_cmd)) 673 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \ 674 aac_sge)) 675 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \ 676 aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \ 677 aac_sg_table aac_srb)) 678 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry)) 679 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address)) 680 _NOTE(SCHEME_PROTECTS_DATA("unique to dr event", aac_drinfo)) 681 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf)) 682 683 int 684 _init(void) 685 { 686 int rval = 0; 687 688 #ifdef DEBUG 689 mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL); 690 #endif 691 DBCALLED(NULL, 1); 692 693 if ((rval = ddi_soft_state_init((void *)&aac_softstatep, 694 sizeof (struct aac_softstate), 0)) != 0) 695 goto error; 696 697 if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) { 698 ddi_soft_state_fini((void *)&aac_softstatep); 699 goto error; 700 } 701 702 if ((rval = mod_install(&aac_modlinkage)) != 0) { 703 ddi_soft_state_fini((void *)&aac_softstatep); 704 scsi_hba_fini(&aac_modlinkage); 705 goto error; 706 } 707 return (rval); 708 709 error: 710 AACDB_PRINT(NULL, CE_WARN, "Mod init error!"); 711 #ifdef DEBUG 712 mutex_destroy(&aac_prt_mutex); 713 #endif 714 return (rval); 715 } 716 717 int 718 _info(struct modinfo *modinfop) 719 { 720 DBCALLED(NULL, 1); 721 return (mod_info(&aac_modlinkage, modinfop)); 722 } 723 724 /* 725 * An HBA driver cannot be unload unless you reboot, 726 * so this function will be of no use. 727 */ 728 int 729 _fini(void) 730 { 731 int rval; 732 733 DBCALLED(NULL, 1); 734 735 if ((rval = mod_remove(&aac_modlinkage)) != 0) 736 goto error; 737 738 scsi_hba_fini(&aac_modlinkage); 739 ddi_soft_state_fini((void *)&aac_softstatep); 740 #ifdef DEBUG 741 mutex_destroy(&aac_prt_mutex); 742 #endif 743 return (0); 744 745 error: 746 AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!"); 747 return (rval); 748 } 749 750 static int 751 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 752 { 753 int instance, i; 754 struct aac_softstate *softs = NULL; 755 int attach_state = 0; 756 char *data; 757 int intr_types; 758 759 DBCALLED(NULL, 1); 760 761 switch (cmd) { 762 case DDI_ATTACH: 763 break; 764 case DDI_RESUME: 765 return (DDI_FAILURE); 766 default: 767 return (DDI_FAILURE); 768 } 769 770 instance = ddi_get_instance(dip); 771 772 /* Get soft state */ 773 if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) { 774 AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state"); 775 goto error; 776 } 777 softs = ddi_get_soft_state(aac_softstatep, instance); 778 attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED; 779 780 softs->instance = instance; 781 softs->devinfo_p = dip; 782 softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr; 783 softs->addr_dma_attr.dma_attr_granular = 1; 784 softs->acc_attr = aac_acc_attr; 785 softs->card = AAC_UNKNOWN_CARD; 786 #ifdef DEBUG 787 softs->debug_flags = aac_debug_flags; 788 softs->debug_fib_flags = aac_debug_fib_flags; 789 #endif 790 791 /* Initialize FMA */ 792 aac_fm_init(softs); 793 794 /* Check the card type */ 795 if (aac_check_card_type(softs) == AACERR) { 796 AACDB_PRINT(softs, CE_WARN, "Card not supported"); 797 goto error; 798 } 799 /* We have found the right card and everything is OK */ 800 
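/*
 * attach_state accumulates AAC_ATTACH_* bits as each setup step completes,
 * so that the error path at the end of aac_attach() unwinds only the work
 * that was actually done.
 */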
attach_state |= AAC_ATTACH_CARD_DETECTED; 801 802 /* Map PCI mem space */ 803 if (ddi_regs_map_setup(dip, 1, 804 (caddr_t *)&softs->pci_mem_base_vaddr, 0, 805 softs->map_size_min, &softs->acc_attr, 806 &softs->pci_mem_handle) != DDI_SUCCESS) 807 goto error; 808 809 softs->map_size = softs->map_size_min; 810 attach_state |= AAC_ATTACH_PCI_MEM_MAPPED; 811 812 AAC_DISABLE_INTR(softs); 813 814 /* Get the type of device intrrupts */ 815 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) { 816 AACDB_PRINT(softs, CE_WARN, 817 "ddi_intr_get_supported_types() failed"); 818 goto error; 819 } 820 AACDB_PRINT(softs, CE_NOTE, 821 "ddi_intr_get_supported_types() ret: 0x%x", intr_types); 822 823 /* Query interrupt, and alloc/init all needed struct */ 824 if (intr_types & DDI_INTR_TYPE_MSI) { 825 if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI) 826 != DDI_SUCCESS) { 827 AACDB_PRINT(softs, CE_WARN, 828 "MSI interrupt query failed"); 829 goto error; 830 } 831 softs->intr_type = DDI_INTR_TYPE_MSI; 832 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 833 if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED) 834 != DDI_SUCCESS) { 835 AACDB_PRINT(softs, CE_WARN, 836 "FIXED interrupt query failed"); 837 goto error; 838 } 839 softs->intr_type = DDI_INTR_TYPE_FIXED; 840 } else { 841 AACDB_PRINT(softs, CE_WARN, 842 "Device cannot suppport both FIXED and MSI interrupts"); 843 goto error; 844 } 845 846 /* Init mutexes */ 847 mutex_init(&softs->q_comp_mutex, NULL, 848 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri)); 849 cv_init(&softs->event, NULL, CV_DRIVER, NULL); 850 mutex_init(&softs->aifq_mutex, NULL, 851 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri)); 852 cv_init(&softs->aifv, NULL, CV_DRIVER, NULL); 853 cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL); 854 mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER, 855 DDI_INTR_PRI(softs->intr_pri)); 856 attach_state |= AAC_ATTACH_KMUTEX_INITED; 857 858 /* Check for legacy device naming support */ 859 softs->legacy = 1; /* default to use legacy name */ 860 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 861 "legacy-name-enable", &data) == DDI_SUCCESS)) { 862 if (strcmp(data, "no") == 0) { 863 AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled"); 864 softs->legacy = 0; 865 } 866 ddi_prop_free(data); 867 } 868 869 /* 870 * Everything has been set up till now, 871 * we will do some common attach. 872 */ 873 if (aac_common_attach(softs) == AACERR) 874 goto error; 875 attach_state |= AAC_ATTACH_COMM_SPACE_SETUP; 876 877 /* Init the cmd queues */ 878 for (i = 0; i < AAC_CMDQ_NUM; i++) 879 aac_cmd_initq(&softs->q_wait[i]); 880 aac_cmd_initq(&softs->q_busy); 881 aac_cmd_initq(&softs->q_comp); 882 883 if (aac_hba_setup(softs) != AACOK) 884 goto error; 885 attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP; 886 887 /* Connect interrupt handlers */ 888 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id, 889 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) { 890 AACDB_PRINT(softs, CE_WARN, 891 "Can not setup soft interrupt handler!"); 892 goto error; 893 } 894 attach_state |= AAC_ATTACH_SOFT_INTR_SETUP; 895 896 if (aac_add_intrs(softs) != DDI_SUCCESS) { 897 AACDB_PRINT(softs, CE_WARN, 898 "Interrupt registration failed, intr type: %s", 899 softs->intr_type == DDI_INTR_TYPE_MSI ? 
"MSI" : "FIXED"); 900 goto error; 901 } 902 attach_state |= AAC_ATTACH_HARD_INTR_SETUP; 903 904 /* Create devctl/scsi nodes for cfgadm */ 905 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 906 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) { 907 AACDB_PRINT(softs, CE_WARN, "failed to create devctl node"); 908 goto error; 909 } 910 attach_state |= AAC_ATTACH_CREATE_DEVCTL; 911 912 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance), 913 DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 914 AACDB_PRINT(softs, CE_WARN, "failed to create scsi node"); 915 goto error; 916 } 917 attach_state |= AAC_ATTACH_CREATE_SCSI; 918 919 /* Create aac node for app. to issue ioctls */ 920 if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance), 921 DDI_PSEUDO, 0) != DDI_SUCCESS) { 922 AACDB_PRINT(softs, CE_WARN, "failed to create aac node"); 923 goto error; 924 } 925 926 /* Create a taskq for dealing with dr events */ 927 if ((softs->taskq = ddi_taskq_create(dip, "aac_dr_taskq", 1, 928 TASKQ_DEFAULTPRI, 0)) == NULL) { 929 AACDB_PRINT(softs, CE_WARN, "ddi_taskq_create failed"); 930 goto error; 931 } 932 933 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 934 softs->state = AAC_STATE_RUN; 935 936 /* Create a thread for command timeout */ 937 softs->timeout_id = timeout(aac_daemon, (void *)softs, 938 (60 * drv_usectohz(1000000))); 939 940 /* Common attach is OK, so we are attached! */ 941 AAC_ENABLE_INTR(softs); 942 ddi_report_dev(dip); 943 AACDB_PRINT(softs, CE_NOTE, "aac attached ok"); 944 return (DDI_SUCCESS); 945 946 error: 947 if (softs && softs->taskq) 948 ddi_taskq_destroy(softs->taskq); 949 if (attach_state & AAC_ATTACH_CREATE_SCSI) 950 ddi_remove_minor_node(dip, "scsi"); 951 if (attach_state & AAC_ATTACH_CREATE_DEVCTL) 952 ddi_remove_minor_node(dip, "devctl"); 953 if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP) 954 aac_common_detach(softs); 955 if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) { 956 (void) scsi_hba_detach(dip); 957 scsi_hba_tran_free(AAC_DIP2TRAN(dip)); 958 } 959 if (attach_state & AAC_ATTACH_HARD_INTR_SETUP) 960 aac_remove_intrs(softs); 961 if (attach_state & AAC_ATTACH_SOFT_INTR_SETUP) 962 ddi_remove_softintr(softs->softint_id); 963 if (attach_state & AAC_ATTACH_KMUTEX_INITED) { 964 mutex_destroy(&softs->q_comp_mutex); 965 cv_destroy(&softs->event); 966 mutex_destroy(&softs->aifq_mutex); 967 cv_destroy(&softs->aifv); 968 cv_destroy(&softs->drain_cv); 969 mutex_destroy(&softs->io_lock); 970 } 971 if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED) 972 ddi_regs_map_free(&softs->pci_mem_handle); 973 aac_fm_fini(softs); 974 if (attach_state & AAC_ATTACH_CARD_DETECTED) 975 softs->card = AACERR; 976 if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED) 977 ddi_soft_state_free(aac_softstatep, instance); 978 return (DDI_FAILURE); 979 } 980 981 static int 982 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 983 { 984 scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip); 985 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 986 987 DBCALLED(softs, 1); 988 989 switch (cmd) { 990 case DDI_DETACH: 991 break; 992 case DDI_SUSPEND: 993 return (DDI_FAILURE); 994 default: 995 return (DDI_FAILURE); 996 } 997 998 mutex_enter(&softs->io_lock); 999 AAC_DISABLE_INTR(softs); 1000 softs->state = AAC_STATE_STOPPED; 1001 1002 mutex_exit(&softs->io_lock); 1003 (void) untimeout(softs->timeout_id); 1004 mutex_enter(&softs->io_lock); 1005 softs->timeout_id = 0; 1006 1007 ddi_taskq_destroy(softs->taskq); 1008 1009 ddi_remove_minor_node(dip, "aac"); 1010 ddi_remove_minor_node(dip, 
"scsi"); 1011 ddi_remove_minor_node(dip, "devctl"); 1012 1013 mutex_exit(&softs->io_lock); 1014 aac_remove_intrs(softs); 1015 ddi_remove_softintr(softs->softint_id); 1016 1017 aac_common_detach(softs); 1018 1019 (void) scsi_hba_detach(dip); 1020 scsi_hba_tran_free(tran); 1021 1022 mutex_destroy(&softs->q_comp_mutex); 1023 cv_destroy(&softs->event); 1024 mutex_destroy(&softs->aifq_mutex); 1025 cv_destroy(&softs->aifv); 1026 cv_destroy(&softs->drain_cv); 1027 mutex_destroy(&softs->io_lock); 1028 1029 ddi_regs_map_free(&softs->pci_mem_handle); 1030 aac_fm_fini(softs); 1031 softs->hwif = AAC_HWIF_UNKNOWN; 1032 softs->card = AAC_UNKNOWN_CARD; 1033 ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip)); 1034 1035 return (DDI_SUCCESS); 1036 } 1037 1038 /*ARGSUSED*/ 1039 static int 1040 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1041 { 1042 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1043 1044 DBCALLED(softs, 1); 1045 1046 mutex_enter(&softs->io_lock); 1047 (void) aac_shutdown(softs); 1048 mutex_exit(&softs->io_lock); 1049 1050 return (DDI_SUCCESS); 1051 } 1052 1053 /* 1054 * quiesce(9E) entry point. 1055 * 1056 * This function is called when the system is single-threaded at high 1057 * PIL with preemption disabled. Therefore, this function must not be 1058 * blocked. 1059 * 1060 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 1061 * DDI_FAILURE indicates an error condition and should almost never happen. 1062 */ 1063 static int 1064 aac_quiesce(dev_info_t *dip) 1065 { 1066 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1067 1068 if (softs == NULL) 1069 return (DDI_FAILURE); 1070 1071 AAC_DISABLE_INTR(softs); 1072 1073 return (DDI_SUCCESS); 1074 } 1075 1076 /* 1077 * Bring the controller down to a dormant state and detach all child devices. 1078 * This function is called before detach or system shutdown. 1079 * Note: we can assume that the q_wait on the controller is empty, as we 1080 * won't allow shutdown if any device is open. 1081 */ 1082 static int 1083 aac_shutdown(struct aac_softstate *softs) 1084 { 1085 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 1086 struct aac_close_command *cc = (struct aac_close_command *) \ 1087 &softs->sync_slot.fibp->data[0]; 1088 int rval; 1089 1090 ddi_put32(acc, &cc->Command, VM_CloseAll); 1091 ddi_put32(acc, &cc->ContainerId, 0xfffffffful); 1092 1093 /* Flush all caches, set FW to write through mode */ 1094 rval = aac_sync_fib(softs, ContainerCommand, 1095 AAC_FIB_SIZEOF(struct aac_close_command)); 1096 1097 AACDB_PRINT(softs, CE_NOTE, 1098 "shutting down aac %s", (rval == AACOK) ? 
"ok" : "fail"); 1099 return (rval); 1100 } 1101 1102 static uint_t 1103 aac_softintr(caddr_t arg) 1104 { 1105 struct aac_softstate *softs = (void *)arg; 1106 1107 if (!AAC_IS_Q_EMPTY(&softs->q_comp)) { 1108 aac_drain_comp_q(softs); 1109 return (DDI_INTR_CLAIMED); 1110 } else { 1111 return (DDI_INTR_UNCLAIMED); 1112 } 1113 } 1114 1115 /* 1116 * Setup auto sense data for pkt 1117 */ 1118 static void 1119 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key, 1120 uchar_t add_code, uchar_t qual_code, uint64_t info) 1121 { 1122 struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp); 1123 1124 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */ 1125 pkt->pkt_state |= STATE_ARQ_DONE; 1126 1127 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1128 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1129 arqstat->sts_rqpkt_resid = 0; 1130 arqstat->sts_rqpkt_state = 1131 STATE_GOT_BUS | 1132 STATE_GOT_TARGET | 1133 STATE_SENT_CMD | 1134 STATE_XFERRED_DATA; 1135 arqstat->sts_rqpkt_statistics = 0; 1136 1137 if (info <= 0xfffffffful) { 1138 arqstat->sts_sensedata.es_valid = 1; 1139 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE; 1140 arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT; 1141 arqstat->sts_sensedata.es_key = key; 1142 arqstat->sts_sensedata.es_add_code = add_code; 1143 arqstat->sts_sensedata.es_qual_code = qual_code; 1144 1145 arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF; 1146 arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF; 1147 arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF; 1148 arqstat->sts_sensedata.es_info_4 = info & 0xFF; 1149 } else { /* 64-bit LBA */ 1150 struct scsi_descr_sense_hdr *dsp; 1151 struct scsi_information_sense_descr *isd; 1152 1153 dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata; 1154 dsp->ds_class = CLASS_EXTENDED_SENSE; 1155 dsp->ds_code = CODE_FMT_DESCR_CURRENT; 1156 dsp->ds_key = key; 1157 dsp->ds_add_code = add_code; 1158 dsp->ds_qual_code = qual_code; 1159 dsp->ds_addl_sense_length = 1160 sizeof (struct scsi_information_sense_descr); 1161 1162 isd = (struct scsi_information_sense_descr *)(dsp+1); 1163 isd->isd_descr_type = DESCR_INFORMATION; 1164 isd->isd_valid = 1; 1165 isd->isd_information[0] = (info >> 56) & 0xFF; 1166 isd->isd_information[1] = (info >> 48) & 0xFF; 1167 isd->isd_information[2] = (info >> 40) & 0xFF; 1168 isd->isd_information[3] = (info >> 32) & 0xFF; 1169 isd->isd_information[4] = (info >> 24) & 0xFF; 1170 isd->isd_information[5] = (info >> 16) & 0xFF; 1171 isd->isd_information[6] = (info >> 8) & 0xFF; 1172 isd->isd_information[7] = (info) & 0xFF; 1173 } 1174 } 1175 1176 /* 1177 * Setup auto sense data for HARDWARE ERROR 1178 */ 1179 static void 1180 aac_set_arq_data_hwerr(struct aac_cmd *acp) 1181 { 1182 union scsi_cdb *cdbp; 1183 uint64_t err_blkno; 1184 1185 cdbp = (void *)acp->pkt->pkt_cdbp; 1186 err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp); 1187 aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno); 1188 } 1189 1190 /* 1191 * Setup auto sense data for UNIT ATTENTION 1192 */ 1193 /*ARGSUSED*/ 1194 static void 1195 aac_set_arq_data_reset(struct aac_softstate *softs, struct aac_cmd *acp) 1196 { 1197 struct aac_container *dvp = (struct aac_container *)acp->dvp; 1198 1199 ASSERT(dvp->dev.type == AAC_DEV_LD); 1200 1201 if (dvp->reset) { 1202 dvp->reset = 0; 1203 aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 0x29, 0x02, 0); 1204 } 1205 } 1206 1207 /* 1208 * Send a command to the adapter in New Comm. 
interface
1209  */
1210 static int
1211 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
1212 {
1213     uint32_t index, device;
1214 
1215     index = PCI_MEM_GET32(softs, AAC_IQUE);
1216     if (index == 0xffffffffUL) {
1217         index = PCI_MEM_GET32(softs, AAC_IQUE);
1218         if (index == 0xffffffffUL)
1219             return (AACERR);
1220     }
1221 
1222     device = index;
1223     PCI_MEM_PUT32(softs, device,
1224         (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
1225     device += 4;
1226     PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
1227     device += 4;
1228     PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
1229     PCI_MEM_PUT32(softs, AAC_IQUE, index);
1230     return (AACOK);
1231 }
1232 
1233 static void
1234 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
1235 {
1236     struct aac_device *dvp = acp->dvp;
1237     int q = AAC_CMDQ(acp);
1238 
1239     if (acp->slotp) { /* outstanding cmd */
1240         aac_release_slot(softs, acp->slotp);
1241         acp->slotp = NULL;
1242         if (dvp) {
1243             dvp->ncmds[q]--;
1244             if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
1245                 dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
1246                 aac_set_throttle(softs, dvp, q,
1247                     softs->total_slots);
1248         }
1249         softs->bus_ncmds[q]--;
1250         (void) aac_cmd_delete(&softs->q_busy, acp);
1251     } else { /* cmd in waiting queue */
1252         aac_cmd_delete(&softs->q_wait[q], acp);
1253     }
1254 
1255     if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
1256         mutex_enter(&softs->q_comp_mutex);
1257         aac_cmd_enqueue(&softs->q_comp, acp);
1258         mutex_exit(&softs->q_comp_mutex);
1259     } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
1260         cv_broadcast(&softs->event);
1261     }
1262 }
1263 
1264 static void
1265 aac_handle_io(struct aac_softstate *softs, int index)
1266 {
1267     struct aac_slot *slotp;
1268     struct aac_cmd *acp;
1269     uint32_t fast;
1270 
1271     fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
1272     index >>= 2;
1273 
1274     /* Make sure the firmware-reported index is valid */
1275     ASSERT(index >= 0 && index < softs->total_slots);
1276     slotp = &softs->io_slot[index];
1277     ASSERT(slotp->index == index);
1278     acp = slotp->acp;
1279 
1280     if (acp == NULL || acp->slotp != slotp) {
1281         cmn_err(CE_WARN,
1282             "Firmware error: invalid slot index received from FW");
1283         return;
1284     }
1285 
1286     acp->flags |= AAC_CMD_CMPLT;
1287     (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1288 
1289     if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1290         /*
1291          * For fast response IO, the firmware does not return any FIB
1292          * data, so we need to fill in the FIB status and state so that
1293          * FIB users can handle it correctly.
1294          */
1295         if (fast) {
1296             uint32_t state;
1297 
1298             state = ddi_get32(slotp->fib_acc_handle,
1299                 &slotp->fibp->Header.XferState);
1300             /*
1301              * Update state for the CPU, not for the device; no DMA sync
1302              * needed
1303              */
1304             ddi_put32(slotp->fib_acc_handle,
1305                 &slotp->fibp->Header.XferState,
1306                 state | AAC_FIBSTATE_DONEADAP);
1307             ddi_put32(slotp->fib_acc_handle,
1308                 (void *)&slotp->fibp->data[0], ST_OK);
1309         }
1310 
1311         /* Handle completed ac */
1312         acp->ac_comp(softs, acp);
1313     } else {
1314         ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1315         acp->flags |= AAC_CMD_ERR;
1316         if (acp->pkt) {
1317             acp->pkt->pkt_reason = CMD_TRAN_ERR;
1318             acp->pkt->pkt_statistics = 0;
1319         }
1320     }
1321     aac_end_io(softs, acp);
1322 }
1323 
1324 /*
1325  * Interrupt handler for the New Comm. interface
1326  * The New Comm. interface uses a different mechanism for interrupts.
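 * Each value read from the outbound queue (AAC_OUTB_GET) encodes either a
 * completed FIB (slot index plus fast-response flag), an AIF address (with
 * AAC_SENDERADDR_MASK_AIF set), 0xfffffffe (the adapter wants more work),
 * or 0xffffffff (queue empty).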
 * There are no explicit
1327  * message queues; the driver need only access the mapped PCI mem space to
1328  * find the completed FIB or AIF.
1329  */
1330 static int
1331 aac_process_intr_new(struct aac_softstate *softs)
1332 {
1333     uint32_t index;
1334 
1335     index = AAC_OUTB_GET(softs);
1336     if (index == 0xfffffffful)
1337         index = AAC_OUTB_GET(softs);
1338     if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1339         ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1340         return (0);
1341     }
1342     if (index != 0xfffffffful) {
1343         do {
1344             if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1345                 aac_handle_io(softs, index);
1346             } else if (index != 0xfffffffeul) {
1347                 struct aac_fib *fibp; /* FIB in AIF queue */
1348                 uint16_t fib_size, fib_size0;
1349 
1350                 /*
1351                  * 0xfffffffe means that the controller wants
1352                  * more work, ignore it for now. Otherwise,
1353                  * AIF received.
1354                  */
1355                 index &= ~2;
1356 
1357                 mutex_enter(&softs->aifq_mutex);
1358                 /*
1359                  * Copy AIF from adapter to the empty AIF slot
1360                  */
1361                 fibp = &softs->aifq[softs->aifq_idx].d;
1362                 fib_size0 = PCI_MEM_GET16(softs, index + \
1363                     offsetof(struct aac_fib, Header.Size));
1364                 fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1365                     AAC_FIB_SIZE : fib_size0;
1366                 PCI_MEM_REP_GET8(softs, index, fibp,
1367                     fib_size);
1368 
1369                 if (aac_check_acc_handle(softs-> \
1370                     pci_mem_handle) == DDI_SUCCESS)
1371                     (void) aac_handle_aif(softs, fibp);
1372                 else
1373                     ddi_fm_service_impact(softs->devinfo_p,
1374                         DDI_SERVICE_UNAFFECTED);
1375                 mutex_exit(&softs->aifq_mutex);
1376 
1377                 /*
1378                  * AIF memory is owned by the adapter, so let it
1379                  * know that we are done with it.
1380                  */
1381                 AAC_OUTB_SET(softs, index);
1382                 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1383             }
1384 
1385             index = AAC_OUTB_GET(softs);
1386         } while (index != 0xfffffffful);
1387 
1388         /*
1389          * Process waiting cmds before starting new ones to
1390          * ensure earlier IOs are serviced first.
1391          */
1392         aac_start_waiting_io(softs);
1393         return (AAC_DB_COMMAND_READY);
1394     } else {
1395         return (0);
1396     }
1397 }
1398 
1399 static uint_t
1400 aac_intr_new(caddr_t arg)
1401 {
1402     struct aac_softstate *softs = (void *)arg;
1403     uint_t rval;
1404 
1405     mutex_enter(&softs->io_lock);
1406     if (aac_process_intr_new(softs))
1407         rval = DDI_INTR_CLAIMED;
1408     else
1409         rval = DDI_INTR_UNCLAIMED;
1410     mutex_exit(&softs->io_lock);
1411 
1412     aac_drain_comp_q(softs);
1413     return (rval);
1414 }
1415 
1416 /*
1417  * Interrupt handler for the old interface
1418  * Explicit message queues are used to send FIBs to and get completed FIBs
1419  * from the adapter. The driver and adapter maintain the queues in a
1420  * producer/consumer manner. The driver has to query the queues to find the
1421  * completed FIB.
1422  */
1423 static int
aac_process_intr_old(struct aac_softstate *softs)
1424 {
1425     uint16_t status;
1426 
1427     status = AAC_STATUS_GET(softs);
1428     if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1429         ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1430         return (DDI_INTR_UNCLAIMED);
1431     }
1432     if (status & AAC_DB_RESPONSE_READY) {
1433         int slot_idx;
1434 
1435         /* ACK the intr */
1436         AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1437         (void) AAC_STATUS_GET(softs);
1438         while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1439             &slot_idx) == AACOK)
1440             aac_handle_io(softs, slot_idx);
1441 
1442         /*
1443          * Process waiting cmds before starting new ones to
1444          * ensure earlier IOs are serviced first.
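         * The completions handled above have released command slots, which
         * the queued commands can now reuse.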
1445 */ 1446 aac_start_waiting_io(softs); 1447 return (AAC_DB_RESPONSE_READY); 1448 } else if (status & AAC_DB_COMMAND_READY) { 1449 int aif_idx; 1450 1451 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY); 1452 (void) AAC_STATUS_GET(softs); 1453 if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) == 1454 AACOK) { 1455 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 1456 struct aac_fib *fibp; /* FIB in AIF queue */ 1457 struct aac_fib *fibp0; /* FIB in communication space */ 1458 uint16_t fib_size, fib_size0; 1459 uint32_t fib_xfer_state; 1460 uint32_t addr, size; 1461 1462 ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS)); 1463 1464 #define AAC_SYNC_AIF(softs, aif_idx, type) \ 1465 { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \ 1466 offsetof(struct aac_comm_space, \ 1467 adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \ 1468 (type)); } 1469 1470 mutex_enter(&softs->aifq_mutex); 1471 /* Copy AIF from adapter to the empty AIF slot */ 1472 fibp = &softs->aifq[softs->aifq_idx].d; 1473 AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU); 1474 fibp0 = &softs->comm_space->adapter_fibs[aif_idx]; 1475 fib_size0 = ddi_get16(acc, &fibp0->Header.Size); 1476 fib_size = (fib_size0 > AAC_FIB_SIZE) ? 1477 AAC_FIB_SIZE : fib_size0; 1478 ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0, 1479 fib_size, DDI_DEV_AUTOINCR); 1480 1481 (void) aac_handle_aif(softs, fibp); 1482 mutex_exit(&softs->aifq_mutex); 1483 1484 /* Complete AIF back to adapter with good status */ 1485 fib_xfer_state = LE_32(fibp->Header.XferState); 1486 if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) { 1487 ddi_put32(acc, &fibp0->Header.XferState, 1488 fib_xfer_state | AAC_FIBSTATE_DONEHOST); 1489 ddi_put32(acc, (void *)&fibp0->data[0], ST_OK); 1490 if (fib_size0 > AAC_FIB_SIZE) 1491 ddi_put16(acc, &fibp0->Header.Size, 1492 AAC_FIB_SIZE); 1493 AAC_SYNC_AIF(softs, aif_idx, 1494 DDI_DMA_SYNC_FORDEV); 1495 } 1496 1497 /* Put the AIF response on the response queue */ 1498 addr = ddi_get32(acc, 1499 &softs->comm_space->adapter_fibs[aif_idx]. \ 1500 Header.SenderFibAddress); 1501 size = (uint32_t)ddi_get16(acc, 1502 &softs->comm_space->adapter_fibs[aif_idx]. \ 1503 Header.Size); 1504 ddi_put32(acc, 1505 &softs->comm_space->adapter_fibs[aif_idx]. \ 1506 Header.ReceiverFibAddress, addr); 1507 if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q, 1508 addr, size) == AACERR) 1509 cmn_err(CE_NOTE, "!AIF ack failed"); 1510 } 1511 return (AAC_DB_COMMAND_READY); 1512 } else if (status & AAC_DB_PRINTF_READY) { 1513 /* ACK the intr */ 1514 AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY); 1515 (void) AAC_STATUS_GET(softs); 1516 (void) ddi_dma_sync(softs->comm_space_dma_handle, 1517 offsetof(struct aac_comm_space, adapter_print_buf), 1518 AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU); 1519 if (aac_check_dma_handle(softs->comm_space_dma_handle) == 1520 DDI_SUCCESS) 1521 cmn_err(CE_NOTE, "MSG From Adapter: %s", 1522 softs->comm_space->adapter_print_buf); 1523 else 1524 ddi_fm_service_impact(softs->devinfo_p, 1525 DDI_SERVICE_UNAFFECTED); 1526 AAC_NOTIFY(softs, AAC_DB_PRINTF_READY); 1527 return (AAC_DB_PRINTF_READY); 1528 } else if (status & AAC_DB_COMMAND_NOT_FULL) { 1529 /* 1530 * Without these two condition statements, the OS could hang 1531 * after a while, especially if there are a lot of AIF's to 1532 * handle, for instance if a drive is pulled from an array 1533 * under heavy load. 
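 * Clearing the NOT_FULL doorbell bits here only acknowledges the
 * notification; there is no FIB data to fetch for these interrupts.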
1534 */ 1535 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1536 return (AAC_DB_COMMAND_NOT_FULL); 1537 } else if (status & AAC_DB_RESPONSE_NOT_FULL) { 1538 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1539 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL); 1540 return (AAC_DB_RESPONSE_NOT_FULL); 1541 } else { 1542 return (0); 1543 } 1544 } 1545 1546 static uint_t 1547 aac_intr_old(caddr_t arg) 1548 { 1549 struct aac_softstate *softs = (void *)arg; 1550 int rval; 1551 1552 mutex_enter(&softs->io_lock); 1553 if (aac_process_intr_old(softs)) 1554 rval = DDI_INTR_CLAIMED; 1555 else 1556 rval = DDI_INTR_UNCLAIMED; 1557 mutex_exit(&softs->io_lock); 1558 1559 aac_drain_comp_q(softs); 1560 return (rval); 1561 } 1562 1563 /* 1564 * Query FIXED or MSI interrupts 1565 */ 1566 static int 1567 aac_query_intrs(struct aac_softstate *softs, int intr_type) 1568 { 1569 dev_info_t *dip = softs->devinfo_p; 1570 int avail, actual, intr_size, count; 1571 int i, flag, ret; 1572 1573 AACDB_PRINT(softs, CE_NOTE, 1574 "aac_query_intrs:interrupt type 0x%x", intr_type); 1575 1576 /* Get number of interrupts */ 1577 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 1578 if ((ret != DDI_SUCCESS) || (count == 0)) { 1579 AACDB_PRINT(softs, CE_WARN, 1580 "ddi_intr_get_nintrs() failed, ret %d count %d", 1581 ret, count); 1582 return (DDI_FAILURE); 1583 } 1584 1585 /* Get number of available interrupts */ 1586 ret = ddi_intr_get_navail(dip, intr_type, &avail); 1587 if ((ret != DDI_SUCCESS) || (avail == 0)) { 1588 AACDB_PRINT(softs, CE_WARN, 1589 "ddi_intr_get_navail() failed, ret %d avail %d", 1590 ret, avail); 1591 return (DDI_FAILURE); 1592 } 1593 1594 AACDB_PRINT(softs, CE_NOTE, 1595 "ddi_intr_get_nvail returned %d, navail() returned %d", 1596 count, avail); 1597 1598 /* Allocate an array of interrupt handles */ 1599 intr_size = count * sizeof (ddi_intr_handle_t); 1600 softs->htable = kmem_alloc(intr_size, KM_SLEEP); 1601 1602 if (intr_type == DDI_INTR_TYPE_MSI) { 1603 count = 1; /* only one vector needed by now */ 1604 flag = DDI_INTR_ALLOC_STRICT; 1605 } else { /* must be DDI_INTR_TYPE_FIXED */ 1606 flag = DDI_INTR_ALLOC_NORMAL; 1607 } 1608 1609 /* Call ddi_intr_alloc() */ 1610 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0, 1611 count, &actual, flag); 1612 1613 if ((ret != DDI_SUCCESS) || (actual == 0)) { 1614 AACDB_PRINT(softs, CE_WARN, 1615 "ddi_intr_alloc() failed, ret = %d", ret); 1616 actual = 0; 1617 goto error; 1618 } 1619 1620 if (actual < count) { 1621 AACDB_PRINT(softs, CE_NOTE, 1622 "Requested: %d, Received: %d", count, actual); 1623 goto error; 1624 } 1625 1626 softs->intr_cnt = actual; 1627 1628 /* Get priority for first msi, assume remaining are all the same */ 1629 if ((ret = ddi_intr_get_pri(softs->htable[0], 1630 &softs->intr_pri)) != DDI_SUCCESS) { 1631 AACDB_PRINT(softs, CE_WARN, 1632 "ddi_intr_get_pri() failed, ret = %d", ret); 1633 goto error; 1634 } 1635 1636 /* Test for high level mutex */ 1637 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) { 1638 AACDB_PRINT(softs, CE_WARN, 1639 "aac_query_intrs: Hi level interrupt not supported"); 1640 goto error; 1641 } 1642 1643 return (DDI_SUCCESS); 1644 1645 error: 1646 /* Free already allocated intr */ 1647 for (i = 0; i < actual; i++) 1648 (void) ddi_intr_free(softs->htable[i]); 1649 1650 kmem_free(softs->htable, intr_size); 1651 return (DDI_FAILURE); 1652 } 1653 1654 /* 1655 * Register FIXED or MSI interrupts, and enable them 1656 */ 1657 static int 1658 aac_add_intrs(struct aac_softstate *softs) 1659 { 1660 int i, ret; 1661 int 
intr_size, actual; 1662 ddi_intr_handler_t *aac_intr; 1663 1664 actual = softs->intr_cnt; 1665 intr_size = actual * sizeof (ddi_intr_handle_t); 1666 aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ? 1667 aac_intr_new : aac_intr_old); 1668 1669 /* Call ddi_intr_add_handler() */ 1670 for (i = 0; i < actual; i++) { 1671 if ((ret = ddi_intr_add_handler(softs->htable[i], 1672 aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) { 1673 cmn_err(CE_WARN, 1674 "ddi_intr_add_handler() failed ret = %d", ret); 1675 1676 /* Free already allocated intr */ 1677 for (i = 0; i < actual; i++) 1678 (void) ddi_intr_free(softs->htable[i]); 1679 1680 kmem_free(softs->htable, intr_size); 1681 return (DDI_FAILURE); 1682 } 1683 } 1684 1685 if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap)) 1686 != DDI_SUCCESS) { 1687 cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret); 1688 1689 /* Free already allocated intr */ 1690 for (i = 0; i < actual; i++) 1691 (void) ddi_intr_free(softs->htable[i]); 1692 1693 kmem_free(softs->htable, intr_size); 1694 return (DDI_FAILURE); 1695 } 1696 1697 /* Enable interrupts */ 1698 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1699 /* for MSI block enable */ 1700 (void) ddi_intr_block_enable(softs->htable, softs->intr_cnt); 1701 } else { 1702 /* Call ddi_intr_enable() for legacy/MSI non block enable */ 1703 for (i = 0; i < softs->intr_cnt; i++) 1704 (void) ddi_intr_enable(softs->htable[i]); 1705 } 1706 1707 return (DDI_SUCCESS); 1708 } 1709 1710 /* 1711 * Unregister FIXED or MSI interrupts 1712 */ 1713 static void 1714 aac_remove_intrs(struct aac_softstate *softs) 1715 { 1716 int i; 1717 1718 /* Disable all interrupts */ 1719 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1720 /* Call ddi_intr_block_disable() */ 1721 (void) ddi_intr_block_disable(softs->htable, softs->intr_cnt); 1722 } else { 1723 for (i = 0; i < softs->intr_cnt; i++) 1724 (void) ddi_intr_disable(softs->htable[i]); 1725 } 1726 1727 /* Call ddi_intr_remove_handler() */ 1728 for (i = 0; i < softs->intr_cnt; i++) { 1729 (void) ddi_intr_remove_handler(softs->htable[i]); 1730 (void) ddi_intr_free(softs->htable[i]); 1731 } 1732 1733 kmem_free(softs->htable, softs->intr_cnt * sizeof (ddi_intr_handle_t)); 1734 } 1735 1736 /* 1737 * Set pkt_reason and OR in pkt_statistics flag 1738 */ 1739 static void 1740 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp, 1741 uchar_t reason, uint_t stat) 1742 { 1743 #ifndef __lock_lint 1744 _NOTE(ARGUNUSED(softs)) 1745 #endif 1746 if (acp->pkt->pkt_reason == CMD_CMPLT) 1747 acp->pkt->pkt_reason = reason; 1748 acp->pkt->pkt_statistics |= stat; 1749 } 1750 1751 /* 1752 * Handle a finished pkt of soft SCMD 1753 */ 1754 static void 1755 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp) 1756 { 1757 ASSERT(acp->pkt); 1758 1759 acp->flags |= AAC_CMD_CMPLT; 1760 1761 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \ 1762 STATE_SENT_CMD | STATE_GOT_STATUS; 1763 if (acp->pkt->pkt_state & STATE_XFERRED_DATA) 1764 acp->pkt->pkt_resid = 0; 1765 1766 /* AAC_CMD_NO_INTR means no complete callback */ 1767 if (!(acp->flags & AAC_CMD_NO_INTR)) { 1768 mutex_enter(&softs->q_comp_mutex); 1769 aac_cmd_enqueue(&softs->q_comp, acp); 1770 mutex_exit(&softs->q_comp_mutex); 1771 ddi_trigger_softintr(softs->softint_id); 1772 } 1773 } 1774 1775 /* 1776 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old() 1777 */ 1778 1779 /* 1780 * Handle completed logical device IO command 1781 */ 1782 /*ARGSUSED*/ 1783 static void 1784 
aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1785 { 1786 struct aac_slot *slotp = acp->slotp; 1787 struct aac_blockread_response *resp; 1788 uint32_t status; 1789 1790 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1791 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1792 1793 acp->pkt->pkt_state |= STATE_GOT_STATUS; 1794 1795 /* 1796 * block_read/write has a similar response header, use blockread 1797 * response for both. 1798 */ 1799 resp = (struct aac_blockread_response *)&slotp->fibp->data[0]; 1800 status = ddi_get32(slotp->fib_acc_handle, &resp->Status); 1801 if (status == ST_OK) { 1802 acp->pkt->pkt_resid = 0; 1803 acp->pkt->pkt_state |= STATE_XFERRED_DATA; 1804 } else { 1805 aac_set_arq_data_hwerr(acp); 1806 } 1807 } 1808 1809 /* 1810 * Handle completed phys. device IO command 1811 */ 1812 static void 1813 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1814 { 1815 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 1816 struct aac_fib *fibp = acp->slotp->fibp; 1817 struct scsi_pkt *pkt = acp->pkt; 1818 struct aac_srb_reply *resp; 1819 uint32_t resp_status; 1820 1821 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1822 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1823 1824 resp = (struct aac_srb_reply *)&fibp->data[0]; 1825 resp_status = ddi_get32(acc, &resp->status); 1826 1827 /* First check FIB status */ 1828 if (resp_status == ST_OK) { 1829 uint32_t scsi_status; 1830 uint32_t srb_status; 1831 uint32_t data_xfer_length; 1832 1833 scsi_status = ddi_get32(acc, &resp->scsi_status); 1834 srb_status = ddi_get32(acc, &resp->srb_status); 1835 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length); 1836 1837 *pkt->pkt_scbp = (uint8_t)scsi_status; 1838 pkt->pkt_state |= STATE_GOT_STATUS; 1839 if (scsi_status == STATUS_GOOD) { 1840 uchar_t cmd = ((union scsi_cdb *)(void *) 1841 (pkt->pkt_cdbp))->scc_cmd; 1842 1843 /* Next check SRB status */ 1844 switch (srb_status & 0x3f) { 1845 case SRB_STATUS_DATA_OVERRUN: 1846 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \ 1847 "scmd=%d, xfer=%d, buflen=%d", 1848 (uint32_t)cmd, data_xfer_length, 1849 acp->bcount); 1850 1851 switch (cmd) { 1852 case SCMD_READ: 1853 case SCMD_WRITE: 1854 case SCMD_READ_G1: 1855 case SCMD_WRITE_G1: 1856 case SCMD_READ_G4: 1857 case SCMD_WRITE_G4: 1858 case SCMD_READ_G5: 1859 case SCMD_WRITE_G5: 1860 aac_set_pkt_reason(softs, acp, 1861 CMD_DATA_OVR, 0); 1862 break; 1863 } 1864 /*FALLTHRU*/ 1865 case SRB_STATUS_ERROR_RECOVERY: 1866 case SRB_STATUS_PENDING: 1867 case SRB_STATUS_SUCCESS: 1868 /* 1869 * pkt_resid should only be calculated if the 1870 * status is ERROR_RECOVERY/PENDING/SUCCESS/ 1871 * OVERRUN/UNDERRUN 1872 */ 1873 if (data_xfer_length) { 1874 pkt->pkt_state |= STATE_XFERRED_DATA; 1875 pkt->pkt_resid = acp->bcount - \ 1876 data_xfer_length; 1877 ASSERT(pkt->pkt_resid >= 0); 1878 } 1879 break; 1880 case SRB_STATUS_ABORTED: 1881 AACDB_PRINT(softs, CE_NOTE, 1882 "SRB_STATUS_ABORTED, xfer=%d, resid=%d", 1883 data_xfer_length, pkt->pkt_resid); 1884 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 1885 STAT_ABORTED); 1886 break; 1887 case SRB_STATUS_ABORT_FAILED: 1888 AACDB_PRINT(softs, CE_NOTE, 1889 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \ 1890 "resid=%d", data_xfer_length, 1891 pkt->pkt_resid); 1892 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL, 1893 0); 1894 break; 1895 case SRB_STATUS_PARITY_ERROR: 1896 AACDB_PRINT(softs, CE_NOTE, 1897 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \ 1898 "resid=%d", data_xfer_length, 1899 pkt->pkt_resid); 1900 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0); 1901 break; 1902 case 
SRB_STATUS_NO_DEVICE: 1903 case SRB_STATUS_INVALID_PATH_ID: 1904 case SRB_STATUS_INVALID_TARGET_ID: 1905 case SRB_STATUS_INVALID_LUN: 1906 case SRB_STATUS_SELECTION_TIMEOUT: 1907 #ifdef DEBUG 1908 if (AAC_DEV_IS_VALID(acp->dvp)) { 1909 AACDB_PRINT(softs, CE_NOTE, 1910 "SRB_STATUS_NO_DEVICE(%d), " \ 1911 "xfer=%d, resid=%d ", 1912 srb_status & 0x3f, 1913 data_xfer_length, pkt->pkt_resid); 1914 } 1915 #endif 1916 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0); 1917 break; 1918 case SRB_STATUS_COMMAND_TIMEOUT: 1919 case SRB_STATUS_TIMEOUT: 1920 AACDB_PRINT(softs, CE_NOTE, 1921 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \ 1922 "resid=%d", data_xfer_length, 1923 pkt->pkt_resid); 1924 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 1925 STAT_TIMEOUT); 1926 break; 1927 case SRB_STATUS_BUS_RESET: 1928 AACDB_PRINT(softs, CE_NOTE, 1929 "SRB_STATUS_BUS_RESET, xfer=%d, " \ 1930 "resid=%d", data_xfer_length, 1931 pkt->pkt_resid); 1932 aac_set_pkt_reason(softs, acp, CMD_RESET, 1933 STAT_BUS_RESET); 1934 break; 1935 default: 1936 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \ 1937 "xfer=%d, resid=%d", srb_status & 0x3f, 1938 data_xfer_length, pkt->pkt_resid); 1939 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1940 break; 1941 } 1942 } else if (scsi_status == STATUS_CHECK) { 1943 /* CHECK CONDITION */ 1944 struct scsi_arq_status *arqstat = 1945 (void *)(pkt->pkt_scbp); 1946 uint32_t sense_data_size; 1947 1948 pkt->pkt_state |= STATE_ARQ_DONE; 1949 1950 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1951 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1952 arqstat->sts_rqpkt_resid = 0; 1953 arqstat->sts_rqpkt_state = 1954 STATE_GOT_BUS | 1955 STATE_GOT_TARGET | 1956 STATE_SENT_CMD | 1957 STATE_XFERRED_DATA; 1958 arqstat->sts_rqpkt_statistics = 0; 1959 1960 sense_data_size = ddi_get32(acc, 1961 &resp->sense_data_size); 1962 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE); 1963 AACDB_PRINT(softs, CE_NOTE, 1964 "CHECK CONDITION: sense len=%d, xfer len=%d", 1965 sense_data_size, data_xfer_length); 1966 1967 if (sense_data_size > SENSE_LENGTH) 1968 sense_data_size = SENSE_LENGTH; 1969 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata, 1970 (uint8_t *)resp->sense_data, sense_data_size, 1971 DDI_DEV_AUTOINCR); 1972 } else { 1973 AACDB_PRINT(softs, CE_WARN, "invalid scsi status: " \ 1974 "scsi_status=%d, srb_status=%d", 1975 scsi_status, srb_status); 1976 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1977 } 1978 } else { 1979 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d", 1980 resp_status); 1981 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1982 } 1983 } 1984 1985 /* 1986 * Handle completed IOCTL command 1987 */ 1988 /*ARGSUSED*/ 1989 void 1990 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1991 { 1992 struct aac_slot *slotp = acp->slotp; 1993 1994 /* 1995 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb() 1996 * may wait on softs->event, so use cv_broadcast() instead 1997 * of cv_signal.
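 * More than one thread may be blocked on that condition variable at a
 * time (an ioctl FIB sender and a raw SRB sender, for instance), so a
 * plain cv_signal() could wake a waiter whose command has not yet
 * completed while the intended waiter keeps sleeping.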
1998 */ 1999 ASSERT(acp->flags & AAC_CMD_SYNC); 2000 ASSERT(acp->flags & AAC_CMD_NO_CB); 2001 2002 /* Get the size of the response FIB from its FIB.Header.Size field */ 2003 acp->fib_size = ddi_get16(slotp->fib_acc_handle, 2004 &slotp->fibp->Header.Size); 2005 2006 ASSERT(acp->fib_size <= softs->aac_max_fib_size); 2007 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp, 2008 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR); 2009 } 2010 2011 /* 2012 * Handle completed Flush command 2013 */ 2014 /*ARGSUSED*/ 2015 static void 2016 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2017 { 2018 struct aac_slot *slotp = acp->slotp; 2019 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2020 struct aac_synchronize_reply *resp; 2021 uint32_t status; 2022 2023 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2024 2025 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2026 2027 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0]; 2028 status = ddi_get32(acc, &resp->Status); 2029 if (status != CT_OK) 2030 aac_set_arq_data_hwerr(acp); 2031 } 2032 2033 /* 2034 * Access PCI space to see if the driver can support the card 2035 */ 2036 static int 2037 aac_check_card_type(struct aac_softstate *softs) 2038 { 2039 ddi_acc_handle_t pci_config_handle; 2040 int card_index; 2041 uint32_t pci_cmd; 2042 2043 /* Map pci configuration space */ 2044 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) != 2045 DDI_SUCCESS) { 2046 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space"); 2047 return (AACERR); 2048 } 2049 2050 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID); 2051 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID); 2052 softs->subvendid = pci_config_get16(pci_config_handle, 2053 PCI_CONF_SUBVENID); 2054 softs->subsysid = pci_config_get16(pci_config_handle, 2055 PCI_CONF_SUBSYSID); 2056 2057 card_index = 0; 2058 while (!CARD_IS_UNKNOWN(card_index)) { 2059 if ((aac_cards[card_index].vendor == softs->vendid) && 2060 (aac_cards[card_index].device == softs->devid) && 2061 (aac_cards[card_index].subvendor == softs->subvendid) && 2062 (aac_cards[card_index].subsys == softs->subsysid)) { 2063 break; 2064 } 2065 card_index++; 2066 } 2067 2068 softs->card = card_index; 2069 softs->hwif = aac_cards[card_index].hwif; 2070 2071 /* 2072 * Unknown aac card 2073 * do a generic match based on the VendorID and DeviceID to 2074 * support the new cards in the aac family 2075 */ 2076 if (CARD_IS_UNKNOWN(card_index)) { 2077 if (softs->vendid != 0x9005) { 2078 AACDB_PRINT(softs, CE_WARN, 2079 "Unknown vendor 0x%x", softs->vendid); 2080 goto error; 2081 } 2082 switch (softs->devid) { 2083 case 0x285: 2084 softs->hwif = AAC_HWIF_I960RX; 2085 break; 2086 case 0x286: 2087 softs->hwif = AAC_HWIF_RKT; 2088 break; 2089 default: 2090 AACDB_PRINT(softs, CE_WARN, 2091 "Unknown device \"pci9005,%x\"", softs->devid); 2092 goto error; 2093 } 2094 } 2095 2096 /* Set hardware dependent interface */ 2097 switch (softs->hwif) { 2098 case AAC_HWIF_I960RX: 2099 softs->aac_if = aac_rx_interface; 2100 softs->map_size_min = AAC_MAP_SIZE_MIN_RX; 2101 break; 2102 case AAC_HWIF_RKT: 2103 softs->aac_if = aac_rkt_interface; 2104 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT; 2105 break; 2106 default: 2107 AACDB_PRINT(softs, CE_WARN, 2108 "Unknown hardware interface %d", softs->hwif); 2109 goto error; 2110 } 2111 2112 /* Set card names */ 2113 (void *)strncpy(softs->vendor_name, aac_cards[card_index].vid, 2114 AAC_VENDOR_LEN); 2115 (void *)strncpy(softs->product_name, 
aac_cards[card_index].desc, 2116 AAC_PRODUCT_LEN); 2117 2118 /* Set up quirks */ 2119 softs->flags = aac_cards[card_index].quirks; 2120 2121 /* Force the busmaster enable bit on */ 2122 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2123 if ((pci_cmd & PCI_COMM_ME) == 0) { 2124 pci_cmd |= PCI_COMM_ME; 2125 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd); 2126 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2127 if ((pci_cmd & PCI_COMM_ME) == 0) { 2128 cmn_err(CE_CONT, "?Cannot enable busmaster bit"); 2129 goto error; 2130 } 2131 } 2132 2133 /* Set memory base to map */ 2134 softs->pci_mem_base_paddr = 0xfffffff0UL & \ 2135 pci_config_get32(pci_config_handle, PCI_CONF_BASE0); 2136 2137 pci_config_teardown(&pci_config_handle); 2138 2139 return (AACOK); /* card type detected */ 2140 error: 2141 pci_config_teardown(&pci_config_handle); 2142 return (AACERR); /* no matched card found */ 2143 } 2144 2145 /* 2146 * Check the firmware to determine the features to support and the FIB 2147 * parameters to use. 2148 */ 2149 static int 2150 aac_check_firmware(struct aac_softstate *softs) 2151 { 2152 uint32_t options; 2153 uint32_t atu_size; 2154 ddi_acc_handle_t pci_handle; 2155 uint8_t *data; 2156 uint32_t max_fibs; 2157 uint32_t max_fib_size; 2158 uint32_t sg_tablesize; 2159 uint32_t max_sectors; 2160 uint32_t status; 2161 2162 /* Get supported options */ 2163 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0, 2164 &status)) != AACOK) { 2165 if (status != SRB_STATUS_INVALID_REQUEST) { 2166 cmn_err(CE_CONT, 2167 "?Fatal error: request adapter info error"); 2168 return (AACERR); 2169 } 2170 options = 0; 2171 atu_size = 0; 2172 } else { 2173 options = AAC_MAILBOX_GET(softs, 1); 2174 atu_size = AAC_MAILBOX_GET(softs, 2); 2175 } 2176 2177 if (softs->state & AAC_STATE_RESET) { 2178 if ((softs->support_opt == options) && 2179 (softs->atu_size == atu_size)) 2180 return (AACOK); 2181 2182 cmn_err(CE_WARN, 2183 "?Fatal error: firmware changed, system needs reboot"); 2184 return (AACERR); 2185 } 2186 2187 /* 2188 * The following critical settings are initialized only once during 2189 * driver attachment. 2190 */ 2191 softs->support_opt = options; 2192 softs->atu_size = atu_size; 2193 2194 /* Process supported options */ 2195 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 2196 (softs->flags & AAC_FLAGS_NO4GB) == 0) { 2197 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window"); 2198 softs->flags |= AAC_FLAGS_4GB_WINDOW; 2199 } else { 2200 /* 2201 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space 2202 * only. IO is handled by the DMA engine which does not suffer 2203 * from the ATU window programming workarounds necessary for 2204 * CPU copy operations. 
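 * Clarifying note: the else branch below therefore only narrows the
 * comm space DMA attributes, presumably so that FIB/comm space
 * allocations stay in the low 2GB where no ATU window reprogramming is
 * needed. The constants are the ones assigned right after this
 * comment:
 *
 *	addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
 *	addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;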
2205 */ 2206 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull; 2207 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull; 2208 } 2209 2210 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) { 2211 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address"); 2212 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 2213 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull; 2214 softs->flags |= AAC_FLAGS_SG_64BIT; 2215 } 2216 2217 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) { 2218 softs->flags |= AAC_FLAGS_ARRAY_64BIT; 2219 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size"); 2220 } 2221 2222 if (options & AAC_SUPPORTED_NONDASD) { 2223 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0, 2224 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) { 2225 if (strcmp((char *)data, "yes") == 0) { 2226 AACDB_PRINT(softs, CE_NOTE, 2227 "!Enable Non-DASD access"); 2228 softs->flags |= AAC_FLAGS_NONDASD; 2229 } 2230 ddi_prop_free(data); 2231 } 2232 } 2233 2234 /* Read preferred settings */ 2235 max_fib_size = 0; 2236 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF, 2237 0, 0, 0, 0, NULL)) == AACOK) { 2238 options = AAC_MAILBOX_GET(softs, 1); 2239 max_fib_size = (options & 0xffff); 2240 max_sectors = (options >> 16) << 1; 2241 options = AAC_MAILBOX_GET(softs, 2); 2242 sg_tablesize = (options >> 16); 2243 options = AAC_MAILBOX_GET(softs, 3); 2244 max_fibs = (options & 0xffff); 2245 } 2246 2247 /* Enable new comm. and rawio at the same time */ 2248 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) && 2249 (max_fib_size != 0)) { 2250 /* read out and save PCI MBR */ 2251 if ((atu_size > softs->map_size) && 2252 (ddi_regs_map_setup(softs->devinfo_p, 1, 2253 (caddr_t *)&data, 0, atu_size, &softs->acc_attr, 2254 &pci_handle) == DDI_SUCCESS)) { 2255 ddi_regs_map_free(&softs->pci_mem_handle); 2256 softs->pci_mem_handle = pci_handle; 2257 softs->pci_mem_base_vaddr = data; 2258 softs->map_size = atu_size; 2259 } 2260 if (atu_size == softs->map_size) { 2261 softs->flags |= AAC_FLAGS_NEW_COMM; 2262 AACDB_PRINT(softs, CE_NOTE, 2263 "!Enable New Comm. interface"); 2264 } 2265 } 2266 2267 /* Set FIB parameters */ 2268 if (softs->flags & AAC_FLAGS_NEW_COMM) { 2269 softs->aac_max_fibs = max_fibs; 2270 softs->aac_max_fib_size = max_fib_size; 2271 softs->aac_max_sectors = max_sectors; 2272 softs->aac_sg_tablesize = sg_tablesize; 2273 2274 softs->flags |= AAC_FLAGS_RAW_IO; 2275 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO"); 2276 } else { 2277 softs->aac_max_fibs = 2278 (softs->flags & AAC_FLAGS_256FIBS) ? 
256 : 512; 2279 softs->aac_max_fib_size = AAC_FIB_SIZE; 2280 softs->aac_max_sectors = 128; /* 64K */ 2281 if (softs->flags & AAC_FLAGS_17SG) 2282 softs->aac_sg_tablesize = 17; 2283 else if (softs->flags & AAC_FLAGS_34SG) 2284 softs->aac_sg_tablesize = 34; 2285 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2286 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2287 sizeof (struct aac_blockwrite64) + 2288 sizeof (struct aac_sg_entry64)) / 2289 sizeof (struct aac_sg_entry64); 2290 else 2291 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2292 sizeof (struct aac_blockwrite) + 2293 sizeof (struct aac_sg_entry)) / 2294 sizeof (struct aac_sg_entry); 2295 } 2296 2297 if ((softs->flags & AAC_FLAGS_RAW_IO) && 2298 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) { 2299 softs->flags |= AAC_FLAGS_LBA_64BIT; 2300 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array"); 2301 } 2302 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize; 2303 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9; 2304 /* 2305 * 64K maximum segment size in scatter gather list is controlled by 2306 * the NEW_COMM bit in the adapter information. If not set, the card 2307 * can only accept a maximum of 64K. It is not recommended to permit 2308 * more than 128KB of total transfer size to the adapters because 2309 * performance is negatively impacted. 2310 * 2311 * For new comm, segment size equals max xfer size. For old comm, 2312 * we use 64K for both. 2313 */ 2314 softs->buf_dma_attr.dma_attr_count_max = 2315 softs->buf_dma_attr.dma_attr_maxxfer - 1; 2316 2317 /* Setup FIB operations */ 2318 if (softs->flags & AAC_FLAGS_RAW_IO) 2319 softs->aac_cmd_fib = aac_cmd_fib_rawio; 2320 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2321 softs->aac_cmd_fib = aac_cmd_fib_brw64; 2322 else 2323 softs->aac_cmd_fib = aac_cmd_fib_brw; 2324 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? 
\ 2325 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32; 2326 2327 /* 64-bit LBA needs descriptor format sense data */ 2328 softs->slen = sizeof (struct scsi_arq_status); 2329 if ((softs->flags & AAC_FLAGS_LBA_64BIT) && 2330 softs->slen < AAC_ARQ64_LENGTH) 2331 softs->slen = AAC_ARQ64_LENGTH; 2332 2333 AACDB_PRINT(softs, CE_NOTE, 2334 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d", 2335 softs->aac_max_fibs, softs->aac_max_fib_size, 2336 softs->aac_max_sectors, softs->aac_sg_tablesize); 2337 2338 return (AACOK); 2339 } 2340 2341 static void 2342 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0, 2343 struct FsaRev *fsarev1) 2344 { 2345 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 2346 2347 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash); 2348 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type); 2349 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor); 2350 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major); 2351 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber); 2352 } 2353 2354 /* 2355 * The following function comes from Adaptec: 2356 * 2357 * Query adapter information and supplement adapter information 2358 */ 2359 static int 2360 aac_get_adapter_info(struct aac_softstate *softs, 2361 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr) 2362 { 2363 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2364 struct aac_fib *fibp = softs->sync_slot.fibp; 2365 struct aac_adapter_info *ainfp; 2366 struct aac_supplement_adapter_info *sinfp; 2367 2368 ddi_put8(acc, &fibp->data[0], 0); 2369 if (aac_sync_fib(softs, RequestAdapterInfo, 2370 sizeof (struct aac_fib_header)) != AACOK) { 2371 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed"); 2372 return (AACERR); 2373 } 2374 ainfp = (struct aac_adapter_info *)fibp->data; 2375 if (ainfr) { 2376 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2377 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase); 2378 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture); 2379 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant); 2380 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed); 2381 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem); 2382 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem); 2383 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem); 2384 aac_fsa_rev(softs, &ainfp->KernelRevision, 2385 &ainfr->KernelRevision); 2386 aac_fsa_rev(softs, &ainfp->MonitorRevision, 2387 &ainfr->MonitorRevision); 2388 aac_fsa_rev(softs, &ainfp->HardwareRevision, 2389 &ainfr->HardwareRevision); 2390 aac_fsa_rev(softs, &ainfp->BIOSRevision, 2391 &ainfr->BIOSRevision); 2392 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled); 2393 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask); 2394 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber); 2395 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform); 2396 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2397 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant); 2398 } 2399 if (sinfr) { 2400 if (!(softs->support_opt & 2401 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) { 2402 AACDB_PRINT(softs, CE_WARN, 2403 "SupplementAdapterInfo not supported"); 2404 return (AACERR); 2405 } 2406 ddi_put8(acc, &fibp->data[0], 0); 2407 if (aac_sync_fib(softs, RequestSupplementAdapterInfo, 2408 sizeof (struct aac_fib_header)) != AACOK) { 2409 AACDB_PRINT(softs, CE_WARN, 2410 "RequestSupplementAdapterInfo failed"); 2411 return (AACERR); 2412 } 2413 sinfp = (struct aac_supplement_adapter_info *)fibp->data; 2414 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1); 2415 
AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2); 2416 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize); 2417 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId); 2418 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts); 2419 AAC_GET_FIELD32(acc, sinfr, sinfp, Version); 2420 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits); 2421 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber); 2422 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3); 2423 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12); 2424 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts); 2425 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo, 2426 sizeof (struct vpd_info)); 2427 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision, 2428 &sinfr->FlashFirmwareRevision); 2429 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions); 2430 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision, 2431 &sinfr->FlashFirmwareBootRevision); 2432 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo, 2433 MFG_PCBA_SERIAL_NUMBER_WIDTH); 2434 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0], 2435 MFG_WWN_WIDTH); 2436 AAC_REP_GET_FIELD32(acc, sinfr, sinfp, ReservedGrowth[0], 2); 2437 } 2438 return (AACOK); 2439 } 2440 2441 static int 2442 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max, 2443 uint32_t *tgt_max) 2444 { 2445 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2446 struct aac_fib *fibp = softs->sync_slot.fibp; 2447 struct aac_ctcfg *c_cmd; 2448 struct aac_ctcfg_resp *c_resp; 2449 uint32_t scsi_method_id; 2450 struct aac_bus_info *cmd; 2451 struct aac_bus_info_response *resp; 2452 int rval; 2453 2454 /* Detect MethodId */ 2455 c_cmd = (struct aac_ctcfg *)&fibp->data[0]; 2456 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig); 2457 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD); 2458 ddi_put32(acc, &c_cmd->param, 0); 2459 rval = aac_sync_fib(softs, ContainerCommand, 2460 AAC_FIB_SIZEOF(struct aac_ctcfg)); 2461 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0]; 2462 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) { 2463 AACDB_PRINT(softs, CE_WARN, 2464 "VM_ContainerConfig command fail"); 2465 return (AACERR); 2466 } 2467 scsi_method_id = ddi_get32(acc, &c_resp->param); 2468 2469 /* Detect phys. bus count and max. target id first */ 2470 cmd = (struct aac_bus_info *)&fibp->data[0]; 2471 ddi_put32(acc, &cmd->Command, VM_Ioctl); 2472 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */ 2473 ddi_put32(acc, &cmd->MethodId, scsi_method_id); 2474 ddi_put32(acc, &cmd->ObjectId, 0); 2475 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo); 2476 /* 2477 * For VM_Ioctl, the firmware uses the Header.Size filled from the 2478 * driver as the size to be returned. Therefore the driver has to use 2479 * sizeof (struct aac_bus_info_response) because it is greater than 2480 * sizeof (struct aac_bus_info). 2481 */ 2482 rval = aac_sync_fib(softs, ContainerCommand, 2483 AAC_FIB_SIZEOF(struct aac_bus_info_response)); 2484 resp = (struct aac_bus_info_response *)cmd; 2485 2486 /* Scan all coordinates with INQUIRY */ 2487 if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) { 2488 AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail"); 2489 return (AACERR); 2490 } 2491 *bus_max = ddi_get32(acc, &resp->BusCount); 2492 *tgt_max = ddi_get32(acc, &resp->TargetsPerBus); 2493 return (AACOK); 2494 } 2495 2496 /* 2497 * The following function comes from Adaptec: 2498 * 2499 * Routine to be called during initialization of communications with 2500 * the adapter to handle possible adapter configuration issues. 
When 2501 * the adapter first boots up, it examines attached drives, etc., and 2502 * potentially comes up with a new or revised configuration (relative to 2503 * what's stored in its NVRAM). Additionally it may discover problems 2504 * that make the current physical configuration unworkable (currently 2505 * applicable only to cluster configuration issues). 2506 * 2507 * If there are no configuration issues or the issues are considered 2508 * trivial by the adapter, it will set its configuration status to 2509 * "FSACT_CONTINUE" and execute the "commit configuration" action 2510 * automatically on its own. 2511 * 2512 * However, if there are non-trivial issues, the adapter will set its 2513 * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT" 2514 * and wait for some agent on the host to issue the "\ContainerCommand 2515 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the 2516 * adapter to commit the new/updated configuration and enable 2517 * un-inhibited operation. The host agent should first issue the 2518 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB 2519 * command to obtain information about config issues detected by 2520 * the adapter. 2521 * 2522 * Normally the adapter's PC BIOS will execute on the host following 2523 * adapter poweron and reset and will be responsible for querying the 2524 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG 2525 * command if appropriate. 2526 * 2527 * However, with the introduction of IOP reset support, the adapter may 2528 * boot up without the benefit of the adapter's PC BIOS host agent. 2529 * This routine is intended to take care of these issues in situations 2530 * where BIOS doesn't execute following adapter poweron or reset. The 2531 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so 2532 * there is no harm in doing this when it's already been done. 2533 */ 2534 static int 2535 aac_handle_adapter_config_issues(struct aac_softstate *softs) 2536 { 2537 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 2538 struct aac_fib *fibp = softs->sync_slot.fibp; 2539 struct aac_Container *cmd; 2540 struct aac_Container_resp *resp; 2541 struct aac_cf_status_header *cfg_sts_hdr; 2542 uint32_t resp_status; 2543 uint32_t ct_status; 2544 uint32_t cfg_stat_action; 2545 int rval; 2546 2547 /* Get adapter config status */ 2548 cmd = (struct aac_Container *)&fibp->data[0]; 2549 2550 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2551 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2552 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS); 2553 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE], 2554 sizeof (struct aac_cf_status_header)); 2555 rval = aac_sync_fib(softs, ContainerCommand, 2556 AAC_FIB_SIZEOF(struct aac_Container)); 2557 resp = (struct aac_Container_resp *)cmd; 2558 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data; 2559 2560 resp_status = ddi_get32(acc, &resp->Status); 2561 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2562 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) { 2563 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action); 2564 2565 /* Commit configuration if it's reasonable to do so.
*/ 2566 if (cfg_stat_action <= CFACT_PAUSE) { 2567 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2568 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2569 ddi_put32(acc, &cmd->CTCommand.command, 2570 CT_COMMIT_CONFIG); 2571 rval = aac_sync_fib(softs, ContainerCommand, 2572 AAC_FIB_SIZEOF(struct aac_Container)); 2573 2574 resp_status = ddi_get32(acc, &resp->Status); 2575 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2576 if ((rval == AACOK) && (resp_status == 0) && 2577 (ct_status == CT_OK)) 2578 /* Successful completion */ 2579 rval = AACMPE_OK; 2580 else 2581 /* Auto-commit aborted due to error(s). */ 2582 rval = AACMPE_COMMIT_CONFIG; 2583 } else { 2584 /* 2585 * Auto-commit aborted due to adapter indicating 2586 * configuration issue(s) too dangerous to auto-commit. 2587 */ 2588 rval = AACMPE_CONFIG_STATUS; 2589 } 2590 } else { 2591 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted"); 2592 rval = AACMPE_CONFIG_STATUS; 2593 } 2594 return (rval); 2595 } 2596 2597 /* 2598 * Hardware initialization and resource allocation 2599 */ 2600 static int 2601 aac_common_attach(struct aac_softstate *softs) 2602 { 2603 uint32_t status; 2604 int i; 2605 2606 DBCALLED(softs, 1); 2607 2608 /* 2609 * Do a little check here to make sure there aren't any outstanding 2610 * FIBs in the message queue. At this point there should not be any; 2611 * if there are, they are probably left over from another instance of 2612 * the driver, e.g. when the system crashes and the crash dump driver 2613 * gets loaded. 2614 */ 2615 while (AAC_OUTB_GET(softs) != 0xfffffffful) 2616 ; 2617 2618 /* 2619 * Wait for the card to complete booting up before doing anything that 2620 * attempts to communicate with it. 2621 */ 2622 status = AAC_FWSTATUS_GET(softs); 2623 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC) 2624 goto error; 2625 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */ 2626 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i); 2627 if (i == 0) { 2628 cmn_err(CE_CONT, "?Fatal error: controller not ready"); 2629 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2630 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2631 goto error; 2632 } 2633 2634 /* Read and set card supported options and settings */ 2635 if (aac_check_firmware(softs) == AACERR) { 2636 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2637 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2638 goto error; 2639 } 2640 2641 /* Clear out all interrupts */ 2642 AAC_STATUS_CLR(softs, ~0); 2643 2644 /* Setup communication space with the card */ 2645 if (softs->comm_space_dma_handle == NULL) { 2646 if (aac_alloc_comm_space(softs) != AACOK) 2647 goto error; 2648 } 2649 if (aac_setup_comm_space(softs) != AACOK) { 2650 cmn_err(CE_CONT, "?Setup communication space failed"); 2651 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2652 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2653 goto error; 2654 } 2655 2656 #ifdef DEBUG 2657 if (aac_get_fw_debug_buffer(softs) != AACOK) 2658 cmn_err(CE_CONT, "?firmware UART trace not supported"); 2659 #endif 2660 2661 /* Allocate slots */ 2662 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) { 2663 cmn_err(CE_CONT, "?Fatal error: slots allocate failed"); 2664 goto error; 2665 } 2666 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots); 2667 2668 /* Allocate FIBs */ 2669 if (softs->total_fibs < softs->total_slots) { 2670 aac_alloc_fibs(softs); 2671 if (softs->total_fibs == 0) 2672 goto error; 2673 AACDB_PRINT(softs,
CE_NOTE, "%d fibs allocated", 2674 softs->total_fibs); 2675 } 2676 2677 /* Get adapter names */ 2678 if (CARD_IS_UNKNOWN(softs->card)) { 2679 struct aac_supplement_adapter_info sinf; 2680 2681 if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) { 2682 cmn_err(CE_CONT, "?Query adapter information failed"); 2683 } else { 2684 char *p, *p0, *p1; 2685 2686 /* 2687 * Now find the controller name in supp_adapter_info-> 2688 * AdapterTypeText. Use the first word as the vendor 2689 * and the other words as the product name. 2690 */ 2691 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = " 2692 "\"%s\"", sinf.AdapterTypeText); 2693 p = sinf.AdapterTypeText; 2694 p0 = p1 = NULL; 2695 /* Skip heading spaces */ 2696 while (*p && (*p == ' ' || *p == '\t')) 2697 p++; 2698 p0 = p; 2699 while (*p && (*p != ' ' && *p != '\t')) 2700 p++; 2701 /* Remove middle spaces */ 2702 while (*p && (*p == ' ' || *p == '\t')) 2703 *p++ = 0; 2704 p1 = p; 2705 /* Remove trailing spaces */ 2706 p = p1 + strlen(p1) - 1; 2707 while (p > p1 && (*p == ' ' || *p == '\t')) 2708 *p-- = 0; 2709 if (*p0 && *p1) { 2710 (void *)strncpy(softs->vendor_name, p0, 2711 AAC_VENDOR_LEN); 2712 (void *)strncpy(softs->product_name, p1, 2713 AAC_PRODUCT_LEN); 2714 } else { 2715 cmn_err(CE_WARN, 2716 "?adapter name mis-formatted\n"); 2717 if (*p0) 2718 (void *)strncpy(softs->product_name, 2719 p0, AAC_PRODUCT_LEN); 2720 } 2721 } 2722 } 2723 2724 cmn_err(CE_NOTE, 2725 "!aac driver %d.%02d.%02d-%d, found card: " \ 2726 "%s %s(pci0x%x.%x.%x.%x) at 0x%x", 2727 AAC_DRIVER_MAJOR_VERSION, 2728 AAC_DRIVER_MINOR_VERSION, 2729 AAC_DRIVER_BUGFIX_LEVEL, 2730 AAC_DRIVER_BUILD, 2731 softs->vendor_name, softs->product_name, 2732 softs->vendid, softs->devid, softs->subvendid, softs->subsysid, 2733 softs->pci_mem_base_paddr); 2734 2735 /* Perform acceptance of adapter-detected config changes if possible */ 2736 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) { 2737 cmn_err(CE_CONT, "?Handle adapter config issues failed"); 2738 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2739 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2740 goto error; 2741 } 2742 2743 /* Setup containers (logical devices) */ 2744 if (aac_probe_containers(softs) != AACOK) { 2745 cmn_err(CE_CONT, "?Fatal error: get container info error"); 2746 goto error; 2747 } 2748 2749 /* Setup phys. 
devices */ 2750 if (softs->flags & AAC_FLAGS_NONDASD) { 2751 uint32_t bus_max, tgt_max; 2752 uint32_t bus, tgt; 2753 int index; 2754 2755 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) { 2756 cmn_err(CE_CONT, "?Fatal error: get bus info error"); 2757 goto error; 2758 } 2759 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d", 2760 bus_max, tgt_max); 2761 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) { 2762 if (softs->state & AAC_STATE_RESET) { 2763 cmn_err(CE_WARN, 2764 "?Fatal error: bus map changed"); 2765 goto error; 2766 } 2767 softs->bus_max = bus_max; 2768 softs->tgt_max = tgt_max; 2769 if (softs->nondasds) { 2770 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2771 sizeof (struct aac_nondasd)); 2772 } 2773 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \ 2774 sizeof (struct aac_nondasd), KM_SLEEP); 2775 2776 index = 0; 2777 for (bus = 0; bus < softs->bus_max; bus++) { 2778 for (tgt = 0; tgt < softs->tgt_max; tgt++) { 2779 struct aac_nondasd *dvp = 2780 &softs->nondasds[index++]; 2781 dvp->dev.type = AAC_DEV_PD; 2782 dvp->bus = bus; 2783 dvp->tid = tgt; 2784 } 2785 } 2786 } 2787 } 2788 2789 /* Check dma & acc handles allocated in attach */ 2790 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) { 2791 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2792 goto error; 2793 } 2794 2795 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 2796 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2797 goto error; 2798 } 2799 2800 for (i = 0; i < softs->total_slots; i++) { 2801 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) != 2802 DDI_SUCCESS) { 2803 ddi_fm_service_impact(softs->devinfo_p, 2804 DDI_SERVICE_LOST); 2805 goto error; 2806 } 2807 } 2808 2809 return (AACOK); 2810 error: 2811 if (softs->state & AAC_STATE_RESET) 2812 return (AACERR); 2813 if (softs->nondasds) { 2814 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2815 sizeof (struct aac_nondasd)); 2816 softs->nondasds = NULL; 2817 } 2818 if (softs->total_fibs > 0) 2819 aac_destroy_fibs(softs); 2820 if (softs->total_slots > 0) 2821 aac_destroy_slots(softs); 2822 if (softs->comm_space_dma_handle) 2823 aac_free_comm_space(softs); 2824 return (AACERR); 2825 } 2826 2827 /* 2828 * Hardware shutdown and resource release 2829 */ 2830 static void 2831 aac_common_detach(struct aac_softstate *softs) 2832 { 2833 DBCALLED(softs, 1); 2834 2835 (void) aac_shutdown(softs); 2836 2837 if (softs->nondasds) { 2838 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2839 sizeof (struct aac_nondasd)); 2840 softs->nondasds = NULL; 2841 } 2842 aac_destroy_fibs(softs); 2843 aac_destroy_slots(softs); 2844 aac_free_comm_space(softs); 2845 } 2846 2847 /* 2848 * Send a synchronous command to the controller and wait for a result. 2849 * Indicate if the controller completed the command with an error status. 
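 *
 * A minimal usage sketch (modeled on the callers elsewhere in this
 * file, e.g. aac_check_firmware() issuing AAC_MONKER_GETINFO):
 *
 *	uint32_t status;
 *
 *	if (aac_sync_mbcommand(softs, AAC_MONKER_GETINFO,
 *	    0, 0, 0, 0, &status) != AACOK) {
 *		inspect status, which holds the value read back from
 *		mailbox 0 (e.g. SRB_STATUS_INVALID_REQUEST)
 *	}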
2850 */ 2851 int 2852 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd, 2853 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, 2854 uint32_t *statusp) 2855 { 2856 int timeout; 2857 uint32_t status; 2858 2859 if (statusp != NULL) 2860 *statusp = SRB_STATUS_SUCCESS; 2861 2862 /* Fill in mailbox */ 2863 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3); 2864 2865 /* Ensure the sync command doorbell flag is cleared */ 2866 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 2867 2868 /* Then set it to signal the adapter */ 2869 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND); 2870 2871 /* Spin waiting for the command to complete */ 2872 timeout = AAC_IMMEDIATE_TIMEOUT * 1000; 2873 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout); 2874 if (!timeout) { 2875 AACDB_PRINT(softs, CE_WARN, 2876 "Sync command timed out after %d seconds (0x%x)!", 2877 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs)); 2878 return (AACERR); 2879 } 2880 2881 /* Clear the completion flag */ 2882 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 2883 2884 /* Get the command status */ 2885 status = AAC_MAILBOX_GET(softs, 0); 2886 if (statusp != NULL) 2887 *statusp = status; 2888 if (status != SRB_STATUS_SUCCESS) { 2889 AACDB_PRINT(softs, CE_WARN, 2890 "Sync command fail: status = 0x%x", status); 2891 return (AACERR); 2892 } 2893 2894 return (AACOK); 2895 } 2896 2897 /* 2898 * Send a synchronous FIB to the adapter and wait for its completion 2899 */ 2900 static int 2901 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize) 2902 { 2903 struct aac_slot *slotp = &softs->sync_slot; 2904 ddi_dma_handle_t dma = slotp->fib_dma_handle; 2905 uint32_t status; 2906 int rval; 2907 2908 /* Sync fib only supports 512 bytes */ 2909 if (fibsize > AAC_FIB_SIZE) 2910 return (AACERR); 2911 2912 /* 2913 * Setup sync fib 2914 * Need not reinitialize FIB header if it's already been filled 2915 * by others like aac_cmd_fib_scsi as aac_cmd. 2916 */ 2917 if (slotp->acp == NULL) 2918 aac_cmd_fib_header(softs, slotp, cmd, fibsize); 2919 2920 AACDB_PRINT_FIB(softs, &softs->sync_slot); 2921 2922 (void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib), 2923 fibsize, DDI_DMA_SYNC_FORDEV); 2924 2925 /* Give the FIB to the controller, wait for a response. 
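 * Only the FIB's physical address travels through the sync mailbox
 * command (AAC_MONKER_SYNCFIB); the FIB body itself was synced to the
 * comm space just above, and the adapter's completion status is read
 * back from mailbox 0 inside aac_sync_mbcommand().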
*/ 2926 rval = aac_sync_mbcommand(softs, AAC_MONKER_SYNCFIB, 2927 slotp->fib_phyaddr, 0, 0, 0, &status); 2928 if (rval == AACERR) { 2929 AACDB_PRINT(softs, CE_WARN, 2930 "Send sync fib to controller failed"); 2931 return (AACERR); 2932 } 2933 2934 (void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib), 2935 AAC_FIB_SIZE, DDI_DMA_SYNC_FORCPU); 2936 2937 if ((aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) || 2938 (aac_check_dma_handle(dma) != DDI_SUCCESS)) { 2939 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 2940 return (AACERR); 2941 } 2942 2943 return (AACOK); 2944 } 2945 2946 static void 2947 aac_cmd_initq(struct aac_cmd_queue *q) 2948 { 2949 q->q_head = NULL; 2950 q->q_tail = (struct aac_cmd *)&q->q_head; 2951 } 2952 2953 /* 2954 * Remove a cmd from the head of q 2955 */ 2956 static struct aac_cmd * 2957 aac_cmd_dequeue(struct aac_cmd_queue *q) 2958 { 2959 struct aac_cmd *acp; 2960 2961 _NOTE(ASSUMING_PROTECTED(*q)) 2962 2963 if ((acp = q->q_head) != NULL) { 2964 if ((q->q_head = acp->next) != NULL) 2965 acp->next = NULL; 2966 else 2967 q->q_tail = (struct aac_cmd *)&q->q_head; 2968 acp->prev = NULL; 2969 } 2970 return (acp); 2971 } 2972 2973 /* 2974 * Add a cmd to the tail of q 2975 */ 2976 static void 2977 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp) 2978 { 2979 ASSERT(acp->next == NULL); 2980 acp->prev = q->q_tail; 2981 q->q_tail->next = acp; 2982 q->q_tail = acp; 2983 } 2984 2985 /* 2986 * Remove the cmd ac from q 2987 */ 2988 static void 2989 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp) 2990 { 2991 if (acp->prev) { 2992 if ((acp->prev->next = acp->next) != NULL) { 2993 acp->next->prev = acp->prev; 2994 acp->next = NULL; 2995 } else { 2996 q->q_tail = acp->prev; 2997 } 2998 acp->prev = NULL; 2999 } 3000 /* ac is not in the queue */ 3001 } 3002 3003 /* 3004 * Atomically insert an entry into the nominated queue, returns 0 on success or 3005 * AACERR if the queue is full. 3006 * 3007 * Note: it would be more efficient to defer notifying the controller in 3008 * the case where we may be inserting several entries in rapid succession, 3009 * but implementing this usefully may be difficult (it would involve a 3010 * separate queue/notify interface). 
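 *
 * In outline this is a producer/consumer ring (a simplified sketch of
 * the code below; the real indices live in the shared comm space and
 * every access goes through ddi_get32()/ddi_put32() plus the
 * appropriate ddi_dma_sync() calls):
 *
 *	pi = qt_qindex[queue][AAC_PRODUCER_INDEX];
 *	ci = qt_qindex[queue][AAC_CONSUMER_INDEX];
 *	if (pi >= aac_qinfo[queue].size)
 *		pi = 0;			wrap before the full check
 *	if (pi + 1 == ci)
 *		return (AACERR);	queue full
 *	qentries[queue][pi] = { fib_size, fib_addr };
 *	qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
 *	if (aac_qinfo[queue].notify != 0)
 *		AAC_NOTIFY(softs, aac_qinfo[queue].notify);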
3011 */ 3012 static int 3013 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr, 3014 uint32_t fib_size) 3015 { 3016 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3017 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3018 uint32_t pi, ci; 3019 3020 DBCALLED(softs, 2); 3021 3022 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q); 3023 3024 /* Get the producer/consumer indices */ 3025 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3026 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3027 DDI_DMA_SYNC_FORCPU); 3028 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3029 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3030 return (AACERR); 3031 } 3032 3033 pi = ddi_get32(acc, 3034 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3035 ci = ddi_get32(acc, 3036 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3037 3038 /* 3039 * Wrap the queue first before we check the queue to see 3040 * if it is full 3041 */ 3042 if (pi >= aac_qinfo[queue].size) 3043 pi = 0; 3044 3045 /* XXX queue full */ 3046 if ((pi + 1) == ci) 3047 return (AACERR); 3048 3049 /* Fill in queue entry */ 3050 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size); 3051 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr); 3052 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3053 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3054 DDI_DMA_SYNC_FORDEV); 3055 3056 /* Update producer index */ 3057 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX], 3058 pi + 1); 3059 (void) ddi_dma_sync(dma, 3060 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \ 3061 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3062 DDI_DMA_SYNC_FORDEV); 3063 3064 if (aac_qinfo[queue].notify != 0) 3065 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3066 return (AACOK); 3067 } 3068 3069 /* 3070 * Atomically remove one entry from the nominated queue, returns 0 on 3071 * success or AACERR if the queue is empty. 
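 *
 * The consumer side mirrors aac_fib_enqueue() (again a simplified
 * sketch of the code below):
 *
 *	if (ci == pi)
 *		return (AACERR);	queue empty
 *	*idxp = qentries[queue][ci].aq_fib_addr;
 *	qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;
 *	if the queue had been full, notify the adapter so that it can
 *	resume producing entries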
3072 */ 3073 static int 3074 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp) 3075 { 3076 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3077 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3078 uint32_t pi, ci; 3079 int unfull = 0; 3080 3081 DBCALLED(softs, 2); 3082 3083 ASSERT(idxp); 3084 3085 /* Get the producer/consumer indices */ 3086 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3087 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3088 DDI_DMA_SYNC_FORCPU); 3089 pi = ddi_get32(acc, 3090 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3091 ci = ddi_get32(acc, 3092 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3093 3094 /* Check for queue empty */ 3095 if (ci == pi) 3096 return (AACERR); 3097 3098 if (pi >= aac_qinfo[queue].size) 3099 pi = 0; 3100 3101 /* Check for queue full */ 3102 if (ci == pi + 1) 3103 unfull = 1; 3104 3105 /* 3106 * The controller does not wrap the queue, 3107 * so we have to do it by ourselves 3108 */ 3109 if (ci >= aac_qinfo[queue].size) 3110 ci = 0; 3111 3112 /* Fetch the entry */ 3113 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3114 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3115 DDI_DMA_SYNC_FORCPU); 3116 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3117 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3118 return (AACERR); 3119 } 3120 3121 switch (queue) { 3122 case AAC_HOST_NORM_RESP_Q: 3123 case AAC_HOST_HIGH_RESP_Q: 3124 *idxp = ddi_get32(acc, 3125 &(softs->qentries[queue] + ci)->aq_fib_addr); 3126 break; 3127 3128 case AAC_HOST_NORM_CMD_Q: 3129 case AAC_HOST_HIGH_CMD_Q: 3130 *idxp = ddi_get32(acc, 3131 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE; 3132 break; 3133 3134 default: 3135 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()"); 3136 return (AACERR); 3137 } 3138 3139 /* Update consumer index */ 3140 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX], 3141 ci + 1); 3142 (void) ddi_dma_sync(dma, 3143 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \ 3144 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3145 DDI_DMA_SYNC_FORDEV); 3146 3147 if (unfull && aac_qinfo[queue].notify != 0) 3148 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3149 return (AACOK); 3150 } 3151 3152 /* 3153 * Request information of the container cid 3154 */ 3155 static struct aac_mntinforesp * 3156 aac_get_container_info(struct aac_softstate *softs, int cid) 3157 { 3158 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3159 struct aac_fib *fibp = softs->sync_slot.fibp; 3160 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0]; 3161 struct aac_mntinforesp *mir; 3162 3163 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */ 3164 (softs->flags & AAC_FLAGS_LBA_64BIT) ? 
3165 VM_NameServe64 : VM_NameServe); 3166 ddi_put32(acc, &mi->MntType, FT_FILESYS); 3167 ddi_put32(acc, &mi->MntCount, cid); 3168 3169 if (aac_sync_fib(softs, ContainerCommand, 3170 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) { 3171 AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid); 3172 return (NULL); 3173 } 3174 3175 mir = (struct aac_mntinforesp *)&fibp->data[0]; 3176 if (ddi_get32(acc, &mir->Status) == ST_OK) 3177 return (mir); 3178 return (NULL); 3179 } 3180 3181 static int 3182 aac_get_container_count(struct aac_softstate *softs, int *count) 3183 { 3184 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3185 struct aac_mntinforesp *mir; 3186 3187 if ((mir = aac_get_container_info(softs, 0)) == NULL) 3188 return (AACERR); 3189 *count = ddi_get32(acc, &mir->MntRespCount); 3190 if (*count > AAC_MAX_LD) { 3191 AACDB_PRINT(softs, CE_CONT, 3192 "container count(%d) > AAC_MAX_LD", *count); 3193 return (AACERR); 3194 } 3195 return (AACOK); 3196 } 3197 3198 static int 3199 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid) 3200 { 3201 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3202 struct aac_Container *ct = (struct aac_Container *) \ 3203 &softs->sync_slot.fibp->data[0]; 3204 3205 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE); 3206 ddi_put32(acc, &ct->Command, VM_ContainerConfig); 3207 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID); 3208 ddi_put32(acc, &ct->CTCommand.param[0], cid); 3209 3210 if (aac_sync_fib(softs, ContainerCommand, 3211 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR) 3212 return (AACERR); 3213 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK) 3214 return (AACERR); 3215 3216 *uid = ddi_get32(acc, &ct->CTCommand.param[1]); 3217 return (AACOK); 3218 } 3219 3220 static int 3221 aac_probe_container(struct aac_softstate *softs, uint32_t cid) 3222 { 3223 struct aac_container *dvp = &softs->containers[cid]; 3224 ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle; 3225 struct aac_mntinforesp *mir; 3226 uint64_t size; 3227 uint32_t uid; 3228 3229 /* Get container basic info */ 3230 if ((mir = aac_get_container_info(softs, cid)) == NULL) 3231 return (AACERR); 3232 3233 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) { 3234 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3235 AACDB_PRINT(softs, CE_NOTE, 3236 ">>> Container %d deleted", cid); 3237 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3238 (void) aac_dr_event(softs, dvp->cid, -1, 3239 AAC_EVT_OFFLINE); 3240 } 3241 } else { 3242 size = AAC_MIR_SIZE(softs, acc, mir); 3243 3244 /* Get container UID */ 3245 if (aac_get_container_uid(softs, cid, &uid) == AACERR) { 3246 AACDB_PRINT(softs, CE_CONT, 3247 "query container %d uid failed", cid); 3248 return (AACERR); 3249 } 3250 AACDB_PRINT(softs, CE_CONT, "uid=0x%08x", uid); 3251 3252 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3253 if (dvp->uid != uid) { 3254 AACDB_PRINT(softs, CE_WARN, 3255 ">>> Container %u uid changed to %d", 3256 cid, uid); 3257 dvp->uid = uid; 3258 } 3259 if (dvp->size != size) { 3260 AACDB_PRINT(softs, CE_NOTE, 3261 ">>> Container %u size changed to %"PRIu64, 3262 cid, size); 3263 dvp->size = size; 3264 } 3265 } else { /* Init new container */ 3266 AACDB_PRINT(softs, CE_NOTE, 3267 ">>> Container %d added: " \ 3268 "size=0x%x.%08x, type=%d, name=%s", 3269 cid, 3270 ddi_get32(acc, &mir->MntObj.CapacityHigh), 3271 ddi_get32(acc, &mir->MntObj.Capacity), 3272 ddi_get32(acc, &mir->MntObj.VolType), 3273 mir->MntObj.FileSystemName); 3274 dvp->dev.flags |= AAC_DFLAG_VALID; 3275 dvp->dev.type = AAC_DEV_LD; 3276 3277 
dvp->cid = cid; 3278 dvp->uid = uid; 3279 dvp->size = size; 3280 dvp->locked = 0; 3281 dvp->deleted = 0; 3282 (void) aac_dr_event(softs, dvp->cid, -1, 3283 AAC_EVT_ONLINE); 3284 } 3285 } 3286 return (AACOK); 3287 } 3288 3289 /* 3290 * Do a rescan of all the possible containers and update the container list 3291 * with newly online/offline containers, and prepare for autoconfiguration. 3292 */ 3293 static int 3294 aac_probe_containers(struct aac_softstate *softs) 3295 { 3296 int i, count, total; 3297 3298 /* Loop over possible containers */ 3299 count = softs->container_count; 3300 if (aac_get_container_count(softs, &count) == AACERR) 3301 return (AACERR); 3302 for (i = total = 0; i < count; i++) { 3303 if (aac_probe_container(softs, i) == AACOK) 3304 total++; 3305 } 3306 if (count < softs->container_count) { 3307 struct aac_container *dvp; 3308 3309 for (dvp = &softs->containers[count]; 3310 dvp < &softs->containers[softs->container_count]; dvp++) { 3311 if (!AAC_DEV_IS_VALID(&dvp->dev)) 3312 continue; 3313 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted", 3314 dvp->cid); 3315 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3316 (void) aac_dr_event(softs, dvp->cid, -1, 3317 AAC_EVT_OFFLINE); 3318 } 3319 } 3320 softs->container_count = count; 3321 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total); 3322 return (AACOK); 3323 } 3324 3325 static int 3326 aac_alloc_comm_space(struct aac_softstate *softs) 3327 { 3328 size_t rlen; 3329 ddi_dma_cookie_t cookie; 3330 uint_t cookien; 3331 3332 /* Allocate DMA for comm. space */ 3333 if (ddi_dma_alloc_handle( 3334 softs->devinfo_p, 3335 &softs->addr_dma_attr, 3336 DDI_DMA_SLEEP, 3337 NULL, 3338 &softs->comm_space_dma_handle) != DDI_SUCCESS) { 3339 AACDB_PRINT(softs, CE_WARN, 3340 "Cannot alloc dma handle for communication area"); 3341 goto error; 3342 } 3343 if (ddi_dma_mem_alloc( 3344 softs->comm_space_dma_handle, 3345 sizeof (struct aac_comm_space), 3346 &softs->acc_attr, 3347 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3348 DDI_DMA_SLEEP, 3349 NULL, 3350 (caddr_t *)&softs->comm_space, 3351 &rlen, 3352 &softs->comm_space_acc_handle) != DDI_SUCCESS) { 3353 AACDB_PRINT(softs, CE_WARN, 3354 "Cannot alloc mem for communication area"); 3355 goto error; 3356 } 3357 if (ddi_dma_addr_bind_handle( 3358 softs->comm_space_dma_handle, 3359 NULL, 3360 (caddr_t)softs->comm_space, 3361 sizeof (struct aac_comm_space), 3362 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3363 DDI_DMA_SLEEP, 3364 NULL, 3365 &cookie, 3366 &cookien) != DDI_DMA_MAPPED) { 3367 AACDB_PRINT(softs, CE_WARN, 3368 "DMA bind failed for communication area"); 3369 goto error; 3370 } 3371 softs->comm_space_phyaddr = cookie.dmac_address; 3372 3373 /* Setup sync FIB space */ 3374 softs->sync_slot.fibp = &softs->comm_space->sync_fib; 3375 softs->sync_slot.fib_phyaddr = softs->comm_space_phyaddr + \ 3376 offsetof(struct aac_comm_space, sync_fib); 3377 softs->sync_slot.fib_acc_handle = softs->comm_space_acc_handle; 3378 softs->sync_slot.fib_dma_handle = softs->comm_space_dma_handle; 3379 3380 return (AACOK); 3381 error: 3382 if (softs->comm_space_acc_handle) { 3383 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3384 softs->comm_space_acc_handle = NULL; 3385 } 3386 if (softs->comm_space_dma_handle) { 3387 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3388 softs->comm_space_dma_handle = NULL; 3389 } 3390 return (AACERR); 3391 } 3392 3393 static void 3394 aac_free_comm_space(struct aac_softstate *softs) 3395 { 3396 softs->sync_slot.fibp = NULL; 3397 softs->sync_slot.fib_phyaddr = NULL; 3398 
softs->sync_slot.fib_acc_handle = NULL; 3399 softs->sync_slot.fib_dma_handle = NULL; 3400 3401 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle); 3402 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3403 softs->comm_space_acc_handle = NULL; 3404 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3405 softs->comm_space_dma_handle = NULL; 3406 softs->comm_space_phyaddr = NULL; 3407 } 3408 3409 /* 3410 * Initialize the data structures that are required for the communication 3411 * interface to operate 3412 */ 3413 static int 3414 aac_setup_comm_space(struct aac_softstate *softs) 3415 { 3416 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3417 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3418 uint32_t comm_space_phyaddr; 3419 struct aac_adapter_init *initp; 3420 int qoffset; 3421 3422 comm_space_phyaddr = softs->comm_space_phyaddr; 3423 3424 /* Setup adapter init struct */ 3425 initp = &softs->comm_space->init_data; 3426 bzero(initp, sizeof (struct aac_adapter_init)); 3427 3428 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION); 3429 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time()); 3430 3431 /* Setup new/old comm. specific data */ 3432 if (softs->flags & AAC_FLAGS_RAW_IO) { 3433 ddi_put32(acc, &initp->InitStructRevision, 3434 AAC_INIT_STRUCT_REVISION_4); 3435 ddi_put32(acc, &initp->InitFlags, 3436 (softs->flags & AAC_FLAGS_NEW_COMM) ? 3437 AAC_INIT_FLAGS_NEW_COMM_SUPPORTED : 0); 3438 /* Setup the preferred settings */ 3439 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs); 3440 ddi_put32(acc, &initp->MaxIoSize, 3441 (softs->aac_max_sectors << 9)); 3442 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size); 3443 } else { 3444 /* 3445 * Tells the adapter about the physical location of various 3446 * important shared data structures 3447 */ 3448 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress, 3449 comm_space_phyaddr + \ 3450 offsetof(struct aac_comm_space, adapter_fibs)); 3451 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0); 3452 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE); 3453 ddi_put32(acc, &initp->AdapterFibsSize, 3454 AAC_ADAPTER_FIBS * AAC_FIB_SIZE); 3455 ddi_put32(acc, &initp->PrintfBufferAddress, 3456 comm_space_phyaddr + \ 3457 offsetof(struct aac_comm_space, adapter_print_buf)); 3458 ddi_put32(acc, &initp->PrintfBufferSize, 3459 AAC_ADAPTER_PRINT_BUFSIZE); 3460 ddi_put32(acc, &initp->MiniPortRevision, 3461 AAC_INIT_STRUCT_MINIPORT_REVISION); 3462 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN); 3463 3464 qoffset = (comm_space_phyaddr + \ 3465 offsetof(struct aac_comm_space, qtable)) % \ 3466 AAC_QUEUE_ALIGN; 3467 if (qoffset) 3468 qoffset = AAC_QUEUE_ALIGN - qoffset; 3469 softs->qtablep = (struct aac_queue_table *) \ 3470 ((char *)&softs->comm_space->qtable + qoffset); 3471 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \ 3472 offsetof(struct aac_comm_space, qtable) + qoffset); 3473 3474 /* Init queue table */ 3475 ddi_put32(acc, &softs->qtablep-> \ 3476 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3477 AAC_HOST_NORM_CMD_ENTRIES); 3478 ddi_put32(acc, &softs->qtablep-> \ 3479 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3480 AAC_HOST_NORM_CMD_ENTRIES); 3481 ddi_put32(acc, &softs->qtablep-> \ 3482 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3483 AAC_HOST_HIGH_CMD_ENTRIES); 3484 ddi_put32(acc, &softs->qtablep-> \ 3485 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3486 AAC_HOST_HIGH_CMD_ENTRIES); 3487 ddi_put32(acc, &softs->qtablep-> \ 3488 
qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3489 AAC_ADAP_NORM_CMD_ENTRIES); 3490 ddi_put32(acc, &softs->qtablep-> \ 3491 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3492 AAC_ADAP_NORM_CMD_ENTRIES); 3493 ddi_put32(acc, &softs->qtablep-> \ 3494 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3495 AAC_ADAP_HIGH_CMD_ENTRIES); 3496 ddi_put32(acc, &softs->qtablep-> \ 3497 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3498 AAC_ADAP_HIGH_CMD_ENTRIES); 3499 ddi_put32(acc, &softs->qtablep-> \ 3500 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3501 AAC_HOST_NORM_RESP_ENTRIES); 3502 ddi_put32(acc, &softs->qtablep-> \ 3503 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3504 AAC_HOST_NORM_RESP_ENTRIES); 3505 ddi_put32(acc, &softs->qtablep-> \ 3506 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3507 AAC_HOST_HIGH_RESP_ENTRIES); 3508 ddi_put32(acc, &softs->qtablep-> \ 3509 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3510 AAC_HOST_HIGH_RESP_ENTRIES); 3511 ddi_put32(acc, &softs->qtablep-> \ 3512 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3513 AAC_ADAP_NORM_RESP_ENTRIES); 3514 ddi_put32(acc, &softs->qtablep-> \ 3515 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3516 AAC_ADAP_NORM_RESP_ENTRIES); 3517 ddi_put32(acc, &softs->qtablep-> \ 3518 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3519 AAC_ADAP_HIGH_RESP_ENTRIES); 3520 ddi_put32(acc, &softs->qtablep-> \ 3521 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3522 AAC_ADAP_HIGH_RESP_ENTRIES); 3523 3524 /* Init queue entries */ 3525 softs->qentries[AAC_HOST_NORM_CMD_Q] = 3526 &softs->qtablep->qt_HostNormCmdQueue[0]; 3527 softs->qentries[AAC_HOST_HIGH_CMD_Q] = 3528 &softs->qtablep->qt_HostHighCmdQueue[0]; 3529 softs->qentries[AAC_ADAP_NORM_CMD_Q] = 3530 &softs->qtablep->qt_AdapNormCmdQueue[0]; 3531 softs->qentries[AAC_ADAP_HIGH_CMD_Q] = 3532 &softs->qtablep->qt_AdapHighCmdQueue[0]; 3533 softs->qentries[AAC_HOST_NORM_RESP_Q] = 3534 &softs->qtablep->qt_HostNormRespQueue[0]; 3535 softs->qentries[AAC_HOST_HIGH_RESP_Q] = 3536 &softs->qtablep->qt_HostHighRespQueue[0]; 3537 softs->qentries[AAC_ADAP_NORM_RESP_Q] = 3538 &softs->qtablep->qt_AdapNormRespQueue[0]; 3539 softs->qentries[AAC_ADAP_HIGH_RESP_Q] = 3540 &softs->qtablep->qt_AdapHighRespQueue[0]; 3541 } 3542 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV); 3543 3544 /* Send init structure to the card */ 3545 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT, 3546 comm_space_phyaddr + \ 3547 offsetof(struct aac_comm_space, init_data), 3548 0, 0, 0, NULL) == AACERR) { 3549 AACDB_PRINT(softs, CE_WARN, 3550 "Cannot send init structure to adapter"); 3551 return (AACERR); 3552 } 3553 3554 return (AACOK); 3555 } 3556 3557 static uchar_t * 3558 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf) 3559 { 3560 (void) memset(buf, ' ', AAC_VENDOR_LEN); 3561 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name)); 3562 return (buf + AAC_VENDOR_LEN); 3563 } 3564 3565 static uchar_t * 3566 aac_product_id(struct aac_softstate *softs, uchar_t *buf) 3567 { 3568 (void) memset(buf, ' ', AAC_PRODUCT_LEN); 3569 bcopy(softs->product_name, buf, strlen(softs->product_name)); 3570 return (buf + AAC_PRODUCT_LEN); 3571 } 3572 3573 /* 3574 * Construct unit serial number from container uid 3575 */ 3576 static uchar_t * 3577 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf) 3578 { 3579 int i, d; 3580 uint32_t uid; 3581 3582 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD); 3583 3584 uid = softs->containers[tgt].uid; 3585 for (i = 7; i >= 0; 
i--) { 3586 d = uid & 0xf; 3587 buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d; 3588 uid >>= 4; 3589 } 3590 return (buf + 8); 3591 } 3592 3593 /* 3594 * SPC-3 7.5 INQUIRY command implementation 3595 */ 3596 static void 3597 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt, 3598 union scsi_cdb *cdbp, struct buf *bp) 3599 { 3600 int tgt = pkt->pkt_address.a_target; 3601 char *b_addr = NULL; 3602 uchar_t page = cdbp->cdb_opaque[2]; 3603 3604 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) { 3605 /* Command Support Data is not supported */ 3606 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0); 3607 return; 3608 } 3609 3610 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3611 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3612 bp_mapin(bp); 3613 b_addr = bp->b_un.b_addr; 3614 } 3615 3616 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) { 3617 uchar_t *vpdp = (uchar_t *)b_addr; 3618 uchar_t *idp, *sp; 3619 3620 /* SPC-3 8.4 Vital product data parameters */ 3621 switch (page) { 3622 case 0x00: 3623 /* Supported VPD pages */ 3624 if (vpdp == NULL || 3625 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3)) 3626 return; 3627 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3628 vpdp[AAC_VPD_PAGE_CODE] = 0x00; 3629 vpdp[AAC_VPD_PAGE_LENGTH] = 3; 3630 3631 vpdp[AAC_VPD_PAGE_DATA] = 0x00; 3632 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80; 3633 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83; 3634 3635 pkt->pkt_state |= STATE_XFERRED_DATA; 3636 break; 3637 3638 case 0x80: 3639 /* Unit serial number page */ 3640 if (vpdp == NULL || 3641 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8)) 3642 return; 3643 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3644 vpdp[AAC_VPD_PAGE_CODE] = 0x80; 3645 vpdp[AAC_VPD_PAGE_LENGTH] = 8; 3646 3647 sp = &vpdp[AAC_VPD_PAGE_DATA]; 3648 (void) aac_lun_serialno(softs, tgt, sp); 3649 3650 pkt->pkt_state |= STATE_XFERRED_DATA; 3651 break; 3652 3653 case 0x83: 3654 /* Device identification page */ 3655 if (vpdp == NULL || 3656 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32)) 3657 return; 3658 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3659 vpdp[AAC_VPD_PAGE_CODE] = 0x83; 3660 3661 idp = &vpdp[AAC_VPD_PAGE_DATA]; 3662 bzero(idp, AAC_VPD_ID_LENGTH); 3663 idp[AAC_VPD_ID_CODESET] = 0x02; 3664 idp[AAC_VPD_ID_TYPE] = 0x01; 3665 3666 /* 3667 * SPC-3 Table 111 - Identifier type 3668 * One recommanded method of constructing the remainder 3669 * of identifier field is to concatenate the product 3670 * identification field from the standard INQUIRY data 3671 * field and the product serial number field from the 3672 * unit serial number page. 
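		 *
		 * NOTE: that is what the code below does: it concatenates the
		 * space-padded vendor id, the product id and the 8-character
		 * hex serial number derived from the container uid. Assuming
		 * the usual 8-byte vendor and 16-byte product INQUIRY field
		 * widths, the resulting identifier is 32 bytes long, which
		 * matches the (AAC_VPD_PAGE_DATA + 32) minimum transfer
		 * length checked above.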
3673 */ 3674 sp = &idp[AAC_VPD_ID_DATA]; 3675 sp = aac_vendor_id(softs, sp); 3676 sp = aac_product_id(softs, sp); 3677 sp = aac_lun_serialno(softs, tgt, sp); 3678 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \ 3679 (uintptr_t)&idp[AAC_VPD_ID_DATA]; 3680 3681 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \ 3682 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA]; 3683 pkt->pkt_state |= STATE_XFERRED_DATA; 3684 break; 3685 3686 default: 3687 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3688 0x24, 0x00, 0); 3689 break; 3690 } 3691 } else { 3692 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr; 3693 size_t len = sizeof (struct scsi_inquiry); 3694 3695 if (page != 0) { 3696 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3697 0x24, 0x00, 0); 3698 return; 3699 } 3700 if (inqp == NULL || bp->b_bcount < len) 3701 return; 3702 3703 bzero(inqp, len); 3704 inqp->inq_len = AAC_ADDITIONAL_LEN; 3705 inqp->inq_ansi = AAC_ANSI_VER; 3706 inqp->inq_rdf = AAC_RESP_DATA_FORMAT; 3707 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid); 3708 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid); 3709 bcopy("V1.0", inqp->inq_revision, 4); 3710 inqp->inq_cmdque = 1; /* enable tagged-queuing */ 3711 /* 3712 * For "sd-max-xfer-size" property which may impact performance 3713 * when IO threads increase. 3714 */ 3715 inqp->inq_wbus32 = 1; 3716 3717 pkt->pkt_state |= STATE_XFERRED_DATA; 3718 } 3719 } 3720 3721 /* 3722 * SPC-3 7.10 MODE SENSE command implementation 3723 */ 3724 static void 3725 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt, 3726 union scsi_cdb *cdbp, struct buf *bp, int capacity) 3727 { 3728 uchar_t pagecode; 3729 struct mode_header *headerp; 3730 struct mode_header_g1 *g1_headerp; 3731 unsigned int ncyl; 3732 caddr_t sense_data; 3733 caddr_t next_page; 3734 size_t sdata_size; 3735 size_t pages_size; 3736 int unsupport_page = 0; 3737 3738 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE || 3739 cdbp->scc_cmd == SCMD_MODE_SENSE_G1); 3740 3741 if (!(bp && bp->b_un.b_addr && bp->b_bcount)) 3742 return; 3743 3744 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3745 bp_mapin(bp); 3746 pkt->pkt_state |= STATE_XFERRED_DATA; 3747 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F; 3748 3749 /* calculate the size of needed buffer */ 3750 if (cdbp->scc_cmd == SCMD_MODE_SENSE) 3751 sdata_size = MODE_HEADER_LENGTH; 3752 else /* must be SCMD_MODE_SENSE_G1 */ 3753 sdata_size = MODE_HEADER_LENGTH_G1; 3754 3755 pages_size = 0; 3756 switch (pagecode) { 3757 case SD_MODE_SENSE_PAGE3_CODE: 3758 pages_size += sizeof (struct mode_format); 3759 break; 3760 3761 case SD_MODE_SENSE_PAGE4_CODE: 3762 pages_size += sizeof (struct mode_geometry); 3763 break; 3764 3765 case MODEPAGE_CTRL_MODE: 3766 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3767 pages_size += sizeof (struct mode_control_scsi3); 3768 } else { 3769 unsupport_page = 1; 3770 } 3771 break; 3772 3773 case MODEPAGE_ALLPAGES: 3774 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 3775 pages_size += sizeof (struct mode_format) + 3776 sizeof (struct mode_geometry) + 3777 sizeof (struct mode_control_scsi3); 3778 } else { 3779 pages_size += sizeof (struct mode_format) + 3780 sizeof (struct mode_geometry); 3781 } 3782 break; 3783 3784 default: 3785 /* unsupported pages */ 3786 unsupport_page = 1; 3787 } 3788 3789 /* allocate buffer to fill the send data */ 3790 sdata_size += pages_size; 3791 sense_data = kmem_zalloc(sdata_size, KM_SLEEP); 3792 3793 if (cdbp->scc_cmd == SCMD_MODE_SENSE) { 3794 headerp = (struct mode_header *)sense_data; 3795 headerp->length = MODE_HEADER_LENGTH + pages_size - 3796 sizeof 
(headerp->length); 3797 headerp->bdesc_length = 0; 3798 next_page = sense_data + sizeof (struct mode_header); 3799 } else { 3800 g1_headerp = (void *)sense_data; 3801 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size - 3802 sizeof (g1_headerp->length)); 3803 g1_headerp->bdesc_length = 0; 3804 next_page = sense_data + sizeof (struct mode_header_g1); 3805 } 3806 3807 if (unsupport_page) 3808 goto finish; 3809 3810 if (pagecode == SD_MODE_SENSE_PAGE3_CODE || 3811 pagecode == MODEPAGE_ALLPAGES) { 3812 /* SBC-3 7.1.3.3 Format device page */ 3813 struct mode_format *page3p; 3814 3815 page3p = (void *)next_page; 3816 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE; 3817 page3p->mode_page.length = sizeof (struct mode_format); 3818 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE); 3819 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK); 3820 3821 next_page += sizeof (struct mode_format); 3822 } 3823 3824 if (pagecode == SD_MODE_SENSE_PAGE4_CODE || 3825 pagecode == MODEPAGE_ALLPAGES) { 3826 /* SBC-3 7.1.3.8 Rigid disk device geometry page */ 3827 struct mode_geometry *page4p; 3828 3829 page4p = (void *)next_page; 3830 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE; 3831 page4p->mode_page.length = sizeof (struct mode_geometry); 3832 page4p->heads = AAC_NUMBER_OF_HEADS; 3833 page4p->rpm = BE_16(AAC_ROTATION_SPEED); 3834 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK); 3835 page4p->cyl_lb = ncyl & 0xff; 3836 page4p->cyl_mb = (ncyl >> 8) & 0xff; 3837 page4p->cyl_ub = (ncyl >> 16) & 0xff; 3838 3839 next_page += sizeof (struct mode_geometry); 3840 } 3841 3842 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) && 3843 softs->flags & AAC_FLAGS_LBA_64BIT) { 3844 /* 64-bit LBA need large sense data */ 3845 struct mode_control_scsi3 *mctl; 3846 3847 mctl = (void *)next_page; 3848 mctl->mode_page.code = MODEPAGE_CTRL_MODE; 3849 mctl->mode_page.length = 3850 sizeof (struct mode_control_scsi3) - 3851 sizeof (struct mode_page); 3852 mctl->d_sense = 1; 3853 } 3854 3855 finish: 3856 /* copyout the valid data. */ 3857 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount)); 3858 kmem_free(sense_data, sdata_size); 3859 } 3860 3861 static int 3862 aac_name_node(dev_info_t *dip, char *name, int len) 3863 { 3864 int tgt, lun; 3865 3866 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3867 DDI_PROP_DONTPASS, "target", -1); 3868 if (tgt == -1) 3869 return (DDI_FAILURE); 3870 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3871 DDI_PROP_DONTPASS, "lun", -1); 3872 if (lun == -1) 3873 return (DDI_FAILURE); 3874 3875 (void) snprintf(name, len, "%x,%x", tgt, lun); 3876 return (DDI_SUCCESS); 3877 } 3878 3879 /*ARGSUSED*/ 3880 static int 3881 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 3882 scsi_hba_tran_t *tran, struct scsi_device *sd) 3883 { 3884 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 3885 #if defined(DEBUG) || defined(__lock_lint) 3886 int ctl = ddi_get_instance(softs->devinfo_p); 3887 #endif 3888 uint16_t tgt = sd->sd_address.a_target; 3889 uint8_t lun = sd->sd_address.a_lun; 3890 struct aac_device *dvp; 3891 3892 DBCALLED(softs, 2); 3893 3894 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 3895 /* 3896 * If no persistent node exist, we don't allow .conf node 3897 * to be created. 
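		 *
		 * NOTE: the logic below appears to follow the usual SCSA
		 * pattern: if a child node for this target/lun already
		 * exists, ndi_merge_node() merges the .conf node's
		 * properties into it and this tgt_init fails so that no
		 * duplicate node is created; the .conf node is only accepted
		 * (DDI_SUCCESS returned) when such a merge is not possible.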
3898 */ 3899 if (aac_find_child(softs, tgt, lun) != NULL) { 3900 if (ndi_merge_node(tgt_dip, aac_name_node) != 3901 DDI_SUCCESS) 3902 /* Create this .conf node */ 3903 return (DDI_SUCCESS); 3904 } 3905 return (DDI_FAILURE); 3906 } 3907 3908 /* 3909 * Only support container/phys. device that has been 3910 * detected and valid 3911 */ 3912 mutex_enter(&softs->io_lock); 3913 if (tgt >= AAC_MAX_DEV(softs)) { 3914 AACDB_PRINT_TRAN(softs, 3915 "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun); 3916 mutex_exit(&softs->io_lock); 3917 return (DDI_FAILURE); 3918 } 3919 3920 if (tgt < AAC_MAX_LD) { 3921 dvp = (struct aac_device *)&softs->containers[tgt]; 3922 if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) { 3923 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d", 3924 ctl, tgt, lun); 3925 mutex_exit(&softs->io_lock); 3926 return (DDI_FAILURE); 3927 } 3928 /* 3929 * Save the tgt_dip for the given target if one doesn't exist 3930 * already. Dip's for non-existance tgt's will be cleared in 3931 * tgt_free. 3932 */ 3933 if (softs->containers[tgt].dev.dip == NULL && 3934 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) 3935 softs->containers[tgt].dev.dip = tgt_dip; 3936 } else { 3937 dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)]; 3938 } 3939 3940 AACDB_PRINT(softs, CE_NOTE, 3941 "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun, 3942 (dvp->type == AAC_DEV_PD) ? "pd" : "ld"); 3943 mutex_exit(&softs->io_lock); 3944 return (DDI_SUCCESS); 3945 } 3946 3947 static void 3948 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 3949 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 3950 { 3951 #ifndef __lock_lint 3952 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran)) 3953 #endif 3954 3955 struct aac_softstate *softs = SD2AAC(sd); 3956 int tgt = sd->sd_address.a_target; 3957 3958 mutex_enter(&softs->io_lock); 3959 if (tgt < AAC_MAX_LD) { 3960 if (softs->containers[tgt].dev.dip == tgt_dip) 3961 softs->containers[tgt].dev.dip = NULL; 3962 } else { 3963 softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID; 3964 } 3965 mutex_exit(&softs->io_lock); 3966 } 3967 3968 /* 3969 * Check if the firmware is Up And Running. If it is in the Kernel Panic 3970 * state, (BlinkLED code + 1) is returned. 3971 * 0 -- firmware up and running 3972 * -1 -- firmware dead 3973 * >0 -- firmware kernel panic 3974 */ 3975 static int 3976 aac_check_adapter_health(struct aac_softstate *softs) 3977 { 3978 int rval; 3979 3980 rval = PCI_MEM_GET32(softs, AAC_OMR0); 3981 3982 if (rval & AAC_KERNEL_UP_AND_RUNNING) { 3983 rval = 0; 3984 } else if (rval & AAC_KERNEL_PANIC) { 3985 cmn_err(CE_WARN, "firmware panic"); 3986 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */ 3987 } else { 3988 cmn_err(CE_WARN, "firmware dead"); 3989 rval = -1; 3990 } 3991 return (rval); 3992 } 3993 3994 static void 3995 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp, 3996 uchar_t reason) 3997 { 3998 acp->flags |= AAC_CMD_ABORT; 3999 4000 if (acp->pkt) { 4001 /* 4002 * Each lun should generate a unit attention 4003 * condition when reset. 4004 * Phys. drives are treated as logical ones 4005 * during error recovery. 
4006 */ 4007 if (acp->slotp) { /* outstanding cmd */ 4008 acp->pkt->pkt_state |= STATE_GOT_STATUS; 4009 aac_set_arq_data_reset(softs, acp); 4010 } 4011 4012 switch (reason) { 4013 case CMD_TIMEOUT: 4014 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p", 4015 acp); 4016 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 4017 STAT_TIMEOUT | STAT_BUS_RESET); 4018 break; 4019 case CMD_RESET: 4020 /* aac support only RESET_ALL */ 4021 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp); 4022 aac_set_pkt_reason(softs, acp, CMD_RESET, 4023 STAT_BUS_RESET); 4024 break; 4025 case CMD_ABORTED: 4026 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p", 4027 acp); 4028 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 4029 STAT_ABORTED); 4030 break; 4031 } 4032 } 4033 aac_end_io(softs, acp); 4034 } 4035 4036 /* 4037 * Abort all the pending commands of type iocmd or just the command pkt 4038 * corresponding to pkt 4039 */ 4040 static void 4041 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt, 4042 int reason) 4043 { 4044 struct aac_cmd *ac_arg, *acp; 4045 int i; 4046 4047 if (pkt == NULL) { 4048 ac_arg = NULL; 4049 } else { 4050 ac_arg = PKT2AC(pkt); 4051 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ? 4052 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC; 4053 } 4054 4055 /* 4056 * a) outstanding commands on the controller 4057 * Note: should abort outstanding commands only after one 4058 * IOP reset has been done. 4059 */ 4060 if (iocmd & AAC_IOCMD_OUTSTANDING) { 4061 struct aac_cmd *acp; 4062 4063 for (i = 0; i < AAC_MAX_LD; i++) { 4064 if (AAC_DEV_IS_VALID(&softs->containers[i].dev)) 4065 softs->containers[i].reset = 1; 4066 } 4067 while ((acp = softs->q_busy.q_head) != NULL) 4068 aac_abort_iocmd(softs, acp, reason); 4069 } 4070 4071 /* b) commands in the waiting queues */ 4072 for (i = 0; i < AAC_CMDQ_NUM; i++) { 4073 if (iocmd & (1 << i)) { 4074 if (ac_arg) { 4075 aac_abort_iocmd(softs, ac_arg, reason); 4076 } else { 4077 while ((acp = softs->q_wait[i].q_head) != NULL) 4078 aac_abort_iocmd(softs, acp, reason); 4079 } 4080 } 4081 } 4082 } 4083 4084 /* 4085 * The draining thread is shared among quiesce threads. It terminates 4086 * when the adapter is quiesced or stopped by aac_stop_drain(). 4087 */ 4088 static void 4089 aac_check_drain(void *arg) 4090 { 4091 struct aac_softstate *softs = arg; 4092 4093 mutex_enter(&softs->io_lock); 4094 if (softs->ndrains) { 4095 softs->drain_timeid = 0; 4096 /* 4097 * If both ASYNC and SYNC bus throttle are held, 4098 * wake up threads only when both are drained out. 4099 */ 4100 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 || 4101 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) && 4102 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 || 4103 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0)) 4104 cv_broadcast(&softs->drain_cv); 4105 else 4106 softs->drain_timeid = timeout(aac_check_drain, softs, 4107 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4108 } 4109 mutex_exit(&softs->io_lock); 4110 } 4111 4112 /* 4113 * If not draining the outstanding cmds, drain them. Otherwise, 4114 * only update ndrains. 4115 */ 4116 static void 4117 aac_start_drain(struct aac_softstate *softs) 4118 { 4119 if (softs->ndrains == 0) { 4120 ASSERT(softs->drain_timeid == 0); 4121 softs->drain_timeid = timeout(aac_check_drain, softs, 4122 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4123 } 4124 softs->ndrains++; 4125 } 4126 4127 /* 4128 * Stop the draining thread when no other threads use it any longer. 4129 * Side effect: io_lock may be released in the middle. 
 */
static void
aac_stop_drain(struct aac_softstate *softs)
{
    softs->ndrains--;
    if (softs->ndrains == 0) {
        if (softs->drain_timeid != 0) {
            timeout_id_t tid = softs->drain_timeid;

            softs->drain_timeid = 0;
            mutex_exit(&softs->io_lock);
            (void) untimeout(tid);
            mutex_enter(&softs->io_lock);
        }
    }
}

/*
 * The following function comes from Adaptec:
 *
 * Once an IOP reset has been done, the driver basically has to re-initialize
 * the card as if it were coming up from a cold boot, and it is responsible
 * for any I/O that was outstanding to the adapter at the time of the IOP
 * reset. Prepare for the IOP reset by making the init code modular, with the
 * ability to call it from multiple places.
 */
static int
aac_reset_adapter(struct aac_softstate *softs)
{
    int health;
    uint32_t status;
    int rval = AAC_IOP_RESET_FAILED;

    DBCALLED(softs, 1);

    ASSERT(softs->state & AAC_STATE_RESET);

    ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0);
    /* Disable interrupt */
    AAC_DISABLE_INTR(softs);

    health = aac_check_adapter_health(softs);
    if (health == -1) {
        ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
        goto finish;
    }
    if (health == 0) /* flush drives if possible */
        (void) aac_shutdown(softs);

    /* Execute IOP reset */
    if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0,
        &status)) != AACOK) {
        ddi_acc_handle_t acc = softs->comm_space_acc_handle;
        struct aac_fib *fibp;
        struct aac_pause_command *pc;

        if ((status & 0xf) == 0xf) {
            uint32_t wait_count;

            /*
             * Sunrise Lake has dual cores and we must drag the
             * other core with us to reset simultaneously. There
             * are 2 bits in the Inbound Reset Control and Status
             * Register (offset 0x38) of the Sunrise Lake to reset
             * the chip without clearing out the PCI configuration
             * info (COMMAND & BARS).
             */
            PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST);

            /*
             * We need to wait 5 seconds before accessing the MU
             * again: each 10000 iterations of drv_usecwait(100)
             * is 10000 * 100us = 1,000,000us = 1000ms = 1s.
             */
            wait_count = 5 * 10000;
            while (wait_count) {
                drv_usecwait(100); /* delay 100 microseconds */
                wait_count--;
            }
        } else {
            if (status == SRB_STATUS_INVALID_REQUEST)
                cmn_err(CE_WARN, "!IOP_RESET not supported");
            else /* probably timeout */
                cmn_err(CE_WARN, "!IOP_RESET failed");

            /* Unwind aac_shutdown() */
            fibp = softs->sync_slot.fibp;
            pc = (struct aac_pause_command *)&fibp->data[0];

            bzero(pc, sizeof (*pc));
            ddi_put32(acc, &pc->Command, VM_ContainerConfig);
            ddi_put32(acc, &pc->Type, CT_PAUSE_IO);
            ddi_put32(acc, &pc->Timeout, 1);
            ddi_put32(acc, &pc->Min, 1);
            ddi_put32(acc, &pc->NoRescan, 1);

            (void) aac_sync_fib(softs, ContainerCommand,
                AAC_FIB_SIZEOF(struct aac_pause_command));

            if (aac_check_adapter_health(softs) != 0)
                ddi_fm_service_impact(softs->devinfo_p,
                    DDI_SERVICE_LOST);
            else
                /*
                 * IOP reset not supported or IOP was not reset
                 */
                rval = AAC_IOP_RESET_ABNORMAL;
            goto finish;
        }
    }

    /*
     * Re-read and renegotiate the FIB parameters, as one of the actions
     * that can result from an IOP reset is the running of a new firmware
     * image.
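     *
     * NOTE: that is why aac_common_attach() is called again below; per
     * the Adaptec comment above, the init code is kept modular so it
     * can be reused here after the IOP reset as well as from the normal
     * attach path.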
4244 */ 4245 if (aac_common_attach(softs) != AACOK) 4246 goto finish; 4247 4248 rval = AAC_IOP_RESET_SUCCEED; 4249 4250 finish: 4251 AAC_ENABLE_INTR(softs); 4252 return (rval); 4253 } 4254 4255 static void 4256 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q, 4257 int throttle) 4258 { 4259 /* 4260 * If the bus is draining/quiesced, no changes to the throttles 4261 * are allowed. All throttles should have been set to 0. 4262 */ 4263 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains) 4264 return; 4265 dvp->throttle[q] = throttle; 4266 } 4267 4268 static void 4269 aac_hold_bus(struct aac_softstate *softs, int iocmds) 4270 { 4271 int i, q; 4272 4273 /* Hold bus by holding every device on the bus */ 4274 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4275 if (iocmds & (1 << q)) { 4276 softs->bus_throttle[q] = 0; 4277 for (i = 0; i < AAC_MAX_LD; i++) 4278 aac_set_throttle(softs, 4279 &softs->containers[i].dev, q, 0); 4280 for (i = 0; i < AAC_MAX_PD(softs); i++) 4281 aac_set_throttle(softs, 4282 &softs->nondasds[i].dev, q, 0); 4283 } 4284 } 4285 } 4286 4287 static void 4288 aac_unhold_bus(struct aac_softstate *softs, int iocmds) 4289 { 4290 int i, q; 4291 4292 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4293 if (iocmds & (1 << q)) { 4294 /* 4295 * Should not unhold AAC_IOCMD_ASYNC bus, if it has been 4296 * quiesced or being drained by possibly some quiesce 4297 * threads. 4298 */ 4299 if (q == AAC_CMDQ_ASYNC && ((softs->state & 4300 AAC_STATE_QUIESCED) || softs->ndrains)) 4301 continue; 4302 softs->bus_throttle[q] = softs->total_slots; 4303 for (i = 0; i < AAC_MAX_LD; i++) 4304 aac_set_throttle(softs, 4305 &softs->containers[i].dev, 4306 q, softs->total_slots); 4307 for (i = 0; i < AAC_MAX_PD(softs); i++) 4308 aac_set_throttle(softs, &softs->nondasds[i].dev, 4309 q, softs->total_slots); 4310 } 4311 } 4312 } 4313 4314 static int 4315 aac_do_reset(struct aac_softstate *softs) 4316 { 4317 int health; 4318 int rval; 4319 4320 softs->state |= AAC_STATE_RESET; 4321 health = aac_check_adapter_health(softs); 4322 4323 /* 4324 * Hold off new io commands and wait all outstanding io 4325 * commands to complete. 4326 */ 4327 if (health == 0) { 4328 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC]; 4329 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC]; 4330 4331 if (sync_cmds == 0 && async_cmds == 0) { 4332 rval = AAC_IOP_RESET_SUCCEED; 4333 goto finish; 4334 } 4335 /* 4336 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds 4337 * to complete the outstanding io commands 4338 */ 4339 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10; 4340 int (*intr_handler)(struct aac_softstate *); 4341 4342 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4343 /* 4344 * Poll the adapter by ourselves in case interrupt is disabled 4345 * and to avoid releasing the io_lock. 4346 */ 4347 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 
4348 aac_process_intr_new : aac_process_intr_old; 4349 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] || 4350 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) { 4351 drv_usecwait(100); 4352 (void) intr_handler(softs); 4353 timeout--; 4354 } 4355 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4356 4357 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 && 4358 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) { 4359 /* Cmds drained out */ 4360 rval = AAC_IOP_RESET_SUCCEED; 4361 goto finish; 4362 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds || 4363 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) { 4364 /* Cmds not drained out, adapter overloaded */ 4365 rval = AAC_IOP_RESET_ABNORMAL; 4366 goto finish; 4367 } 4368 } 4369 4370 /* 4371 * If a longer waiting time still can't drain any outstanding io 4372 * commands, do IOP reset. 4373 */ 4374 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED) 4375 softs->state |= AAC_STATE_DEAD; 4376 4377 finish: 4378 softs->state &= ~AAC_STATE_RESET; 4379 return (rval); 4380 } 4381 4382 static int 4383 aac_tran_reset(struct scsi_address *ap, int level) 4384 { 4385 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4386 int rval; 4387 4388 DBCALLED(softs, 1); 4389 4390 if (level != RESET_ALL) { 4391 cmn_err(CE_NOTE, "!reset target/lun not supported"); 4392 return (0); 4393 } 4394 4395 mutex_enter(&softs->io_lock); 4396 switch (rval = aac_do_reset(softs)) { 4397 case AAC_IOP_RESET_SUCCEED: 4398 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC, 4399 NULL, CMD_RESET); 4400 aac_start_waiting_io(softs); 4401 break; 4402 case AAC_IOP_RESET_FAILED: 4403 /* Abort IOCTL cmds when adapter is dead */ 4404 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET); 4405 break; 4406 case AAC_IOP_RESET_ABNORMAL: 4407 aac_start_waiting_io(softs); 4408 } 4409 mutex_exit(&softs->io_lock); 4410 4411 aac_drain_comp_q(softs); 4412 return (rval == 0); 4413 } 4414 4415 static int 4416 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 4417 { 4418 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4419 4420 DBCALLED(softs, 1); 4421 4422 mutex_enter(&softs->io_lock); 4423 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED); 4424 mutex_exit(&softs->io_lock); 4425 4426 aac_drain_comp_q(softs); 4427 return (1); 4428 } 4429 4430 void 4431 aac_free_dmamap(struct aac_cmd *acp) 4432 { 4433 /* Free dma mapping */ 4434 if (acp->flags & AAC_CMD_DMA_VALID) { 4435 ASSERT(acp->buf_dma_handle); 4436 (void) ddi_dma_unbind_handle(acp->buf_dma_handle); 4437 acp->flags &= ~AAC_CMD_DMA_VALID; 4438 } 4439 4440 if (acp->abp != NULL) { /* free non-aligned buf DMA */ 4441 ASSERT(acp->buf_dma_handle); 4442 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp) 4443 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr, 4444 (uint8_t *)acp->abp, acp->bp->b_bcount, 4445 DDI_DEV_AUTOINCR); 4446 ddi_dma_mem_free(&acp->abh); 4447 acp->abp = NULL; 4448 } 4449 4450 if (acp->buf_dma_handle) { 4451 ddi_dma_free_handle(&acp->buf_dma_handle); 4452 acp->buf_dma_handle = NULL; 4453 } 4454 } 4455 4456 static void 4457 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 4458 { 4459 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported", 4460 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd); 4461 aac_free_dmamap(acp); 4462 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0); 4463 aac_soft_callback(softs, acp); 4464 } 4465 4466 /* 4467 * Handle command to logical device 4468 */ 4469 static int 4470 aac_tran_start_ld(struct aac_softstate *softs, struct 
aac_cmd *acp) 4471 { 4472 struct aac_container *dvp; 4473 struct scsi_pkt *pkt; 4474 union scsi_cdb *cdbp; 4475 struct buf *bp; 4476 int rval; 4477 4478 dvp = (struct aac_container *)acp->dvp; 4479 pkt = acp->pkt; 4480 cdbp = (void *)pkt->pkt_cdbp; 4481 bp = acp->bp; 4482 4483 switch (cdbp->scc_cmd) { 4484 case SCMD_INQUIRY: /* inquiry */ 4485 aac_free_dmamap(acp); 4486 aac_inquiry(softs, pkt, cdbp, bp); 4487 aac_soft_callback(softs, acp); 4488 rval = TRAN_ACCEPT; 4489 break; 4490 4491 case SCMD_READ_CAPACITY: /* read capacity */ 4492 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4493 struct scsi_capacity cap; 4494 uint64_t last_lba; 4495 4496 /* check 64-bit LBA */ 4497 last_lba = dvp->size - 1; 4498 if (last_lba > 0xffffffffull) { 4499 cap.capacity = 0xfffffffful; 4500 } else { 4501 cap.capacity = BE_32(last_lba); 4502 } 4503 cap.lbasize = BE_32(AAC_SECTOR_SIZE); 4504 4505 aac_free_dmamap(acp); 4506 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4507 bp_mapin(bp); 4508 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8)); 4509 pkt->pkt_state |= STATE_XFERRED_DATA; 4510 } 4511 aac_soft_callback(softs, acp); 4512 rval = TRAN_ACCEPT; 4513 break; 4514 4515 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */ 4516 /* Check if containers need 64-bit LBA support */ 4517 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) { 4518 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4519 struct scsi_capacity_16 cap16; 4520 int cap_len = sizeof (struct scsi_capacity_16); 4521 4522 bzero(&cap16, cap_len); 4523 cap16.sc_capacity = BE_64(dvp->size - 1); 4524 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE); 4525 4526 aac_free_dmamap(acp); 4527 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4528 bp_mapin(bp); 4529 bcopy(&cap16, bp->b_un.b_addr, 4530 min(bp->b_bcount, cap_len)); 4531 pkt->pkt_state |= STATE_XFERRED_DATA; 4532 } 4533 aac_soft_callback(softs, acp); 4534 } else { 4535 aac_unknown_scmd(softs, acp); 4536 } 4537 rval = TRAN_ACCEPT; 4538 break; 4539 4540 case SCMD_READ_G4: /* read_16 */ 4541 case SCMD_WRITE_G4: /* write_16 */ 4542 if (softs->flags & AAC_FLAGS_RAW_IO) { 4543 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 4544 acp->blkno = ((uint64_t) \ 4545 GETG4ADDR(cdbp) << 32) | \ 4546 (uint32_t)GETG4ADDRTL(cdbp); 4547 goto do_io; 4548 } 4549 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported"); 4550 aac_unknown_scmd(softs, acp); 4551 rval = TRAN_ACCEPT; 4552 break; 4553 4554 case SCMD_READ: /* read_6 */ 4555 case SCMD_WRITE: /* write_6 */ 4556 acp->blkno = GETG0ADDR(cdbp); 4557 goto do_io; 4558 4559 case SCMD_READ_G5: /* read_12 */ 4560 case SCMD_WRITE_G5: /* write_12 */ 4561 acp->blkno = GETG5ADDR(cdbp); 4562 goto do_io; 4563 4564 case SCMD_READ_G1: /* read_10 */ 4565 case SCMD_WRITE_G1: /* write_10 */ 4566 acp->blkno = (uint32_t)GETG1ADDR(cdbp); 4567 do_io: 4568 if (acp->flags & AAC_CMD_DMA_VALID) { 4569 uint64_t cnt_size = dvp->size; 4570 4571 /* 4572 * If LBA > array size AND rawio, the 4573 * adapter may hang. So check it before 4574 * sending. 4575 * NOTE: (blkno + blkcnt) may overflow 4576 */ 4577 if ((acp->blkno < cnt_size) && 4578 ((acp->blkno + acp->bcount / 4579 AAC_BLK_SIZE) <= cnt_size)) { 4580 rval = aac_do_io(softs, acp); 4581 } else { 4582 /* 4583 * Request exceeds the capacity of disk, 4584 * set error block number to last LBA 4585 * + 1. 
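				 *
				 * The sense data built below reports
				 * ILLEGAL REQUEST with ASC/ASCQ 0x21/0x00
				 * (LOGICAL BLOCK ADDRESS OUT OF RANGE) and
				 * the container size as the error block
				 * number.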
4586 */ 4587 aac_set_arq_data(pkt, 4588 KEY_ILLEGAL_REQUEST, 0x21, 4589 0x00, cnt_size); 4590 aac_soft_callback(softs, acp); 4591 rval = TRAN_ACCEPT; 4592 } 4593 } else if (acp->bcount == 0) { 4594 /* For 0 length IO, just return ok */ 4595 aac_soft_callback(softs, acp); 4596 rval = TRAN_ACCEPT; 4597 } else { 4598 rval = TRAN_BADPKT; 4599 } 4600 break; 4601 4602 case SCMD_MODE_SENSE: /* mode_sense_6 */ 4603 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */ 4604 int capacity; 4605 4606 aac_free_dmamap(acp); 4607 if (dvp->size > 0xffffffffull) 4608 capacity = 0xfffffffful; /* 64-bit LBA */ 4609 else 4610 capacity = dvp->size; 4611 aac_mode_sense(softs, pkt, cdbp, bp, capacity); 4612 aac_soft_callback(softs, acp); 4613 rval = TRAN_ACCEPT; 4614 break; 4615 } 4616 4617 case SCMD_TEST_UNIT_READY: 4618 case SCMD_REQUEST_SENSE: 4619 case SCMD_FORMAT: 4620 case SCMD_START_STOP: 4621 aac_free_dmamap(acp); 4622 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4623 if (acp->flags & AAC_CMD_BUF_READ) { 4624 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4625 bp_mapin(bp); 4626 bzero(bp->b_un.b_addr, bp->b_bcount); 4627 } 4628 pkt->pkt_state |= STATE_XFERRED_DATA; 4629 } 4630 aac_soft_callback(softs, acp); 4631 rval = TRAN_ACCEPT; 4632 break; 4633 4634 case SCMD_SYNCHRONIZE_CACHE: 4635 acp->flags |= AAC_CMD_NTAG; 4636 acp->aac_cmd_fib = aac_cmd_fib_sync; 4637 acp->ac_comp = aac_synccache_complete; 4638 rval = aac_do_io(softs, acp); 4639 break; 4640 4641 case SCMD_DOORLOCK: 4642 aac_free_dmamap(acp); 4643 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0; 4644 aac_soft_callback(softs, acp); 4645 rval = TRAN_ACCEPT; 4646 break; 4647 4648 default: /* unknown command */ 4649 aac_unknown_scmd(softs, acp); 4650 rval = TRAN_ACCEPT; 4651 break; 4652 } 4653 4654 return (rval); 4655 } 4656 4657 static int 4658 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) 4659 { 4660 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4661 struct aac_cmd *acp = PKT2AC(pkt); 4662 struct aac_device *dvp = acp->dvp; 4663 int rval; 4664 4665 DBCALLED(softs, 2); 4666 4667 /* 4668 * Reinitialize some fields of ac and pkt; the packet may 4669 * have been resubmitted 4670 */ 4671 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \ 4672 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID; 4673 acp->timeout = acp->pkt->pkt_time; 4674 if (pkt->pkt_flags & FLAG_NOINTR) 4675 acp->flags |= AAC_CMD_NO_INTR; 4676 #ifdef DEBUG 4677 acp->fib_flags = AACDB_FLAGS_FIB_SCMD; 4678 #endif 4679 pkt->pkt_reason = CMD_CMPLT; 4680 pkt->pkt_state = 0; 4681 pkt->pkt_statistics = 0; 4682 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */ 4683 4684 if (acp->flags & AAC_CMD_DMA_VALID) { 4685 pkt->pkt_resid = acp->bcount; 4686 /* Consistent packets need to be sync'ed first */ 4687 if ((acp->flags & AAC_CMD_CONSISTENT) && 4688 (acp->flags & AAC_CMD_BUF_WRITE)) 4689 if (aac_dma_sync_ac(acp) != AACOK) { 4690 ddi_fm_service_impact(softs->devinfo_p, 4691 DDI_SERVICE_UNAFFECTED); 4692 return (TRAN_BADPKT); 4693 } 4694 } else { 4695 pkt->pkt_resid = 0; 4696 } 4697 4698 mutex_enter(&softs->io_lock); 4699 AACDB_PRINT_SCMD(softs, acp); 4700 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) && 4701 !(softs->state & AAC_STATE_DEAD)) { 4702 if (dvp->type == AAC_DEV_LD) { 4703 if (ap->a_lun == 0) 4704 rval = aac_tran_start_ld(softs, acp); 4705 else 4706 goto error; 4707 } else { 4708 rval = aac_do_io(softs, acp); 4709 } 4710 } else { 4711 error: 4712 #ifdef DEBUG 4713 if (!(softs->state & AAC_STATE_DEAD)) { 4714 AACDB_PRINT_TRAN(softs, 4715 
"Cannot send cmd to target t%dL%d: %s", 4716 ap->a_target, ap->a_lun, 4717 "target invalid"); 4718 } else { 4719 AACDB_PRINT(softs, CE_WARN, 4720 "Cannot send cmd to target t%dL%d: %s", 4721 ap->a_target, ap->a_lun, 4722 "adapter dead"); 4723 } 4724 #endif 4725 rval = TRAN_FATAL_ERROR; 4726 } 4727 mutex_exit(&softs->io_lock); 4728 return (rval); 4729 } 4730 4731 static int 4732 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom) 4733 { 4734 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4735 struct aac_device *dvp; 4736 int rval; 4737 4738 DBCALLED(softs, 2); 4739 4740 /* We don't allow inquiring about capabilities for other targets */ 4741 if (cap == NULL || whom == 0) { 4742 AACDB_PRINT(softs, CE_WARN, 4743 "GetCap> %s not supported: whom=%d", cap, whom); 4744 return (-1); 4745 } 4746 4747 mutex_enter(&softs->io_lock); 4748 dvp = AAC_DEV(softs, ap->a_target); 4749 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 4750 mutex_exit(&softs->io_lock); 4751 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap", 4752 ap->a_target, ap->a_lun); 4753 return (-1); 4754 } 4755 4756 switch (scsi_hba_lookup_capstr(cap)) { 4757 case SCSI_CAP_ARQ: /* auto request sense */ 4758 rval = 1; 4759 break; 4760 case SCSI_CAP_UNTAGGED_QING: 4761 case SCSI_CAP_TAGGED_QING: 4762 rval = 1; 4763 break; 4764 case SCSI_CAP_DMA_MAX: 4765 rval = softs->buf_dma_attr.dma_attr_maxxfer; 4766 break; 4767 default: 4768 rval = -1; 4769 break; 4770 } 4771 mutex_exit(&softs->io_lock); 4772 4773 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d", 4774 cap, ap->a_target, ap->a_lun, rval); 4775 return (rval); 4776 } 4777 4778 /*ARGSUSED*/ 4779 static int 4780 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 4781 { 4782 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4783 struct aac_device *dvp; 4784 int rval; 4785 4786 DBCALLED(softs, 2); 4787 4788 /* We don't allow inquiring about capabilities for other targets */ 4789 if (cap == NULL || whom == 0) { 4790 AACDB_PRINT(softs, CE_WARN, 4791 "SetCap> %s not supported: whom=%d", cap, whom); 4792 return (-1); 4793 } 4794 4795 mutex_enter(&softs->io_lock); 4796 dvp = AAC_DEV(softs, ap->a_target); 4797 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 4798 mutex_exit(&softs->io_lock); 4799 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap", 4800 ap->a_target, ap->a_lun); 4801 return (-1); 4802 } 4803 4804 switch (scsi_hba_lookup_capstr(cap)) { 4805 case SCSI_CAP_ARQ: 4806 /* Force auto request sense */ 4807 rval = (value == 1) ? 1 : 0; 4808 break; 4809 case SCSI_CAP_UNTAGGED_QING: 4810 case SCSI_CAP_TAGGED_QING: 4811 rval = (value == 1) ? 1 : 0; 4812 break; 4813 default: 4814 rval = -1; 4815 break; 4816 } 4817 mutex_exit(&softs->io_lock); 4818 4819 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d", 4820 cap, ap->a_target, ap->a_lun, value, rval); 4821 return (rval); 4822 } 4823 4824 static void 4825 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 4826 { 4827 struct aac_cmd *acp = PKT2AC(pkt); 4828 4829 DBCALLED(NULL, 2); 4830 4831 if (acp->sgt) { 4832 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 4833 acp->left_cookien); 4834 } 4835 aac_free_dmamap(acp); 4836 ASSERT(acp->slotp == NULL); 4837 scsi_hba_pkt_free(ap, pkt); 4838 } 4839 4840 int 4841 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp, 4842 struct buf *bp, int flags, int (*cb)(), caddr_t arg) 4843 { 4844 int kf = (cb == SLEEP_FUNC) ? 
KM_SLEEP : KM_NOSLEEP; 4845 uint_t oldcookiec; 4846 int bioerr; 4847 int rval; 4848 4849 oldcookiec = acp->left_cookien; 4850 4851 /* Move window to build s/g map */ 4852 if (acp->total_nwin > 0) { 4853 if (++acp->cur_win < acp->total_nwin) { 4854 off_t off; 4855 size_t len; 4856 4857 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win, 4858 &off, &len, &acp->cookie, &acp->left_cookien); 4859 if (rval == DDI_SUCCESS) 4860 goto get_dma_cookies; 4861 AACDB_PRINT(softs, CE_WARN, 4862 "ddi_dma_getwin() fail %d", rval); 4863 return (AACERR); 4864 } 4865 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer"); 4866 return (AACERR); 4867 } 4868 4869 /* We need to transfer data, so we alloc DMA resources for this pkt */ 4870 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) { 4871 uint_t dma_flags = 0; 4872 struct aac_sge *sge; 4873 4874 /* 4875 * We will still use this point to fake some 4876 * infomation in tran_start 4877 */ 4878 acp->bp = bp; 4879 4880 /* Set dma flags */ 4881 if (BUF_IS_READ(bp)) { 4882 dma_flags |= DDI_DMA_READ; 4883 acp->flags |= AAC_CMD_BUF_READ; 4884 } else { 4885 dma_flags |= DDI_DMA_WRITE; 4886 acp->flags |= AAC_CMD_BUF_WRITE; 4887 } 4888 if (flags & PKT_CONSISTENT) 4889 dma_flags |= DDI_DMA_CONSISTENT; 4890 if (flags & PKT_DMA_PARTIAL) 4891 dma_flags |= DDI_DMA_PARTIAL; 4892 4893 /* Alloc buf dma handle */ 4894 if (!acp->buf_dma_handle) { 4895 rval = ddi_dma_alloc_handle(softs->devinfo_p, 4896 &softs->buf_dma_attr, cb, arg, 4897 &acp->buf_dma_handle); 4898 if (rval != DDI_SUCCESS) { 4899 AACDB_PRINT(softs, CE_WARN, 4900 "Can't allocate DMA handle, errno=%d", 4901 rval); 4902 goto error_out; 4903 } 4904 } 4905 4906 /* Bind buf */ 4907 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) { 4908 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle, 4909 bp, dma_flags, cb, arg, &acp->cookie, 4910 &acp->left_cookien); 4911 } else { 4912 size_t bufsz; 4913 4914 AACDB_PRINT_TRAN(softs, 4915 "non-aligned buffer: addr=0x%p, cnt=%lu", 4916 (void *)bp->b_un.b_addr, bp->b_bcount); 4917 if (bp->b_flags & (B_PAGEIO|B_PHYS)) 4918 bp_mapin(bp); 4919 4920 rval = ddi_dma_mem_alloc(acp->buf_dma_handle, 4921 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN), 4922 &softs->acc_attr, DDI_DMA_STREAMING, 4923 cb, arg, &acp->abp, &bufsz, &acp->abh); 4924 4925 if (rval != DDI_SUCCESS) { 4926 AACDB_PRINT(softs, CE_NOTE, 4927 "Cannot alloc DMA to non-aligned buf"); 4928 bioerr = 0; 4929 goto error_out; 4930 } 4931 4932 if (acp->flags & AAC_CMD_BUF_WRITE) 4933 ddi_rep_put8(acp->abh, 4934 (uint8_t *)bp->b_un.b_addr, 4935 (uint8_t *)acp->abp, bp->b_bcount, 4936 DDI_DEV_AUTOINCR); 4937 4938 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle, 4939 NULL, acp->abp, bufsz, dma_flags, cb, arg, 4940 &acp->cookie, &acp->left_cookien); 4941 } 4942 4943 switch (rval) { 4944 case DDI_DMA_PARTIAL_MAP: 4945 if (ddi_dma_numwin(acp->buf_dma_handle, 4946 &acp->total_nwin) == DDI_FAILURE) { 4947 AACDB_PRINT(softs, CE_WARN, 4948 "Cannot get number of DMA windows"); 4949 bioerr = 0; 4950 goto error_out; 4951 } 4952 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 4953 acp->left_cookien); 4954 acp->cur_win = 0; 4955 break; 4956 4957 case DDI_DMA_MAPPED: 4958 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 4959 acp->left_cookien); 4960 acp->cur_win = 0; 4961 acp->total_nwin = 1; 4962 break; 4963 4964 case DDI_DMA_NORESOURCES: 4965 bioerr = 0; 4966 AACDB_PRINT(softs, CE_WARN, 4967 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES"); 4968 goto error_out; 4969 case DDI_DMA_BADATTR: 4970 case DDI_DMA_NOMAPPING: 4971 
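            /*
             * The buffer violates the DMA attributes or cannot be
             * mapped at all; fail the transfer with EFAULT via
             * bioerror().
             */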
            bioerr = EFAULT;
            AACDB_PRINT(softs, CE_WARN,
                "Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
            goto error_out;
        case DDI_DMA_TOOBIG:
            bioerr = EINVAL;
            AACDB_PRINT(softs, CE_WARN,
                "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
                bp->b_bcount);
            goto error_out;
        default:
            bioerr = EINVAL;
            AACDB_PRINT(softs, CE_WARN,
                "Cannot bind buf for DMA: %d", rval);
            goto error_out;
        }
        acp->flags |= AAC_CMD_DMA_VALID;

get_dma_cookies:
        ASSERT(acp->left_cookien > 0);
        if (acp->left_cookien > softs->aac_sg_tablesize) {
            AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d",
                acp->left_cookien);
            bioerr = EINVAL;
            goto error_out;
        }
        if (oldcookiec != acp->left_cookien && acp->sgt != NULL) {
            kmem_free(acp->sgt, sizeof (struct aac_sge) * \
                oldcookiec);
            acp->sgt = NULL;
        }
        if (acp->sgt == NULL) {
            acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \
                acp->left_cookien, kf);
            if (acp->sgt == NULL) {
                AACDB_PRINT(softs, CE_WARN,
                    "sgt kmem_alloc fail");
                bioerr = ENOMEM;
                goto error_out;
            }
        }

        sge = &acp->sgt[0];
        sge->bcount = acp->cookie.dmac_size;
        sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
        sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
        acp->bcount = acp->cookie.dmac_size;
        for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) {
            ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie);
            sge->bcount = acp->cookie.dmac_size;
            sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
            sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
            acp->bcount += acp->cookie.dmac_size;
        }

        /*
         * Note: The old DMA engine does not correctly honor the
         * dma_attr_maxxfer attribute, so we have to enforce the
         * limit ourselves.
         */
        if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) {
            AACDB_PRINT(softs, CE_NOTE,
                "large xfer size received %d\n", acp->bcount);
            bioerr = EINVAL;
            goto error_out;
        }

        acp->total_xfer += acp->bcount;

        if (acp->pkt) {
            /* Return remaining byte count */
            if (acp->total_xfer <= bp->b_bcount) {
                acp->pkt->pkt_resid = bp->b_bcount - \
                    acp->total_xfer;
            } else {
                /*
                 * The allocated DMA size is greater than the buf
                 * size of bp. This is caused by devices like
                 * tape. We have extra bytes allocated, but the
                 * packet residual has to stay correct.
                 */
                acp->pkt->pkt_resid = 0;
            }
            AACDB_PRINT_TRAN(softs,
                "bp=0x%p, xfered=%d/%d, resid=%d",
                (void *)bp->b_un.b_addr, (int)acp->total_xfer,
                (int)bp->b_bcount, (int)acp->pkt->pkt_resid);
        }
    }
    return (AACOK);

error_out:
    bioerror(bp, bioerr);
    return (AACERR);
}

static struct scsi_pkt *
aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
    struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
    struct aac_cmd *acp, *new_acp;

    DBCALLED(softs, 2);

    /* Allocate pkt */
    if (pkt == NULL) {
        int slen;

        /* Force auto request sense */
        slen = (statuslen > softs->slen) ?
statuslen : softs->slen; 5083 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen, 5084 slen, tgtlen, sizeof (struct aac_cmd), callback, arg); 5085 if (pkt == NULL) { 5086 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed"); 5087 return (NULL); 5088 } 5089 acp = new_acp = PKT2AC(pkt); 5090 acp->pkt = pkt; 5091 acp->cmdlen = cmdlen; 5092 5093 if (ap->a_target < AAC_MAX_LD) { 5094 acp->dvp = &softs->containers[ap->a_target].dev; 5095 acp->aac_cmd_fib = softs->aac_cmd_fib; 5096 acp->ac_comp = aac_ld_complete; 5097 } else { 5098 _NOTE(ASSUMING_PROTECTED(softs->nondasds)) 5099 5100 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev; 5101 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi; 5102 acp->ac_comp = aac_pd_complete; 5103 } 5104 } else { 5105 acp = PKT2AC(pkt); 5106 new_acp = NULL; 5107 } 5108 5109 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK) 5110 return (pkt); 5111 5112 if (new_acp) 5113 aac_tran_destroy_pkt(ap, pkt); 5114 return (NULL); 5115 } 5116 5117 /* 5118 * tran_sync_pkt(9E) - explicit DMA synchronization 5119 */ 5120 /*ARGSUSED*/ 5121 static void 5122 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5123 { 5124 struct aac_cmd *acp = PKT2AC(pkt); 5125 5126 DBCALLED(NULL, 2); 5127 5128 if (aac_dma_sync_ac(acp) != AACOK) 5129 ddi_fm_service_impact( 5130 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p, 5131 DDI_SERVICE_UNAFFECTED); 5132 } 5133 5134 /* 5135 * tran_dmafree(9E) - deallocate DMA resources allocated for command 5136 */ 5137 /*ARGSUSED*/ 5138 static void 5139 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 5140 { 5141 struct aac_cmd *acp = PKT2AC(pkt); 5142 5143 DBCALLED(NULL, 2); 5144 5145 aac_free_dmamap(acp); 5146 } 5147 5148 static int 5149 aac_do_quiesce(struct aac_softstate *softs) 5150 { 5151 aac_hold_bus(softs, AAC_IOCMD_ASYNC); 5152 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 5153 aac_start_drain(softs); 5154 do { 5155 if (cv_wait_sig(&softs->drain_cv, 5156 &softs->io_lock) == 0) { 5157 /* Quiesce has been interrupted */ 5158 aac_stop_drain(softs); 5159 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5160 aac_start_waiting_io(softs); 5161 return (AACERR); 5162 } 5163 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]); 5164 aac_stop_drain(softs); 5165 } 5166 5167 softs->state |= AAC_STATE_QUIESCED; 5168 return (AACOK); 5169 } 5170 5171 static int 5172 aac_tran_quiesce(dev_info_t *dip) 5173 { 5174 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5175 int rval; 5176 5177 DBCALLED(softs, 1); 5178 5179 mutex_enter(&softs->io_lock); 5180 if (aac_do_quiesce(softs) == AACOK) 5181 rval = 0; 5182 else 5183 rval = 1; 5184 mutex_exit(&softs->io_lock); 5185 return (rval); 5186 } 5187 5188 static int 5189 aac_do_unquiesce(struct aac_softstate *softs) 5190 { 5191 softs->state &= ~AAC_STATE_QUIESCED; 5192 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5193 5194 aac_start_waiting_io(softs); 5195 return (AACOK); 5196 } 5197 5198 static int 5199 aac_tran_unquiesce(dev_info_t *dip) 5200 { 5201 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5202 int rval; 5203 5204 DBCALLED(softs, 1); 5205 5206 mutex_enter(&softs->io_lock); 5207 if (aac_do_unquiesce(softs) == AACOK) 5208 rval = 0; 5209 else 5210 rval = 1; 5211 mutex_exit(&softs->io_lock); 5212 return (rval); 5213 } 5214 5215 static int 5216 aac_hba_setup(struct aac_softstate *softs) 5217 { 5218 scsi_hba_tran_t *hba_tran; 5219 int rval; 5220 5221 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP); 5222 if (hba_tran == NULL) 5223 return (AACERR); 5224 hba_tran->tran_hba_private = softs; 
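    /*
     * SCSA entry points of this HBA driver; scsi_hba_attach_setup()
     * below registers them with the SCSA framework.
     */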
5225 hba_tran->tran_tgt_init = aac_tran_tgt_init; 5226 hba_tran->tran_tgt_free = aac_tran_tgt_free; 5227 hba_tran->tran_tgt_probe = scsi_hba_probe; 5228 hba_tran->tran_start = aac_tran_start; 5229 hba_tran->tran_getcap = aac_tran_getcap; 5230 hba_tran->tran_setcap = aac_tran_setcap; 5231 hba_tran->tran_init_pkt = aac_tran_init_pkt; 5232 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt; 5233 hba_tran->tran_reset = aac_tran_reset; 5234 hba_tran->tran_abort = aac_tran_abort; 5235 hba_tran->tran_sync_pkt = aac_tran_sync_pkt; 5236 hba_tran->tran_dmafree = aac_tran_dmafree; 5237 hba_tran->tran_quiesce = aac_tran_quiesce; 5238 hba_tran->tran_unquiesce = aac_tran_unquiesce; 5239 hba_tran->tran_bus_config = aac_tran_bus_config; 5240 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr, 5241 hba_tran, 0); 5242 if (rval != DDI_SUCCESS) { 5243 scsi_hba_tran_free(hba_tran); 5244 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed"); 5245 return (AACERR); 5246 } 5247 5248 softs->hba_tran = hba_tran; 5249 return (AACOK); 5250 } 5251 5252 /* 5253 * FIB setup operations 5254 */ 5255 5256 /* 5257 * Init FIB header 5258 */ 5259 static void 5260 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_slot *slotp, 5261 uint16_t cmd, uint16_t fib_size) 5262 { 5263 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5264 struct aac_fib *fibp = slotp->fibp; 5265 uint32_t xfer_state; 5266 5267 xfer_state = 5268 AAC_FIBSTATE_HOSTOWNED | 5269 AAC_FIBSTATE_INITIALISED | 5270 AAC_FIBSTATE_EMPTY | 5271 AAC_FIBSTATE_FROMHOST | 5272 AAC_FIBSTATE_REXPECTED | 5273 AAC_FIBSTATE_NORM; 5274 if (slotp->acp && !(slotp->acp->flags & AAC_CMD_SYNC)) { 5275 xfer_state |= 5276 AAC_FIBSTATE_ASYNC | 5277 AAC_FIBSTATE_FAST_RESPONSE /* enable fast io */; 5278 ddi_put16(acc, &fibp->Header.SenderSize, 5279 softs->aac_max_fib_size); 5280 } else { 5281 ddi_put16(acc, &fibp->Header.SenderSize, AAC_FIB_SIZE); 5282 } 5283 5284 ddi_put32(acc, &fibp->Header.XferState, xfer_state); 5285 ddi_put16(acc, &fibp->Header.Command, cmd); 5286 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB); 5287 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */ 5288 ddi_put16(acc, &fibp->Header.Size, fib_size); 5289 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2)); 5290 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5291 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */ 5292 } 5293 5294 /* 5295 * Init FIB for raw IO command 5296 */ 5297 static void 5298 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp) 5299 { 5300 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5301 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0]; 5302 struct aac_sg_entryraw *sgp; 5303 struct aac_sge *sge; 5304 5305 /* Calculate FIB size */ 5306 acp->fib_size = sizeof (struct aac_fib_header) + \ 5307 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \ 5308 sizeof (struct aac_sg_entryraw); 5309 5310 aac_cmd_fib_header(softs, acp->slotp, RawIo, acp->fib_size); 5311 5312 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 
1 : 0); 5313 ddi_put16(acc, &io->BpTotal, 0); 5314 ddi_put16(acc, &io->BpComplete, 0); 5315 5316 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno)); 5317 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno)); 5318 ddi_put16(acc, &io->ContainerId, 5319 ((struct aac_container *)acp->dvp)->cid); 5320 5321 /* Fill SG table */ 5322 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien); 5323 ddi_put32(acc, &io->ByteCount, acp->bcount); 5324 5325 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0]; 5326 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5327 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5328 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5329 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5330 sgp->Next = 0; 5331 sgp->Prev = 0; 5332 sgp->Flags = 0; 5333 } 5334 } 5335 5336 /* Init FIB for 64-bit block IO command */ 5337 static void 5338 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp) 5339 { 5340 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5341 struct aac_blockread64 *br = (struct aac_blockread64 *) \ 5342 &acp->slotp->fibp->data[0]; 5343 struct aac_sg_entry64 *sgp; 5344 struct aac_sge *sge; 5345 5346 acp->fib_size = sizeof (struct aac_fib_header) + \ 5347 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \ 5348 sizeof (struct aac_sg_entry64); 5349 5350 aac_cmd_fib_header(softs, acp->slotp, ContainerCommand64, 5351 acp->fib_size); 5352 5353 /* 5354 * The definitions for aac_blockread64 and aac_blockwrite64 5355 * are the same. 5356 */ 5357 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5358 ddi_put16(acc, &br->ContainerId, 5359 ((struct aac_container *)acp->dvp)->cid); 5360 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ? 
5361 VM_CtHostRead64 : VM_CtHostWrite64); 5362 ddi_put16(acc, &br->Pad, 0); 5363 ddi_put16(acc, &br->Flags, 0); 5364 5365 /* Fill SG table */ 5366 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien); 5367 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE); 5368 5369 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0]; 5370 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5371 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5372 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5373 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5374 } 5375 } 5376 5377 /* Init FIB for block IO command */ 5378 static void 5379 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp) 5380 { 5381 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5382 struct aac_blockread *br = (struct aac_blockread *) \ 5383 &acp->slotp->fibp->data[0]; 5384 struct aac_sg_entry *sgp; 5385 struct aac_sge *sge = &acp->sgt[0]; 5386 5387 if (acp->flags & AAC_CMD_BUF_READ) { 5388 acp->fib_size = sizeof (struct aac_fib_header) + \ 5389 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \ 5390 sizeof (struct aac_sg_entry); 5391 5392 ddi_put32(acc, &br->Command, VM_CtBlockRead); 5393 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien); 5394 sgp = &br->SgMap.SgEntry[0]; 5395 } else { 5396 struct aac_blockwrite *bw = (struct aac_blockwrite *)br; 5397 5398 acp->fib_size = sizeof (struct aac_fib_header) + \ 5399 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \ 5400 sizeof (struct aac_sg_entry); 5401 5402 ddi_put32(acc, &bw->Command, VM_CtBlockWrite); 5403 ddi_put32(acc, &bw->Stable, CUNSTABLE); 5404 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien); 5405 sgp = &bw->SgMap.SgEntry[0]; 5406 } 5407 aac_cmd_fib_header(softs, acp->slotp, ContainerCommand, acp->fib_size); 5408 5409 /* 5410 * aac_blockread and aac_blockwrite have the similar 5411 * structure head, so use br for bw here 5412 */ 5413 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5414 ddi_put32(acc, &br->ContainerId, 5415 ((struct aac_container *)acp->dvp)->cid); 5416 ddi_put32(acc, &br->ByteCount, acp->bcount); 5417 5418 /* Fill SG table */ 5419 for (sge = &acp->sgt[0]; 5420 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5421 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5422 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5423 } 5424 } 5425 5426 /*ARGSUSED*/ 5427 void 5428 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp) 5429 { 5430 struct aac_slot *slotp = acp->slotp; 5431 struct aac_fib *fibp = slotp->fibp; 5432 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5433 5434 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp, 5435 acp->fib_size, /* only copy data of needed length */ 5436 DDI_DEV_AUTOINCR); 5437 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5438 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2); 5439 } 5440 5441 static void 5442 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp) 5443 { 5444 struct aac_slot *slotp = acp->slotp; 5445 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5446 struct aac_synchronize_command *sync = 5447 (struct aac_synchronize_command *)&slotp->fibp->data[0]; 5448 5449 acp->fib_size = sizeof (struct aac_fib_header) + \ 5450 sizeof (struct aac_synchronize_command); 5451 5452 aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size); 5453 ddi_put32(acc, &sync->Command, VM_ContainerConfig); 5454 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE); 5455 
ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid); 5456 ddi_put32(acc, &sync->Count, 5457 sizeof (((struct aac_synchronize_reply *)0)->Data)); 5458 } 5459 5460 /* 5461 * Init FIB for pass-through SCMD 5462 */ 5463 static void 5464 aac_cmd_fib_srb(struct aac_cmd *acp) 5465 { 5466 struct aac_slot *slotp = acp->slotp; 5467 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5468 struct aac_srb *srb = (struct aac_srb *)&slotp->fibp->data[0]; 5469 uint8_t *cdb; 5470 5471 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi); 5472 ddi_put32(acc, &srb->retry_limit, 0); 5473 ddi_put32(acc, &srb->cdb_size, acp->cmdlen); 5474 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */ 5475 if (acp->fibp == NULL) { 5476 if (acp->flags & AAC_CMD_BUF_READ) 5477 ddi_put32(acc, &srb->flags, SRB_DataIn); 5478 else if (acp->flags & AAC_CMD_BUF_WRITE) 5479 ddi_put32(acc, &srb->flags, SRB_DataOut); 5480 ddi_put32(acc, &srb->channel, 5481 ((struct aac_nondasd *)acp->dvp)->bus); 5482 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid); 5483 ddi_put32(acc, &srb->lun, 0); 5484 cdb = acp->pkt->pkt_cdbp; 5485 } else { 5486 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0]; 5487 5488 ddi_put32(acc, &srb->flags, srb0->flags); 5489 ddi_put32(acc, &srb->channel, srb0->channel); 5490 ddi_put32(acc, &srb->id, srb0->id); 5491 ddi_put32(acc, &srb->lun, srb0->lun); 5492 cdb = srb0->cdb; 5493 } 5494 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR); 5495 } 5496 5497 static void 5498 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp) 5499 { 5500 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5501 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5502 struct aac_sg_entry *sgp; 5503 struct aac_sge *sge; 5504 5505 acp->fib_size = sizeof (struct aac_fib_header) + \ 5506 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5507 acp->left_cookien * sizeof (struct aac_sg_entry); 5508 5509 /* Fill FIB and SRB headers, and copy cdb */ 5510 aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommand, acp->fib_size); 5511 aac_cmd_fib_srb(acp); 5512 5513 /* Fill SG table */ 5514 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5515 ddi_put32(acc, &srb->count, acp->bcount); 5516 5517 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0]; 5518 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5519 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5520 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5521 } 5522 } 5523 5524 static void 5525 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp) 5526 { 5527 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5528 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5529 struct aac_sg_entry64 *sgp; 5530 struct aac_sge *sge; 5531 5532 acp->fib_size = sizeof (struct aac_fib_header) + \ 5533 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5534 acp->left_cookien * sizeof (struct aac_sg_entry64); 5535 5536 /* Fill FIB and SRB headers, and copy cdb */ 5537 aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommandU64, 5538 acp->fib_size); 5539 aac_cmd_fib_srb(acp); 5540 5541 /* Fill SG table */ 5542 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5543 ddi_put32(acc, &srb->count, acp->bcount); 5544 5545 for (sge = &acp->sgt[0], 5546 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0]; 5547 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5548 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5549 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 
        ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
    }
}

static int
aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
{
    struct aac_slot *slotp;

    if (slotp = aac_get_slot(softs)) {
        acp->slotp = slotp;
        slotp->acp = acp;
        acp->aac_cmd_fib(softs, acp);
        (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0,
            DDI_DMA_SYNC_FORDEV);
        return (AACOK);
    }
    return (AACERR);
}

static int
aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp)
{
    struct aac_device *dvp = acp->dvp;
    int q = AAC_CMDQ(acp);

    if (dvp) {
        if (dvp->ncmds[q] < dvp->throttle[q]) {
            if (!(acp->flags & AAC_CMD_NTAG) ||
                dvp->ncmds[q] == 0) {
do_bind:
                return (aac_cmd_slot_bind(softs, acp));
            }
            ASSERT(q == AAC_CMDQ_ASYNC);
            aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC,
                AAC_THROTTLE_DRAIN);
        }
    } else {
        if (softs->bus_ncmds[q] < softs->bus_throttle[q])
            goto do_bind;
    }
    return (AACERR);
}

static void
aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp)
{
    struct aac_slot *slotp = acp->slotp;
    int q = AAC_CMDQ(acp);
    int rval;

    /* Set ac and pkt */
    if (acp->pkt) { /* ac from ioctl has no pkt */
        acp->pkt->pkt_state |=
            STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
    }
    if (acp->timeout) /* 0 indicates no timeout */
        acp->timeout += aac_timebase + aac_tick;

    if (acp->dvp)
        acp->dvp->ncmds[q]++;
    softs->bus_ncmds[q]++;
    aac_cmd_enqueue(&softs->q_busy, acp);

    AACDB_PRINT_FIB(softs, slotp);

    if (softs->flags & AAC_FLAGS_NEW_COMM) {
        rval = aac_send_command(softs, slotp);
    } else {
        /*
         * If the FIB cannot be enqueued, the adapter is in an
         * abnormal state and there will be no interrupt to us.
         */
        rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q,
            slotp->fib_phyaddr, acp->fib_size);
    }

    if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS)
        ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);

    /*
     * NOTE: We send commands only when slots are available, so we
     * should never reach here.
     */
    if (rval != AACOK) {
        AACDB_PRINT(softs, CE_NOTE, "SCMD send failed");
        if (acp->pkt) {
            acp->pkt->pkt_state &= ~STATE_SENT_CMD;
            aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0);
        }
        aac_end_io(softs, acp);
        if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB)))
            ddi_trigger_softintr(softs->softint_id);
    }
}

static void
aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q)
{
    struct aac_cmd *acp, *next_acp;

    /* Serve as many waiting I/Os as possible */
    for (acp = q->q_head; acp; acp = next_acp) {
        next_acp = acp->next;
        if (aac_bind_io(softs, acp) == AACOK) {
            aac_cmd_delete(q, acp);
            aac_start_io(softs, acp);
        }
        if (softs->free_io_slot_head == NULL)
            break;
    }
}

static void
aac_start_waiting_io(struct aac_softstate *softs)
{
    /*
     * Sync FIB I/O is served before async FIB I/O so that I/O requests
     * sent by interactive userland commands get responded to as soon
     * as possible.
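     * For example, a FIB issued on behalf of such a userland command
     * waits on the sync queue and is bound to a free slot ahead of any
     * async disk I/O still sitting on the async wait queue.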
5669 */ 5670 if (softs->q_wait[AAC_CMDQ_SYNC].q_head) 5671 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]); 5672 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head) 5673 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]); 5674 } 5675 5676 static void 5677 aac_drain_comp_q(struct aac_softstate *softs) 5678 { 5679 struct aac_cmd *acp; 5680 struct scsi_pkt *pkt; 5681 5682 /*CONSTCOND*/ 5683 while (1) { 5684 mutex_enter(&softs->q_comp_mutex); 5685 acp = aac_cmd_dequeue(&softs->q_comp); 5686 mutex_exit(&softs->q_comp_mutex); 5687 if (acp != NULL) { 5688 ASSERT(acp->pkt != NULL); 5689 pkt = acp->pkt; 5690 5691 if (pkt->pkt_reason == CMD_CMPLT) { 5692 /* 5693 * Consistent packets need to be sync'ed first 5694 */ 5695 if ((acp->flags & AAC_CMD_CONSISTENT) && 5696 (acp->flags & AAC_CMD_BUF_READ)) { 5697 if (aac_dma_sync_ac(acp) != AACOK) { 5698 ddi_fm_service_impact( 5699 softs->devinfo_p, 5700 DDI_SERVICE_UNAFFECTED); 5701 pkt->pkt_reason = CMD_TRAN_ERR; 5702 pkt->pkt_statistics = 0; 5703 } 5704 } 5705 if ((aac_check_acc_handle(softs-> \ 5706 comm_space_acc_handle) != DDI_SUCCESS) || 5707 (aac_check_acc_handle(softs-> \ 5708 pci_mem_handle) != DDI_SUCCESS)) { 5709 ddi_fm_service_impact(softs->devinfo_p, 5710 DDI_SERVICE_UNAFFECTED); 5711 ddi_fm_acc_err_clear(softs-> \ 5712 pci_mem_handle, DDI_FME_VER0); 5713 pkt->pkt_reason = CMD_TRAN_ERR; 5714 pkt->pkt_statistics = 0; 5715 } 5716 if (aac_check_dma_handle(softs-> \ 5717 comm_space_dma_handle) != DDI_SUCCESS) { 5718 ddi_fm_service_impact(softs->devinfo_p, 5719 DDI_SERVICE_UNAFFECTED); 5720 pkt->pkt_reason = CMD_TRAN_ERR; 5721 pkt->pkt_statistics = 0; 5722 } 5723 } 5724 (*pkt->pkt_comp)(pkt); 5725 } else { 5726 break; 5727 } 5728 } 5729 } 5730 5731 static int 5732 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp) 5733 { 5734 size_t rlen; 5735 ddi_dma_cookie_t cookie; 5736 uint_t cookien; 5737 5738 /* Allocate FIB dma resource */ 5739 if (ddi_dma_alloc_handle( 5740 softs->devinfo_p, 5741 &softs->addr_dma_attr, 5742 DDI_DMA_SLEEP, 5743 NULL, 5744 &slotp->fib_dma_handle) != DDI_SUCCESS) { 5745 AACDB_PRINT(softs, CE_WARN, 5746 "Cannot alloc dma handle for slot fib area"); 5747 goto error; 5748 } 5749 if (ddi_dma_mem_alloc( 5750 slotp->fib_dma_handle, 5751 softs->aac_max_fib_size, 5752 &softs->acc_attr, 5753 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 5754 DDI_DMA_SLEEP, 5755 NULL, 5756 (caddr_t *)&slotp->fibp, 5757 &rlen, 5758 &slotp->fib_acc_handle) != DDI_SUCCESS) { 5759 AACDB_PRINT(softs, CE_WARN, 5760 "Cannot alloc mem for slot fib area"); 5761 goto error; 5762 } 5763 if (ddi_dma_addr_bind_handle( 5764 slotp->fib_dma_handle, 5765 NULL, 5766 (caddr_t)slotp->fibp, 5767 softs->aac_max_fib_size, 5768 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 5769 DDI_DMA_SLEEP, 5770 NULL, 5771 &cookie, 5772 &cookien) != DDI_DMA_MAPPED) { 5773 AACDB_PRINT(softs, CE_WARN, 5774 "dma bind failed for slot fib area"); 5775 goto error; 5776 } 5777 5778 /* Check dma handles allocated in fib attach */ 5779 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) { 5780 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 5781 goto error; 5782 } 5783 5784 /* Check acc handles allocated in fib attach */ 5785 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) { 5786 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 5787 goto error; 5788 } 5789 5790 slotp->fib_phyaddr = cookie.dmac_laddress; 5791 return (AACOK); 5792 5793 error: 5794 if (slotp->fib_acc_handle) { 5795 ddi_dma_mem_free(&slotp->fib_acc_handle); 5796 slotp->fib_acc_handle = NULL; 5797 
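        /*
         * ddi_dma_mem_free() above released both the FIB memory and its
         * access handle; only the DMA handle below may still need to be
         * freed.
         */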
} 5798 if (slotp->fib_dma_handle) { 5799 ddi_dma_free_handle(&slotp->fib_dma_handle); 5800 slotp->fib_dma_handle = NULL; 5801 } 5802 return (AACERR); 5803 } 5804 5805 static void 5806 aac_free_fib(struct aac_slot *slotp) 5807 { 5808 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle); 5809 ddi_dma_mem_free(&slotp->fib_acc_handle); 5810 slotp->fib_acc_handle = NULL; 5811 ddi_dma_free_handle(&slotp->fib_dma_handle); 5812 slotp->fib_dma_handle = NULL; 5813 slotp->fib_phyaddr = 0; 5814 } 5815 5816 static void 5817 aac_alloc_fibs(struct aac_softstate *softs) 5818 { 5819 int i; 5820 struct aac_slot *slotp; 5821 5822 for (i = 0; i < softs->total_slots && 5823 softs->total_fibs < softs->total_slots; i++) { 5824 slotp = &(softs->io_slot[i]); 5825 if (slotp->fib_phyaddr) 5826 continue; 5827 if (aac_alloc_fib(softs, slotp) != AACOK) 5828 break; 5829 5830 /* Insert the slot to the free slot list */ 5831 aac_release_slot(softs, slotp); 5832 softs->total_fibs++; 5833 } 5834 } 5835 5836 static void 5837 aac_destroy_fibs(struct aac_softstate *softs) 5838 { 5839 struct aac_slot *slotp; 5840 5841 while ((slotp = softs->free_io_slot_head) != NULL) { 5842 ASSERT(slotp->fib_phyaddr); 5843 softs->free_io_slot_head = slotp->next; 5844 aac_free_fib(slotp); 5845 ASSERT(slotp->index == (slotp - softs->io_slot)); 5846 softs->total_fibs--; 5847 } 5848 ASSERT(softs->total_fibs == 0); 5849 } 5850 5851 static int 5852 aac_create_slots(struct aac_softstate *softs) 5853 { 5854 int i; 5855 5856 softs->total_slots = softs->aac_max_fibs; 5857 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \ 5858 softs->total_slots, KM_SLEEP); 5859 if (softs->io_slot == NULL) { 5860 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot"); 5861 return (AACERR); 5862 } 5863 for (i = 0; i < softs->total_slots; i++) 5864 softs->io_slot[i].index = i; 5865 softs->free_io_slot_head = NULL; 5866 softs->total_fibs = 0; 5867 return (AACOK); 5868 } 5869 5870 static void 5871 aac_destroy_slots(struct aac_softstate *softs) 5872 { 5873 ASSERT(softs->free_io_slot_head == NULL); 5874 5875 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \ 5876 softs->total_slots); 5877 softs->io_slot = NULL; 5878 softs->total_slots = 0; 5879 } 5880 5881 struct aac_slot * 5882 aac_get_slot(struct aac_softstate *softs) 5883 { 5884 struct aac_slot *slotp; 5885 5886 if ((slotp = softs->free_io_slot_head) != NULL) { 5887 softs->free_io_slot_head = slotp->next; 5888 slotp->next = NULL; 5889 } 5890 return (slotp); 5891 } 5892 5893 static void 5894 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp) 5895 { 5896 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots)); 5897 ASSERT(slotp == &softs->io_slot[slotp->index]); 5898 5899 slotp->acp = NULL; 5900 slotp->next = softs->free_io_slot_head; 5901 softs->free_io_slot_head = slotp; 5902 } 5903 5904 int 5905 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp) 5906 { 5907 if (aac_bind_io(softs, acp) == AACOK) 5908 aac_start_io(softs, acp); 5909 else 5910 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp); 5911 5912 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) 5913 return (TRAN_ACCEPT); 5914 /* 5915 * Because sync FIB is always 512 bytes and used for critical 5916 * functions, async FIB is used for poll IO. 
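     * Commands flagged AAC_CMD_NO_INTR are polled to completion in
     * aac_do_poll_io(); other blocking callers sleep on softs->event in
     * aac_do_sync_io() until the command completes or is aborted.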
     */
    if (acp->flags & AAC_CMD_NO_INTR) {
        if (aac_do_poll_io(softs, acp) == AACOK)
            return (TRAN_ACCEPT);
    } else {
        if (aac_do_sync_io(softs, acp) == AACOK)
            return (TRAN_ACCEPT);
    }
    return (TRAN_BADPKT);
}

static int
aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp)
{
    int (*intr_handler)(struct aac_softstate *);

    /*
     * Interrupt is disabled, we have to poll the adapter by ourselves.
     */
    intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
        aac_process_intr_new : aac_process_intr_old;
    while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) {
        int i = AAC_POLL_TIME * 1000;

        AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i);
        if (i == 0)
            aac_cmd_timeout(softs, acp);
    }

    ddi_trigger_softintr(softs->softint_id);

    if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR))
        return (AACOK);
    return (AACERR);
}

static int
aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp)
{
    ASSERT(softs && acp);

    while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
        cv_wait(&softs->event, &softs->io_lock);

    if (acp->flags & AAC_CMD_CMPLT)
        return (AACOK);
    return (AACERR);
}

static int
aac_dma_sync_ac(struct aac_cmd *acp)
{
    if (acp->buf_dma_handle) {
        if (acp->flags & AAC_CMD_BUF_WRITE) {
            if (acp->abp != NULL)
                ddi_rep_put8(acp->abh,
                    (uint8_t *)acp->bp->b_un.b_addr,
                    (uint8_t *)acp->abp, acp->bp->b_bcount,
                    DDI_DEV_AUTOINCR);
            (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
                DDI_DMA_SYNC_FORDEV);
        } else {
            (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
                DDI_DMA_SYNC_FORCPU);
            if (aac_check_dma_handle(acp->buf_dma_handle) !=
                DDI_SUCCESS)
                return (AACERR);
            if (acp->abp != NULL)
                ddi_rep_get8(acp->abh,
                    (uint8_t *)acp->bp->b_un.b_addr,
                    (uint8_t *)acp->abp, acp->bp->b_bcount,
                    DDI_DEV_AUTOINCR);
        }
    }
    return (AACOK);
}

/*
 * The following function comes from Adaptec:
 *
 * When the driver sees a particular event that means containers have
 * changed, it will rescan containers. However, a change may not be
 * complete until some other event is received. For example, creating or
 * deleting an array will incur as many as six AifEnConfigChange events,
 * which would generate six container rescans. To diminish rescans, the
 * driver sets a flag to wait for another particular event; when it sees
 * that event come in, it does the rescan.
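 *
 * A typical (simplified) sequence looks like:
 *   AifEnAddContainer    -> devcfg_wait_on = AifEnConfigChange
 *   AifEnConfigChange    -> devcfg_needed = 1
 *   devcfg_needed        -> devcfg_wait_on cleared, aac_probe_containers()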
6003 */ 6004 static int 6005 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp) 6006 { 6007 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 6008 uint16_t fib_command; 6009 struct aac_aif_command *aif; 6010 int en_type; 6011 int devcfg_needed; 6012 int current, next; 6013 6014 fib_command = LE_16(fibp->Header.Command); 6015 if (fib_command != AifRequest) { 6016 cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x", 6017 fib_command); 6018 return (AACERR); 6019 } 6020 6021 /* Update internal container state */ 6022 aif = (struct aac_aif_command *)&fibp->data[0]; 6023 6024 AACDB_PRINT_AIF(softs, aif); 6025 devcfg_needed = 0; 6026 en_type = LE_32((uint32_t)aif->data.EN.type); 6027 6028 switch (LE_32((uint32_t)aif->command)) { 6029 case AifCmdDriverNotify: { 6030 int cid = LE_32(aif->data.EN.data.ECC.container[0]); 6031 6032 switch (en_type) { 6033 case AifDenMorphComplete: 6034 case AifDenVolumeExtendComplete: 6035 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev)) 6036 softs->devcfg_wait_on = AifEnConfigChange; 6037 break; 6038 } 6039 if (softs->devcfg_wait_on == en_type) 6040 devcfg_needed = 1; 6041 break; 6042 } 6043 6044 case AifCmdEventNotify: 6045 switch (en_type) { 6046 case AifEnAddContainer: 6047 case AifEnDeleteContainer: 6048 softs->devcfg_wait_on = AifEnConfigChange; 6049 break; 6050 case AifEnContainerChange: 6051 if (!softs->devcfg_wait_on) 6052 softs->devcfg_wait_on = AifEnConfigChange; 6053 break; 6054 case AifEnContainerEvent: 6055 if (ddi_get32(acc, &aif-> \ 6056 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE) 6057 devcfg_needed = 1; 6058 break; 6059 } 6060 if (softs->devcfg_wait_on == en_type) 6061 devcfg_needed = 1; 6062 break; 6063 6064 case AifCmdJobProgress: 6065 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) { 6066 int pr_status; 6067 uint32_t pr_ftick, pr_ctick; 6068 6069 pr_status = LE_32((uint32_t)aif->data.PR[0].status); 6070 pr_ctick = LE_32(aif->data.PR[0].currentTick); 6071 pr_ftick = LE_32(aif->data.PR[0].finalTick); 6072 6073 if ((pr_ctick == pr_ftick) || 6074 (pr_status == AifJobStsSuccess)) 6075 softs->devcfg_wait_on = AifEnContainerChange; 6076 else if ((pr_ctick == 0) && 6077 (pr_status == AifJobStsRunning)) 6078 softs->devcfg_wait_on = AifEnContainerChange; 6079 } 6080 break; 6081 } 6082 6083 if (devcfg_needed) { 6084 softs->devcfg_wait_on = 0; 6085 (void) aac_probe_containers(softs); 6086 } 6087 6088 /* Modify AIF contexts */ 6089 current = softs->aifq_idx; 6090 next = (current + 1) % AAC_AIFQ_LENGTH; 6091 if (next == 0) { 6092 struct aac_fib_context *ctx; 6093 6094 softs->aifq_wrap = 1; 6095 for (ctx = softs->fibctx; ctx; ctx = ctx->next) { 6096 if (next == ctx->ctx_idx) { 6097 ctx->ctx_filled = 1; 6098 } else if (current == ctx->ctx_idx && ctx->ctx_filled) { 6099 ctx->ctx_idx = next; 6100 AACDB_PRINT(softs, CE_NOTE, 6101 "-- AIF queue(%x) overrun", ctx->unique); 6102 } 6103 } 6104 } 6105 softs->aifq_idx = next; 6106 6107 /* Wakeup applications */ 6108 cv_broadcast(&softs->aifv); 6109 return (AACOK); 6110 } 6111 6112 /* 6113 * Timeout recovery 6114 */ 6115 /*ARGSUSED*/ 6116 static void 6117 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp) 6118 { 6119 #ifdef DEBUG 6120 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT; 6121 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp); 6122 AACDB_PRINT_FIB(softs, acp->slotp); 6123 #endif 6124 6125 /* 6126 * Besides the firmware in unhealthy state, an overloaded 6127 * adapter may also incur pkt timeout. 
     * There is a chance for an adapter with a slower IOP to take
     * longer than 60 seconds to process the commands, such as when
     * the adapter is doing a build on a RAID-5 while being required
     * to perform IOs. So longer completion times should be
     * tolerated.
     */
    switch (aac_do_reset(softs)) {
    case AAC_IOP_RESET_SUCCEED:
        aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET);
        aac_start_waiting_io(softs);
        break;
    case AAC_IOP_RESET_FAILED:
        /* Abort all waiting cmds when adapter is dead */
        aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT);
        break;
    case AAC_IOP_RESET_ABNORMAL:
        aac_start_waiting_io(softs);
    }
}

/*
 * The following function comes from Adaptec:
 *
 * Time sync. command added to synchronize time with firmware every 30
 * minutes (required for correct AIF timestamps etc.)
 */
static int
aac_sync_tick(struct aac_softstate *softs)
{
    ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
    struct aac_fib *fibp = softs->sync_slot.fibp;

    ddi_put32(acc, (void *)&fibp->data[0], ddi_get_time());
    return (aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t)));
}

static void
aac_daemon(void *arg)
{
    struct aac_softstate *softs = (struct aac_softstate *)arg;
    struct aac_cmd *acp;

    DBCALLED(softs, 2);

    mutex_enter(&softs->io_lock);
    /* Check slot for timeout pkts */
    aac_timebase += aac_tick;
    for (acp = softs->q_busy.q_head; acp; acp = acp->next) {
        if (acp->timeout) {
            if (acp->timeout <= aac_timebase) {
                aac_cmd_timeout(softs, acp);
                ddi_trigger_softintr(softs->softint_id);
            }
            break;
        }
    }

    /* Time sync.
with firmware every AAC_SYNC_TICK */ 6186 if (aac_sync_time <= aac_timebase) { 6187 aac_sync_time = aac_timebase; 6188 if (aac_sync_tick(softs) != AACOK) 6189 aac_sync_time += aac_tick << 1; /* retry shortly */ 6190 else 6191 aac_sync_time += AAC_SYNC_TICK; 6192 } 6193 6194 if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0)) 6195 softs->timeout_id = timeout(aac_daemon, (void *)softs, 6196 (aac_tick * drv_usectohz(1000000))); 6197 mutex_exit(&softs->io_lock); 6198 } 6199 6200 /* 6201 * Architecture dependent functions 6202 */ 6203 static int 6204 aac_rx_get_fwstatus(struct aac_softstate *softs) 6205 { 6206 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6207 } 6208 6209 static int 6210 aac_rx_get_mailbox(struct aac_softstate *softs, int mb) 6211 { 6212 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4)); 6213 } 6214 6215 static void 6216 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6217 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6218 { 6219 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd); 6220 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0); 6221 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1); 6222 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2); 6223 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3); 6224 } 6225 6226 static int 6227 aac_rkt_get_fwstatus(struct aac_softstate *softs) 6228 { 6229 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6230 } 6231 6232 static int 6233 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb) 6234 { 6235 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4)); 6236 } 6237 6238 static void 6239 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6240 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6241 { 6242 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd); 6243 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0); 6244 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1); 6245 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2); 6246 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3); 6247 } 6248 6249 /* 6250 * cb_ops functions 6251 */ 6252 static int 6253 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred) 6254 { 6255 struct aac_softstate *softs; 6256 int minor0, minor; 6257 int instance; 6258 6259 DBCALLED(NULL, 2); 6260 6261 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6262 return (EINVAL); 6263 6264 minor0 = getminor(*devp); 6265 minor = AAC_SCSA_MINOR(minor0); 6266 6267 if (AAC_IS_SCSA_NODE(minor)) 6268 return (scsi_hba_open(devp, flag, otyp, cred)); 6269 6270 instance = MINOR2INST(minor0); 6271 if (instance >= AAC_MAX_ADAPTERS) 6272 return (ENXIO); 6273 6274 softs = ddi_get_soft_state(aac_softstatep, instance); 6275 if (softs == NULL) 6276 return (ENXIO); 6277 6278 return (0); 6279 } 6280 6281 /*ARGSUSED*/ 6282 static int 6283 aac_close(dev_t dev, int flag, int otyp, cred_t *cred) 6284 { 6285 int minor0, minor; 6286 int instance; 6287 6288 DBCALLED(NULL, 2); 6289 6290 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6291 return (EINVAL); 6292 6293 minor0 = getminor(dev); 6294 minor = AAC_SCSA_MINOR(minor0); 6295 6296 if (AAC_IS_SCSA_NODE(minor)) 6297 return (scsi_hba_close(dev, flag, otyp, cred)); 6298 6299 instance = MINOR2INST(minor0); 6300 if (instance >= AAC_MAX_ADAPTERS) 6301 return (ENXIO); 6302 6303 return (0); 6304 } 6305 6306 static int 6307 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, 6308 int *rval_p) 6309 { 6310 struct aac_softstate *softs; 6311 int minor0, minor; 6312 int instance; 6313 6314 DBCALLED(NULL, 2); 6315 6316 if (drv_priv(cred_p) != 0) 6317 return (EPERM); 6318 6319 minor0 = 
getminor(dev); 6320 minor = AAC_SCSA_MINOR(minor0); 6321 6322 if (AAC_IS_SCSA_NODE(minor)) 6323 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p)); 6324 6325 instance = MINOR2INST(minor0); 6326 if (instance < AAC_MAX_ADAPTERS) { 6327 softs = ddi_get_soft_state(aac_softstatep, instance); 6328 return (aac_do_ioctl(softs, dev, cmd, arg, flag)); 6329 } 6330 return (ENXIO); 6331 } 6332 6333 /* 6334 * The IO fault service error handling callback function 6335 */ 6336 /*ARGSUSED*/ 6337 static int 6338 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6339 { 6340 /* 6341 * as the driver can always deal with an error in any dma or 6342 * access handle, we can just return the fme_status value. 6343 */ 6344 pci_ereport_post(dip, err, NULL); 6345 return (err->fme_status); 6346 } 6347 6348 /* 6349 * aac_fm_init - initialize fma capabilities and register with IO 6350 * fault services. 6351 */ 6352 static void 6353 aac_fm_init(struct aac_softstate *softs) 6354 { 6355 /* 6356 * Need to change iblock to priority for new MSI intr 6357 */ 6358 ddi_iblock_cookie_t fm_ibc; 6359 6360 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p, 6361 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 6362 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 6363 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 6364 6365 /* Only register with IO Fault Services if we have some capability */ 6366 if (softs->fm_capabilities) { 6367 /* Adjust access and dma attributes for FMA */ 6368 softs->acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC; 6369 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6370 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6371 6372 /* 6373 * Register capabilities with IO Fault Services. 6374 * fm_capabilities will be updated to indicate 6375 * capabilities actually supported (not requested.) 6376 */ 6377 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc); 6378 6379 /* 6380 * Initialize pci ereport capabilities if ereport 6381 * capable (should always be.) 6382 */ 6383 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6384 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6385 pci_ereport_setup(softs->devinfo_p); 6386 } 6387 6388 /* 6389 * Register error callback if error callback capable. 6390 */ 6391 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6392 ddi_fm_handler_register(softs->devinfo_p, 6393 aac_fm_error_cb, (void *) softs); 6394 } 6395 } 6396 } 6397 6398 /* 6399 * aac_fm_fini - Releases fma capabilities and un-registers with IO 6400 * fault services. 6401 */ 6402 static void 6403 aac_fm_fini(struct aac_softstate *softs) 6404 { 6405 /* Only unregister FMA capabilities if registered */ 6406 if (softs->fm_capabilities) { 6407 /* 6408 * Un-register error callback if error callback capable. 
6409 */ 6410 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6411 ddi_fm_handler_unregister(softs->devinfo_p); 6412 } 6413 6414 /* 6415 * Release any resources allocated by pci_ereport_setup() 6416 */ 6417 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6418 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6419 pci_ereport_teardown(softs->devinfo_p); 6420 } 6421 6422 /* Unregister from IO Fault Services */ 6423 ddi_fm_fini(softs->devinfo_p); 6424 6425 /* Adjust access and dma attributes for FMA */ 6426 softs->acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC; 6427 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 6428 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 6429 } 6430 } 6431 6432 int 6433 aac_check_acc_handle(ddi_acc_handle_t handle) 6434 { 6435 ddi_fm_error_t de; 6436 6437 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 6438 return (de.fme_status); 6439 } 6440 6441 int 6442 aac_check_dma_handle(ddi_dma_handle_t handle) 6443 { 6444 ddi_fm_error_t de; 6445 6446 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 6447 return (de.fme_status); 6448 } 6449 6450 void 6451 aac_fm_ereport(struct aac_softstate *softs, char *detail) 6452 { 6453 uint64_t ena; 6454 char buf[FM_MAX_CLASS]; 6455 6456 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6457 ena = fm_ena_generate(0, FM_ENA_FMT1); 6458 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) { 6459 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP, 6460 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 6461 } 6462 } 6463 6464 /* 6465 * Autoconfiguration support 6466 */ 6467 static int 6468 aac_parse_devname(char *devnm, int *tgt, int *lun) 6469 { 6470 char devbuf[SCSI_MAXNAMELEN]; 6471 char *addr; 6472 char *p, *tp, *lp; 6473 long num; 6474 6475 /* Parse dev name and address */ 6476 (void) strcpy(devbuf, devnm); 6477 addr = ""; 6478 for (p = devbuf; *p != '\0'; p++) { 6479 if (*p == '@') { 6480 addr = p + 1; 6481 *p = '\0'; 6482 } else if (*p == ':') { 6483 *p = '\0'; 6484 break; 6485 } 6486 } 6487 6488 /* Parse taget and lun */ 6489 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 6490 if (*p == ',') { 6491 lp = p + 1; 6492 *p = '\0'; 6493 break; 6494 } 6495 } 6496 if (tgt && tp) { 6497 if (ddi_strtol(tp, NULL, 0x10, &num)) 6498 return (AACERR); 6499 *tgt = (int)num; 6500 } 6501 if (lun && lp) { 6502 if (ddi_strtol(lp, NULL, 0x10, &num)) 6503 return (AACERR); 6504 *lun = (int)num; 6505 } 6506 return (AACOK); 6507 } 6508 6509 static dev_info_t * 6510 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun) 6511 { 6512 dev_info_t *child = NULL; 6513 char addr[SCSI_MAXNAMELEN]; 6514 char tmp[MAXNAMELEN]; 6515 6516 if (tgt < AAC_MAX_LD) { 6517 if (lun == 0) { 6518 struct aac_device *dvp = &softs->containers[tgt].dev; 6519 6520 child = dvp->dip; 6521 } 6522 } else { 6523 (void) sprintf(addr, "%x,%x", tgt, lun); 6524 for (child = ddi_get_child(softs->devinfo_p); 6525 child; child = ddi_get_next_sibling(child)) { 6526 /* We don't care about non-persistent node */ 6527 if (ndi_dev_is_persistent_node(child) == 0) 6528 continue; 6529 6530 if (aac_name_node(child, tmp, MAXNAMELEN) != 6531 DDI_SUCCESS) 6532 continue; 6533 if (strcmp(addr, tmp) == 0) 6534 break; 6535 } 6536 } 6537 return (child); 6538 } 6539 6540 static int 6541 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd, 6542 dev_info_t **dipp) 6543 { 6544 char *nodename = NULL; 6545 char **compatible = NULL; 6546 int ncompatible = 0; 6547 char *childname; 6548 dev_info_t *ldip = NULL; 6549 int tgt = 
sd->sd_address.a_target;
    int lun = sd->sd_address.a_lun;
    int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
    int rval;

    DBCALLED(softs, 2);

    scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
        NULL, &nodename, &compatible, &ncompatible);
    if (nodename == NULL) {
        AACDB_PRINT(softs, CE_WARN,
            "found no compatible driver for t%dL%d", tgt, lun);
        rval = NDI_FAILURE;
        goto finish;
    }
    childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename;

    /* Create dev node */
    rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID,
        &ldip);
    if (rval == NDI_SUCCESS) {
        if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
            != DDI_PROP_SUCCESS) {
            AACDB_PRINT(softs, CE_WARN, "unable to create "
                "property for t%dL%d (target)", tgt, lun);
            rval = NDI_FAILURE;
            goto finish;
        }
        if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
            != DDI_PROP_SUCCESS) {
            AACDB_PRINT(softs, CE_WARN, "unable to create "
                "property for t%dL%d (lun)", tgt, lun);
            rval = NDI_FAILURE;
            goto finish;
        }
        if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
            "compatible", compatible, ncompatible)
            != DDI_PROP_SUCCESS) {
            AACDB_PRINT(softs, CE_WARN, "unable to create "
                "property for t%dL%d (compatible)", tgt, lun);
            rval = NDI_FAILURE;
            goto finish;
        }

        rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
        if (rval != NDI_SUCCESS) {
            AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d",
                tgt, lun);
            ndi_prop_remove_all(ldip);
            (void) ndi_devi_free(ldip);
        }
    }
finish:
    if (dipp)
        *dipp = ldip;

    scsi_hba_nodename_compatible_free(nodename, compatible);
    return (rval);
}

/*ARGSUSED*/
static int
aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd)
{
    int tgt = sd->sd_address.a_target;
    int lun = sd->sd_address.a_lun;

    DBCALLED(softs, 2);

    if (tgt < AAC_MAX_LD) {
        int rval;

        if (lun == 0) {
            mutex_enter(&softs->io_lock);
            rval = aac_probe_container(softs, tgt);
            mutex_exit(&softs->io_lock);
            if (rval == AACOK) {
                if (scsi_hba_probe(sd, NULL) ==
                    SCSIPROBE_EXISTS)
                    return (NDI_SUCCESS);
            }
        }
        return (NDI_FAILURE);
    } else {
        int dtype;

        if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS)
            return (NDI_FAILURE);

        dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;

        AACDB_PRINT(softs, CE_NOTE,
            "Phys.
device found: tgt %d dtype %d: %s", 6642 tgt, dtype, sd->sd_inq->inq_vid); 6643 6644 /* Only non-DASD exposed */ 6645 if (dtype != DTYPE_RODIRECT /* CDROM */ && 6646 dtype != DTYPE_SEQUENTIAL /* TAPE */ && 6647 dtype != DTYPE_ESI /* SES */) 6648 return (NDI_FAILURE); 6649 6650 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt); 6651 mutex_enter(&softs->io_lock); 6652 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID; 6653 mutex_exit(&softs->io_lock); 6654 return (NDI_SUCCESS); 6655 } 6656 } 6657 6658 static int 6659 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun, 6660 dev_info_t **ldip) 6661 { 6662 struct scsi_device sd; 6663 dev_info_t *child; 6664 int rval; 6665 6666 DBCALLED(softs, 2); 6667 6668 if ((child = aac_find_child(softs, tgt, lun)) != NULL) { 6669 if (ldip) 6670 *ldip = child; 6671 return (NDI_SUCCESS); 6672 } 6673 6674 bzero(&sd, sizeof (struct scsi_device)); 6675 sd.sd_address.a_hba_tran = softs->hba_tran; 6676 sd.sd_address.a_target = (uint16_t)tgt; 6677 sd.sd_address.a_lun = (uint8_t)lun; 6678 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS) 6679 rval = aac_config_child(softs, &sd, ldip); 6680 scsi_unprobe(&sd); 6681 return (rval); 6682 } 6683 6684 static int 6685 aac_config_tgt(struct aac_softstate *softs, int tgt) 6686 { 6687 struct scsi_address ap; 6688 struct buf *bp = NULL; 6689 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE; 6690 int list_len = 0; 6691 int lun_total = 0; 6692 dev_info_t *ldip; 6693 int i; 6694 6695 ap.a_hba_tran = softs->hba_tran; 6696 ap.a_target = (uint16_t)tgt; 6697 ap.a_lun = 0; 6698 6699 for (i = 0; i < 2; i++) { 6700 struct scsi_pkt *pkt; 6701 uchar_t *cdb; 6702 uchar_t *p; 6703 uint32_t data; 6704 6705 if (bp == NULL) { 6706 if ((bp = scsi_alloc_consistent_buf(&ap, NULL, 6707 buf_len, B_READ, NULL_FUNC, NULL)) == NULL) 6708 return (AACERR); 6709 } 6710 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5, 6711 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, 6712 NULL, NULL)) == NULL) { 6713 scsi_free_consistent_buf(bp); 6714 return (AACERR); 6715 } 6716 cdb = pkt->pkt_cdbp; 6717 bzero(cdb, CDB_GROUP5); 6718 cdb[0] = SCMD_REPORT_LUNS; 6719 6720 /* Convert buffer len from local to LE_32 */ 6721 data = buf_len; 6722 for (p = &cdb[9]; p > &cdb[5]; p--) { 6723 *p = data & 0xff; 6724 data >>= 8; 6725 } 6726 6727 if (scsi_poll(pkt) < 0 || 6728 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) { 6729 scsi_destroy_pkt(pkt); 6730 break; 6731 } 6732 6733 /* Convert list_len from LE_32 to local */ 6734 for (p = (uchar_t *)bp->b_un.b_addr; 6735 p < (uchar_t *)bp->b_un.b_addr + 4; p++) { 6736 data <<= 8; 6737 data |= *p; 6738 } 6739 list_len = data; 6740 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) { 6741 scsi_free_consistent_buf(bp); 6742 bp = NULL; 6743 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE; 6744 } 6745 scsi_destroy_pkt(pkt); 6746 } 6747 if (i >= 2) { 6748 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr + 6749 AAC_SCSI_RPTLUNS_HEAD_SIZE); 6750 6751 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) { 6752 uint16_t lun; 6753 6754 /* Determine report luns addressing type */ 6755 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) { 6756 /* 6757 * Vendors in the field have been found to be 6758 * concatenating bus/target/lun to equal the 6759 * complete lun value instead of switching to 6760 * flat space addressing 6761 */ 6762 case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL: 6763 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT: 6764 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE: 6765 lun = ((buf[0] & 
0x3f) << 8) | buf[1];
                if (lun > UINT8_MAX) {
                    AACDB_PRINT(softs, CE_WARN,
                        "abnormal lun number: %d", lun);
                    break;
                }
                if (aac_config_lun(softs, tgt, lun, &ldip) ==
                    NDI_SUCCESS)
                    lun_total++;
                break;
            }

            buf += AAC_SCSI_RPTLUNS_ADDR_SIZE;
        }
    } else {
        /* The target may not support SCMD_REPORT_LUNS. */
        if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS)
            lun_total++;
    }
    scsi_free_consistent_buf(bp);
    return (lun_total);
}

static void
aac_devcfg(struct aac_softstate *softs, int tgt, int en)
{
    struct aac_device *dvp;

    mutex_enter(&softs->io_lock);
    dvp = AAC_DEV(softs, tgt);
    if (en)
        dvp->flags |= AAC_DFLAG_CONFIGURING;
    else
        dvp->flags &= ~AAC_DFLAG_CONFIGURING;
    mutex_exit(&softs->io_lock);
}

static int
aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **childp)
{
    struct aac_softstate *softs;
    int circ = 0;
    int rval;

    if ((softs = ddi_get_soft_state(aac_softstatep,
        ddi_get_instance(parent))) == NULL)
        return (NDI_FAILURE);

    /* Commands for bus config should be blocked as the bus is quiesced */
    mutex_enter(&softs->io_lock);
    if (softs->state & AAC_STATE_QUIESCED) {
        AACDB_PRINT(softs, CE_NOTE,
            "bus_config aborted because bus is quiesced");
        mutex_exit(&softs->io_lock);
        return (NDI_FAILURE);
    }
    mutex_exit(&softs->io_lock);

    DBCALLED(softs, 1);

    /* Hold the nexus across the bus_config */
    ndi_devi_enter(parent, &circ);
    switch (op) {
    case BUS_CONFIG_ONE: {
        int tgt, lun;

        if (aac_parse_devname(arg, &tgt, &lun) != AACOK) {
            rval = NDI_FAILURE;
            break;
        }

        AAC_DEVCFG_BEGIN(softs, tgt);
        rval = aac_config_lun(softs, tgt, lun, childp);
        AAC_DEVCFG_END(softs, tgt);
        break;
    }

    case BUS_CONFIG_DRIVER:
    case BUS_CONFIG_ALL: {
        uint32_t bus, tgt;
        int index, total;

        for (tgt = 0; tgt < AAC_MAX_LD; tgt++) {
            AAC_DEVCFG_BEGIN(softs, tgt);
            (void) aac_config_lun(softs, tgt, 0, NULL);
            AAC_DEVCFG_END(softs, tgt);
        }

        /* Config the non-DASD devices connected to the card */
        total = 0;
        index = AAC_MAX_LD;
        for (bus = 0; bus < softs->bus_max; bus++) {
            AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus);
            for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) {
                AAC_DEVCFG_BEGIN(softs, index);
                if (aac_config_tgt(softs, index))
                    total++;
                AAC_DEVCFG_END(softs, index);
            }
        }
        AACDB_PRINT(softs, CE_CONT,
            "?Total %d phys.
device(s) found", total); 6868 rval = NDI_SUCCESS; 6869 break; 6870 } 6871 } 6872 6873 if (rval == NDI_SUCCESS) 6874 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 6875 ndi_devi_exit(parent, circ); 6876 return (rval); 6877 } 6878 6879 static void 6880 aac_handle_dr(struct aac_drinfo *drp) 6881 { 6882 struct aac_softstate *softs = drp->softs; 6883 struct aac_device *dvp; 6884 dev_info_t *dip; 6885 int valid; 6886 int circ1 = 0; 6887 6888 DBCALLED(softs, 1); 6889 6890 /* Hold the nexus across the bus_config */ 6891 mutex_enter(&softs->io_lock); 6892 dvp = AAC_DEV(softs, drp->tgt); 6893 valid = AAC_DEV_IS_VALID(dvp); 6894 dip = dvp->dip; 6895 mutex_exit(&softs->io_lock); 6896 6897 switch (drp->event) { 6898 case AAC_EVT_ONLINE: 6899 case AAC_EVT_OFFLINE: 6900 /* Device onlined */ 6901 if (dip == NULL && valid) { 6902 ndi_devi_enter(softs->devinfo_p, &circ1); 6903 (void) aac_config_lun(softs, drp->tgt, 0, NULL); 6904 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined", 6905 softs->instance, drp->tgt, drp->lun); 6906 ndi_devi_exit(softs->devinfo_p, circ1); 6907 } 6908 /* Device offlined */ 6909 if (dip && !valid) { 6910 mutex_enter(&softs->io_lock); 6911 (void) aac_do_reset(softs); 6912 mutex_exit(&softs->io_lock); 6913 6914 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 6915 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined", 6916 softs->instance, drp->tgt, drp->lun); 6917 } 6918 break; 6919 } 6920 kmem_free(drp, sizeof (struct aac_drinfo)); 6921 } 6922 6923 static int 6924 aac_dr_event(struct aac_softstate *softs, int tgt, int lun, int event) 6925 { 6926 struct aac_drinfo *drp; 6927 6928 DBCALLED(softs, 1); 6929 6930 if (softs->taskq == NULL || 6931 (drp = kmem_zalloc(sizeof (struct aac_drinfo), KM_NOSLEEP)) == NULL) 6932 return (AACERR); 6933 6934 drp->softs = softs; 6935 drp->tgt = tgt; 6936 drp->lun = lun; 6937 drp->event = event; 6938 if ((ddi_taskq_dispatch(softs->taskq, (void (*)(void *))aac_handle_dr, 6939 drp, DDI_NOSLEEP)) != DDI_SUCCESS) { 6940 AACDB_PRINT(softs, CE_WARN, "DR task start failed"); 6941 kmem_free(drp, sizeof (struct aac_drinfo)); 6942 return (AACERR); 6943 } 6944 return (AACOK); 6945 } 6946 6947 #ifdef DEBUG 6948 6949 /* -------------------------debug aid functions-------------------------- */ 6950 6951 #define AAC_FIB_CMD_KEY_STRINGS \ 6952 TestCommandResponse, "TestCommandResponse", \ 6953 TestAdapterCommand, "TestAdapterCommand", \ 6954 LastTestCommand, "LastTestCommand", \ 6955 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \ 6956 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \ 6957 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \ 6958 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \ 6959 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \ 6960 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \ 6961 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \ 6962 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \ 6963 InterfaceShutdown, "InterfaceShutdown", \ 6964 DmaCommandFib, "DmaCommandFib", \ 6965 StartProfile, "StartProfile", \ 6966 TermProfile, "TermProfile", \ 6967 SpeedTest, "SpeedTest", \ 6968 TakeABreakPt, "TakeABreakPt", \ 6969 RequestPerfData, "RequestPerfData", \ 6970 SetInterruptDefTimer, "SetInterruptDefTimer", \ 6971 SetInterruptDefCount, "SetInterruptDefCount", \ 6972 GetInterruptDefStatus, "GetInterruptDefStatus", \ 6973 LastCommCommand, "LastCommCommand", \ 6974 NuFileSystem, "NuFileSystem", \ 6975 UFS, "UFS", \ 6976 HostFileSystem, "HostFileSystem", \ 6977 LastFileSystemCommand, 
"LastFileSystemCommand", \ 6978 ContainerCommand, "ContainerCommand", \ 6979 ContainerCommand64, "ContainerCommand64", \ 6980 ClusterCommand, "ClusterCommand", \ 6981 ScsiPortCommand, "ScsiPortCommand", \ 6982 ScsiPortCommandU64, "ScsiPortCommandU64", \ 6983 AifRequest, "AifRequest", \ 6984 CheckRevision, "CheckRevision", \ 6985 FsaHostShutdown, "FsaHostShutdown", \ 6986 RequestAdapterInfo, "RequestAdapterInfo", \ 6987 IsAdapterPaused, "IsAdapterPaused", \ 6988 SendHostTime, "SendHostTime", \ 6989 LastMiscCommand, "LastMiscCommand" 6990 6991 #define AAC_CTVM_SUBCMD_KEY_STRINGS \ 6992 VM_Null, "VM_Null", \ 6993 VM_NameServe, "VM_NameServe", \ 6994 VM_ContainerConfig, "VM_ContainerConfig", \ 6995 VM_Ioctl, "VM_Ioctl", \ 6996 VM_FilesystemIoctl, "VM_FilesystemIoctl", \ 6997 VM_CloseAll, "VM_CloseAll", \ 6998 VM_CtBlockRead, "VM_CtBlockRead", \ 6999 VM_CtBlockWrite, "VM_CtBlockWrite", \ 7000 VM_SliceBlockRead, "VM_SliceBlockRead", \ 7001 VM_SliceBlockWrite, "VM_SliceBlockWrite", \ 7002 VM_DriveBlockRead, "VM_DriveBlockRead", \ 7003 VM_DriveBlockWrite, "VM_DriveBlockWrite", \ 7004 VM_EnclosureMgt, "VM_EnclosureMgt", \ 7005 VM_Unused, "VM_Unused", \ 7006 VM_CtBlockVerify, "VM_CtBlockVerify", \ 7007 VM_CtPerf, "VM_CtPerf", \ 7008 VM_CtBlockRead64, "VM_CtBlockRead64", \ 7009 VM_CtBlockWrite64, "VM_CtBlockWrite64", \ 7010 VM_CtBlockVerify64, "VM_CtBlockVerify64", \ 7011 VM_CtHostRead64, "VM_CtHostRead64", \ 7012 VM_CtHostWrite64, "VM_CtHostWrite64", \ 7013 VM_NameServe64, "VM_NameServe64" 7014 7015 #define AAC_CT_SUBCMD_KEY_STRINGS \ 7016 CT_Null, "CT_Null", \ 7017 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \ 7018 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \ 7019 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \ 7020 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \ 7021 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \ 7022 CT_WRITE_MBR, "CT_WRITE_MBR", \ 7023 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \ 7024 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \ 7025 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \ 7026 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \ 7027 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \ 7028 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \ 7029 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \ 7030 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \ 7031 CT_READ_MBR, "CT_READ_MBR", \ 7032 CT_READ_PARTITION, "CT_READ_PARTITION", \ 7033 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \ 7034 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \ 7035 CT_SLICE_SIZE, "CT_SLICE_SIZE", \ 7036 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \ 7037 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \ 7038 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \ 7039 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \ 7040 CT_UNMIRROR, "CT_UNMIRROR", \ 7041 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \ 7042 CT_GEN_MIRROR, "CT_GEN_MIRROR", \ 7043 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \ 7044 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \ 7045 CT_MOVE2, "CT_MOVE2", \ 7046 CT_SPLIT, "CT_SPLIT", \ 7047 CT_SPLIT2, "CT_SPLIT2", \ 7048 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \ 7049 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \ 7050 CT_RECONFIG, "CT_RECONFIG", \ 7051 CT_BREAK2, "CT_BREAK2", \ 7052 CT_BREAK, "CT_BREAK", \ 7053 CT_MERGE2, "CT_MERGE2", \ 7054 CT_MERGE, "CT_MERGE", \ 7055 CT_FORCE_ERROR, "CT_FORCE_ERROR", \ 7056 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \ 7057 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \ 7058 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \ 7059 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \ 7060 CT_VOLUME_ADD, "CT_VOLUME_ADD", \ 7061 
CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \ 7062 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \ 7063 CT_COPY_STATUS, "CT_COPY_STATUS", \ 7064 CT_COPY, "CT_COPY", \ 7065 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \ 7066 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \ 7067 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \ 7068 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \ 7069 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \ 7070 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \ 7071 CT_SET, "CT_SET", \ 7072 CT_GET, "CT_GET", \ 7073 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \ 7074 CT_GET_DELAY, "CT_GET_DELAY", \ 7075 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \ 7076 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \ 7077 CT_SCRUB, "CT_SCRUB", \ 7078 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \ 7079 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \ 7080 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \ 7081 CT_PAUSE_IO, "CT_PAUSE_IO", \ 7082 CT_RELEASE_IO, "CT_RELEASE_IO", \ 7083 CT_SCRUB2, "CT_SCRUB2", \ 7084 CT_MCHECK, "CT_MCHECK", \ 7085 CT_CORRUPT, "CT_CORRUPT", \ 7086 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \ 7087 CT_PROMOTE, "CT_PROMOTE", \ 7088 CT_SET_DEAD, "CT_SET_DEAD", \ 7089 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \ 7090 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \ 7091 CT_GET_PARAM, "CT_GET_PARAM", \ 7092 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \ 7093 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \ 7094 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \ 7095 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \ 7096 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \ 7097 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \ 7098 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \ 7099 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \ 7100 CT_STOP_DATA, "CT_STOP_DATA", \ 7101 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \ 7102 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \ 7103 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \ 7104 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \ 7105 CT_GET_TIME, "CT_GET_TIME", \ 7106 CT_READ_DATA, "CT_READ_DATA", \ 7107 CT_CTR, "CT_CTR", \ 7108 CT_CTL, "CT_CTL", \ 7109 CT_DRAINIO, "CT_DRAINIO", \ 7110 CT_RELEASEIO, "CT_RELEASEIO", \ 7111 CT_GET_NVRAM, "CT_GET_NVRAM", \ 7112 CT_GET_MEMORY, "CT_GET_MEMORY", \ 7113 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \ 7114 CT_ADD_LEVEL, "CT_ADD_LEVEL", \ 7115 CT_NV_ZERO, "CT_NV_ZERO", \ 7116 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \ 7117 CT_THROTTLE_ON, "CT_THROTTLE_ON", \ 7118 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \ 7119 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \ 7120 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \ 7121 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \ 7122 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \ 7123 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \ 7124 CT_MONITOR, "CT_MONITOR", \ 7125 CT_GEN_MORPH, "CT_GEN_MORPH", \ 7126 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \ 7127 CT_CACHE_SET, "CT_CACHE_SET", \ 7128 CT_CACHE_STAT, "CT_CACHE_STAT", \ 7129 CT_TRACE_START, "CT_TRACE_START", \ 7130 CT_TRACE_STOP, "CT_TRACE_STOP", \ 7131 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \ 7132 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \ 7133 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \ 7134 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \ 7135 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \ 7136 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \ 7137 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \ 7138 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \ 7139 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \ 7140 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \ 7141 CT_STOP_DUMPS, "CT_STOP_DUMPS", \ 7142 CT_PANIC_ON_TAKE_A_BREAK, 
"CT_PANIC_ON_TAKE_A_BREAK", \ 7143 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \ 7144 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \ 7145 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \ 7146 CT_READ_NAME, "CT_READ_NAME", \ 7147 CT_WRITE_NAME, "CT_WRITE_NAME", \ 7148 CT_TOSS_CACHE, "CT_TOSS_CACHE", \ 7149 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \ 7150 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \ 7151 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \ 7152 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \ 7153 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \ 7154 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \ 7155 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \ 7156 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \ 7157 CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \ 7158 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \ 7159 CT_FLUSH, "CT_FLUSH", \ 7160 CT_REBUILD, "CT_REBUILD", \ 7161 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \ 7162 CT_RESTART, "CT_RESTART", \ 7163 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \ 7164 CT_TRACE_FLAG, "CT_TRACE_FLAG", \ 7165 CT_RESTART_MORPH, "CT_RESTART_MORPH", \ 7166 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \ 7167 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \ 7168 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \ 7169 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \ 7170 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \ 7171 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \ 7172 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \ 7173 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \ 7174 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \ 7175 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \ 7176 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \ 7177 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \ 7178 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \ 7179 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \ 7180 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \ 7181 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \ 7182 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \ 7183 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \ 7184 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \ 7185 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \ 7186 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \ 7187 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \ 7188 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \ 7189 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \ 7190 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \ 7191 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \ 7192 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \ 7193 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \ 7194 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \ 7195 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \ 7196 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \ 7197 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \ 7198 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \ 7199 CT_IS_CONTAINER_MEATADATA_STANDARD, \ 7200 "CT_IS_CONTAINER_MEATADATA_STANDARD", \ 7201 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \ 7202 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \ 7203 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \ 7204 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \ 7205 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \ 7206 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \ 7207 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \ 7208 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \ 7209 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \ 7210 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \ 7211 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \ 7212 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \ 7213 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", 
\ 7214 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \ 7215 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \ 7216 CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \ 7217 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \ 7218 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \ 7219 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE" 7220 7221 #define AAC_CL_SUBCMD_KEY_STRINGS \ 7222 CL_NULL, "CL_NULL", \ 7223 DS_INIT, "DS_INIT", \ 7224 DS_RESCAN, "DS_RESCAN", \ 7225 DS_CREATE, "DS_CREATE", \ 7226 DS_DELETE, "DS_DELETE", \ 7227 DS_ADD_DISK, "DS_ADD_DISK", \ 7228 DS_REMOVE_DISK, "DS_REMOVE_DISK", \ 7229 DS_MOVE_DISK, "DS_MOVE_DISK", \ 7230 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \ 7231 DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \ 7232 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \ 7233 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \ 7234 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \ 7235 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \ 7236 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \ 7237 DS_GET_DRIVES, "DS_GET_DRIVES", \ 7238 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \ 7239 DS_ONLINE, "DS_ONLINE", \ 7240 DS_OFFLINE, "DS_OFFLINE", \ 7241 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \ 7242 DS_FSAPRINT, "DS_FSAPRINT", \ 7243 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \ 7244 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \ 7245 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \ 7246 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \ 7247 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \ 7248 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \ 7249 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \ 7250 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \ 7251 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \ 7252 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \ 7253 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \ 7254 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \ 7255 CC_GET_BUSINFO, "CC_GET_BUSINFO", \ 7256 CC_GET_PORTINFO, "CC_GET_PORTINFO", \ 7257 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \ 7258 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \ 7259 CQ_QUORUM_OP, "CQ_QUORUM_OP" 7260 7261 #define AAC_AIF_SUBCMD_KEY_STRINGS \ 7262 AifCmdEventNotify, "AifCmdEventNotify", \ 7263 AifCmdJobProgress, "AifCmdJobProgress", \ 7264 AifCmdAPIReport, "AifCmdAPIReport", \ 7265 AifCmdDriverNotify, "AifCmdDriverNotify", \ 7266 AifReqJobList, "AifReqJobList", \ 7267 AifReqJobsForCtr, "AifReqJobsForCtr", \ 7268 AifReqJobsForScsi, "AifReqJobsForScsi", \ 7269 AifReqJobReport, "AifReqJobReport", \ 7270 AifReqTerminateJob, "AifReqTerminateJob", \ 7271 AifReqSuspendJob, "AifReqSuspendJob", \ 7272 AifReqResumeJob, "AifReqResumeJob", \ 7273 AifReqSendAPIReport, "AifReqSendAPIReport", \ 7274 AifReqAPIJobStart, "AifReqAPIJobStart", \ 7275 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \ 7276 AifReqAPIJobFinish, "AifReqAPIJobFinish" 7277 7278 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \ 7279 Reserved_IOCTL, "Reserved_IOCTL", \ 7280 GetDeviceHandle, "GetDeviceHandle", \ 7281 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \ 7282 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \ 7283 RescanBus, "RescanBus", \ 7284 GetDeviceProbeInfo, "GetDeviceProbeInfo", \ 7285 GetDeviceCapacity, "GetDeviceCapacity", \ 7286 GetContainerProbeInfo, "GetContainerProbeInfo", \ 7287 GetRequestedMemorySize, "GetRequestedMemorySize", \ 7288 GetBusInfo, "GetBusInfo", \ 7289 GetVendorSpecific, "GetVendorSpecific", \ 7290 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \ 7291 EnhancedGetBusInfo, 
"EnhancedGetBusInfo", \ 7292 SetupExtendedCounters, "SetupExtendedCounters", \ 7293 GetPerformanceCounters, "GetPerformanceCounters", \ 7294 ResetPerformanceCounters, "ResetPerformanceCounters", \ 7295 ReadModePage, "ReadModePage", \ 7296 WriteModePage, "WriteModePage", \ 7297 ReadDriveParameter, "ReadDriveParameter", \ 7298 WriteDriveParameter, "WriteDriveParameter", \ 7299 ResetAdapter, "ResetAdapter", \ 7300 ResetBus, "ResetBus", \ 7301 ResetBusDevice, "ResetBusDevice", \ 7302 ExecuteSrb, "ExecuteSrb", \ 7303 Create_IO_Task, "Create_IO_Task", \ 7304 Delete_IO_Task, "Delete_IO_Task", \ 7305 Get_IO_Task_Info, "Get_IO_Task_Info", \ 7306 Check_Task_Progress, "Check_Task_Progress", \ 7307 InjectError, "InjectError", \ 7308 GetDeviceDefectCounts, "GetDeviceDefectCounts", \ 7309 GetDeviceDefectInfo, "GetDeviceDefectInfo", \ 7310 GetDeviceStatus, "GetDeviceStatus", \ 7311 ClearDeviceStatus, "ClearDeviceStatus", \ 7312 DiskSpinControl, "DiskSpinControl", \ 7313 DiskSmartControl, "DiskSmartControl", \ 7314 WriteSame, "WriteSame", \ 7315 ReadWriteLong, "ReadWriteLong", \ 7316 FormatUnit, "FormatUnit", \ 7317 TargetDeviceControl, "TargetDeviceControl", \ 7318 TargetChannelControl, "TargetChannelControl", \ 7319 FlashNewCode, "FlashNewCode", \ 7320 DiskCheck, "DiskCheck", \ 7321 RequestSense, "RequestSense", \ 7322 DiskPERControl, "DiskPERControl", \ 7323 Read10, "Read10", \ 7324 Write10, "Write10" 7325 7326 #define AAC_AIFEN_KEY_STRINGS \ 7327 AifEnGeneric, "Generic", \ 7328 AifEnTaskComplete, "TaskComplete", \ 7329 AifEnConfigChange, "Config change", \ 7330 AifEnContainerChange, "Container change", \ 7331 AifEnDeviceFailure, "device failed", \ 7332 AifEnMirrorFailover, "Mirror failover", \ 7333 AifEnContainerEvent, "container event", \ 7334 AifEnFileSystemChange, "File system changed", \ 7335 AifEnConfigPause, "Container pause event", \ 7336 AifEnConfigResume, "Container resume event", \ 7337 AifEnFailoverChange, "Failover space assignment changed", \ 7338 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \ 7339 AifEnEnclosureManagement, "Enclosure management event", \ 7340 AifEnBatteryEvent, "battery event", \ 7341 AifEnAddContainer, "Add container", \ 7342 AifEnDeleteContainer, "Delete container", \ 7343 AifEnSMARTEvent, "SMART Event", \ 7344 AifEnBatteryNeedsRecond, "battery needs reconditioning", \ 7345 AifEnClusterEvent, "cluster event", \ 7346 AifEnDiskSetEvent, "disk set event occured", \ 7347 AifDenMorphComplete, "morph operation completed", \ 7348 AifDenVolumeExtendComplete, "VolumeExtendComplete" 7349 7350 struct aac_key_strings { 7351 int key; 7352 char *message; 7353 }; 7354 7355 extern struct scsi_key_strings scsi_cmds[]; 7356 7357 static struct aac_key_strings aac_fib_cmds[] = { 7358 AAC_FIB_CMD_KEY_STRINGS, 7359 -1, NULL 7360 }; 7361 7362 static struct aac_key_strings aac_ctvm_subcmds[] = { 7363 AAC_CTVM_SUBCMD_KEY_STRINGS, 7364 -1, NULL 7365 }; 7366 7367 static struct aac_key_strings aac_ct_subcmds[] = { 7368 AAC_CT_SUBCMD_KEY_STRINGS, 7369 -1, NULL 7370 }; 7371 7372 static struct aac_key_strings aac_cl_subcmds[] = { 7373 AAC_CL_SUBCMD_KEY_STRINGS, 7374 -1, NULL 7375 }; 7376 7377 static struct aac_key_strings aac_aif_subcmds[] = { 7378 AAC_AIF_SUBCMD_KEY_STRINGS, 7379 -1, NULL 7380 }; 7381 7382 static struct aac_key_strings aac_ioctl_subcmds[] = { 7383 AAC_IOCTL_SUBCMD_KEY_STRINGS, 7384 -1, NULL 7385 }; 7386 7387 static struct aac_key_strings aac_aifens[] = { 7388 AAC_AIFEN_KEY_STRINGS, 7389 -1, NULL 7390 }; 7391 7392 /* 7393 * The following function comes from Adaptec: 7394 * 7395 
/*
 * The following function comes from Adaptec:
 *
 * Get the firmware print buffer parameters from the firmware;
 * if the command is successful, map in the address.
 */
static int
aac_get_fw_debug_buffer(struct aac_softstate *softs)
{
	if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
	    0, 0, 0, 0, NULL) == AACOK) {
		uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
		uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
		uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
		uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);

		if (mondrv_buf_size) {
			uint32_t offset = mondrv_buf_paddrl -
			    softs->pci_mem_base_paddr;

			/*
			 * See if the address is already mapped in, and
			 * if so set it up from the base address.
			 */
			if ((mondrv_buf_paddrh == 0) &&
			    (offset + mondrv_buf_size < softs->map_size)) {
				mutex_enter(&aac_prt_mutex);
				softs->debug_buf_offset = offset;
				softs->debug_header_size = mondrv_hdr_size;
				softs->debug_buf_size = mondrv_buf_size;
				softs->debug_fw_flags = 0;
				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
				mutex_exit(&aac_prt_mutex);

				return (AACOK);
			}
		}
	}
	return (AACERR);
}

int
aac_dbflag_on(struct aac_softstate *softs, int flag)
{
	int debug_flags = softs ? softs->debug_flags : aac_debug_flags;

	return ((debug_flags & (AACDB_FLAGS_FW_PRINT |
	    AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
}

static void
aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
{
	if (noheader) {
		if (sl) {
			aac_fmt[0] = sl;
			cmn_err(lev, aac_fmt, aac_prt_buf);
		} else {
			cmn_err(lev, &aac_fmt[1], aac_prt_buf);
		}
	} else {
		if (sl) {
			aac_fmt_header[0] = sl;
			cmn_err(lev, aac_fmt_header,
			    softs->vendor_name, softs->instance,
			    aac_prt_buf);
		} else {
			cmn_err(lev, &aac_fmt_header[1],
			    softs->vendor_name, softs->instance,
			    aac_prt_buf);
		}
	}
}
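
/*
 * Note: judging by the offsets used in aac_printf() below, the firmware
 * print buffer mapped by aac_get_fw_debug_buffer() above starts with a
 * header of debug_header_size bytes that holds a flags word at
 * AAC_FW_DBG_FLAGS_OFFSET and a string-length word at
 * AAC_FW_DBG_STRLEN_OFFSET; the message text itself is copied in right
 * after the header. The driver writes the text and flags first and stores
 * the length last, and the firmware is expected to clear the length once
 * it has consumed the message; that cleared length is the handshake
 * polled for before the next message is written.
 */
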
/*
 * The following function comes from Adaptec:
 *
 * Format and print out the data passed in to UART or console
 * as specified by debug flags.
 */
void
aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
{
	va_list args;
	char sl; /* system log character */

	mutex_enter(&aac_prt_mutex);
	/* Set up parameters and call sprintf function to format the data */
	if (strchr("^!?", fmt[0]) == NULL) {
		sl = 0;
	} else {
		sl = fmt[0];
		fmt++;
	}
	va_start(args, fmt);
	(void) vsprintf(aac_prt_buf, fmt, args);
	va_end(args);

	/* Make sure the softs structure has been passed in for this section */
	if (softs) {
		if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
		    /* If we are set up for a Firmware print */
		    (softs->debug_buf_size)) {
			uint32_t count, i;

			/* Make sure the string size is within boundaries */
			count = strlen(aac_prt_buf);
			if (count > softs->debug_buf_size)
				count = (uint16_t)softs->debug_buf_size;

			/*
			 * Wait for no more than AAC_PRINT_TIMEOUT for the
			 * previous message length to clear (the handshake).
			 */
			for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
				if (!PCI_MEM_GET32(softs,
				    softs->debug_buf_offset +
				    AAC_FW_DBG_STRLEN_OFFSET))
					break;

				drv_usecwait(1000);
			}

			/*
			 * If the length is clear, copy over the message, the
			 * flags, and the length. Make sure the length is
			 * written last because that is the signal for the
			 * Firmware to pick it up.
			 */
			if (!PCI_MEM_GET32(softs, softs->debug_buf_offset +
			    AAC_FW_DBG_STRLEN_OFFSET)) {
				PCI_MEM_REP_PUT8(softs,
				    softs->debug_buf_offset +
				    softs->debug_header_size,
				    aac_prt_buf, count);
				PCI_MEM_PUT32(softs,
				    softs->debug_buf_offset +
				    AAC_FW_DBG_FLAGS_OFFSET,
				    softs->debug_fw_flags);
				PCI_MEM_PUT32(softs,
				    softs->debug_buf_offset +
				    AAC_FW_DBG_STRLEN_OFFSET, count);
			} else {
				cmn_err(CE_WARN, "UART output fail");
				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
			}
		}

		/*
		 * If the Kernel Debug Print flag is set, send it off
		 * to the Kernel Debugger
		 */
		if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
			aac_cmn_err(softs, lev, sl,
			    (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
	} else {
		/* Driver not initialized yet, no firmware or header output */
		if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
			aac_cmn_err(softs, lev, sl, 1);
	}
	mutex_exit(&aac_prt_mutex);
}

/*
 * Translate command number to description string
 */
static char *
aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
{
	int i;

	for (i = 0; cmdlist[i].key != -1; i++) {
		if (cmd == cmdlist[i].key)
			return (cmdlist[i].message);
	}
	return (NULL);
}

static void
aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
{
	struct scsi_pkt *pkt = acp->pkt;
	struct scsi_address *ap = &pkt->pkt_address;
	int is_pd = 0;
	int ctl = ddi_get_instance(softs->devinfo_p);
	int tgt = ap->a_target;
	int lun = ap->a_lun;
	union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp;
	uchar_t cmd = cdbp->scc_cmd;
	char *desc;

	if (tgt >= AAC_MAX_LD) {
		is_pd = 1;
		ctl = ((struct aac_nondasd *)acp->dvp)->bus;
		tgt = ((struct aac_nondasd *)acp->dvp)->tid;
		lun = 0;
	}

	if ((desc = aac_cmd_name(cmd,
	    (struct aac_key_strings *)scsi_cmds)) == NULL) {
		aac_printf(softs, CE_NOTE,
		    "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s",
		    cmd, ctl, tgt, lun, is_pd ? "(pd)" : "");
		return;
	}

	switch (cmd) {
	case SCMD_READ:
	case SCMD_WRITE:
		aac_printf(softs, CE_NOTE,
		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
		    desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp),
		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
		    ctl, tgt, lun, is_pd ? "(pd)" : "");
		break;
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
		aac_printf(softs, CE_NOTE,
		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
		    desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp),
		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
		    ctl, tgt, lun, is_pd ? "(pd)" : "");
		break;
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
		aac_printf(softs, CE_NOTE,
		    "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s",
		    desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp),
		    GETG4COUNT(cdbp),
		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
		    ctl, tgt, lun, is_pd ? "(pd)" : "");
		break;
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		aac_printf(softs, CE_NOTE,
		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
		    desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp),
		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
		    ctl, tgt, lun, is_pd ? "(pd)" : "");
		break;
	default:
		aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s",
		    desc, ctl, tgt, lun, is_pd ? "(pd)" : "");
	}
}
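
/*
 * Illustrative sketch only (not compiled, hence under #if 0), assuming a
 * standard 10-byte (Group 1) READ/WRITE CDB: the logical block address is
 * carried big-endian in bytes 2-5 and the transfer length in bytes 7-8,
 * which is what the GETG1ADDR()/GETG1COUNT() macros used above extract for
 * the trace message. The helper name here is hypothetical.
 */
#if 0
static void
aac_decode_g1_cdb_example(const uchar_t *cdb, uint32_t *lba, uint16_t *cnt)
{
	/* Bytes 2-5: 32-bit logical block address (big-endian) */
	*lba = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
	    ((uint32_t)cdb[4] << 8) | cdb[5];
	/* Bytes 7-8: 16-bit transfer length in blocks */
	*cnt = ((uint16_t)cdb[7] << 8) | cdb[8];
}
#endif
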
"(pd)" : ""); 7623 break; 7624 case SCMD_READ_G5: 7625 case SCMD_WRITE_G5: 7626 aac_printf(softs, CE_NOTE, 7627 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 7628 desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp), 7629 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 7630 ctl, tgt, lun, is_pd ? "(pd)" : ""); 7631 break; 7632 default: 7633 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s", 7634 desc, ctl, tgt, lun, is_pd ? "(pd)" : ""); 7635 } 7636 } 7637 7638 void 7639 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp) 7640 { 7641 struct aac_cmd *acp = slotp->acp; 7642 struct aac_fib *fibp = slotp->fibp; 7643 ddi_acc_handle_t acc = slotp->fib_acc_handle; 7644 uint16_t fib_size; 7645 uint32_t fib_cmd, sub_cmd; 7646 char *cmdstr, *subcmdstr; 7647 char *caller; 7648 int i; 7649 7650 if (acp) { 7651 if (!(softs->debug_fib_flags & acp->fib_flags)) 7652 return; 7653 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD) 7654 caller = "SCMD"; 7655 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL) 7656 caller = "IOCTL"; 7657 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB) 7658 caller = "SRB"; 7659 else 7660 return; 7661 } else { 7662 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC)) 7663 return; 7664 caller = "SYNC"; 7665 } 7666 7667 fib_cmd = ddi_get16(acc, &fibp->Header.Command); 7668 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds); 7669 sub_cmd = (uint32_t)-1; 7670 subcmdstr = NULL; 7671 7672 /* Print FIB header */ 7673 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) { 7674 aac_printf(softs, CE_NOTE, "FIB> from %s", caller); 7675 aac_printf(softs, CE_NOTE, " XferState %d", 7676 ddi_get32(acc, &fibp->Header.XferState)); 7677 aac_printf(softs, CE_NOTE, " Command %d", 7678 ddi_get16(acc, &fibp->Header.Command)); 7679 aac_printf(softs, CE_NOTE, " StructType %d", 7680 ddi_get8(acc, &fibp->Header.StructType)); 7681 aac_printf(softs, CE_NOTE, " Flags 0x%x", 7682 ddi_get8(acc, &fibp->Header.Flags)); 7683 aac_printf(softs, CE_NOTE, " Size %d", 7684 ddi_get16(acc, &fibp->Header.Size)); 7685 aac_printf(softs, CE_NOTE, " SenderSize %d", 7686 ddi_get16(acc, &fibp->Header.SenderSize)); 7687 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x", 7688 ddi_get32(acc, &fibp->Header.SenderFibAddress)); 7689 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x", 7690 ddi_get32(acc, &fibp->Header.ReceiverFibAddress)); 7691 aac_printf(softs, CE_NOTE, " SenderData 0x%x", 7692 ddi_get32(acc, &fibp->Header.SenderData)); 7693 } 7694 7695 /* Print FIB data */ 7696 switch (fib_cmd) { 7697 case ContainerCommand: 7698 sub_cmd = ddi_get32(acc, 7699 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0])); 7700 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds); 7701 if (subcmdstr == NULL) 7702 break; 7703 7704 switch (sub_cmd) { 7705 case VM_ContainerConfig: { 7706 struct aac_Container *pContainer = 7707 (struct aac_Container *)fibp->data; 7708 7709 fib_cmd = sub_cmd; 7710 cmdstr = subcmdstr; 7711 sub_cmd = (uint32_t)-1; 7712 subcmdstr = NULL; 7713 7714 sub_cmd = ddi_get32(acc, 7715 &pContainer->CTCommand.command); 7716 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds); 7717 if (subcmdstr == NULL) 7718 break; 7719 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)", 7720 subcmdstr, 7721 ddi_get32(acc, &pContainer->CTCommand.param[0]), 7722 ddi_get32(acc, &pContainer->CTCommand.param[1]), 7723 ddi_get32(acc, &pContainer->CTCommand.param[2])); 7724 return; 7725 } 7726 7727 case VM_Ioctl: 7728 fib_cmd = sub_cmd; 7729 cmdstr = subcmdstr; 7730 sub_cmd = (uint32_t)-1; 7731 subcmdstr = NULL; 7732 7733 sub_cmd = ddi_get32(acc, 7734 (void 
			subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
			break;

		case VM_CtBlockRead:
		case VM_CtBlockWrite: {
			struct aac_blockread *br =
			    (struct aac_blockread *)fibp->data;
			struct aac_sg_table *sg = &br->SgMap;
			uint32_t sgcount = ddi_get32(acc, &sg->SgCount);

			aac_printf(softs, CE_NOTE,
			    "FIB> %s Container %d 0x%x/%d", subcmdstr,
			    ddi_get32(acc, &br->ContainerId),
			    ddi_get32(acc, &br->BlockNumber),
			    ddi_get32(acc, &br->ByteCount));
			for (i = 0; i < sgcount; i++)
				aac_printf(softs, CE_NOTE,
				    " %d: 0x%08x/%d", i,
				    ddi_get32(acc, &sg->SgEntry[i].SgAddress),
				    ddi_get32(acc, &sg->SgEntry[i].SgByteCount));
			return;
		}
		}
		break;

	case ContainerCommand64: {
		struct aac_blockread64 *br =
		    (struct aac_blockread64 *)fibp->data;
		struct aac_sg_table64 *sg = &br->SgMap64;
		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
		uint64_t sgaddr;

		sub_cmd = br->Command;
		subcmdstr = NULL;
		if (sub_cmd == VM_CtHostRead64)
			subcmdstr = "VM_CtHostRead64";
		else if (sub_cmd == VM_CtHostWrite64)
			subcmdstr = "VM_CtHostWrite64";
		else
			break;

		aac_printf(softs, CE_NOTE,
		    "FIB> %s Container %d 0x%x/%d", subcmdstr,
		    ddi_get16(acc, &br->ContainerId),
		    ddi_get32(acc, &br->BlockNumber),
		    ddi_get16(acc, &br->SectorCount));
		for (i = 0; i < sgcount; i++) {
			sgaddr = ddi_get64(acc,
			    &sg->SgEntry64[i].SgAddress);
			aac_printf(softs, CE_NOTE,
			    " %d: 0x%08x.%08x/%d", i,
			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
			    ddi_get32(acc, &sg->SgEntry64[i].SgByteCount));
		}
		return;
	}

	case RawIo: {
		struct aac_raw_io *io = (struct aac_raw_io *)fibp->data;
		struct aac_sg_tableraw *sg = &io->SgMapRaw;
		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
		uint64_t sgaddr;

		aac_printf(softs, CE_NOTE,
		    "FIB> RawIo Container %d 0x%llx/%d 0x%x",
		    ddi_get16(acc, &io->ContainerId),
		    ddi_get64(acc, &io->BlockNumber),
		    ddi_get32(acc, &io->ByteCount),
		    ddi_get16(acc, &io->Flags));
		for (i = 0; i < sgcount; i++) {
			sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress);
			aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i,
			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
			    ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount));
		}
		return;
	}

	case ClusterCommand:
		sub_cmd = ddi_get32(acc,
		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
		subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
		break;

	case AifRequest:
		sub_cmd = ddi_get32(acc,
		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
		subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
		break;

	default:
		break;
	}

	fib_size = ddi_get16(acc, &(fibp->Header.Size));
	if (subcmdstr)
		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
		    subcmdstr, fib_size);
	else if (cmdstr && sub_cmd == (uint32_t)-1)
		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
		    cmdstr, fib_size);
	else if (cmdstr)
		aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
		    cmdstr, sub_cmd, fib_size);
	else
		aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
		    fib_cmd, fib_size);
}
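
/*
 * Note: the FIB tracing above is gated by softs->debug_fib_flags, which
 * selects the originating paths (AACDB_FLAGS_FIB_SCMD, AACDB_FLAGS_FIB_IOCTL,
 * AACDB_FLAGS_FIB_SRB, AACDB_FLAGS_FIB_SYNC) and whether the FIB header is
 * dumped (AACDB_FLAGS_FIB_HEADER). For example, setting
 *
 *	softs->debug_fib_flags = AACDB_FLAGS_FIB_SCMD | AACDB_FLAGS_FIB_HEADER;
 *
 * would trace only SCSI-command FIBs together with their headers.
 */
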
static void
aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
{
	int aif_command;
	uint32_t aif_seqnumber;
	int aif_en_type;
	char *str;

	aif_command = LE_32(aif->command);
	aif_seqnumber = LE_32(aif->seqNumber);
	aif_en_type = LE_32(aif->data.EN.type);

	switch (aif_command) {
	case AifCmdEventNotify:
		str = aac_cmd_name(aif_en_type, aac_aifens);
		if (str)
			aac_printf(softs, CE_NOTE, "AIF! %s", str);
		else
			aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
			    aif_en_type);
		break;

	case AifCmdJobProgress:
		switch (LE_32(aif->data.PR[0].status)) {
		case AifJobStsSuccess:
			str = "success"; break;
		case AifJobStsFinished:
			str = "finished"; break;
		case AifJobStsAborted:
			str = "aborted"; break;
		case AifJobStsFailed:
			str = "failed"; break;
		case AifJobStsSuspended:
			str = "suspended"; break;
		case AifJobStsRunning:
			str = "running"; break;
		default:
			str = "unknown"; break;
		}
		aac_printf(softs, CE_NOTE,
		    "AIF! JobProgress (%d) - %s (%d, %d)",
		    aif_seqnumber, str,
		    LE_32(aif->data.PR[0].currentTick),
		    LE_32(aif->data.PR[0].finalTick));
		break;

	case AifCmdAPIReport:
		aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
		    aif_seqnumber);
		break;

	case AifCmdDriverNotify:
		aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
		    aif_seqnumber);
		break;

	default:
		aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
		    aif_command, aif_seqnumber);
		break;
	}
}

#endif /* DEBUG */