1 /* 2 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. 3 */ 4 5 /* 6 * Copyright (c) 2018, Joyent, Inc. 7 * Copyright 2005-08 Adaptec, Inc. 8 * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner 9 * Copyright (c) 2000 Michael Smith 10 * Copyright (c) 2001 Scott Long 11 * Copyright (c) 2000 BSDi 12 * All rights reserved. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 #include <sys/modctl.h> 36 #include <sys/conf.h> 37 #include <sys/cmn_err.h> 38 #include <sys/ddi.h> 39 #include <sys/devops.h> 40 #include <sys/pci.h> 41 #include <sys/types.h> 42 #include <sys/ddidmareq.h> 43 #include <sys/scsi/scsi.h> 44 #include <sys/ksynch.h> 45 #include <sys/sunddi.h> 46 #include <sys/byteorder.h> 47 #include "aac_regs.h" 48 #include "aac.h" 49 50 /* 51 * FMA header files 52 */ 53 #include <sys/ddifm.h> 54 #include <sys/fm/protocol.h> 55 #include <sys/fm/util.h> 56 #include <sys/fm/io/ddi.h> 57 58 /* 59 * For minor nodes created by the SCSA framework, minor numbers are 60 * formed by left-shifting the instance by INST_MINOR_SHIFT and ORing in a 61 * number less than 64. 62 * 63 * To support cfgadm, we need to conform to the SCSA framework by creating 64 * devctl/scsi and driver-specific minor nodes in the SCSA format, 65 * and calling scsi_hba_xxx() functions accordingly. 66 */ 67 68 #define AAC_MINOR 32 69 #define INST2AAC(x) (((x) << INST_MINOR_SHIFT) | AAC_MINOR) 70 #define AAC_SCSA_MINOR(x) ((x) & TRAN_MINOR_MASK) 71 #define AAC_IS_SCSA_NODE(x) ((x) == DEVCTL_MINOR || (x) == SCSI_MINOR) 72 73 #define SD2TRAN(sd) ((sd)->sd_address.a_hba_tran) 74 #define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private) 75 #define AAC_DIP2TRAN(dip) ((scsi_hba_tran_t *)ddi_get_driver_private(dip)) 76 #define AAC_DIP2SOFTS(dip) (AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip))) 77 #define SD2AAC(sd) (AAC_TRAN2SOFTS(SD2TRAN(sd))) 78 #define AAC_PD(t) ((t) - AAC_MAX_LD) 79 #define AAC_DEV(softs, t) (((t) < AAC_MAX_LD) ? \ 80 &(softs)->containers[(t)].dev : \ 81 ((t) < AAC_MAX_DEV(softs)) ?
\ 82 &(softs)->nondasds[AAC_PD(t)].dev : NULL) 83 #define AAC_DEVCFG_BEGIN(softs, tgt) \ 84 aac_devcfg((softs), (tgt), 1) 85 #define AAC_DEVCFG_END(softs, tgt) \ 86 aac_devcfg((softs), (tgt), 0) 87 #define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private) 88 #define AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \ 89 if (!(cond)) { \ 90 int count = (timeout) * 10; \ 91 while (count) { \ 92 drv_usecwait(100); \ 93 if (cond) \ 94 break; \ 95 count--; \ 96 } \ 97 (timeout) = (count + 9) / 10; \ 98 } \ 99 } 100 101 #define AAC_SENSE_DATA_DESCR_LEN \ 102 (sizeof (struct scsi_descr_sense_hdr) + \ 103 sizeof (struct scsi_information_sense_descr)) 104 #define AAC_ARQ64_LENGTH \ 105 (sizeof (struct scsi_arq_status) + \ 106 AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH) 107 108 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 109 #define AAC_GETGXADDR(cmdlen, cdbp) \ 110 ((cmdlen == 6) ? GETG0ADDR(cdbp) : \ 111 (cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \ 112 ((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp)) 113 114 #define AAC_CDB_INQUIRY_CMDDT 0x02 115 #define AAC_CDB_INQUIRY_EVPD 0x01 116 #define AAC_VPD_PAGE_CODE 1 117 #define AAC_VPD_PAGE_LENGTH 3 118 #define AAC_VPD_PAGE_DATA 4 119 #define AAC_VPD_ID_CODESET 0 120 #define AAC_VPD_ID_TYPE 1 121 #define AAC_VPD_ID_LENGTH 3 122 #define AAC_VPD_ID_DATA 4 123 124 #define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08 125 #define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08 126 #define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0 127 /* 00b - peripheral device addressing method */ 128 #define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00 129 /* 01b - flat space addressing method */ 130 #define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40 131 /* 10b - logical unit addressing method */ 132 #define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80 133 134 /* Return the size of FIB with data part type data_type */ 135 #define AAC_FIB_SIZEOF(data_type) \ 136 (sizeof (struct aac_fib_header) + sizeof (data_type)) 137 /* Return the container size defined in mir */ 138 #define AAC_MIR_SIZE(softs, acc, mir) \ 139 (((softs)->flags & AAC_FLAGS_LBA_64BIT) ? 
\ 140 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \ 141 ((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \ 142 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity)) 143 144 /* The last entry of aac_cards[] is for unknown cards */ 145 #define AAC_UNKNOWN_CARD \ 146 (sizeof (aac_cards) / sizeof (struct aac_card_type) - 1) 147 #define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD) 148 #define BUF_IS_READ(bp) ((bp)->b_flags & B_READ) 149 #define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL) 150 #define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC)) 151 152 #define PCI_MEM_GET32(softs, off) \ 153 ddi_get32((softs)->pci_mem_handle, \ 154 (void *)((softs)->pci_mem_base_vaddr + (off))) 155 #define PCI_MEM_PUT32(softs, off, val) \ 156 ddi_put32((softs)->pci_mem_handle, \ 157 (void *)((softs)->pci_mem_base_vaddr + (off)), \ 158 (uint32_t)(val)) 159 #define PCI_MEM_GET16(softs, off) \ 160 ddi_get16((softs)->pci_mem_handle, \ 161 (void *)((softs)->pci_mem_base_vaddr + (off))) 162 #define PCI_MEM_PUT16(softs, off, val) \ 163 ddi_put16((softs)->pci_mem_handle, \ 164 (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val)) 165 /* Write host data at valp to device mem[off] repeatedly count times */ 166 #define PCI_MEM_REP_PUT8(softs, off, valp, count) \ 167 ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 168 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 169 count, DDI_DEV_AUTOINCR) 170 /* Read device data at mem[off] to host addr valp repeatedly count times */ 171 #define PCI_MEM_REP_GET8(softs, off, valp, count) \ 172 ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 173 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 174 count, DDI_DEV_AUTOINCR) 175 #define AAC_GET_FIELD8(acc, d, s, field) \ 176 (d)->field = ddi_get8(acc, (uint8_t *)&(s)->field) 177 #define AAC_GET_FIELD32(acc, d, s, field) \ 178 (d)->field = ddi_get32(acc, (uint32_t *)&(s)->field) 179 #define AAC_GET_FIELD64(acc, d, s, field) \ 180 (d)->field = ddi_get64(acc, (uint64_t *)&(s)->field) 181 #define AAC_REP_GET_FIELD8(acc, d, s, field, r) \ 182 ddi_rep_get8((acc), (uint8_t *)&(d)->field, \ 183 (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 184 #define AAC_REP_GET_FIELD32(acc, d, s, field, r) \ 185 ddi_rep_get32((acc), (uint32_t *)&(d)->field, \ 186 (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 187 188 #define AAC_ENABLE_INTR(softs) { \ 189 if (softs->flags & AAC_FLAGS_NEW_COMM) \ 190 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \ 191 else \ 192 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \ 193 softs->state |= AAC_STATE_INTR; \ 194 } 195 196 #define AAC_DISABLE_INTR(softs) { \ 197 PCI_MEM_PUT32(softs, AAC_OIMR, ~0); \ 198 softs->state &= ~AAC_STATE_INTR; \ 199 } 200 #define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask) 201 #define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR) 202 #define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val) 203 #define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE) 204 #define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val) 205 #define AAC_FWSTATUS_GET(softs) \ 206 ((softs)->aac_if.aif_get_fwstatus(softs)) 207 #define AAC_MAILBOX_GET(softs, mb) \ 208 ((softs)->aac_if.aif_get_mailbox((softs), (mb))) 209 #define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \ 210 ((softs)->aac_if.aif_set_mailbox((softs), (cmd), \ 211 (arg0), (arg1), (arg2), (arg3))) 212 213 #define AAC_MGT_SLOT_NUM 2 214 #define AAC_THROTTLE_DRAIN -1 215 216 #define AAC_QUIESCE_TICK 1 /* 1 second */ 217 #define 
AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */ 218 #define AAC_DEFAULT_TICK 10 /* 10 seconds */ 219 #define AAC_SYNC_TICK (30*60) /* 30 minutes */ 220 221 /* Poll time for aac_do_poll_io() */ 222 #define AAC_POLL_TIME 60 /* 60 seconds */ 223 224 /* IOP reset */ 225 #define AAC_IOP_RESET_SUCCEED 0 /* IOP reset succeed */ 226 #define AAC_IOP_RESET_FAILED -1 /* IOP reset failed */ 227 #define AAC_IOP_RESET_ABNORMAL -2 /* Reset operation abnormal */ 228 229 /* 230 * Hardware access functions 231 */ 232 static int aac_rx_get_fwstatus(struct aac_softstate *); 233 static int aac_rx_get_mailbox(struct aac_softstate *, int); 234 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 235 uint32_t, uint32_t, uint32_t); 236 static int aac_rkt_get_fwstatus(struct aac_softstate *); 237 static int aac_rkt_get_mailbox(struct aac_softstate *, int); 238 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 239 uint32_t, uint32_t, uint32_t); 240 241 /* 242 * SCSA function prototypes 243 */ 244 static int aac_attach(dev_info_t *, ddi_attach_cmd_t); 245 static int aac_detach(dev_info_t *, ddi_detach_cmd_t); 246 static int aac_reset(dev_info_t *, ddi_reset_cmd_t); 247 static int aac_quiesce(dev_info_t *); 248 static int aac_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **); 249 250 /* 251 * Interrupt handler functions 252 */ 253 static int aac_query_intrs(struct aac_softstate *, int); 254 static int aac_add_intrs(struct aac_softstate *); 255 static void aac_remove_intrs(struct aac_softstate *); 256 static int aac_enable_intrs(struct aac_softstate *); 257 static int aac_disable_intrs(struct aac_softstate *); 258 static uint_t aac_intr_old(caddr_t, caddr_t); 259 static uint_t aac_intr_new(caddr_t, caddr_t); 260 static uint_t aac_softintr(caddr_t); 261 262 /* 263 * Internal functions in attach 264 */ 265 static int aac_check_card_type(struct aac_softstate *); 266 static int aac_check_firmware(struct aac_softstate *); 267 static int aac_common_attach(struct aac_softstate *); 268 static void aac_common_detach(struct aac_softstate *); 269 static int aac_probe_containers(struct aac_softstate *); 270 static int aac_alloc_comm_space(struct aac_softstate *); 271 static int aac_setup_comm_space(struct aac_softstate *); 272 static void aac_free_comm_space(struct aac_softstate *); 273 static int aac_hba_setup(struct aac_softstate *); 274 275 /* 276 * Sync FIB operation functions 277 */ 278 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t, 279 uint32_t, uint32_t, uint32_t, uint32_t *); 280 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t); 281 282 /* 283 * Command queue operation functions 284 */ 285 static void aac_cmd_initq(struct aac_cmd_queue *); 286 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *); 287 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *); 288 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *); 289 290 /* 291 * FIB queue operation functions 292 */ 293 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t); 294 static int aac_fib_dequeue(struct aac_softstate *, int, int *); 295 296 /* 297 * Slot operation functions 298 */ 299 static int aac_create_slots(struct aac_softstate *); 300 static void aac_destroy_slots(struct aac_softstate *); 301 static void aac_alloc_fibs(struct aac_softstate *); 302 static void aac_destroy_fibs(struct aac_softstate *); 303 static struct aac_slot *aac_get_slot(struct aac_softstate *); 304 static void aac_release_slot(struct 
aac_softstate *, struct aac_slot *); 305 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *); 306 static void aac_free_fib(struct aac_slot *); 307 308 /* 309 * Internal functions 310 */ 311 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_cmd *, 312 uint16_t); 313 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *); 314 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *); 315 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *); 316 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *); 317 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *); 318 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *); 319 static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *); 320 static void aac_start_waiting_io(struct aac_softstate *); 321 static void aac_drain_comp_q(struct aac_softstate *); 322 int aac_do_io(struct aac_softstate *, struct aac_cmd *); 323 static int aac_sync_fib_slot_bind(struct aac_softstate *, struct aac_cmd *); 324 static void aac_sync_fib_slot_release(struct aac_softstate *, struct aac_cmd *); 325 static void aac_start_io(struct aac_softstate *, struct aac_cmd *); 326 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *); 327 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *); 328 static int aac_send_command(struct aac_softstate *, struct aac_slot *); 329 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *); 330 static int aac_dma_sync_ac(struct aac_cmd *); 331 static int aac_shutdown(struct aac_softstate *); 332 static int aac_reset_adapter(struct aac_softstate *); 333 static int aac_do_quiesce(struct aac_softstate *softs); 334 static int aac_do_unquiesce(struct aac_softstate *softs); 335 static void aac_unhold_bus(struct aac_softstate *, int); 336 static void aac_set_throttle(struct aac_softstate *, struct aac_device *, 337 int, int); 338 339 /* 340 * Adapter Initiated FIB handling function 341 */ 342 static void aac_save_aif(struct aac_softstate *, ddi_acc_handle_t, 343 struct aac_fib *, int); 344 static int aac_handle_aif(struct aac_softstate *, struct aac_aif_command *); 345 346 /* 347 * Event handling related functions 348 */ 349 static void aac_timer(void *); 350 static void aac_event_thread(struct aac_softstate *); 351 static void aac_event_disp(struct aac_softstate *, int); 352 353 /* 354 * IOCTL interface related functions 355 */ 356 static int aac_open(dev_t *, int, int, cred_t *); 357 static int aac_close(dev_t, int, int, cred_t *); 358 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); 359 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int); 360 361 /* 362 * FMA Prototypes 363 */ 364 static void aac_fm_init(struct aac_softstate *); 365 static void aac_fm_fini(struct aac_softstate *); 366 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *); 367 int aac_check_acc_handle(ddi_acc_handle_t); 368 int aac_check_dma_handle(ddi_dma_handle_t); 369 void aac_fm_ereport(struct aac_softstate *, char *); 370 371 /* 372 * Auto enumeration functions 373 */ 374 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t); 375 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t, 376 void *, dev_info_t **); 377 static int aac_handle_dr(struct aac_softstate *, int, int, int); 378 379 extern pri_t minclsyspri; 380 381 #ifdef DEBUG 382 /* 383 * UART debug output support 384 */ 385 386 
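/*
 * AAC_PRINT_BUFFER_SIZE below sizes the local staging buffer (aac_prt_buf)
 * used when formatting driver debug messages; the AAC_FW_DBG_*_OFFSET
 * values are presumably offsets into the firmware debug-buffer header
 * consulted by aac_get_fw_debug_buffer().
 */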
#define AAC_PRINT_BUFFER_SIZE 512 387 #define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. */ 388 389 #define AAC_FW_DBG_STRLEN_OFFSET 0x00 390 #define AAC_FW_DBG_FLAGS_OFFSET 0x04 391 #define AAC_FW_DBG_BLED_OFFSET 0x08 392 393 static int aac_get_fw_debug_buffer(struct aac_softstate *); 394 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *); 395 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *); 396 397 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE]; 398 static char aac_fmt[] = " %s"; 399 static char aac_fmt_header[] = " %s.%d: %s"; 400 static kmutex_t aac_prt_mutex; 401 402 /* 403 * Debug flags to be put into the softstate flags field 404 * when initialized 405 */ 406 uint32_t aac_debug_flags = 407 /* AACDB_FLAGS_KERNEL_PRINT | */ 408 /* AACDB_FLAGS_FW_PRINT | */ 409 /* AACDB_FLAGS_MISC | */ 410 /* AACDB_FLAGS_FUNC1 | */ 411 /* AACDB_FLAGS_FUNC2 | */ 412 /* AACDB_FLAGS_SCMD | */ 413 /* AACDB_FLAGS_AIF | */ 414 /* AACDB_FLAGS_FIB | */ 415 /* AACDB_FLAGS_IOCTL | */ 416 0; 417 uint32_t aac_debug_fib_flags = 418 /* AACDB_FLAGS_FIB_RW | */ 419 /* AACDB_FLAGS_FIB_IOCTL | */ 420 /* AACDB_FLAGS_FIB_SRB | */ 421 /* AACDB_FLAGS_FIB_SYNC | */ 422 /* AACDB_FLAGS_FIB_HEADER | */ 423 /* AACDB_FLAGS_FIB_TIMEOUT | */ 424 0; 425 426 #endif /* DEBUG */ 427 428 static struct cb_ops aac_cb_ops = { 429 aac_open, /* open */ 430 aac_close, /* close */ 431 nodev, /* strategy */ 432 nodev, /* print */ 433 nodev, /* dump */ 434 nodev, /* read */ 435 nodev, /* write */ 436 aac_ioctl, /* ioctl */ 437 nodev, /* devmap */ 438 nodev, /* mmap */ 439 nodev, /* segmap */ 440 nochpoll, /* poll */ 441 ddi_prop_op, /* cb_prop_op */ 442 NULL, /* streamtab */ 443 D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 444 CB_REV, /* cb_rev */ 445 nodev, /* async I/O read entry point */ 446 nodev /* async I/O write entry point */ 447 }; 448 449 static struct dev_ops aac_dev_ops = { 450 DEVO_REV, 451 0, 452 aac_getinfo, 453 nulldev, 454 nulldev, 455 aac_attach, 456 aac_detach, 457 aac_reset, 458 &aac_cb_ops, 459 NULL, 460 NULL, 461 aac_quiesce, 462 }; 463 464 static struct modldrv aac_modldrv = { 465 &mod_driverops, 466 "AAC Driver " AAC_DRIVER_VERSION, 467 &aac_dev_ops, 468 }; 469 470 static struct modlinkage aac_modlinkage = { 471 MODREV_1, 472 &aac_modldrv, 473 NULL 474 }; 475 476 static struct aac_softstate *aac_softstatep; 477 478 /* 479 * Supported card list 480 * ordered in vendor id, subvendor id, subdevice id, and device id 481 */ 482 static struct aac_card_type aac_cards[] = { 483 {0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX, 484 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 485 "Dell", "PERC 3/Di"}, 486 {0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX, 487 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 488 "Dell", "PERC 3/Di"}, 489 {0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX, 490 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 491 "Dell", "PERC 3/Si"}, 492 {0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX, 493 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 494 "Dell", "PERC 3/Di"}, 495 {0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX, 496 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 497 "Dell", "PERC 3/Si"}, 498 {0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX, 499 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 500 "Dell", "PERC 3/Di"}, 501 {0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX, 502 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 503 "Dell", "PERC 3/Di"}, 504 {0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX, 505 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 506 "Dell", "PERC 3/Di"}, 
507 {0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX, 508 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 509 "Dell", "PERC 3/Di"}, 510 {0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX, 511 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 512 "Dell", "PERC 3/Di"}, 513 {0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX, 514 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 515 "Dell", "PERC 320/DC"}, 516 {0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX, 517 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"}, 518 519 {0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX, 520 0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"}, 521 {0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX, 522 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"}, 523 {0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT, 524 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"}, 525 526 {0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX, 527 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 528 {0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX, 529 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 530 531 {0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX, 532 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 533 "Adaptec", "2200S"}, 534 {0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX, 535 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 536 "Adaptec", "2120S"}, 537 {0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX, 538 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 539 "Adaptec", "2200S"}, 540 {0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX, 541 0, AAC_TYPE_SCSI, "Adaptec", "3230S"}, 542 {0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX, 543 0, AAC_TYPE_SCSI, "Adaptec", "3240S"}, 544 {0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX, 545 0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"}, 546 {0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX, 547 0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"}, 548 {0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT, 549 0, AAC_TYPE_SCSI, "Adaptec", "2230S"}, 550 {0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT, 551 0, AAC_TYPE_SCSI, "Adaptec", "2130S"}, 552 {0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX, 553 0, AAC_TYPE_SATA, "Adaptec", "2020SA"}, 554 {0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX, 555 0, AAC_TYPE_SATA, "Adaptec", "2025SA"}, 556 {0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX, 557 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"}, 558 {0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX, 559 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"}, 560 {0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX, 561 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"}, 562 {0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX, 563 0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"}, 564 {0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX, 565 0, AAC_TYPE_SCSI, "Adaptec", "2240S"}, 566 {0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX, 567 0, AAC_TYPE_SAS, "Adaptec", "4005SAS"}, 568 {0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX, 569 0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"}, 570 {0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX, 571 0, AAC_TYPE_SAS, "Adaptec", "4800SAS"}, 572 {0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX, 573 0, AAC_TYPE_SAS, "Adaptec", "4805SAS"}, 574 {0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT, 575 0, AAC_TYPE_SATA, "Adaptec", "2820SA"}, 576 {0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT, 577 0, AAC_TYPE_SATA, "Adaptec", "2620SA"}, 578 {0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT, 579 0, AAC_TYPE_SATA, "Adaptec", "2420SA"}, 580 {0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT, 581 0, AAC_TYPE_SATA, "ICP", "9024RO"}, 582 {0x9005, 0x286, 0x9005, 
0x29f, AAC_HWIF_RKT, 583 0, AAC_TYPE_SATA, "ICP", "9014RO"}, 584 {0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT, 585 0, AAC_TYPE_SATA, "ICP", "9047MA"}, 586 {0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT, 587 0, AAC_TYPE_SATA, "ICP", "9087MA"}, 588 {0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX, 589 0, AAC_TYPE_SAS, "ICP", "9085LI"}, 590 {0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX, 591 0, AAC_TYPE_SAS, "ICP", "5085BR"}, 592 {0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT, 593 0, AAC_TYPE_SATA, "ICP", "9067MA"}, 594 {0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX, 595 0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"}, 596 {0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX, 597 0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"}, 598 {0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX, 599 0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"}, 600 {0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX, 601 0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"}, 602 {0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX, 603 0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"}, 604 {0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX, 605 0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"}, 606 607 {0, 0, 0, 0, AAC_HWIF_UNKNOWN, 608 0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"}, 609 }; 610 611 /* 612 * Hardware access functions for i960 based cards 613 */ 614 static struct aac_interface aac_rx_interface = { 615 aac_rx_get_fwstatus, 616 aac_rx_get_mailbox, 617 aac_rx_set_mailbox 618 }; 619 620 /* 621 * Hardware access functions for Rocket based cards 622 */ 623 static struct aac_interface aac_rkt_interface = { 624 aac_rkt_get_fwstatus, 625 aac_rkt_get_mailbox, 626 aac_rkt_set_mailbox 627 }; 628 629 ddi_device_acc_attr_t aac_acc_attr = { 630 DDI_DEVICE_ATTR_V1, 631 DDI_STRUCTURE_LE_ACC, 632 DDI_STRICTORDER_ACC, 633 DDI_DEFAULT_ACC 634 }; 635 636 static struct { 637 int size; 638 int notify; 639 } aac_qinfo[] = { 640 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 641 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 642 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 643 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 644 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 645 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 646 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 647 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 648 }; 649 650 /* 651 * Default aac dma attributes 652 */ 653 static ddi_dma_attr_t aac_dma_attr = { 654 DMA_ATTR_V0, 655 0, /* lowest usable address */ 656 0xffffffffull, /* high DMA address range */ 657 0xffffffffull, /* DMA counter register */ 658 AAC_DMA_ALIGN, /* DMA address alignment */ 659 1, /* DMA burstsizes */ 660 1, /* min effective DMA size */ 661 0xffffffffull, /* max DMA xfer size */ 662 0xffffffffull, /* segment boundary */ 663 1, /* s/g list length */ 664 AAC_BLK_SIZE, /* granularity of device */ 665 0 /* DMA transfer flags */ 666 }; 667 668 static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */ 669 static uint32_t aac_timebase = 0; /* internal timer in seconds */ 670 671 /* 672 * Warlock directives 673 * 674 * Different variables with the same types have to be protected by the 675 * same mutex; otherwise, warlock will complain with "variables don't 676 * seem to be protected consistently". For example, 677 * aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected 678 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to 679 * declare them as protected explictly at aac_cmd_dequeue(). 
680 */ 681 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \ 682 scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \ 683 mode_format mode_geometry mode_header aac_cmd)) 684 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \ 685 aac_sge)) 686 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \ 687 aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \ 688 aac_sg_table aac_srb)) 689 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry)) 690 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address)) 691 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf)) 692 693 int 694 _init(void) 695 { 696 int rval = 0; 697 698 #ifdef DEBUG 699 mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL); 700 #endif 701 DBCALLED(NULL, 1); 702 703 if ((rval = ddi_soft_state_init((void *)&aac_softstatep, 704 sizeof (struct aac_softstate), 0)) != 0) 705 goto error; 706 707 if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) { 708 ddi_soft_state_fini((void *)&aac_softstatep); 709 goto error; 710 } 711 712 if ((rval = mod_install(&aac_modlinkage)) != 0) { 713 ddi_soft_state_fini((void *)&aac_softstatep); 714 scsi_hba_fini(&aac_modlinkage); 715 goto error; 716 } 717 return (rval); 718 719 error: 720 AACDB_PRINT(NULL, CE_WARN, "Mod init error!"); 721 #ifdef DEBUG 722 mutex_destroy(&aac_prt_mutex); 723 #endif 724 return (rval); 725 } 726 727 int 728 _info(struct modinfo *modinfop) 729 { 730 DBCALLED(NULL, 1); 731 return (mod_info(&aac_modlinkage, modinfop)); 732 } 733 734 /* 735 * An HBA driver cannot be unload unless you reboot, 736 * so this function will be of no use. 737 */ 738 int 739 _fini(void) 740 { 741 int rval; 742 743 DBCALLED(NULL, 1); 744 745 if ((rval = mod_remove(&aac_modlinkage)) != 0) 746 goto error; 747 748 scsi_hba_fini(&aac_modlinkage); 749 ddi_soft_state_fini((void *)&aac_softstatep); 750 #ifdef DEBUG 751 mutex_destroy(&aac_prt_mutex); 752 #endif 753 return (0); 754 755 error: 756 AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!"); 757 return (rval); 758 } 759 760 static int 761 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 762 { 763 int instance, i; 764 struct aac_softstate *softs = NULL; 765 int attach_state = 0; 766 char *data; 767 768 DBCALLED(NULL, 1); 769 770 switch (cmd) { 771 case DDI_ATTACH: 772 break; 773 case DDI_RESUME: 774 return (DDI_FAILURE); 775 default: 776 return (DDI_FAILURE); 777 } 778 779 instance = ddi_get_instance(dip); 780 781 /* Get soft state */ 782 if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) { 783 AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state"); 784 goto error; 785 } 786 softs = ddi_get_soft_state(aac_softstatep, instance); 787 attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED; 788 789 softs->instance = instance; 790 softs->devinfo_p = dip; 791 softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr; 792 softs->addr_dma_attr.dma_attr_granular = 1; 793 softs->acc_attr = aac_acc_attr; 794 softs->reg_attr = aac_acc_attr; 795 softs->card = AAC_UNKNOWN_CARD; 796 #ifdef DEBUG 797 softs->debug_flags = aac_debug_flags; 798 softs->debug_fib_flags = aac_debug_fib_flags; 799 #endif 800 801 /* Initialize FMA */ 802 aac_fm_init(softs); 803 804 /* Check the card type */ 805 if (aac_check_card_type(softs) == AACERR) { 806 AACDB_PRINT(softs, CE_WARN, "Card not supported"); 807 goto error; 808 } 809 /* We have found the right card and everything is OK */ 810 attach_state |= AAC_ATTACH_CARD_DETECTED; 811 812 
/* Map PCI mem space */ 813 if (ddi_regs_map_setup(dip, 1, 814 (caddr_t *)&softs->pci_mem_base_vaddr, 0, 815 softs->map_size_min, &softs->reg_attr, 816 &softs->pci_mem_handle) != DDI_SUCCESS) 817 goto error; 818 819 softs->map_size = softs->map_size_min; 820 attach_state |= AAC_ATTACH_PCI_MEM_MAPPED; 821 822 AAC_DISABLE_INTR(softs); 823 824 /* Init mutexes and condvars */ 825 mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER, 826 DDI_INTR_PRI(softs->intr_pri)); 827 mutex_init(&softs->q_comp_mutex, NULL, MUTEX_DRIVER, 828 DDI_INTR_PRI(softs->intr_pri)); 829 mutex_init(&softs->time_mutex, NULL, MUTEX_DRIVER, 830 DDI_INTR_PRI(softs->intr_pri)); 831 mutex_init(&softs->ev_lock, NULL, MUTEX_DRIVER, 832 DDI_INTR_PRI(softs->intr_pri)); 833 mutex_init(&softs->aifq_mutex, NULL, 834 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri)); 835 cv_init(&softs->event, NULL, CV_DRIVER, NULL); 836 cv_init(&softs->sync_fib_cv, NULL, CV_DRIVER, NULL); 837 cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL); 838 cv_init(&softs->event_wait_cv, NULL, CV_DRIVER, NULL); 839 cv_init(&softs->event_disp_cv, NULL, CV_DRIVER, NULL); 840 cv_init(&softs->aifq_cv, NULL, CV_DRIVER, NULL); 841 attach_state |= AAC_ATTACH_KMUTEX_INITED; 842 843 /* Init the cmd queues */ 844 for (i = 0; i < AAC_CMDQ_NUM; i++) 845 aac_cmd_initq(&softs->q_wait[i]); 846 aac_cmd_initq(&softs->q_busy); 847 aac_cmd_initq(&softs->q_comp); 848 849 /* Check for legacy device naming support */ 850 softs->legacy = 1; /* default to use legacy name */ 851 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 852 "legacy-name-enable", &data) == DDI_SUCCESS)) { 853 if (strcmp(data, "no") == 0) { 854 AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled"); 855 softs->legacy = 0; 856 } 857 ddi_prop_free(data); 858 } 859 860 /* 861 * Everything has been set up till now, 862 * we will do some common attach. 863 */ 864 mutex_enter(&softs->io_lock); 865 if (aac_common_attach(softs) == AACERR) { 866 mutex_exit(&softs->io_lock); 867 goto error; 868 } 869 mutex_exit(&softs->io_lock); 870 attach_state |= AAC_ATTACH_COMM_SPACE_SETUP; 871 872 /* Check for buf breakup support */ 873 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 874 "breakup-enable", &data) == DDI_SUCCESS)) { 875 if (strcmp(data, "yes") == 0) { 876 AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled"); 877 softs->flags |= AAC_FLAGS_BRKUP; 878 } 879 ddi_prop_free(data); 880 } 881 softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer; 882 if (softs->flags & AAC_FLAGS_BRKUP) { 883 softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 884 DDI_PROP_DONTPASS, "dma-max", softs->dma_max); 885 } 886 887 if (aac_hba_setup(softs) != AACOK) 888 goto error; 889 attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP; 890 891 /* Create devctl/scsi nodes for cfgadm */ 892 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 893 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) { 894 AACDB_PRINT(softs, CE_WARN, "failed to create devctl node"); 895 goto error; 896 } 897 attach_state |= AAC_ATTACH_CREATE_DEVCTL; 898 899 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance), 900 DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 901 AACDB_PRINT(softs, CE_WARN, "failed to create scsi node"); 902 goto error; 903 } 904 attach_state |= AAC_ATTACH_CREATE_SCSI; 905 906 /* Create aac node for app. 
to issue ioctls */ 907 if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance), 908 DDI_PSEUDO, 0) != DDI_SUCCESS) { 909 AACDB_PRINT(softs, CE_WARN, "failed to create aac node"); 910 goto error; 911 } 912 913 /* Common attach is OK, so we are attached! */ 914 softs->state |= AAC_STATE_RUN; 915 916 /* Create event thread */ 917 softs->fibctx_p = &softs->aifctx; 918 if ((softs->event_thread = thread_create(NULL, 0, aac_event_thread, 919 softs, 0, &p0, TS_RUN, minclsyspri)) == NULL) { 920 AACDB_PRINT(softs, CE_WARN, "aif thread create failed"); 921 softs->state &= ~AAC_STATE_RUN; 922 goto error; 923 } 924 925 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 926 927 /* Create a thread for command timeout */ 928 softs->timeout_id = timeout(aac_timer, (void *)softs, 929 (aac_tick * drv_usectohz(1000000))); 930 931 /* Common attach is OK, so we are attached! */ 932 ddi_report_dev(dip); 933 AACDB_PRINT(softs, CE_NOTE, "aac attached ok"); 934 return (DDI_SUCCESS); 935 936 error: 937 if (attach_state & AAC_ATTACH_CREATE_SCSI) 938 ddi_remove_minor_node(dip, "scsi"); 939 if (attach_state & AAC_ATTACH_CREATE_DEVCTL) 940 ddi_remove_minor_node(dip, "devctl"); 941 if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP) 942 aac_common_detach(softs); 943 if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) { 944 (void) scsi_hba_detach(dip); 945 scsi_hba_tran_free(AAC_DIP2TRAN(dip)); 946 } 947 if (attach_state & AAC_ATTACH_KMUTEX_INITED) { 948 mutex_destroy(&softs->io_lock); 949 mutex_destroy(&softs->q_comp_mutex); 950 mutex_destroy(&softs->time_mutex); 951 mutex_destroy(&softs->ev_lock); 952 mutex_destroy(&softs->aifq_mutex); 953 cv_destroy(&softs->event); 954 cv_destroy(&softs->sync_fib_cv); 955 cv_destroy(&softs->drain_cv); 956 cv_destroy(&softs->event_wait_cv); 957 cv_destroy(&softs->event_disp_cv); 958 cv_destroy(&softs->aifq_cv); 959 } 960 if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED) 961 ddi_regs_map_free(&softs->pci_mem_handle); 962 aac_fm_fini(softs); 963 if (attach_state & AAC_ATTACH_CARD_DETECTED) 964 softs->card = AACERR; 965 if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED) 966 ddi_soft_state_free(aac_softstatep, instance); 967 return (DDI_FAILURE); 968 } 969 970 static int 971 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 972 { 973 scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip); 974 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 975 976 DBCALLED(softs, 1); 977 978 switch (cmd) { 979 case DDI_DETACH: 980 break; 981 case DDI_SUSPEND: 982 return (DDI_FAILURE); 983 default: 984 return (DDI_FAILURE); 985 } 986 987 mutex_enter(&softs->io_lock); 988 AAC_DISABLE_INTR(softs); 989 softs->state = AAC_STATE_STOPPED; 990 991 ddi_remove_minor_node(dip, "aac"); 992 ddi_remove_minor_node(dip, "scsi"); 993 ddi_remove_minor_node(dip, "devctl"); 994 mutex_exit(&softs->io_lock); 995 996 aac_common_detach(softs); 997 998 mutex_enter(&softs->io_lock); 999 (void) scsi_hba_detach(dip); 1000 scsi_hba_tran_free(tran); 1001 mutex_exit(&softs->io_lock); 1002 1003 /* Stop timer */ 1004 mutex_enter(&softs->time_mutex); 1005 if (softs->timeout_id) { 1006 timeout_id_t tid = softs->timeout_id; 1007 softs->timeout_id = 0; 1008 1009 mutex_exit(&softs->time_mutex); 1010 (void) untimeout(tid); 1011 mutex_enter(&softs->time_mutex); 1012 } 1013 mutex_exit(&softs->time_mutex); 1014 1015 /* Destroy event thread */ 1016 mutex_enter(&softs->ev_lock); 1017 cv_signal(&softs->event_disp_cv); 1018 cv_wait(&softs->event_wait_cv, &softs->ev_lock); 1019 mutex_exit(&softs->ev_lock); 1020 1021 cv_destroy(&softs->aifq_cv); 1022 
cv_destroy(&softs->event_disp_cv); 1023 cv_destroy(&softs->event_wait_cv); 1024 cv_destroy(&softs->drain_cv); 1025 cv_destroy(&softs->sync_fib_cv); 1026 cv_destroy(&softs->event); 1027 mutex_destroy(&softs->aifq_mutex); 1028 mutex_destroy(&softs->ev_lock); 1029 mutex_destroy(&softs->time_mutex); 1030 mutex_destroy(&softs->q_comp_mutex); 1031 mutex_destroy(&softs->io_lock); 1032 1033 ddi_regs_map_free(&softs->pci_mem_handle); 1034 aac_fm_fini(softs); 1035 softs->hwif = AAC_HWIF_UNKNOWN; 1036 softs->card = AAC_UNKNOWN_CARD; 1037 ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip)); 1038 1039 return (DDI_SUCCESS); 1040 } 1041 1042 /*ARGSUSED*/ 1043 static int 1044 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1045 { 1046 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1047 1048 DBCALLED(softs, 1); 1049 1050 mutex_enter(&softs->io_lock); 1051 AAC_DISABLE_INTR(softs); 1052 (void) aac_shutdown(softs); 1053 mutex_exit(&softs->io_lock); 1054 1055 return (DDI_SUCCESS); 1056 } 1057 1058 /* 1059 * quiesce(9E) entry point. 1060 * 1061 * This function is called when the system is single-threaded at high 1062 * PIL with preemption disabled. Therefore, this function must not be 1063 * blocked. 1064 * 1065 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 1066 * DDI_FAILURE indicates an error condition and should almost never happen. 1067 */ 1068 static int 1069 aac_quiesce(dev_info_t *dip) 1070 { 1071 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1072 1073 if (softs == NULL) 1074 return (DDI_FAILURE); 1075 1076 _NOTE(ASSUMING_PROTECTED(softs->state)) 1077 AAC_DISABLE_INTR(softs); 1078 1079 return (DDI_SUCCESS); 1080 } 1081 1082 /* ARGSUSED */ 1083 static int 1084 aac_getinfo(dev_info_t *self, ddi_info_cmd_t infocmd, void *arg, 1085 void **result) 1086 { 1087 int error = DDI_SUCCESS; 1088 1089 switch (infocmd) { 1090 case DDI_INFO_DEVT2INSTANCE: 1091 *result = (void *)(intptr_t)(MINOR2INST(getminor((dev_t)arg))); 1092 break; 1093 default: 1094 error = DDI_FAILURE; 1095 } 1096 return (error); 1097 } 1098 1099 /* 1100 * Bring the controller down to a dormant state and detach all child devices. 1101 * This function is called before detach or system shutdown. 1102 * Note: we can assume that the q_wait on the controller is empty, as we 1103 * won't allow shutdown if any device is open. 1104 */ 1105 static int 1106 aac_shutdown(struct aac_softstate *softs) 1107 { 1108 ddi_acc_handle_t acc; 1109 struct aac_close_command *cc; 1110 int rval; 1111 1112 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 1113 acc = softs->sync_ac.slotp->fib_acc_handle; 1114 1115 cc = (struct aac_close_command *)&softs->sync_ac.slotp->fibp->data[0]; 1116 1117 ddi_put32(acc, &cc->Command, VM_CloseAll); 1118 ddi_put32(acc, &cc->ContainerId, 0xfffffffful); 1119 1120 /* Flush all caches, set FW to write through mode */ 1121 rval = aac_sync_fib(softs, ContainerCommand, 1122 AAC_FIB_SIZEOF(struct aac_close_command)); 1123 aac_sync_fib_slot_release(softs, &softs->sync_ac); 1124 1125 AACDB_PRINT(softs, CE_NOTE, 1126 "shutting down aac %s", (rval == AACOK) ? 
"ok" : "fail"); 1127 return (rval); 1128 } 1129 1130 static uint_t 1131 aac_softintr(caddr_t arg) 1132 { 1133 struct aac_softstate *softs = (void *)arg; 1134 1135 if (!AAC_IS_Q_EMPTY(&softs->q_comp)) { 1136 aac_drain_comp_q(softs); 1137 } 1138 return (DDI_INTR_CLAIMED); 1139 } 1140 1141 /* 1142 * Setup auto sense data for pkt 1143 */ 1144 static void 1145 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key, 1146 uchar_t add_code, uchar_t qual_code, uint64_t info) 1147 { 1148 struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp); 1149 1150 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */ 1151 pkt->pkt_state |= STATE_ARQ_DONE; 1152 1153 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1154 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1155 arqstat->sts_rqpkt_resid = 0; 1156 arqstat->sts_rqpkt_state = 1157 STATE_GOT_BUS | 1158 STATE_GOT_TARGET | 1159 STATE_SENT_CMD | 1160 STATE_XFERRED_DATA; 1161 arqstat->sts_rqpkt_statistics = 0; 1162 1163 if (info <= 0xfffffffful) { 1164 arqstat->sts_sensedata.es_valid = 1; 1165 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE; 1166 arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT; 1167 arqstat->sts_sensedata.es_key = key; 1168 arqstat->sts_sensedata.es_add_code = add_code; 1169 arqstat->sts_sensedata.es_qual_code = qual_code; 1170 1171 arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF; 1172 arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF; 1173 arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF; 1174 arqstat->sts_sensedata.es_info_4 = info & 0xFF; 1175 } else { /* 64-bit LBA */ 1176 struct scsi_descr_sense_hdr *dsp; 1177 struct scsi_information_sense_descr *isd; 1178 1179 dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata; 1180 dsp->ds_class = CLASS_EXTENDED_SENSE; 1181 dsp->ds_code = CODE_FMT_DESCR_CURRENT; 1182 dsp->ds_key = key; 1183 dsp->ds_add_code = add_code; 1184 dsp->ds_qual_code = qual_code; 1185 dsp->ds_addl_sense_length = 1186 sizeof (struct scsi_information_sense_descr); 1187 1188 isd = (struct scsi_information_sense_descr *)(dsp+1); 1189 isd->isd_descr_type = DESCR_INFORMATION; 1190 isd->isd_valid = 1; 1191 isd->isd_information[0] = (info >> 56) & 0xFF; 1192 isd->isd_information[1] = (info >> 48) & 0xFF; 1193 isd->isd_information[2] = (info >> 40) & 0xFF; 1194 isd->isd_information[3] = (info >> 32) & 0xFF; 1195 isd->isd_information[4] = (info >> 24) & 0xFF; 1196 isd->isd_information[5] = (info >> 16) & 0xFF; 1197 isd->isd_information[6] = (info >> 8) & 0xFF; 1198 isd->isd_information[7] = (info) & 0xFF; 1199 } 1200 } 1201 1202 /* 1203 * Setup auto sense data for HARDWARE ERROR 1204 */ 1205 static void 1206 aac_set_arq_data_hwerr(struct aac_cmd *acp) 1207 { 1208 union scsi_cdb *cdbp; 1209 uint64_t err_blkno; 1210 1211 cdbp = (void *)acp->pkt->pkt_cdbp; 1212 err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp); 1213 aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno); 1214 } 1215 1216 /* 1217 * Send a command to the adapter in New Comm. 
interface 1218 */ 1219 static int 1220 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp) 1221 { 1222 uint32_t index, device; 1223 1224 index = PCI_MEM_GET32(softs, AAC_IQUE); 1225 if (index == 0xffffffffUL) { 1226 index = PCI_MEM_GET32(softs, AAC_IQUE); 1227 if (index == 0xffffffffUL) 1228 return (AACERR); 1229 } 1230 1231 device = index; 1232 PCI_MEM_PUT32(softs, device, 1233 (uint32_t)(slotp->fib_phyaddr & 0xfffffffful)); 1234 device += 4; 1235 PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32)); 1236 device += 4; 1237 PCI_MEM_PUT32(softs, device, slotp->acp->fib_size); 1238 PCI_MEM_PUT32(softs, AAC_IQUE, index); 1239 return (AACOK); 1240 } 1241 1242 static void 1243 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp) 1244 { 1245 struct aac_device *dvp = acp->dvp; 1246 int q = AAC_CMDQ(acp); 1247 1248 if (acp->slotp) { /* outstanding cmd */ 1249 if (!(acp->flags & AAC_CMD_IN_SYNC_SLOT)) { 1250 aac_release_slot(softs, acp->slotp); 1251 acp->slotp = NULL; 1252 } 1253 if (dvp) { 1254 dvp->ncmds[q]--; 1255 if (dvp->throttle[q] == AAC_THROTTLE_DRAIN && 1256 dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC) 1257 aac_set_throttle(softs, dvp, q, 1258 softs->total_slots); 1259 /* 1260 * Setup auto sense data for UNIT ATTENTION 1261 * Each lun should generate a unit attention 1262 * condition when reset. 1263 * Phys. drives are treated as logical ones 1264 * during error recovery. 1265 */ 1266 if (dvp->type == AAC_DEV_LD) { 1267 struct aac_container *ctp = 1268 (struct aac_container *)dvp; 1269 if (ctp->reset == 0) 1270 goto noreset; 1271 1272 AACDB_PRINT(softs, CE_NOTE, 1273 "Unit attention: reset"); 1274 ctp->reset = 0; 1275 aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 1276 0x29, 0x02, 0); 1277 } 1278 } 1279 noreset: 1280 softs->bus_ncmds[q]--; 1281 aac_cmd_delete(&softs->q_busy, acp); 1282 } else { /* cmd in waiting queue */ 1283 aac_cmd_delete(&softs->q_wait[q], acp); 1284 } 1285 1286 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */ 1287 mutex_enter(&softs->q_comp_mutex); 1288 aac_cmd_enqueue(&softs->q_comp, acp); 1289 mutex_exit(&softs->q_comp_mutex); 1290 } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */ 1291 cv_broadcast(&softs->event); 1292 } 1293 } 1294 1295 static void 1296 aac_handle_io(struct aac_softstate *softs, int index) 1297 { 1298 struct aac_slot *slotp; 1299 struct aac_cmd *acp; 1300 uint32_t fast; 1301 1302 fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE; 1303 index >>= 2; 1304 1305 /* Make sure firmware reported index is valid */ 1306 ASSERT(index >= 0 && index < softs->total_slots); 1307 slotp = &softs->io_slot[index]; 1308 ASSERT(slotp->index == index); 1309 acp = slotp->acp; 1310 1311 if (acp == NULL || acp->slotp != slotp) { 1312 cmn_err(CE_WARN, 1313 "Firmware error: invalid slot index received from FW"); 1314 return; 1315 } 1316 1317 acp->flags |= AAC_CMD_CMPLT; 1318 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU); 1319 1320 if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) { 1321 /* 1322 * For fast response IO, the firmware do not return any FIB 1323 * data, so we need to fill in the FIB status and state so that 1324 * FIB users can handle it correctly. 
1325 */ 1326 if (fast) { 1327 uint32_t state; 1328 1329 state = ddi_get32(slotp->fib_acc_handle, 1330 &slotp->fibp->Header.XferState); 1331 /* 1332 * Update state for CPU not for device, no DMA sync 1333 * needed 1334 */ 1335 ddi_put32(slotp->fib_acc_handle, 1336 &slotp->fibp->Header.XferState, 1337 state | AAC_FIBSTATE_DONEADAP); 1338 ddi_put32(slotp->fib_acc_handle, 1339 (void *)&slotp->fibp->data[0], ST_OK); 1340 } 1341 1342 /* Handle completed ac */ 1343 acp->ac_comp(softs, acp); 1344 } else { 1345 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 1346 acp->flags |= AAC_CMD_ERR; 1347 if (acp->pkt) { 1348 acp->pkt->pkt_reason = CMD_TRAN_ERR; 1349 acp->pkt->pkt_statistics = 0; 1350 } 1351 } 1352 aac_end_io(softs, acp); 1353 } 1354 1355 /* 1356 * Interrupt handler for the New Comm. interface 1357 * The New Comm. interface uses a different mechanism for interrupts: there are 1358 * no explicit message queues, and the driver only needs to access the mapped 1359 * PCI mem space to find the completed FIB or AIF. 1360 */ 1361 static int 1362 aac_process_intr_new(struct aac_softstate *softs) 1363 { 1364 uint32_t index; 1365 1366 index = AAC_OUTB_GET(softs); 1367 if (index == 0xfffffffful) 1368 index = AAC_OUTB_GET(softs); 1369 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 1370 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 1371 return (0); 1372 } 1373 if (index != 0xfffffffful) { 1374 do { 1375 if ((index & AAC_SENDERADDR_MASK_AIF) == 0) { 1376 aac_handle_io(softs, index); 1377 } else if (index != 0xfffffffeul) { 1378 struct aac_fib *fibp; /* FIB in AIF queue */ 1379 uint16_t fib_size; 1380 1381 /* 1382 * 0xfffffffe means that the controller wants 1383 * more work, ignore it for now. Otherwise, 1384 * AIF received. 1385 */ 1386 index &= ~2; 1387 1388 fibp = (struct aac_fib *)(softs-> \ 1389 pci_mem_base_vaddr + index); 1390 fib_size = PCI_MEM_GET16(softs, index + \ 1391 offsetof(struct aac_fib, Header.Size)); 1392 1393 aac_save_aif(softs, softs->pci_mem_handle, 1394 fibp, fib_size); 1395 1396 /* 1397 * AIF memory is owned by the adapter, so let it 1398 * know that we are done with it. 1399 */ 1400 AAC_OUTB_SET(softs, index); 1401 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY); 1402 } 1403 1404 index = AAC_OUTB_GET(softs); 1405 } while (index != 0xfffffffful); 1406 1407 /* 1408 * Process waiting cmds before start new ones to 1409 * ensure first IOs are serviced first. 1410 */ 1411 aac_start_waiting_io(softs); 1412 return (AAC_DB_COMMAND_READY); 1413 } else { 1414 return (0); 1415 } 1416 } 1417 1418 static uint_t 1419 aac_intr_new(caddr_t arg, caddr_t arg1 __unused) 1420 { 1421 struct aac_softstate *softs = (void *)arg; 1422 uint_t rval; 1423 1424 mutex_enter(&softs->io_lock); 1425 if (aac_process_intr_new(softs)) 1426 rval = DDI_INTR_CLAIMED; 1427 else 1428 rval = DDI_INTR_UNCLAIMED; 1429 mutex_exit(&softs->io_lock); 1430 1431 aac_drain_comp_q(softs); 1432 return (rval); 1433 } 1434 1435 /* 1436 * Interrupt handler for the old interface 1437 * Explicit message queues are used to send FIBs to and get completed FIBs from 1438 * the adapter. The driver and adapter maintain the queues in a producer/consumer 1439 * manner. The driver has to query the queues to find the completed FIBs.
1440 */ 1441 static int 1442 aac_process_intr_old(struct aac_softstate *softs) 1443 { 1444 uint16_t status; 1445 1446 status = AAC_STATUS_GET(softs); 1447 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 1448 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 1449 return (DDI_INTR_UNCLAIMED); 1450 } 1451 if (status & AAC_DB_RESPONSE_READY) { 1452 int slot_idx; 1453 1454 /* ACK the intr */ 1455 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY); 1456 (void) AAC_STATUS_GET(softs); 1457 while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q, 1458 &slot_idx) == AACOK) 1459 aac_handle_io(softs, slot_idx); 1460 1461 /* 1462 * Process waiting cmds before start new ones to 1463 * ensure first IOs are serviced first. 1464 */ 1465 aac_start_waiting_io(softs); 1466 return (AAC_DB_RESPONSE_READY); 1467 } else if (status & AAC_DB_COMMAND_READY) { 1468 int aif_idx; 1469 1470 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY); 1471 (void) AAC_STATUS_GET(softs); 1472 if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) == 1473 AACOK) { 1474 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 1475 struct aac_fib *fibp; /* FIB in communication space */ 1476 uint16_t fib_size; 1477 uint32_t fib_xfer_state; 1478 uint32_t addr, size; 1479 1480 ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS)); 1481 1482 #define AAC_SYNC_AIF(softs, aif_idx, type) \ 1483 { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \ 1484 offsetof(struct aac_comm_space, \ 1485 adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \ 1486 (type)); } 1487 1488 /* Copy AIF from adapter to the empty AIF slot */ 1489 AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU); 1490 fibp = &softs->comm_space->adapter_fibs[aif_idx]; 1491 fib_size = ddi_get16(acc, &fibp->Header.Size); 1492 1493 aac_save_aif(softs, acc, fibp, fib_size); 1494 1495 /* Complete AIF back to adapter with good status */ 1496 fib_xfer_state = LE_32(fibp->Header.XferState); 1497 if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) { 1498 ddi_put32(acc, &fibp->Header.XferState, 1499 fib_xfer_state | AAC_FIBSTATE_DONEHOST); 1500 ddi_put32(acc, (void *)&fibp->data[0], ST_OK); 1501 if (fib_size > AAC_FIB_SIZE) 1502 ddi_put16(acc, &fibp->Header.Size, 1503 AAC_FIB_SIZE); 1504 AAC_SYNC_AIF(softs, aif_idx, 1505 DDI_DMA_SYNC_FORDEV); 1506 } 1507 1508 /* Put the AIF response on the response queue */ 1509 addr = ddi_get32(acc, 1510 &softs->comm_space->adapter_fibs[aif_idx]. \ 1511 Header.SenderFibAddress); 1512 size = (uint32_t)ddi_get16(acc, 1513 &softs->comm_space->adapter_fibs[aif_idx]. \ 1514 Header.Size); 1515 ddi_put32(acc, 1516 &softs->comm_space->adapter_fibs[aif_idx]. 
\ 1517 Header.ReceiverFibAddress, addr); 1518 if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q, 1519 addr, size) == AACERR) 1520 cmn_err(CE_NOTE, "!AIF ack failed"); 1521 } 1522 return (AAC_DB_COMMAND_READY); 1523 } else if (status & AAC_DB_PRINTF_READY) { 1524 /* ACK the intr */ 1525 AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY); 1526 (void) AAC_STATUS_GET(softs); 1527 (void) ddi_dma_sync(softs->comm_space_dma_handle, 1528 offsetof(struct aac_comm_space, adapter_print_buf), 1529 AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU); 1530 if (aac_check_dma_handle(softs->comm_space_dma_handle) == 1531 DDI_SUCCESS) 1532 cmn_err(CE_NOTE, "MSG From Adapter: %s", 1533 softs->comm_space->adapter_print_buf); 1534 else 1535 ddi_fm_service_impact(softs->devinfo_p, 1536 DDI_SERVICE_UNAFFECTED); 1537 AAC_NOTIFY(softs, AAC_DB_PRINTF_READY); 1538 return (AAC_DB_PRINTF_READY); 1539 } else if (status & AAC_DB_COMMAND_NOT_FULL) { 1540 /* 1541 * Without these two condition statements, the OS could hang 1542 * after a while, especially if there are a lot of AIF's to 1543 * handle, for instance if a drive is pulled from an array 1544 * under heavy load. 1545 */ 1546 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1547 return (AAC_DB_COMMAND_NOT_FULL); 1548 } else if (status & AAC_DB_RESPONSE_NOT_FULL) { 1549 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1550 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL); 1551 return (AAC_DB_RESPONSE_NOT_FULL); 1552 } else { 1553 return (0); 1554 } 1555 } 1556 1557 static uint_t 1558 aac_intr_old(caddr_t arg, caddr_t arg1 __unused) 1559 { 1560 struct aac_softstate *softs = (void *)arg; 1561 int rval; 1562 1563 mutex_enter(&softs->io_lock); 1564 if (aac_process_intr_old(softs)) 1565 rval = DDI_INTR_CLAIMED; 1566 else 1567 rval = DDI_INTR_UNCLAIMED; 1568 mutex_exit(&softs->io_lock); 1569 1570 aac_drain_comp_q(softs); 1571 return (rval); 1572 } 1573 1574 /* 1575 * Query FIXED or MSI interrupts 1576 */ 1577 static int 1578 aac_query_intrs(struct aac_softstate *softs, int intr_type) 1579 { 1580 dev_info_t *dip = softs->devinfo_p; 1581 int avail, actual, count; 1582 int i, flag, ret; 1583 1584 AACDB_PRINT(softs, CE_NOTE, 1585 "aac_query_intrs:interrupt type 0x%x", intr_type); 1586 1587 /* Get number of interrupts */ 1588 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 1589 if ((ret != DDI_SUCCESS) || (count == 0)) { 1590 AACDB_PRINT(softs, CE_WARN, 1591 "ddi_intr_get_nintrs() failed, ret %d count %d", 1592 ret, count); 1593 return (DDI_FAILURE); 1594 } 1595 1596 /* Get number of available interrupts */ 1597 ret = ddi_intr_get_navail(dip, intr_type, &avail); 1598 if ((ret != DDI_SUCCESS) || (avail == 0)) { 1599 AACDB_PRINT(softs, CE_WARN, 1600 "ddi_intr_get_navail() failed, ret %d avail %d", 1601 ret, avail); 1602 return (DDI_FAILURE); 1603 } 1604 1605 AACDB_PRINT(softs, CE_NOTE, 1606 "ddi_intr_get_nvail returned %d, navail() returned %d", 1607 count, avail); 1608 1609 /* Allocate an array of interrupt handles */ 1610 softs->intr_size = count * sizeof (ddi_intr_handle_t); 1611 softs->htable = kmem_alloc(softs->intr_size, KM_SLEEP); 1612 1613 if (intr_type == DDI_INTR_TYPE_MSI) { 1614 count = 1; /* only one vector needed by now */ 1615 flag = DDI_INTR_ALLOC_STRICT; 1616 } else { /* must be DDI_INTR_TYPE_FIXED */ 1617 flag = DDI_INTR_ALLOC_NORMAL; 1618 } 1619 1620 /* Call ddi_intr_alloc() */ 1621 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0, 1622 count, &actual, flag); 1623 1624 if ((ret != DDI_SUCCESS) || (actual == 0)) { 1625 AACDB_PRINT(softs, CE_WARN, 1626 "ddi_intr_alloc() 
failed, ret = %d", ret); 1627 actual = 0; 1628 goto error; 1629 } 1630 1631 if (actual < count) { 1632 AACDB_PRINT(softs, CE_NOTE, 1633 "Requested: %d, Received: %d", count, actual); 1634 goto error; 1635 } 1636 1637 softs->intr_cnt = actual; 1638 1639 /* Get priority for first msi, assume remaining are all the same */ 1640 if ((ret = ddi_intr_get_pri(softs->htable[0], 1641 &softs->intr_pri)) != DDI_SUCCESS) { 1642 AACDB_PRINT(softs, CE_WARN, 1643 "ddi_intr_get_pri() failed, ret = %d", ret); 1644 goto error; 1645 } 1646 1647 /* Test for high level mutex */ 1648 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) { 1649 AACDB_PRINT(softs, CE_WARN, 1650 "aac_query_intrs: Hi level interrupt not supported"); 1651 goto error; 1652 } 1653 1654 return (DDI_SUCCESS); 1655 1656 error: 1657 /* Free already allocated intr */ 1658 for (i = 0; i < actual; i++) 1659 (void) ddi_intr_free(softs->htable[i]); 1660 1661 kmem_free(softs->htable, softs->intr_size); 1662 return (DDI_FAILURE); 1663 } 1664 1665 1666 /* 1667 * Register FIXED or MSI interrupts, and enable them 1668 */ 1669 static int 1670 aac_add_intrs(struct aac_softstate *softs) 1671 { 1672 int i, ret; 1673 int actual; 1674 ddi_intr_handler_t *aac_intr; 1675 1676 actual = softs->intr_cnt; 1677 aac_intr = ((softs->flags & AAC_FLAGS_NEW_COMM) ? 1678 aac_intr_new : aac_intr_old); 1679 1680 /* Call ddi_intr_add_handler() */ 1681 for (i = 0; i < actual; i++) { 1682 if ((ret = ddi_intr_add_handler(softs->htable[i], 1683 aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) { 1684 cmn_err(CE_WARN, 1685 "ddi_intr_add_handler() failed ret = %d", ret); 1686 1687 /* Free already allocated intr */ 1688 for (i = 0; i < actual; i++) 1689 (void) ddi_intr_free(softs->htable[i]); 1690 1691 kmem_free(softs->htable, softs->intr_size); 1692 return (DDI_FAILURE); 1693 } 1694 } 1695 1696 if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap)) 1697 != DDI_SUCCESS) { 1698 cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret); 1699 1700 /* Free already allocated intr */ 1701 for (i = 0; i < actual; i++) 1702 (void) ddi_intr_free(softs->htable[i]); 1703 1704 kmem_free(softs->htable, softs->intr_size); 1705 return (DDI_FAILURE); 1706 } 1707 1708 return (DDI_SUCCESS); 1709 } 1710 1711 /* 1712 * Unregister FIXED or MSI interrupts 1713 */ 1714 static void 1715 aac_remove_intrs(struct aac_softstate *softs) 1716 { 1717 int i; 1718 1719 /* Disable all interrupts */ 1720 (void) aac_disable_intrs(softs); 1721 /* Call ddi_intr_remove_handler() */ 1722 for (i = 0; i < softs->intr_cnt; i++) { 1723 (void) ddi_intr_remove_handler(softs->htable[i]); 1724 (void) ddi_intr_free(softs->htable[i]); 1725 } 1726 1727 kmem_free(softs->htable, softs->intr_size); 1728 } 1729 1730 static int 1731 aac_enable_intrs(struct aac_softstate *softs) 1732 { 1733 int rval = AACOK; 1734 1735 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1736 /* for MSI block enable */ 1737 if (ddi_intr_block_enable(softs->htable, softs->intr_cnt) != 1738 DDI_SUCCESS) 1739 rval = AACERR; 1740 } else { 1741 int i; 1742 1743 /* Call ddi_intr_enable() for legacy/MSI non block enable */ 1744 for (i = 0; i < softs->intr_cnt; i++) { 1745 if (ddi_intr_enable(softs->htable[i]) != DDI_SUCCESS) 1746 rval = AACERR; 1747 } 1748 } 1749 return (rval); 1750 } 1751 1752 static int 1753 aac_disable_intrs(struct aac_softstate *softs) 1754 { 1755 int rval = AACOK; 1756 1757 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1758 /* Call ddi_intr_block_disable() */ 1759 if (ddi_intr_block_disable(softs->htable, softs->intr_cnt) != 1760 
DDI_SUCCESS) 1761 rval = AACERR; 1762 } else { 1763 int i; 1764 1765 for (i = 0; i < softs->intr_cnt; i++) { 1766 if (ddi_intr_disable(softs->htable[i]) != DDI_SUCCESS) 1767 rval = AACERR; 1768 } 1769 } 1770 return (rval); 1771 } 1772 1773 /* 1774 * Set pkt_reason and OR in pkt_statistics flag 1775 */ 1776 static void 1777 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp, 1778 uchar_t reason, uint_t stat) 1779 { 1780 #ifndef __lock_lint 1781 _NOTE(ARGUNUSED(softs)) 1782 #endif 1783 if (acp->pkt->pkt_reason == CMD_CMPLT) 1784 acp->pkt->pkt_reason = reason; 1785 acp->pkt->pkt_statistics |= stat; 1786 } 1787 1788 /* 1789 * Handle a finished pkt of soft SCMD 1790 */ 1791 static void 1792 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp) 1793 { 1794 ASSERT(acp->pkt); 1795 1796 acp->flags |= AAC_CMD_CMPLT; 1797 1798 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \ 1799 STATE_SENT_CMD | STATE_GOT_STATUS; 1800 if (acp->pkt->pkt_state & STATE_XFERRED_DATA) 1801 acp->pkt->pkt_resid = 0; 1802 1803 /* AAC_CMD_NO_INTR means no complete callback */ 1804 if (!(acp->flags & AAC_CMD_NO_INTR)) { 1805 mutex_enter(&softs->q_comp_mutex); 1806 aac_cmd_enqueue(&softs->q_comp, acp); 1807 mutex_exit(&softs->q_comp_mutex); 1808 ddi_trigger_softintr(softs->softint_id); 1809 } 1810 } 1811 1812 /* 1813 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old() 1814 */ 1815 1816 /* 1817 * Handle completed logical device IO command 1818 */ 1819 /*ARGSUSED*/ 1820 static void 1821 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1822 { 1823 struct aac_slot *slotp = acp->slotp; 1824 struct aac_blockread_response *resp; 1825 uint32_t status; 1826 1827 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1828 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1829 1830 acp->pkt->pkt_state |= STATE_GOT_STATUS; 1831 1832 /* 1833 * block_read/write has a similar response header, use blockread 1834 * response for both. 1835 */ 1836 resp = (struct aac_blockread_response *)&slotp->fibp->data[0]; 1837 status = ddi_get32(slotp->fib_acc_handle, &resp->Status); 1838 if (status == ST_OK) { 1839 acp->pkt->pkt_resid = 0; 1840 acp->pkt->pkt_state |= STATE_XFERRED_DATA; 1841 } else { 1842 aac_set_arq_data_hwerr(acp); 1843 } 1844 } 1845 1846 /* 1847 * Handle completed phys. 
device IO command 1848 */ 1849 static void 1850 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1851 { 1852 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 1853 struct aac_fib *fibp = acp->slotp->fibp; 1854 struct scsi_pkt *pkt = acp->pkt; 1855 struct aac_srb_reply *resp; 1856 uint32_t resp_status; 1857 1858 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1859 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1860 1861 resp = (struct aac_srb_reply *)&fibp->data[0]; 1862 resp_status = ddi_get32(acc, &resp->status); 1863 1864 /* First check FIB status */ 1865 if (resp_status == ST_OK) { 1866 uint32_t scsi_status; 1867 uint32_t srb_status; 1868 uint32_t data_xfer_length; 1869 1870 scsi_status = ddi_get32(acc, &resp->scsi_status); 1871 srb_status = ddi_get32(acc, &resp->srb_status); 1872 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length); 1873 1874 *pkt->pkt_scbp = (uint8_t)scsi_status; 1875 pkt->pkt_state |= STATE_GOT_STATUS; 1876 if (scsi_status == STATUS_GOOD) { 1877 uchar_t cmd = ((union scsi_cdb *)(void *) 1878 (pkt->pkt_cdbp))->scc_cmd; 1879 1880 /* Next check SRB status */ 1881 switch (srb_status & 0x3f) { 1882 case SRB_STATUS_DATA_OVERRUN: 1883 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \ 1884 "scmd=%d, xfer=%d, buflen=%d", 1885 (uint32_t)cmd, data_xfer_length, 1886 acp->bcount); 1887 1888 switch (cmd) { 1889 case SCMD_READ: 1890 case SCMD_WRITE: 1891 case SCMD_READ_G1: 1892 case SCMD_WRITE_G1: 1893 case SCMD_READ_G4: 1894 case SCMD_WRITE_G4: 1895 case SCMD_READ_G5: 1896 case SCMD_WRITE_G5: 1897 aac_set_pkt_reason(softs, acp, 1898 CMD_DATA_OVR, 0); 1899 break; 1900 } 1901 /*FALLTHRU*/ 1902 case SRB_STATUS_ERROR_RECOVERY: 1903 case SRB_STATUS_PENDING: 1904 case SRB_STATUS_SUCCESS: 1905 /* 1906 * pkt_resid should only be calculated if the 1907 * status is ERROR_RECOVERY/PENDING/SUCCESS/ 1908 * OVERRUN/UNDERRUN 1909 */ 1910 if (data_xfer_length) { 1911 pkt->pkt_state |= STATE_XFERRED_DATA; 1912 pkt->pkt_resid = acp->bcount - \ 1913 data_xfer_length; 1914 ASSERT(pkt->pkt_resid >= 0); 1915 } 1916 break; 1917 case SRB_STATUS_ABORTED: 1918 AACDB_PRINT(softs, CE_NOTE, 1919 "SRB_STATUS_ABORTED, xfer=%d, resid=%d", 1920 data_xfer_length, pkt->pkt_resid); 1921 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 1922 STAT_ABORTED); 1923 break; 1924 case SRB_STATUS_ABORT_FAILED: 1925 AACDB_PRINT(softs, CE_NOTE, 1926 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \ 1927 "resid=%d", data_xfer_length, 1928 pkt->pkt_resid); 1929 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL, 1930 0); 1931 break; 1932 case SRB_STATUS_PARITY_ERROR: 1933 AACDB_PRINT(softs, CE_NOTE, 1934 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \ 1935 "resid=%d", data_xfer_length, 1936 pkt->pkt_resid); 1937 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0); 1938 break; 1939 case SRB_STATUS_NO_DEVICE: 1940 case SRB_STATUS_INVALID_PATH_ID: 1941 case SRB_STATUS_INVALID_TARGET_ID: 1942 case SRB_STATUS_INVALID_LUN: 1943 case SRB_STATUS_SELECTION_TIMEOUT: 1944 #ifdef DEBUG 1945 if (AAC_DEV_IS_VALID(acp->dvp)) { 1946 AACDB_PRINT(softs, CE_NOTE, 1947 "SRB_STATUS_NO_DEVICE(%d), " \ 1948 "xfer=%d, resid=%d ", 1949 srb_status & 0x3f, 1950 data_xfer_length, pkt->pkt_resid); 1951 } 1952 #endif 1953 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0); 1954 break; 1955 case SRB_STATUS_COMMAND_TIMEOUT: 1956 case SRB_STATUS_TIMEOUT: 1957 AACDB_PRINT(softs, CE_NOTE, 1958 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \ 1959 "resid=%d", data_xfer_length, 1960 pkt->pkt_resid); 1961 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 1962 STAT_TIMEOUT); 1963 break; 1964 case 
SRB_STATUS_BUS_RESET: 1965 AACDB_PRINT(softs, CE_NOTE, 1966 "SRB_STATUS_BUS_RESET, xfer=%d, " \ 1967 "resid=%d", data_xfer_length, 1968 pkt->pkt_resid); 1969 aac_set_pkt_reason(softs, acp, CMD_RESET, 1970 STAT_BUS_RESET); 1971 break; 1972 default: 1973 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \ 1974 "xfer=%d, resid=%d", srb_status & 0x3f, 1975 data_xfer_length, pkt->pkt_resid); 1976 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1977 break; 1978 } 1979 } else if (scsi_status == STATUS_CHECK) { 1980 /* CHECK CONDITION */ 1981 struct scsi_arq_status *arqstat = 1982 (void *)(pkt->pkt_scbp); 1983 uint32_t sense_data_size; 1984 1985 pkt->pkt_state |= STATE_ARQ_DONE; 1986 1987 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1988 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1989 arqstat->sts_rqpkt_resid = 0; 1990 arqstat->sts_rqpkt_state = 1991 STATE_GOT_BUS | 1992 STATE_GOT_TARGET | 1993 STATE_SENT_CMD | 1994 STATE_XFERRED_DATA; 1995 arqstat->sts_rqpkt_statistics = 0; 1996 1997 sense_data_size = ddi_get32(acc, 1998 &resp->sense_data_size); 1999 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE); 2000 AACDB_PRINT(softs, CE_NOTE, 2001 "CHECK CONDITION: sense len=%d, xfer len=%d", 2002 sense_data_size, data_xfer_length); 2003 2004 if (sense_data_size > SENSE_LENGTH) 2005 sense_data_size = SENSE_LENGTH; 2006 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata, 2007 (uint8_t *)resp->sense_data, sense_data_size, 2008 DDI_DEV_AUTOINCR); 2009 } else { 2010 AACDB_PRINT(softs, CE_WARN, "invaild scsi status: " \ 2011 "scsi_status=%d, srb_status=%d", 2012 scsi_status, srb_status); 2013 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 2014 } 2015 } else { 2016 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d", 2017 resp_status); 2018 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 2019 } 2020 } 2021 2022 /* 2023 * Handle completed IOCTL command 2024 */ 2025 /*ARGSUSED*/ 2026 void 2027 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2028 { 2029 struct aac_slot *slotp = acp->slotp; 2030 2031 /* 2032 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb() 2033 * may wait on softs->event, so use cv_broadcast() instead 2034 * of cv_signal(). 
2035 */ 2036 ASSERT(acp->flags & AAC_CMD_SYNC); 2037 ASSERT(acp->flags & AAC_CMD_NO_CB); 2038 2039 /* Get the size of the response FIB from its FIB.Header.Size field */ 2040 acp->fib_size = ddi_get16(slotp->fib_acc_handle, 2041 &slotp->fibp->Header.Size); 2042 2043 ASSERT(acp->fib_size <= softs->aac_max_fib_size); 2044 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp, 2045 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR); 2046 } 2047 2048 /* 2049 * Handle completed sync fib command 2050 */ 2051 /*ARGSUSED*/ 2052 void 2053 aac_sync_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2054 { 2055 } 2056 2057 /* 2058 * Handle completed Flush command 2059 */ 2060 /*ARGSUSED*/ 2061 static void 2062 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2063 { 2064 struct aac_slot *slotp = acp->slotp; 2065 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2066 struct aac_synchronize_reply *resp; 2067 uint32_t status; 2068 2069 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2070 2071 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2072 2073 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0]; 2074 status = ddi_get32(acc, &resp->Status); 2075 if (status != CT_OK) 2076 aac_set_arq_data_hwerr(acp); 2077 } 2078 2079 /*ARGSUSED*/ 2080 static void 2081 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2082 { 2083 struct aac_slot *slotp = acp->slotp; 2084 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2085 struct aac_Container_resp *resp; 2086 uint32_t status; 2087 2088 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2089 2090 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2091 2092 resp = (struct aac_Container_resp *)&slotp->fibp->data[0]; 2093 status = ddi_get32(acc, &resp->Status); 2094 if (status != 0) { 2095 AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit"); 2096 aac_set_arq_data_hwerr(acp); 2097 } 2098 } 2099 2100 /* 2101 * Access PCI space to see if the driver can support the card 2102 */ 2103 static int 2104 aac_check_card_type(struct aac_softstate *softs) 2105 { 2106 ddi_acc_handle_t pci_config_handle; 2107 int card_index; 2108 uint32_t pci_cmd; 2109 2110 /* Map pci configuration space */ 2111 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) != 2112 DDI_SUCCESS) { 2113 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space"); 2114 return (AACERR); 2115 } 2116 2117 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID); 2118 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID); 2119 softs->subvendid = pci_config_get16(pci_config_handle, 2120 PCI_CONF_SUBVENID); 2121 softs->subsysid = pci_config_get16(pci_config_handle, 2122 PCI_CONF_SUBSYSID); 2123 2124 card_index = 0; 2125 while (!CARD_IS_UNKNOWN(card_index)) { 2126 if ((aac_cards[card_index].vendor == softs->vendid) && 2127 (aac_cards[card_index].device == softs->devid) && 2128 (aac_cards[card_index].subvendor == softs->subvendid) && 2129 (aac_cards[card_index].subsys == softs->subsysid)) { 2130 break; 2131 } 2132 card_index++; 2133 } 2134 2135 softs->card = card_index; 2136 softs->hwif = aac_cards[card_index].hwif; 2137 2138 /* 2139 * Unknown aac card 2140 * do a generic match based on the VendorID and DeviceID to 2141 * support the new cards in the aac family 2142 */ 2143 if (CARD_IS_UNKNOWN(card_index)) { 2144 if (softs->vendid != 0x9005) { 2145 AACDB_PRINT(softs, CE_WARN, 2146 "Unknown vendor 0x%x", softs->vendid); 2147 goto error; 2148 } 2149 switch (softs->devid) { 2150 case 0x285: 2151 softs->hwif = AAC_HWIF_I960RX; 2152 break; 2153 case 0x286: 2154 
			softs->hwif = AAC_HWIF_RKT;
			break;
		default:
			AACDB_PRINT(softs, CE_WARN,
			    "Unknown device \"pci9005,%x\"", softs->devid);
			goto error;
		}
	}

	/* Set hardware dependent interface */
	switch (softs->hwif) {
	case AAC_HWIF_I960RX:
		softs->aac_if = aac_rx_interface;
		softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
		break;
	case AAC_HWIF_RKT:
		softs->aac_if = aac_rkt_interface;
		softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
		break;
	default:
		AACDB_PRINT(softs, CE_WARN,
		    "Unknown hardware interface %d", softs->hwif);
		goto error;
	}

	/* Set card names */
	(void) strncpy(softs->vendor_name, aac_cards[card_index].vid,
	    AAC_VENDOR_LEN);
	(void) strncpy(softs->product_name, aac_cards[card_index].desc,
	    AAC_PRODUCT_LEN);

	/* Set up quirks */
	softs->flags = aac_cards[card_index].quirks;

	/* Force the busmaster enable bit on */
	pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
	if ((pci_cmd & PCI_COMM_ME) == 0) {
		pci_cmd |= PCI_COMM_ME;
		pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
		pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
		if ((pci_cmd & PCI_COMM_ME) == 0) {
			cmn_err(CE_CONT, "?Cannot enable busmaster bit");
			goto error;
		}
	}

	/* Set memory base to map */
	softs->pci_mem_base_paddr = 0xfffffff0UL & \
	    pci_config_get32(pci_config_handle, PCI_CONF_BASE0);

	pci_config_teardown(&pci_config_handle);

	return (AACOK); /* card type detected */
error:
	pci_config_teardown(&pci_config_handle);
	return (AACERR); /* no matched card found */
}

/*
 * Do the usual interrupt handler setup stuff.
 */
static int
aac_register_intrs(struct aac_softstate *softs)
{
	dev_info_t *dip;
	int intr_types;

	ASSERT(softs->devinfo_p);
	dip = softs->devinfo_p;

	/* Get the type of device interrupts */
	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN,
		    "ddi_intr_get_supported_types() failed");
		return (AACERR);
	}
	AACDB_PRINT(softs, CE_NOTE,
	    "ddi_intr_get_supported_types() ret: 0x%x", intr_types);

	/* Query interrupt, and alloc/init all needed struct */
	if (intr_types & DDI_INTR_TYPE_MSI) {
		if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
		    != DDI_SUCCESS) {
			AACDB_PRINT(softs, CE_WARN,
			    "MSI interrupt query failed");
			return (AACERR);
		}
		softs->intr_type = DDI_INTR_TYPE_MSI;
	} else if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
		    != DDI_SUCCESS) {
			AACDB_PRINT(softs, CE_WARN,
			    "FIXED interrupt query failed");
			return (AACERR);
		}
		softs->intr_type = DDI_INTR_TYPE_FIXED;
	} else {
		AACDB_PRINT(softs, CE_WARN,
		    "Device does not support FIXED or MSI interrupts");
		return (AACERR);
	}

	/* Connect interrupt handlers */
	if (aac_add_intrs(softs) != DDI_SUCCESS) {
		AACDB_PRINT(softs, CE_WARN,
		    "Interrupt registration failed, intr type: %s",
		    softs->intr_type == DDI_INTR_TYPE_MSI ?
"MSI" : "FIXED"); 2261 return (AACERR); 2262 } 2263 (void) aac_enable_intrs(softs); 2264 2265 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id, 2266 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) { 2267 AACDB_PRINT(softs, CE_WARN, 2268 "Can not setup soft interrupt handler!"); 2269 aac_remove_intrs(softs); 2270 return (AACERR); 2271 } 2272 2273 return (AACOK); 2274 } 2275 2276 static void 2277 aac_unregister_intrs(struct aac_softstate *softs) 2278 { 2279 aac_remove_intrs(softs); 2280 ddi_remove_softintr(softs->softint_id); 2281 } 2282 2283 /* 2284 * Check the firmware to determine the features to support and the FIB 2285 * parameters to use. 2286 */ 2287 static int 2288 aac_check_firmware(struct aac_softstate *softs) 2289 { 2290 uint32_t options; 2291 uint32_t atu_size; 2292 ddi_acc_handle_t pci_handle; 2293 uint8_t *data; 2294 uint32_t max_fibs; 2295 uint32_t max_fib_size; 2296 uint32_t sg_tablesize; 2297 uint32_t max_sectors; 2298 uint32_t status; 2299 2300 max_fibs = 0; 2301 max_sectors = 0; 2302 sg_tablesize = 0; 2303 2304 /* Get supported options */ 2305 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0, 2306 &status)) != AACOK) { 2307 if (status != SRB_STATUS_INVALID_REQUEST) { 2308 cmn_err(CE_CONT, 2309 "?Fatal error: request adapter info error"); 2310 return (AACERR); 2311 } 2312 options = 0; 2313 atu_size = 0; 2314 } else { 2315 options = AAC_MAILBOX_GET(softs, 1); 2316 atu_size = AAC_MAILBOX_GET(softs, 2); 2317 } 2318 2319 if (softs->state & AAC_STATE_RESET) { 2320 if ((softs->support_opt == options) && 2321 (softs->atu_size == atu_size)) 2322 return (AACOK); 2323 2324 cmn_err(CE_WARN, 2325 "?Fatal error: firmware changed, system needs reboot"); 2326 return (AACERR); 2327 } 2328 2329 /* 2330 * The following critical settings are initialized only once during 2331 * driver attachment. 2332 */ 2333 softs->support_opt = options; 2334 softs->atu_size = atu_size; 2335 2336 /* Process supported options */ 2337 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 2338 (softs->flags & AAC_FLAGS_NO4GB) == 0) { 2339 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window"); 2340 softs->flags |= AAC_FLAGS_4GB_WINDOW; 2341 } else { 2342 /* 2343 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space 2344 * only. IO is handled by the DMA engine which does not suffer 2345 * from the ATU window programming workarounds necessary for 2346 * CPU copy operations. 
2347 */ 2348 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull; 2349 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull; 2350 } 2351 2352 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) { 2353 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address"); 2354 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 2355 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull; 2356 softs->flags |= AAC_FLAGS_SG_64BIT; 2357 } 2358 2359 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) { 2360 softs->flags |= AAC_FLAGS_ARRAY_64BIT; 2361 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size"); 2362 } 2363 2364 if (options & AAC_SUPPORTED_NONDASD) { 2365 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0, 2366 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) { 2367 if (strcmp((char *)data, "yes") == 0) { 2368 AACDB_PRINT(softs, CE_NOTE, 2369 "!Enable Non-DASD access"); 2370 softs->flags |= AAC_FLAGS_NONDASD; 2371 } 2372 ddi_prop_free(data); 2373 } 2374 } 2375 2376 /* Read preferred settings */ 2377 max_fib_size = 0; 2378 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF, 2379 0, 0, 0, 0, NULL)) == AACOK) { 2380 options = AAC_MAILBOX_GET(softs, 1); 2381 max_fib_size = (options & 0xffff); 2382 max_sectors = (options >> 16) << 1; 2383 options = AAC_MAILBOX_GET(softs, 2); 2384 sg_tablesize = (options >> 16); 2385 options = AAC_MAILBOX_GET(softs, 3); 2386 max_fibs = (options & 0xffff); 2387 } 2388 2389 /* Enable new comm. and rawio at the same time */ 2390 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) && 2391 (max_fib_size != 0)) { 2392 /* read out and save PCI MBR */ 2393 if ((atu_size > softs->map_size) && 2394 (ddi_regs_map_setup(softs->devinfo_p, 1, 2395 (caddr_t *)&data, 0, atu_size, &softs->reg_attr, 2396 &pci_handle) == DDI_SUCCESS)) { 2397 ddi_regs_map_free(&softs->pci_mem_handle); 2398 softs->pci_mem_handle = pci_handle; 2399 softs->pci_mem_base_vaddr = data; 2400 softs->map_size = atu_size; 2401 } 2402 if (atu_size == softs->map_size) { 2403 softs->flags |= AAC_FLAGS_NEW_COMM; 2404 AACDB_PRINT(softs, CE_NOTE, 2405 "!Enable New Comm. interface"); 2406 } 2407 } 2408 2409 /* Set FIB parameters */ 2410 if (softs->flags & AAC_FLAGS_NEW_COMM) { 2411 softs->aac_max_fibs = max_fibs; 2412 softs->aac_max_fib_size = max_fib_size; 2413 softs->aac_max_sectors = max_sectors; 2414 softs->aac_sg_tablesize = sg_tablesize; 2415 2416 softs->flags |= AAC_FLAGS_RAW_IO; 2417 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO"); 2418 } else { 2419 softs->aac_max_fibs = 2420 (softs->flags & AAC_FLAGS_256FIBS) ? 
256 : 512; 2421 softs->aac_max_fib_size = AAC_FIB_SIZE; 2422 softs->aac_max_sectors = 128; /* 64K */ 2423 if (softs->flags & AAC_FLAGS_17SG) 2424 softs->aac_sg_tablesize = 17; 2425 else if (softs->flags & AAC_FLAGS_34SG) 2426 softs->aac_sg_tablesize = 34; 2427 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2428 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2429 sizeof (struct aac_blockwrite64) + 2430 sizeof (struct aac_sg_entry64)) / 2431 sizeof (struct aac_sg_entry64); 2432 else 2433 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2434 sizeof (struct aac_blockwrite) + 2435 sizeof (struct aac_sg_entry)) / 2436 sizeof (struct aac_sg_entry); 2437 } 2438 2439 if ((softs->flags & AAC_FLAGS_RAW_IO) && 2440 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) { 2441 softs->flags |= AAC_FLAGS_LBA_64BIT; 2442 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array"); 2443 } 2444 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize; 2445 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9; 2446 /* 2447 * 64K maximum segment size in scatter gather list is controlled by 2448 * the NEW_COMM bit in the adapter information. If not set, the card 2449 * can only accept a maximum of 64K. It is not recommended to permit 2450 * more than 128KB of total transfer size to the adapters because 2451 * performance is negatively impacted. 2452 * 2453 * For new comm, segment size equals max xfer size. For old comm, 2454 * we use 64K for both. 2455 */ 2456 softs->buf_dma_attr.dma_attr_count_max = 2457 softs->buf_dma_attr.dma_attr_maxxfer - 1; 2458 2459 /* Setup FIB operations */ 2460 if (softs->flags & AAC_FLAGS_RAW_IO) 2461 softs->aac_cmd_fib = aac_cmd_fib_rawio; 2462 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2463 softs->aac_cmd_fib = aac_cmd_fib_brw64; 2464 else 2465 softs->aac_cmd_fib = aac_cmd_fib_brw; 2466 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? 
\ 2467 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32; 2468 2469 /* 64-bit LBA needs descriptor format sense data */ 2470 softs->slen = sizeof (struct scsi_arq_status); 2471 if ((softs->flags & AAC_FLAGS_LBA_64BIT) && 2472 softs->slen < AAC_ARQ64_LENGTH) 2473 softs->slen = AAC_ARQ64_LENGTH; 2474 2475 AACDB_PRINT(softs, CE_NOTE, 2476 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d", 2477 softs->aac_max_fibs, softs->aac_max_fib_size, 2478 softs->aac_max_sectors, softs->aac_sg_tablesize); 2479 2480 return (AACOK); 2481 } 2482 2483 static void 2484 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0, 2485 struct FsaRev *fsarev1) 2486 { 2487 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 2488 2489 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash); 2490 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type); 2491 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor); 2492 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major); 2493 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber); 2494 } 2495 2496 /* 2497 * The following function comes from Adaptec: 2498 * 2499 * Query adapter information and supplement adapter information 2500 */ 2501 static int 2502 aac_get_adapter_info(struct aac_softstate *softs, 2503 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr) 2504 { 2505 struct aac_cmd *acp = &softs->sync_ac; 2506 ddi_acc_handle_t acc; 2507 struct aac_fib *fibp; 2508 struct aac_adapter_info *ainfp; 2509 struct aac_supplement_adapter_info *sinfp; 2510 int rval; 2511 2512 (void) aac_sync_fib_slot_bind(softs, acp); 2513 acc = acp->slotp->fib_acc_handle; 2514 fibp = acp->slotp->fibp; 2515 2516 ddi_put8(acc, &fibp->data[0], 0); 2517 if (aac_sync_fib(softs, RequestAdapterInfo, 2518 AAC_FIB_SIZEOF(struct aac_adapter_info)) != AACOK) { 2519 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed"); 2520 rval = AACERR; 2521 goto finish; 2522 } 2523 ainfp = (struct aac_adapter_info *)fibp->data; 2524 if (ainfr) { 2525 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2526 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase); 2527 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture); 2528 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant); 2529 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed); 2530 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem); 2531 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem); 2532 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem); 2533 aac_fsa_rev(softs, &ainfp->KernelRevision, 2534 &ainfr->KernelRevision); 2535 aac_fsa_rev(softs, &ainfp->MonitorRevision, 2536 &ainfr->MonitorRevision); 2537 aac_fsa_rev(softs, &ainfp->HardwareRevision, 2538 &ainfr->HardwareRevision); 2539 aac_fsa_rev(softs, &ainfp->BIOSRevision, 2540 &ainfr->BIOSRevision); 2541 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled); 2542 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask); 2543 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber); 2544 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform); 2545 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2546 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant); 2547 } 2548 if (sinfr) { 2549 if (!(softs->support_opt & 2550 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) { 2551 AACDB_PRINT(softs, CE_WARN, 2552 "SupplementAdapterInfo not supported"); 2553 rval = AACERR; 2554 goto finish; 2555 } 2556 ddi_put8(acc, &fibp->data[0], 0); 2557 if (aac_sync_fib(softs, RequestSupplementAdapterInfo, 2558 AAC_FIB_SIZEOF(struct aac_supplement_adapter_info)) 2559 != AACOK) { 2560 AACDB_PRINT(softs, CE_WARN, 2561 
"RequestSupplementAdapterInfo failed"); 2562 rval = AACERR; 2563 goto finish; 2564 } 2565 sinfp = (struct aac_supplement_adapter_info *)fibp->data; 2566 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1); 2567 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2); 2568 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize); 2569 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId); 2570 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts); 2571 AAC_GET_FIELD32(acc, sinfr, sinfp, Version); 2572 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits); 2573 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber); 2574 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3); 2575 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12); 2576 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts); 2577 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo, 2578 sizeof (struct vpd_info)); 2579 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision, 2580 &sinfr->FlashFirmwareRevision); 2581 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions); 2582 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision, 2583 &sinfr->FlashFirmwareBootRevision); 2584 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo, 2585 MFG_PCBA_SERIAL_NUMBER_WIDTH); 2586 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0], 2587 MFG_WWN_WIDTH); 2588 AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2); 2589 AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag); 2590 if (sinfr->ExpansionFlag == 1) { 2591 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3); 2592 AAC_GET_FIELD32(acc, sinfr, sinfp, 2593 SupportedPerformanceMode); 2594 AAC_REP_GET_FIELD32(acc, sinfr, sinfp, 2595 ReservedGrowth[0], 80); 2596 } 2597 } 2598 rval = AACOK; 2599 finish: 2600 aac_sync_fib_slot_release(softs, acp); 2601 return (rval); 2602 } 2603 2604 static int 2605 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max, 2606 uint32_t *tgt_max) 2607 { 2608 struct aac_cmd *acp = &softs->sync_ac; 2609 ddi_acc_handle_t acc; 2610 struct aac_fib *fibp; 2611 struct aac_ctcfg *c_cmd; 2612 struct aac_ctcfg_resp *c_resp; 2613 uint32_t scsi_method_id; 2614 struct aac_bus_info *cmd; 2615 struct aac_bus_info_response *resp; 2616 int rval; 2617 2618 (void) aac_sync_fib_slot_bind(softs, acp); 2619 acc = acp->slotp->fib_acc_handle; 2620 fibp = acp->slotp->fibp; 2621 2622 /* Detect MethodId */ 2623 c_cmd = (struct aac_ctcfg *)&fibp->data[0]; 2624 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig); 2625 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD); 2626 ddi_put32(acc, &c_cmd->param, 0); 2627 rval = aac_sync_fib(softs, ContainerCommand, 2628 AAC_FIB_SIZEOF(struct aac_ctcfg)); 2629 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0]; 2630 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) { 2631 AACDB_PRINT(softs, CE_WARN, 2632 "VM_ContainerConfig command fail"); 2633 rval = AACERR; 2634 goto finish; 2635 } 2636 scsi_method_id = ddi_get32(acc, &c_resp->param); 2637 2638 /* Detect phys. bus count and max. target id first */ 2639 cmd = (struct aac_bus_info *)&fibp->data[0]; 2640 ddi_put32(acc, &cmd->Command, VM_Ioctl); 2641 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */ 2642 ddi_put32(acc, &cmd->MethodId, scsi_method_id); 2643 ddi_put32(acc, &cmd->ObjectId, 0); 2644 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo); 2645 /* 2646 * For VM_Ioctl, the firmware uses the Header.Size filled from the 2647 * driver as the size to be returned. Therefore the driver has to use 2648 * sizeof (struct aac_bus_info_response) because it is greater than 2649 * sizeof (struct aac_bus_info). 
	 */
	rval = aac_sync_fib(softs, ContainerCommand,
	    AAC_FIB_SIZEOF(struct aac_bus_info_response));
	resp = (struct aac_bus_info_response *)cmd;

	/* Scan all coordinates with INQUIRY */
	if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) {
		AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail");
		rval = AACERR;
		goto finish;
	}
	*bus_max = ddi_get32(acc, &resp->BusCount);
	*tgt_max = ddi_get32(acc, &resp->TargetsPerBus);

finish:
	aac_sync_fib_slot_release(softs, acp);
	return (rval);
}

/*
 * The following function comes from Adaptec:
 *
 * Routine to be called during initialization of communications with
 * the adapter to handle possible adapter configuration issues. When
 * the adapter first boots up, it examines attached drives, etc., and
 * potentially comes up with a new or revised configuration (relative to
 * what's stored in its NVRAM). Additionally it may discover problems
 * that make the current physical configuration unworkable (currently
 * applicable only to cluster configuration issues).
 *
 * If there are no configuration issues or the issues are considered
 * trivial by the adapter, it will set its configuration status to
 * "FSACT_CONTINUE" and execute the "commit configuration" action
 * automatically on its own.
 *
 * However, if there are non-trivial issues, the adapter will set its
 * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT"
 * and wait for some agent on the host to issue the "\ContainerCommand
 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
 * adapter to commit the new/updated configuration and enable
 * un-inhibited operation. The host agent should first issue the
 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
 * command to obtain information about config issues detected by
 * the adapter.
 *
 * Normally the adapter's PC BIOS will execute on the host following
 * adapter poweron and reset and will be responsible for querying the
 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
 * command if appropriate.
 *
 * However, with the introduction of IOP reset support, the adapter may
 * boot up without the benefit of the adapter's PC BIOS host agent.
 * This routine is intended to take care of these issues in situations
 * where BIOS doesn't execute following adapter poweron or reset. The
 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
 * there is no harm in doing this when it's already been done.
2706 */ 2707 static int 2708 aac_handle_adapter_config_issues(struct aac_softstate *softs) 2709 { 2710 struct aac_cmd *acp = &softs->sync_ac; 2711 ddi_acc_handle_t acc; 2712 struct aac_fib *fibp; 2713 struct aac_Container *cmd; 2714 struct aac_Container_resp *resp; 2715 struct aac_cf_status_header *cfg_sts_hdr; 2716 uint32_t resp_status; 2717 uint32_t ct_status; 2718 uint32_t cfg_stat_action; 2719 int rval; 2720 2721 (void) aac_sync_fib_slot_bind(softs, acp); 2722 acc = acp->slotp->fib_acc_handle; 2723 fibp = acp->slotp->fibp; 2724 2725 /* Get adapter config status */ 2726 cmd = (struct aac_Container *)&fibp->data[0]; 2727 2728 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2729 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2730 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS); 2731 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE], 2732 sizeof (struct aac_cf_status_header)); 2733 rval = aac_sync_fib(softs, ContainerCommand, 2734 AAC_FIB_SIZEOF(struct aac_Container)); 2735 resp = (struct aac_Container_resp *)cmd; 2736 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data; 2737 2738 resp_status = ddi_get32(acc, &resp->Status); 2739 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2740 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) { 2741 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action); 2742 2743 /* Commit configuration if it's reasonable to do so. */ 2744 if (cfg_stat_action <= CFACT_PAUSE) { 2745 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2746 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2747 ddi_put32(acc, &cmd->CTCommand.command, 2748 CT_COMMIT_CONFIG); 2749 rval = aac_sync_fib(softs, ContainerCommand, 2750 AAC_FIB_SIZEOF(struct aac_Container)); 2751 2752 resp_status = ddi_get32(acc, &resp->Status); 2753 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2754 if ((rval == AACOK) && (resp_status == 0) && 2755 (ct_status == CT_OK)) 2756 /* Successful completion */ 2757 rval = AACMPE_OK; 2758 else 2759 /* Auto-commit aborted due to error(s). */ 2760 rval = AACMPE_COMMIT_CONFIG; 2761 } else { 2762 /* 2763 * Auto-commit aborted due to adapter indicating 2764 * configuration issue(s) too dangerous to auto-commit. 2765 */ 2766 rval = AACMPE_CONFIG_STATUS; 2767 } 2768 } else { 2769 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted"); 2770 rval = AACMPE_CONFIG_STATUS; 2771 } 2772 2773 aac_sync_fib_slot_release(softs, acp); 2774 return (rval); 2775 } 2776 2777 /* 2778 * Hardware initialization and resource allocation 2779 */ 2780 static int 2781 aac_common_attach(struct aac_softstate *softs) 2782 { 2783 uint32_t status; 2784 int i; 2785 struct aac_supplement_adapter_info sinf; 2786 2787 DBCALLED(softs, 1); 2788 2789 /* 2790 * Do a little check here to make sure there aren't any outstanding 2791 * FIBs in the message queue. At this point there should not be and 2792 * if there are they are probably left over from another instance of 2793 * the driver like when the system crashes and the crash dump driver 2794 * gets loaded. 2795 */ 2796 while (AAC_OUTB_GET(softs) != 0xfffffffful) 2797 ; 2798 2799 /* 2800 * Wait the card to complete booting up before do anything that 2801 * attempts to communicate with it. 
2802 */ 2803 status = AAC_FWSTATUS_GET(softs); 2804 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC) 2805 goto error; 2806 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */ 2807 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i); 2808 if (i == 0) { 2809 cmn_err(CE_CONT, "?Fatal error: controller not ready"); 2810 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2811 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2812 goto error; 2813 } 2814 2815 /* Read and set card supported options and settings */ 2816 if (aac_check_firmware(softs) == AACERR) { 2817 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2818 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2819 goto error; 2820 } 2821 2822 /* Add interrupt handlers */ 2823 if (aac_register_intrs(softs) == AACERR) { 2824 cmn_err(CE_CONT, 2825 "?Fatal error: interrupts register failed"); 2826 goto error; 2827 } 2828 2829 /* Setup communication space with the card */ 2830 if (softs->comm_space_dma_handle == NULL) { 2831 if (aac_alloc_comm_space(softs) != AACOK) 2832 goto error; 2833 } 2834 if (aac_setup_comm_space(softs) != AACOK) { 2835 cmn_err(CE_CONT, "?Setup communication space failed"); 2836 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2837 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2838 goto error; 2839 } 2840 2841 #ifdef DEBUG 2842 if (aac_get_fw_debug_buffer(softs) != AACOK) 2843 cmn_err(CE_CONT, "?firmware UART trace not supported"); 2844 #endif 2845 2846 /* Allocate slots */ 2847 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) { 2848 cmn_err(CE_CONT, "?Fatal error: slots allocate failed"); 2849 goto error; 2850 } 2851 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots); 2852 2853 /* Allocate FIBs */ 2854 if (softs->total_fibs < softs->total_slots) { 2855 aac_alloc_fibs(softs); 2856 if (softs->total_fibs == 0) 2857 goto error; 2858 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated", 2859 softs->total_fibs); 2860 } 2861 2862 AAC_STATUS_CLR(softs, ~0); /* Clear out all interrupts */ 2863 AAC_ENABLE_INTR(softs); /* Enable the interrupts we can handle */ 2864 2865 if (aac_get_adapter_info(softs, NULL, &sinf) == AACOK) { 2866 softs->feature_bits = sinf.FeatureBits; 2867 softs->support_opt2 = sinf.SupportedOptions2; 2868 2869 /* Get adapter names */ 2870 if (CARD_IS_UNKNOWN(softs->card)) { 2871 char *p, *p0, *p1; 2872 2873 /* 2874 * Now find the controller name in supp_adapter_info-> 2875 * AdapterTypeText. Use the first word as the vendor 2876 * and the other words as the product name. 
2877 */ 2878 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = " 2879 "\"%s\"", sinf.AdapterTypeText); 2880 p = sinf.AdapterTypeText; 2881 p0 = p1 = NULL; 2882 /* Skip heading spaces */ 2883 while (*p && (*p == ' ' || *p == '\t')) 2884 p++; 2885 p0 = p; 2886 while (*p && (*p != ' ' && *p != '\t')) 2887 p++; 2888 /* Remove middle spaces */ 2889 while (*p && (*p == ' ' || *p == '\t')) 2890 *p++ = 0; 2891 p1 = p; 2892 /* Remove trailing spaces */ 2893 p = p1 + strlen(p1) - 1; 2894 while (p > p1 && (*p == ' ' || *p == '\t')) 2895 *p-- = 0; 2896 if (*p0 && *p1) { 2897 (void *)strncpy(softs->vendor_name, p0, 2898 AAC_VENDOR_LEN); 2899 (void *)strncpy(softs->product_name, p1, 2900 AAC_PRODUCT_LEN); 2901 } else { 2902 cmn_err(CE_WARN, 2903 "?adapter name mis-formatted\n"); 2904 if (*p0) 2905 (void *)strncpy(softs->product_name, 2906 p0, AAC_PRODUCT_LEN); 2907 } 2908 } 2909 } else { 2910 cmn_err(CE_CONT, "?Query adapter information failed"); 2911 } 2912 2913 2914 cmn_err(CE_NOTE, 2915 "!aac driver %d.%02d.%02d-%d, found card: " \ 2916 "%s %s(pci0x%x.%x.%x.%x) at 0x%x", 2917 AAC_DRIVER_MAJOR_VERSION, 2918 AAC_DRIVER_MINOR_VERSION, 2919 AAC_DRIVER_BUGFIX_LEVEL, 2920 AAC_DRIVER_BUILD, 2921 softs->vendor_name, softs->product_name, 2922 softs->vendid, softs->devid, softs->subvendid, softs->subsysid, 2923 softs->pci_mem_base_paddr); 2924 2925 /* Perform acceptance of adapter-detected config changes if possible */ 2926 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) { 2927 cmn_err(CE_CONT, "?Handle adapter config issues failed"); 2928 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2929 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2930 goto error; 2931 } 2932 2933 /* Setup containers (logical devices) */ 2934 if (aac_probe_containers(softs) != AACOK) { 2935 cmn_err(CE_CONT, "?Fatal error: get container info error"); 2936 goto error; 2937 } 2938 2939 /* Check for JBOD support. Default disable */ 2940 char *data; 2941 if (softs->feature_bits & AAC_FEATURE_SUPPORTED_JBOD) { 2942 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 2943 0, "jbod-enable", &data) == DDI_SUCCESS)) { 2944 if (strcmp(data, "yes") == 0) { 2945 AACDB_PRINT(softs, CE_NOTE, 2946 "Enable JBOD access"); 2947 softs->flags |= AAC_FLAGS_JBOD; 2948 } 2949 ddi_prop_free(data); 2950 } 2951 } 2952 2953 /* Setup phys. 
devices */ 2954 if (softs->flags & (AAC_FLAGS_NONDASD | AAC_FLAGS_JBOD)) { 2955 uint32_t bus_max, tgt_max; 2956 uint32_t bus, tgt; 2957 int index; 2958 2959 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) { 2960 cmn_err(CE_CONT, "?Fatal error: get bus info error"); 2961 goto error; 2962 } 2963 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d", 2964 bus_max, tgt_max); 2965 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) { 2966 if (softs->state & AAC_STATE_RESET) { 2967 cmn_err(CE_WARN, 2968 "?Fatal error: bus map changed"); 2969 goto error; 2970 } 2971 softs->bus_max = bus_max; 2972 softs->tgt_max = tgt_max; 2973 if (softs->nondasds) { 2974 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2975 sizeof (struct aac_nondasd)); 2976 } 2977 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \ 2978 sizeof (struct aac_nondasd), KM_SLEEP); 2979 2980 index = 0; 2981 for (bus = 0; bus < softs->bus_max; bus++) { 2982 for (tgt = 0; tgt < softs->tgt_max; tgt++) { 2983 struct aac_nondasd *dvp = 2984 &softs->nondasds[index++]; 2985 dvp->dev.type = AAC_DEV_PD; 2986 dvp->bus = bus; 2987 dvp->tid = tgt; 2988 } 2989 } 2990 } 2991 } 2992 2993 /* Check dma & acc handles allocated in attach */ 2994 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) { 2995 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2996 goto error; 2997 } 2998 2999 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 3000 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 3001 goto error; 3002 } 3003 3004 for (i = 0; i < softs->total_slots; i++) { 3005 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) != 3006 DDI_SUCCESS) { 3007 ddi_fm_service_impact(softs->devinfo_p, 3008 DDI_SERVICE_LOST); 3009 goto error; 3010 } 3011 } 3012 3013 return (AACOK); 3014 error: 3015 if (softs->state & AAC_STATE_RESET) 3016 return (AACERR); 3017 if (softs->nondasds) { 3018 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 3019 sizeof (struct aac_nondasd)); 3020 softs->nondasds = NULL; 3021 } 3022 if (softs->total_fibs > 0) 3023 aac_destroy_fibs(softs); 3024 if (softs->total_slots > 0) 3025 aac_destroy_slots(softs); 3026 if (softs->comm_space_dma_handle) 3027 aac_free_comm_space(softs); 3028 return (AACERR); 3029 } 3030 3031 /* 3032 * Hardware shutdown and resource release 3033 */ 3034 static void 3035 aac_common_detach(struct aac_softstate *softs) 3036 { 3037 DBCALLED(softs, 1); 3038 3039 aac_unregister_intrs(softs); 3040 3041 mutex_enter(&softs->io_lock); 3042 (void) aac_shutdown(softs); 3043 3044 if (softs->nondasds) { 3045 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 3046 sizeof (struct aac_nondasd)); 3047 softs->nondasds = NULL; 3048 } 3049 aac_destroy_fibs(softs); 3050 aac_destroy_slots(softs); 3051 aac_free_comm_space(softs); 3052 mutex_exit(&softs->io_lock); 3053 } 3054 3055 /* 3056 * Send a synchronous command to the controller and wait for a result. 3057 * Indicate if the controller completed the command with an error status. 
3058 */ 3059 int 3060 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd, 3061 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, 3062 uint32_t *statusp) 3063 { 3064 int timeout; 3065 uint32_t status; 3066 3067 if (statusp != NULL) 3068 *statusp = SRB_STATUS_SUCCESS; 3069 3070 /* Fill in mailbox */ 3071 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3); 3072 3073 /* Ensure the sync command doorbell flag is cleared */ 3074 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 3075 3076 /* Then set it to signal the adapter */ 3077 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND); 3078 3079 /* Spin waiting for the command to complete */ 3080 timeout = AAC_IMMEDIATE_TIMEOUT * 1000; 3081 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout); 3082 if (!timeout) { 3083 AACDB_PRINT(softs, CE_WARN, 3084 "Sync command timed out after %d seconds (0x%x)!", 3085 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs)); 3086 return (AACERR); 3087 } 3088 3089 /* Clear the completion flag */ 3090 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 3091 3092 /* Get the command status */ 3093 status = AAC_MAILBOX_GET(softs, 0); 3094 if (statusp != NULL) 3095 *statusp = status; 3096 if (status != SRB_STATUS_SUCCESS) { 3097 AACDB_PRINT(softs, CE_WARN, 3098 "Sync command fail: status = 0x%x", status); 3099 return (AACERR); 3100 } 3101 3102 return (AACOK); 3103 } 3104 3105 /* 3106 * Send a synchronous FIB to the adapter and wait for its completion 3107 */ 3108 static int 3109 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize) 3110 { 3111 struct aac_cmd *acp = &softs->sync_ac; 3112 3113 acp->flags = AAC_CMD_SYNC | AAC_CMD_IN_SYNC_SLOT; 3114 if (softs->state & AAC_STATE_INTR) 3115 acp->flags |= AAC_CMD_NO_CB; 3116 else 3117 acp->flags |= AAC_CMD_NO_INTR; 3118 3119 acp->ac_comp = aac_sync_complete; 3120 acp->timeout = AAC_SYNC_TIMEOUT; 3121 acp->fib_size = fibsize; 3122 3123 /* 3124 * Only need to setup sync fib header, caller should have init 3125 * fib data 3126 */ 3127 aac_cmd_fib_header(softs, acp, cmd); 3128 3129 (void) ddi_dma_sync(acp->slotp->fib_dma_handle, 0, fibsize, 3130 DDI_DMA_SYNC_FORDEV); 3131 3132 aac_start_io(softs, acp); 3133 3134 if (softs->state & AAC_STATE_INTR) 3135 return (aac_do_sync_io(softs, acp)); 3136 else 3137 return (aac_do_poll_io(softs, acp)); 3138 } 3139 3140 static void 3141 aac_cmd_initq(struct aac_cmd_queue *q) 3142 { 3143 q->q_head = NULL; 3144 q->q_tail = (struct aac_cmd *)&q->q_head; 3145 } 3146 3147 /* 3148 * Remove a cmd from the head of q 3149 */ 3150 static struct aac_cmd * 3151 aac_cmd_dequeue(struct aac_cmd_queue *q) 3152 { 3153 struct aac_cmd *acp; 3154 3155 _NOTE(ASSUMING_PROTECTED(*q)) 3156 3157 if ((acp = q->q_head) != NULL) { 3158 if ((q->q_head = acp->next) != NULL) 3159 acp->next = NULL; 3160 else 3161 q->q_tail = (struct aac_cmd *)&q->q_head; 3162 acp->prev = NULL; 3163 } 3164 return (acp); 3165 } 3166 3167 /* 3168 * Add a cmd to the tail of q 3169 */ 3170 static void 3171 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp) 3172 { 3173 ASSERT(acp->next == NULL); 3174 acp->prev = q->q_tail; 3175 q->q_tail->next = acp; 3176 q->q_tail = acp; 3177 } 3178 3179 /* 3180 * Remove the cmd ac from q 3181 */ 3182 static void 3183 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp) 3184 { 3185 if (acp->prev) { 3186 if ((acp->prev->next = acp->next) != NULL) { 3187 acp->next->prev = acp->prev; 3188 acp->next = NULL; 3189 } else { 3190 q->q_tail = acp->prev; 3191 } 3192 acp->prev = NULL; 3193 } 3194 /* ac is not in the queue */ 3195 } 
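
/*
 * Note on the aac_cmd_queue helpers above (a reading of the code, not a
 * documented contract): q_tail is never NULL.  aac_cmd_initq() points it
 * at &q->q_head cast to a fake command, so aac_cmd_enqueue() can always
 * append through q_tail->next without an empty-queue special case, and
 * aac_cmd_dequeue() restores that sentinel once the last command has been
 * removed.  aac_cmd_delete() treats a non-NULL acp->prev as the sign that
 * the command is actually linked on a queue and does nothing otherwise.
 */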
3196 3197 /* 3198 * Atomically insert an entry into the nominated queue, returns 0 on success or 3199 * AACERR if the queue is full. 3200 * 3201 * Note: it would be more efficient to defer notifying the controller in 3202 * the case where we may be inserting several entries in rapid succession, 3203 * but implementing this usefully may be difficult (it would involve a 3204 * separate queue/notify interface). 3205 */ 3206 static int 3207 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr, 3208 uint32_t fib_size) 3209 { 3210 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3211 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3212 uint32_t pi, ci; 3213 3214 DBCALLED(softs, 2); 3215 3216 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q); 3217 3218 /* Get the producer/consumer indices */ 3219 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3220 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3221 DDI_DMA_SYNC_FORCPU); 3222 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3223 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3224 return (AACERR); 3225 } 3226 3227 pi = ddi_get32(acc, 3228 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3229 ci = ddi_get32(acc, 3230 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3231 3232 /* 3233 * Wrap the queue first before we check the queue to see 3234 * if it is full 3235 */ 3236 if (pi >= aac_qinfo[queue].size) 3237 pi = 0; 3238 3239 /* XXX queue full */ 3240 if ((pi + 1) == ci) 3241 return (AACERR); 3242 3243 /* Fill in queue entry */ 3244 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size); 3245 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr); 3246 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3247 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3248 DDI_DMA_SYNC_FORDEV); 3249 3250 /* Update producer index */ 3251 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX], 3252 pi + 1); 3253 (void) ddi_dma_sync(dma, 3254 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \ 3255 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3256 DDI_DMA_SYNC_FORDEV); 3257 3258 if (aac_qinfo[queue].notify != 0) 3259 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3260 return (AACOK); 3261 } 3262 3263 /* 3264 * Atomically remove one entry from the nominated queue, returns 0 on 3265 * success or AACERR if the queue is empty. 
3266 */ 3267 static int 3268 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp) 3269 { 3270 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3271 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3272 uint32_t pi, ci; 3273 int unfull = 0; 3274 3275 DBCALLED(softs, 2); 3276 3277 ASSERT(idxp); 3278 3279 /* Get the producer/consumer indices */ 3280 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3281 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3282 DDI_DMA_SYNC_FORCPU); 3283 pi = ddi_get32(acc, 3284 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3285 ci = ddi_get32(acc, 3286 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3287 3288 /* Check for queue empty */ 3289 if (ci == pi) 3290 return (AACERR); 3291 3292 if (pi >= aac_qinfo[queue].size) 3293 pi = 0; 3294 3295 /* Check for queue full */ 3296 if (ci == pi + 1) 3297 unfull = 1; 3298 3299 /* 3300 * The controller does not wrap the queue, 3301 * so we have to do it by ourselves 3302 */ 3303 if (ci >= aac_qinfo[queue].size) 3304 ci = 0; 3305 3306 /* Fetch the entry */ 3307 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3308 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3309 DDI_DMA_SYNC_FORCPU); 3310 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3311 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3312 return (AACERR); 3313 } 3314 3315 switch (queue) { 3316 case AAC_HOST_NORM_RESP_Q: 3317 case AAC_HOST_HIGH_RESP_Q: 3318 *idxp = ddi_get32(acc, 3319 &(softs->qentries[queue] + ci)->aq_fib_addr); 3320 break; 3321 3322 case AAC_HOST_NORM_CMD_Q: 3323 case AAC_HOST_HIGH_CMD_Q: 3324 *idxp = ddi_get32(acc, 3325 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE; 3326 break; 3327 3328 default: 3329 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()"); 3330 return (AACERR); 3331 } 3332 3333 /* Update consumer index */ 3334 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX], 3335 ci + 1); 3336 (void) ddi_dma_sync(dma, 3337 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \ 3338 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3339 DDI_DMA_SYNC_FORDEV); 3340 3341 if (unfull && aac_qinfo[queue].notify != 0) 3342 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3343 return (AACOK); 3344 } 3345 3346 static struct aac_mntinforesp * 3347 aac_get_mntinfo(struct aac_softstate *softs, int cid) 3348 { 3349 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3350 struct aac_fib *fibp = softs->sync_ac.slotp->fibp; 3351 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0]; 3352 struct aac_mntinforesp *mir; 3353 3354 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */ 3355 (softs->flags & AAC_FLAGS_LBA_64BIT) ? 
3356 VM_NameServe64 : VM_NameServe); 3357 ddi_put32(acc, &mi->MntType, FT_FILESYS); 3358 ddi_put32(acc, &mi->MntCount, cid); 3359 3360 if (aac_sync_fib(softs, ContainerCommand, 3361 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) { 3362 AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid); 3363 return (NULL); 3364 } 3365 3366 mir = (struct aac_mntinforesp *)&fibp->data[0]; 3367 if (ddi_get32(acc, &mir->Status) == ST_OK) 3368 return (mir); 3369 return (NULL); 3370 } 3371 3372 static int 3373 aac_get_container_count(struct aac_softstate *softs, int *count) 3374 { 3375 ddi_acc_handle_t acc; 3376 struct aac_mntinforesp *mir; 3377 int rval; 3378 3379 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 3380 acc = softs->sync_ac.slotp->fib_acc_handle; 3381 3382 if ((mir = aac_get_mntinfo(softs, 0)) == NULL) { 3383 rval = AACERR; 3384 goto finish; 3385 } 3386 *count = ddi_get32(acc, &mir->MntRespCount); 3387 if (*count > AAC_MAX_LD) { 3388 AACDB_PRINT(softs, CE_CONT, 3389 "container count(%d) > AAC_MAX_LD", *count); 3390 rval = AACERR; 3391 goto finish; 3392 } 3393 rval = AACOK; 3394 3395 finish: 3396 aac_sync_fib_slot_release(softs, &softs->sync_ac); 3397 return (rval); 3398 } 3399 3400 static int 3401 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid) 3402 { 3403 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3404 struct aac_Container *ct = (struct aac_Container *) \ 3405 &softs->sync_ac.slotp->fibp->data[0]; 3406 3407 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE); 3408 ddi_put32(acc, &ct->Command, VM_ContainerConfig); 3409 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID); 3410 ddi_put32(acc, &ct->CTCommand.param[0], cid); 3411 3412 if (aac_sync_fib(softs, ContainerCommand, 3413 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR) 3414 return (AACERR); 3415 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK) 3416 return (AACERR); 3417 3418 *uid = ddi_get32(acc, &ct->CTCommand.param[1]); 3419 return (AACOK); 3420 } 3421 3422 /* 3423 * Request information of the container cid 3424 */ 3425 static struct aac_mntinforesp * 3426 aac_get_container_info(struct aac_softstate *softs, int cid) 3427 { 3428 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3429 struct aac_mntinforesp *mir; 3430 int rval_uid; 3431 uint32_t uid; 3432 3433 /* Get container UID first so that it will not overwrite mntinfo */ 3434 rval_uid = aac_get_container_uid(softs, cid, &uid); 3435 3436 /* Get container basic info */ 3437 if ((mir = aac_get_mntinfo(softs, cid)) == NULL) { 3438 AACDB_PRINT(softs, CE_CONT, 3439 "query container %d info failed", cid); 3440 return (NULL); 3441 } 3442 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) 3443 return (mir); 3444 if (rval_uid != AACOK) { 3445 AACDB_PRINT(softs, CE_CONT, 3446 "query container %d uid failed", cid); 3447 return (NULL); 3448 } 3449 3450 ddi_put32(acc, &mir->Status, uid); 3451 return (mir); 3452 } 3453 3454 static enum aac_cfg_event 3455 aac_probe_container(struct aac_softstate *softs, uint32_t cid) 3456 { 3457 enum aac_cfg_event event = AAC_CFG_NULL_NOEXIST; 3458 struct aac_container *dvp = &softs->containers[cid]; 3459 struct aac_mntinforesp *mir; 3460 ddi_acc_handle_t acc; 3461 3462 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 3463 acc = softs->sync_ac.slotp->fib_acc_handle; 3464 3465 /* Get container basic info */ 3466 if ((mir = aac_get_container_info(softs, cid)) == NULL) { 3467 /* AAC_CFG_NULL_NOEXIST */ 3468 goto finish; 3469 } 3470 3471 if (ddi_get32(acc, &mir->MntObj.VolType) == 
CT_NONE) { 3472 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3473 AACDB_PRINT(softs, CE_NOTE, 3474 ">>> Container %d deleted", cid); 3475 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3476 event = AAC_CFG_DELETE; 3477 } 3478 /* AAC_CFG_NULL_NOEXIST */ 3479 } else { 3480 uint64_t size; 3481 uint32_t uid; 3482 3483 event = AAC_CFG_NULL_EXIST; 3484 3485 size = AAC_MIR_SIZE(softs, acc, mir); 3486 uid = ddi_get32(acc, &mir->Status); 3487 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3488 if (dvp->uid != uid) { 3489 AACDB_PRINT(softs, CE_WARN, 3490 ">>> Container %u uid changed to %d", 3491 cid, uid); 3492 dvp->uid = uid; 3493 event = AAC_CFG_CHANGE; 3494 } 3495 if (dvp->size != size) { 3496 AACDB_PRINT(softs, CE_NOTE, 3497 ">>> Container %u size changed to %"PRIu64, 3498 cid, size); 3499 dvp->size = size; 3500 event = AAC_CFG_CHANGE; 3501 } 3502 } else { /* Init new container */ 3503 AACDB_PRINT(softs, CE_NOTE, 3504 ">>> Container %d added: " \ 3505 "size=0x%x.%08x, type=%d, name=%s", 3506 cid, 3507 ddi_get32(acc, &mir->MntObj.CapacityHigh), 3508 ddi_get32(acc, &mir->MntObj.Capacity), 3509 ddi_get32(acc, &mir->MntObj.VolType), 3510 mir->MntObj.FileSystemName); 3511 dvp->dev.flags |= AAC_DFLAG_VALID; 3512 dvp->dev.type = AAC_DEV_LD; 3513 3514 dvp->cid = cid; 3515 dvp->uid = uid; 3516 dvp->size = size; 3517 dvp->locked = 0; 3518 dvp->deleted = 0; 3519 3520 event = AAC_CFG_ADD; 3521 } 3522 } 3523 3524 finish: 3525 aac_sync_fib_slot_release(softs, &softs->sync_ac); 3526 return (event); 3527 } 3528 3529 /* 3530 * Do a rescan of all the possible containers and update the container list 3531 * with newly online/offline containers, and prepare for autoconfiguration. 3532 */ 3533 static int 3534 aac_probe_containers(struct aac_softstate *softs) 3535 { 3536 int i, count, total; 3537 3538 /* Loop over possible containers */ 3539 count = softs->container_count; 3540 if (aac_get_container_count(softs, &count) == AACERR) 3541 return (AACERR); 3542 3543 for (i = total = 0; i < count; i++) { 3544 enum aac_cfg_event event = aac_probe_container(softs, i); 3545 if ((event != AAC_CFG_NULL_NOEXIST) && 3546 (event != AAC_CFG_NULL_EXIST)) { 3547 (void) aac_handle_dr(softs, i, -1, event); 3548 total++; 3549 } 3550 } 3551 3552 if (count < softs->container_count) { 3553 struct aac_container *dvp; 3554 3555 for (dvp = &softs->containers[count]; 3556 dvp < &softs->containers[softs->container_count]; dvp++) { 3557 if (!AAC_DEV_IS_VALID(&dvp->dev)) 3558 continue; 3559 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted", 3560 dvp->cid); 3561 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3562 (void) aac_handle_dr(softs, dvp->cid, -1, 3563 AAC_CFG_DELETE); 3564 } 3565 } 3566 3567 softs->container_count = count; 3568 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total); 3569 return (AACOK); 3570 } 3571 3572 static int 3573 aac_probe_jbod(struct aac_softstate *softs, int tgt, int event) 3574 { 3575 ASSERT(AAC_MAX_LD <= tgt); 3576 ASSERT(tgt < AAC_MAX_DEV(softs)); 3577 struct aac_device *dvp; 3578 dvp = AAC_DEV(softs, tgt); 3579 3580 switch (event) { 3581 case AAC_CFG_ADD: 3582 AACDB_PRINT(softs, CE_NOTE, 3583 ">>> Jbod %d added", tgt - AAC_MAX_LD); 3584 dvp->flags |= AAC_DFLAG_VALID; 3585 dvp->type = AAC_DEV_PD; 3586 break; 3587 case AAC_CFG_DELETE: 3588 AACDB_PRINT(softs, CE_NOTE, 3589 ">>> Jbod %d deleted", tgt - AAC_MAX_LD); 3590 dvp->flags &= ~AAC_DFLAG_VALID; 3591 break; 3592 default: 3593 return (AACERR); 3594 } 3595 (void) aac_handle_dr(softs, tgt, 0, event); 3596 return (AACOK); 3597 } 3598 3599 static int 3600 aac_alloc_comm_space(struct 
aac_softstate *softs) 3601 { 3602 size_t rlen; 3603 ddi_dma_cookie_t cookie; 3604 uint_t cookien; 3605 3606 /* Allocate DMA for comm. space */ 3607 if (ddi_dma_alloc_handle( 3608 softs->devinfo_p, 3609 &softs->addr_dma_attr, 3610 DDI_DMA_SLEEP, 3611 NULL, 3612 &softs->comm_space_dma_handle) != DDI_SUCCESS) { 3613 AACDB_PRINT(softs, CE_WARN, 3614 "Cannot alloc dma handle for communication area"); 3615 goto error; 3616 } 3617 if (ddi_dma_mem_alloc( 3618 softs->comm_space_dma_handle, 3619 sizeof (struct aac_comm_space), 3620 &softs->acc_attr, 3621 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3622 DDI_DMA_SLEEP, 3623 NULL, 3624 (caddr_t *)&softs->comm_space, 3625 &rlen, 3626 &softs->comm_space_acc_handle) != DDI_SUCCESS) { 3627 AACDB_PRINT(softs, CE_WARN, 3628 "Cannot alloc mem for communication area"); 3629 goto error; 3630 } 3631 if (ddi_dma_addr_bind_handle( 3632 softs->comm_space_dma_handle, 3633 NULL, 3634 (caddr_t)softs->comm_space, 3635 sizeof (struct aac_comm_space), 3636 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3637 DDI_DMA_SLEEP, 3638 NULL, 3639 &cookie, 3640 &cookien) != DDI_DMA_MAPPED) { 3641 AACDB_PRINT(softs, CE_WARN, 3642 "DMA bind failed for communication area"); 3643 goto error; 3644 } 3645 softs->comm_space_phyaddr = cookie.dmac_address; 3646 3647 return (AACOK); 3648 error: 3649 if (softs->comm_space_acc_handle) { 3650 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3651 softs->comm_space_acc_handle = NULL; 3652 } 3653 if (softs->comm_space_dma_handle) { 3654 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3655 softs->comm_space_dma_handle = NULL; 3656 } 3657 return (AACERR); 3658 } 3659 3660 static void 3661 aac_free_comm_space(struct aac_softstate *softs) 3662 { 3663 3664 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle); 3665 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3666 softs->comm_space_acc_handle = NULL; 3667 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3668 softs->comm_space_dma_handle = NULL; 3669 softs->comm_space_phyaddr = 0; 3670 } 3671 3672 /* 3673 * Initialize the data structures that are required for the communication 3674 * interface to operate 3675 */ 3676 static int 3677 aac_setup_comm_space(struct aac_softstate *softs) 3678 { 3679 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3680 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3681 uint32_t comm_space_phyaddr; 3682 struct aac_adapter_init *initp; 3683 int qoffset; 3684 3685 comm_space_phyaddr = softs->comm_space_phyaddr; 3686 3687 /* Setup adapter init struct */ 3688 initp = &softs->comm_space->init_data; 3689 bzero(initp, sizeof (struct aac_adapter_init)); 3690 3691 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION); 3692 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time()); 3693 3694 /* Setup new/old comm. 
specific data */ 3695 if (softs->flags & AAC_FLAGS_RAW_IO) { 3696 uint32_t init_flags = 0; 3697 3698 if (softs->flags & AAC_FLAGS_NEW_COMM) 3699 init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED; 3700 /* AAC_SUPPORTED_POWER_MANAGEMENT */ 3701 init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM; 3702 init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME; 3703 3704 ddi_put32(acc, &initp->InitStructRevision, 3705 AAC_INIT_STRUCT_REVISION_4); 3706 ddi_put32(acc, &initp->InitFlags, init_flags); 3707 /* Setup the preferred settings */ 3708 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs); 3709 ddi_put32(acc, &initp->MaxIoSize, 3710 (softs->aac_max_sectors << 9)); 3711 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size); 3712 } else { 3713 /* 3714 * Tells the adapter about the physical location of various 3715 * important shared data structures 3716 */ 3717 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress, 3718 comm_space_phyaddr + \ 3719 offsetof(struct aac_comm_space, adapter_fibs)); 3720 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0); 3721 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE); 3722 ddi_put32(acc, &initp->AdapterFibsSize, 3723 AAC_ADAPTER_FIBS * AAC_FIB_SIZE); 3724 ddi_put32(acc, &initp->PrintfBufferAddress, 3725 comm_space_phyaddr + \ 3726 offsetof(struct aac_comm_space, adapter_print_buf)); 3727 ddi_put32(acc, &initp->PrintfBufferSize, 3728 AAC_ADAPTER_PRINT_BUFSIZE); 3729 ddi_put32(acc, &initp->MiniPortRevision, 3730 AAC_INIT_STRUCT_MINIPORT_REVISION); 3731 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN); 3732 3733 qoffset = (comm_space_phyaddr + \ 3734 offsetof(struct aac_comm_space, qtable)) % \ 3735 AAC_QUEUE_ALIGN; 3736 if (qoffset) 3737 qoffset = AAC_QUEUE_ALIGN - qoffset; 3738 softs->qtablep = (struct aac_queue_table *) \ 3739 ((char *)&softs->comm_space->qtable + qoffset); 3740 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \ 3741 offsetof(struct aac_comm_space, qtable) + qoffset); 3742 3743 /* Init queue table */ 3744 ddi_put32(acc, &softs->qtablep-> \ 3745 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3746 AAC_HOST_NORM_CMD_ENTRIES); 3747 ddi_put32(acc, &softs->qtablep-> \ 3748 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3749 AAC_HOST_NORM_CMD_ENTRIES); 3750 ddi_put32(acc, &softs->qtablep-> \ 3751 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3752 AAC_HOST_HIGH_CMD_ENTRIES); 3753 ddi_put32(acc, &softs->qtablep-> \ 3754 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3755 AAC_HOST_HIGH_CMD_ENTRIES); 3756 ddi_put32(acc, &softs->qtablep-> \ 3757 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3758 AAC_ADAP_NORM_CMD_ENTRIES); 3759 ddi_put32(acc, &softs->qtablep-> \ 3760 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3761 AAC_ADAP_NORM_CMD_ENTRIES); 3762 ddi_put32(acc, &softs->qtablep-> \ 3763 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3764 AAC_ADAP_HIGH_CMD_ENTRIES); 3765 ddi_put32(acc, &softs->qtablep-> \ 3766 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3767 AAC_ADAP_HIGH_CMD_ENTRIES); 3768 ddi_put32(acc, &softs->qtablep-> \ 3769 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3770 AAC_HOST_NORM_RESP_ENTRIES); 3771 ddi_put32(acc, &softs->qtablep-> \ 3772 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3773 AAC_HOST_NORM_RESP_ENTRIES); 3774 ddi_put32(acc, &softs->qtablep-> \ 3775 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3776 AAC_HOST_HIGH_RESP_ENTRIES); 3777 ddi_put32(acc, &softs->qtablep-> \ 3778 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3779 
AAC_HOST_HIGH_RESP_ENTRIES); 3780 ddi_put32(acc, &softs->qtablep-> \ 3781 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3782 AAC_ADAP_NORM_RESP_ENTRIES); 3783 ddi_put32(acc, &softs->qtablep-> \ 3784 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3785 AAC_ADAP_NORM_RESP_ENTRIES); 3786 ddi_put32(acc, &softs->qtablep-> \ 3787 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3788 AAC_ADAP_HIGH_RESP_ENTRIES); 3789 ddi_put32(acc, &softs->qtablep-> \ 3790 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3791 AAC_ADAP_HIGH_RESP_ENTRIES); 3792 3793 /* Init queue entries */ 3794 softs->qentries[AAC_HOST_NORM_CMD_Q] = 3795 &softs->qtablep->qt_HostNormCmdQueue[0]; 3796 softs->qentries[AAC_HOST_HIGH_CMD_Q] = 3797 &softs->qtablep->qt_HostHighCmdQueue[0]; 3798 softs->qentries[AAC_ADAP_NORM_CMD_Q] = 3799 &softs->qtablep->qt_AdapNormCmdQueue[0]; 3800 softs->qentries[AAC_ADAP_HIGH_CMD_Q] = 3801 &softs->qtablep->qt_AdapHighCmdQueue[0]; 3802 softs->qentries[AAC_HOST_NORM_RESP_Q] = 3803 &softs->qtablep->qt_HostNormRespQueue[0]; 3804 softs->qentries[AAC_HOST_HIGH_RESP_Q] = 3805 &softs->qtablep->qt_HostHighRespQueue[0]; 3806 softs->qentries[AAC_ADAP_NORM_RESP_Q] = 3807 &softs->qtablep->qt_AdapNormRespQueue[0]; 3808 softs->qentries[AAC_ADAP_HIGH_RESP_Q] = 3809 &softs->qtablep->qt_AdapHighRespQueue[0]; 3810 } 3811 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV); 3812 3813 /* Send init structure to the card */ 3814 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT, 3815 comm_space_phyaddr + \ 3816 offsetof(struct aac_comm_space, init_data), 3817 0, 0, 0, NULL) == AACERR) { 3818 AACDB_PRINT(softs, CE_WARN, 3819 "Cannot send init structure to adapter"); 3820 return (AACERR); 3821 } 3822 3823 return (AACOK); 3824 } 3825 3826 static uchar_t * 3827 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf) 3828 { 3829 (void) memset(buf, ' ', AAC_VENDOR_LEN); 3830 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name)); 3831 return (buf + AAC_VENDOR_LEN); 3832 } 3833 3834 static uchar_t * 3835 aac_product_id(struct aac_softstate *softs, uchar_t *buf) 3836 { 3837 (void) memset(buf, ' ', AAC_PRODUCT_LEN); 3838 bcopy(softs->product_name, buf, strlen(softs->product_name)); 3839 return (buf + AAC_PRODUCT_LEN); 3840 } 3841 3842 /* 3843 * Construct unit serial number from container uid 3844 */ 3845 static uchar_t * 3846 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf) 3847 { 3848 int i, d; 3849 uint32_t uid; 3850 3851 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD); 3852 3853 uid = softs->containers[tgt].uid; 3854 for (i = 7; i >= 0; i--) { 3855 d = uid & 0xf; 3856 buf[i] = d > 9 ? 
'A' + (d - 0xa) : '0' + d; 3857 uid >>= 4; 3858 } 3859 return (buf + 8); 3860 } 3861 3862 /* 3863 * SPC-3 7.5 INQUIRY command implementation 3864 */ 3865 static void 3866 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt, 3867 union scsi_cdb *cdbp, struct buf *bp) 3868 { 3869 int tgt = pkt->pkt_address.a_target; 3870 char *b_addr = NULL; 3871 uchar_t page = cdbp->cdb_opaque[2]; 3872 3873 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) { 3874 /* Command Support Data is not supported */ 3875 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0); 3876 return; 3877 } 3878 3879 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3880 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3881 bp_mapin(bp); 3882 b_addr = bp->b_un.b_addr; 3883 } 3884 3885 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) { 3886 uchar_t *vpdp = (uchar_t *)b_addr; 3887 uchar_t *idp, *sp; 3888 3889 /* SPC-3 8.4 Vital product data parameters */ 3890 switch (page) { 3891 case 0x00: 3892 /* Supported VPD pages */ 3893 if (vpdp == NULL || 3894 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3)) 3895 return; 3896 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3897 vpdp[AAC_VPD_PAGE_CODE] = 0x00; 3898 vpdp[AAC_VPD_PAGE_LENGTH] = 3; 3899 3900 vpdp[AAC_VPD_PAGE_DATA] = 0x00; 3901 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80; 3902 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83; 3903 3904 pkt->pkt_state |= STATE_XFERRED_DATA; 3905 break; 3906 3907 case 0x80: 3908 /* Unit serial number page */ 3909 if (vpdp == NULL || 3910 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8)) 3911 return; 3912 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3913 vpdp[AAC_VPD_PAGE_CODE] = 0x80; 3914 vpdp[AAC_VPD_PAGE_LENGTH] = 8; 3915 3916 sp = &vpdp[AAC_VPD_PAGE_DATA]; 3917 (void) aac_lun_serialno(softs, tgt, sp); 3918 3919 pkt->pkt_state |= STATE_XFERRED_DATA; 3920 break; 3921 3922 case 0x83: 3923 /* Device identification page */ 3924 if (vpdp == NULL || 3925 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32)) 3926 return; 3927 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3928 vpdp[AAC_VPD_PAGE_CODE] = 0x83; 3929 3930 idp = &vpdp[AAC_VPD_PAGE_DATA]; 3931 bzero(idp, AAC_VPD_ID_LENGTH); 3932 idp[AAC_VPD_ID_CODESET] = 0x02; 3933 idp[AAC_VPD_ID_TYPE] = 0x01; 3934 3935 /* 3936 * SPC-3 Table 111 - Identifier type 3937 * One recommended method of constructing the remainder 3938 * of the identifier field is to concatenate the product 3939 * identification field from the standard INQUIRY data 3940 * field and the product serial number field from the 3941 * unit serial number page.
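 * In this driver that works out to: vendor id (AAC_VENDOR_LEN bytes,
 * space padded), product id (AAC_PRODUCT_LEN bytes, space padded) and
 * the 8 hex digits of the container uid, emitted in sequence by
 * aac_vendor_id(), aac_product_id() and aac_lun_serialno() below.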
3942 */ 3943 sp = &idp[AAC_VPD_ID_DATA]; 3944 sp = aac_vendor_id(softs, sp); 3945 sp = aac_product_id(softs, sp); 3946 sp = aac_lun_serialno(softs, tgt, sp); 3947 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \ 3948 (uintptr_t)&idp[AAC_VPD_ID_DATA]; 3949 3950 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \ 3951 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA]; 3952 pkt->pkt_state |= STATE_XFERRED_DATA; 3953 break; 3954 3955 default: 3956 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3957 0x24, 0x00, 0); 3958 break; 3959 } 3960 } else { 3961 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr; 3962 size_t len = sizeof (struct scsi_inquiry); 3963 3964 if (page != 0) { 3965 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3966 0x24, 0x00, 0); 3967 return; 3968 } 3969 if (inqp == NULL || bp->b_bcount < len) 3970 return; 3971 3972 bzero(inqp, len); 3973 inqp->inq_len = AAC_ADDITIONAL_LEN; 3974 inqp->inq_ansi = AAC_ANSI_VER; 3975 inqp->inq_rdf = AAC_RESP_DATA_FORMAT; 3976 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid); 3977 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid); 3978 bcopy("V1.0", inqp->inq_revision, 4); 3979 inqp->inq_cmdque = 1; /* enable tagged-queuing */ 3980 /* 3981 * For "sd-max-xfer-size" property which may impact performance 3982 * when IO threads increase. 3983 */ 3984 inqp->inq_wbus32 = 1; 3985 3986 pkt->pkt_state |= STATE_XFERRED_DATA; 3987 } 3988 } 3989 3990 /* 3991 * SPC-3 7.10 MODE SENSE command implementation 3992 */ 3993 static void 3994 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt, 3995 union scsi_cdb *cdbp, struct buf *bp, int capacity) 3996 { 3997 uchar_t pagecode; 3998 struct mode_header *headerp; 3999 struct mode_header_g1 *g1_headerp; 4000 unsigned int ncyl; 4001 caddr_t sense_data; 4002 caddr_t next_page; 4003 size_t sdata_size; 4004 size_t pages_size; 4005 int unsupport_page = 0; 4006 4007 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE || 4008 cdbp->scc_cmd == SCMD_MODE_SENSE_G1); 4009 4010 if (!(bp && bp->b_un.b_addr && bp->b_bcount)) 4011 return; 4012 4013 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4014 bp_mapin(bp); 4015 pkt->pkt_state |= STATE_XFERRED_DATA; 4016 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F; 4017 4018 /* calculate the size of needed buffer */ 4019 if (cdbp->scc_cmd == SCMD_MODE_SENSE) 4020 sdata_size = MODE_HEADER_LENGTH; 4021 else /* must be SCMD_MODE_SENSE_G1 */ 4022 sdata_size = MODE_HEADER_LENGTH_G1; 4023 4024 pages_size = 0; 4025 switch (pagecode) { 4026 case SD_MODE_SENSE_PAGE3_CODE: 4027 pages_size += sizeof (struct mode_format); 4028 break; 4029 4030 case SD_MODE_SENSE_PAGE4_CODE: 4031 pages_size += sizeof (struct mode_geometry); 4032 break; 4033 4034 case MODEPAGE_CTRL_MODE: 4035 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 4036 pages_size += sizeof (struct mode_control_scsi3); 4037 } else { 4038 unsupport_page = 1; 4039 } 4040 break; 4041 4042 case MODEPAGE_ALLPAGES: 4043 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 4044 pages_size += sizeof (struct mode_format) + 4045 sizeof (struct mode_geometry) + 4046 sizeof (struct mode_control_scsi3); 4047 } else { 4048 pages_size += sizeof (struct mode_format) + 4049 sizeof (struct mode_geometry); 4050 } 4051 break; 4052 4053 default: 4054 /* unsupported pages */ 4055 unsupport_page = 1; 4056 } 4057 4058 /* allocate buffer to fill the send data */ 4059 sdata_size += pages_size; 4060 sense_data = kmem_zalloc(sdata_size, KM_SLEEP); 4061 4062 if (cdbp->scc_cmd == SCMD_MODE_SENSE) { 4063 headerp = (struct mode_header *)sense_data; 4064 headerp->length = MODE_HEADER_LENGTH + pages_size - 4065 sizeof 
(headerp->length); 4066 headerp->bdesc_length = 0; 4067 next_page = sense_data + sizeof (struct mode_header); 4068 } else { 4069 g1_headerp = (void *)sense_data; 4070 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size - 4071 sizeof (g1_headerp->length)); 4072 g1_headerp->bdesc_length = 0; 4073 next_page = sense_data + sizeof (struct mode_header_g1); 4074 } 4075 4076 if (unsupport_page) 4077 goto finish; 4078 4079 if (pagecode == SD_MODE_SENSE_PAGE3_CODE || 4080 pagecode == MODEPAGE_ALLPAGES) { 4081 /* SBC-3 7.1.3.3 Format device page */ 4082 struct mode_format *page3p; 4083 4084 page3p = (void *)next_page; 4085 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE; 4086 page3p->mode_page.length = sizeof (struct mode_format); 4087 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE); 4088 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK); 4089 4090 next_page += sizeof (struct mode_format); 4091 } 4092 4093 if (pagecode == SD_MODE_SENSE_PAGE4_CODE || 4094 pagecode == MODEPAGE_ALLPAGES) { 4095 /* SBC-3 7.1.3.8 Rigid disk device geometry page */ 4096 struct mode_geometry *page4p; 4097 4098 page4p = (void *)next_page; 4099 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE; 4100 page4p->mode_page.length = sizeof (struct mode_geometry); 4101 page4p->heads = AAC_NUMBER_OF_HEADS; 4102 page4p->rpm = BE_16(AAC_ROTATION_SPEED); 4103 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK); 4104 page4p->cyl_lb = ncyl & 0xff; 4105 page4p->cyl_mb = (ncyl >> 8) & 0xff; 4106 page4p->cyl_ub = (ncyl >> 16) & 0xff; 4107 4108 next_page += sizeof (struct mode_geometry); 4109 } 4110 4111 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) && 4112 softs->flags & AAC_FLAGS_LBA_64BIT) { 4113 /* 64-bit LBA need large sense data */ 4114 struct mode_control_scsi3 *mctl; 4115 4116 mctl = (void *)next_page; 4117 mctl->mode_page.code = MODEPAGE_CTRL_MODE; 4118 mctl->mode_page.length = 4119 sizeof (struct mode_control_scsi3) - 4120 sizeof (struct mode_page); 4121 mctl->d_sense = 1; 4122 } 4123 4124 finish: 4125 /* copyout the valid data. */ 4126 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount)); 4127 kmem_free(sense_data, sdata_size); 4128 } 4129 4130 static int 4131 aac_name_node(dev_info_t *dip, char *name, int len) 4132 { 4133 int tgt, lun; 4134 4135 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4136 DDI_PROP_DONTPASS, "target", -1); 4137 if (tgt == -1) 4138 return (DDI_FAILURE); 4139 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4140 DDI_PROP_DONTPASS, "lun", -1); 4141 if (lun == -1) 4142 return (DDI_FAILURE); 4143 4144 (void) snprintf(name, len, "%x,%x", tgt, lun); 4145 return (DDI_SUCCESS); 4146 } 4147 4148 /*ARGSUSED*/ 4149 static int 4150 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 4151 scsi_hba_tran_t *tran, struct scsi_device *sd) 4152 { 4153 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 4154 #if defined(DEBUG) || defined(__lock_lint) 4155 int ctl = ddi_get_instance(softs->devinfo_p); 4156 #endif 4157 uint16_t tgt = sd->sd_address.a_target; 4158 uint8_t lun = sd->sd_address.a_lun; 4159 struct aac_device *dvp; 4160 4161 DBCALLED(softs, 2); 4162 4163 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 4164 /* 4165 * If no persistent node exist, we don't allow .conf node 4166 * to be created. 
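 * A .conf node is honored only when a matching hardware node for
 * the same target/lun has already been enumerated (see the
 * aac_find_child() lookup below); otherwise target init fails and
 * the node is not created.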
4167 */ 4168 if (aac_find_child(softs, tgt, lun) != NULL) { 4169 if (ndi_merge_node(tgt_dip, aac_name_node) != 4170 DDI_SUCCESS) 4171 /* Create this .conf node */ 4172 return (DDI_SUCCESS); 4173 } 4174 return (DDI_FAILURE); 4175 } 4176 4177 /* 4178 * Only support container/phys. device that has been 4179 * detected and valid 4180 */ 4181 mutex_enter(&softs->io_lock); 4182 if (tgt >= AAC_MAX_DEV(softs)) { 4183 AACDB_PRINT_TRAN(softs, 4184 "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun); 4185 mutex_exit(&softs->io_lock); 4186 return (DDI_FAILURE); 4187 } 4188 4189 if (tgt < AAC_MAX_LD) { 4190 dvp = (struct aac_device *)&softs->containers[tgt]; 4191 if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) { 4192 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d", 4193 ctl, tgt, lun); 4194 mutex_exit(&softs->io_lock); 4195 return (DDI_FAILURE); 4196 } 4197 /* 4198 * Save the tgt_dip for the given target if one doesn't exist 4199 * already. Dip's for non-existance tgt's will be cleared in 4200 * tgt_free. 4201 */ 4202 if (softs->containers[tgt].dev.dip == NULL && 4203 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) 4204 softs->containers[tgt].dev.dip = tgt_dip; 4205 } else { 4206 dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)]; 4207 /* 4208 * Save the tgt_dip for the given target if one doesn't exist 4209 * already. Dip's for non-existance tgt's will be cleared in 4210 * tgt_free. 4211 */ 4212 4213 if (softs->nondasds[AAC_PD(tgt)].dev.dip == NULL && 4214 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) 4215 softs->nondasds[AAC_PD(tgt)].dev.dip = tgt_dip; 4216 } 4217 4218 if (softs->flags & AAC_FLAGS_BRKUP) { 4219 if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip, 4220 "buf_break", 1) != DDI_PROP_SUCCESS) { 4221 cmn_err(CE_CONT, "unable to create " 4222 "property for t%dL%d (buf_break)", tgt, lun); 4223 } 4224 } 4225 4226 AACDB_PRINT(softs, CE_NOTE, 4227 "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun, 4228 (dvp->type == AAC_DEV_PD) ? "pd" : "ld"); 4229 mutex_exit(&softs->io_lock); 4230 return (DDI_SUCCESS); 4231 } 4232 4233 static void 4234 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 4235 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 4236 { 4237 #ifndef __lock_lint 4238 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran)) 4239 #endif 4240 4241 struct aac_softstate *softs = SD2AAC(sd); 4242 int tgt = sd->sd_address.a_target; 4243 4244 mutex_enter(&softs->io_lock); 4245 if (tgt < AAC_MAX_LD) { 4246 if (softs->containers[tgt].dev.dip == tgt_dip) 4247 softs->containers[tgt].dev.dip = NULL; 4248 } else { 4249 if (softs->nondasds[AAC_PD(tgt)].dev.dip == tgt_dip) 4250 softs->nondasds[AAC_PD(tgt)].dev.dip = NULL; 4251 softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID; 4252 } 4253 mutex_exit(&softs->io_lock); 4254 } 4255 4256 /* 4257 * Check if the firmware is Up And Running. If it is in the Kernel Panic 4258 * state, (BlinkLED code + 1) is returned. 
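 * The state is read from the adapter status register (AAC_OMR0).
 * Return values: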
4259 * 0 -- firmware up and running 4260 * -1 -- firmware dead 4261 * >0 -- firmware kernel panic 4262 */ 4263 static int 4264 aac_check_adapter_health(struct aac_softstate *softs) 4265 { 4266 int rval; 4267 4268 rval = PCI_MEM_GET32(softs, AAC_OMR0); 4269 4270 if (rval & AAC_KERNEL_UP_AND_RUNNING) { 4271 rval = 0; 4272 } else if (rval & AAC_KERNEL_PANIC) { 4273 cmn_err(CE_WARN, "firmware panic"); 4274 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */ 4275 } else { 4276 cmn_err(CE_WARN, "firmware dead"); 4277 rval = -1; 4278 } 4279 return (rval); 4280 } 4281 4282 static void 4283 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp, 4284 uchar_t reason) 4285 { 4286 acp->flags |= AAC_CMD_ABORT; 4287 4288 if (acp->pkt) { 4289 if (acp->slotp) { /* outstanding cmd */ 4290 acp->pkt->pkt_state |= STATE_GOT_STATUS; 4291 } 4292 4293 switch (reason) { 4294 case CMD_TIMEOUT: 4295 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p", 4296 acp); 4297 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 4298 STAT_TIMEOUT | STAT_BUS_RESET); 4299 break; 4300 case CMD_RESET: 4301 /* aac support only RESET_ALL */ 4302 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp); 4303 aac_set_pkt_reason(softs, acp, CMD_RESET, 4304 STAT_BUS_RESET); 4305 break; 4306 case CMD_ABORTED: 4307 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p", 4308 acp); 4309 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 4310 STAT_ABORTED); 4311 break; 4312 } 4313 } 4314 aac_end_io(softs, acp); 4315 } 4316 4317 /* 4318 * Abort all the pending commands of type iocmd or just the command pkt 4319 * corresponding to pkt 4320 */ 4321 static void 4322 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt, 4323 int reason) 4324 { 4325 struct aac_cmd *ac_arg, *acp; 4326 int i; 4327 4328 if (pkt == NULL) { 4329 ac_arg = NULL; 4330 } else { 4331 ac_arg = PKT2AC(pkt); 4332 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ? 4333 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC; 4334 } 4335 4336 /* 4337 * a) outstanding commands on the controller 4338 * Note: should abort outstanding commands only after one 4339 * IOP reset has been done. 4340 */ 4341 if (iocmd & AAC_IOCMD_OUTSTANDING) { 4342 struct aac_cmd *acp; 4343 4344 for (i = 0; i < AAC_MAX_LD; i++) { 4345 if (AAC_DEV_IS_VALID(&softs->containers[i].dev)) 4346 softs->containers[i].reset = 1; 4347 } 4348 while ((acp = softs->q_busy.q_head) != NULL) 4349 aac_abort_iocmd(softs, acp, reason); 4350 } 4351 4352 /* b) commands in the waiting queues */ 4353 for (i = 0; i < AAC_CMDQ_NUM; i++) { 4354 if (iocmd & (1 << i)) { 4355 if (ac_arg) { 4356 aac_abort_iocmd(softs, ac_arg, reason); 4357 } else { 4358 while ((acp = softs->q_wait[i].q_head) != NULL) 4359 aac_abort_iocmd(softs, acp, reason); 4360 } 4361 } 4362 } 4363 } 4364 4365 /* 4366 * The draining thread is shared among quiesce threads. It terminates 4367 * when the adapter is quiesced or stopped by aac_stop_drain(). 4368 */ 4369 static void 4370 aac_check_drain(void *arg) 4371 { 4372 struct aac_softstate *softs = arg; 4373 4374 mutex_enter(&softs->io_lock); 4375 if (softs->ndrains) { 4376 softs->drain_timeid = 0; 4377 /* 4378 * If both ASYNC and SYNC bus throttle are held, 4379 * wake up threads only when both are drained out. 
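 * Otherwise the timeout is simply re-armed, so this check repeats
 * every AAC_QUIESCE_TICK seconds until the queues drain.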
4380 */ 4381 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 || 4382 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) && 4383 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 || 4384 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0)) 4385 cv_broadcast(&softs->drain_cv); 4386 else 4387 softs->drain_timeid = timeout(aac_check_drain, softs, 4388 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4389 } 4390 mutex_exit(&softs->io_lock); 4391 } 4392 4393 /* 4394 * If the outstanding cmds are not already being drained, start 4395 * draining them. Otherwise only bump ndrains. 4396 */ 4397 static void 4398 aac_start_drain(struct aac_softstate *softs) 4399 { 4400 if (softs->ndrains == 0) { 4401 ASSERT(softs->drain_timeid == 0); 4402 softs->drain_timeid = timeout(aac_check_drain, softs, 4403 AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4404 } 4405 softs->ndrains++; 4406 } 4407 4408 /* 4409 * Stop the draining thread when no other thread uses it any longer. 4410 * Side effect: io_lock may be released in the middle. 4411 */ 4412 static void 4413 aac_stop_drain(struct aac_softstate *softs) 4414 { 4415 softs->ndrains--; 4416 if (softs->ndrains == 0) { 4417 if (softs->drain_timeid != 0) { 4418 timeout_id_t tid = softs->drain_timeid; 4419 4420 softs->drain_timeid = 0; 4421 mutex_exit(&softs->io_lock); 4422 (void) untimeout(tid); 4423 mutex_enter(&softs->io_lock); 4424 } 4425 } 4426 } 4427 4428 /* 4429 * The following function comes from Adaptec: 4430 * 4431 * Once an IOP reset has been done, the driver basically has to re-initialize 4432 * the card as if it were coming up from a cold boot, and it is responsible 4433 * for any IO that was outstanding to the adapter at the time of the IOP 4434 * RESET. Prepare for the IOP RESET by keeping the init code modular, with 4435 * the ability to call it from multiple places. 4436 */ 4437 static int 4438 aac_reset_adapter(struct aac_softstate *softs) 4439 { 4440 int health; 4441 uint32_t status; 4442 int rval = AAC_IOP_RESET_FAILED; 4443 4444 DBCALLED(softs, 1); 4445 4446 ASSERT(softs->state & AAC_STATE_RESET); 4447 4448 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0); 4449 /* Disable interrupt */ 4450 AAC_DISABLE_INTR(softs); 4451 4452 health = aac_check_adapter_health(softs); 4453 if (health == -1) { 4454 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 4455 goto finish; 4456 } 4457 if (health == 0) /* flush drives if possible */ 4458 (void) aac_shutdown(softs); 4459 4460 /* Execute IOP reset */ 4461 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0, 4462 &status)) != AACOK) { 4463 ddi_acc_handle_t acc; 4464 struct aac_fib *fibp; 4465 struct aac_pause_command *pc; 4466 4467 if ((status & 0xf) == 0xf) { 4468 uint32_t wait_count; 4469 4470 /* 4471 * Sunrise Lake has dual cores and we must drag the 4472 * other core with us to reset simultaneously. There 4473 * are 2 bits in the Inbound Reset Control and Status 4474 * Register (offset 0x38) of the Sunrise Lake to reset 4475 * the chip without clearing out the PCI configuration 4476 * info (COMMAND & BARS).
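 * The AAC_IRCSR_CORES_RST write just below is what asserts that
 * dual-core reset.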
4477 */ 4478 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST); 4479 4480 /* 4481 * We need to wait for 5 seconds before accessing the MU 4482 * again 10000 * 100us = 1000,000us = 1000ms = 1s 4483 */ 4484 wait_count = 5 * 10000; 4485 while (wait_count) { 4486 drv_usecwait(100); /* delay 100 microseconds */ 4487 wait_count--; 4488 } 4489 } else { 4490 if (status == SRB_STATUS_INVALID_REQUEST) 4491 cmn_err(CE_WARN, "!IOP_RESET not supported"); 4492 else /* probably timeout */ 4493 cmn_err(CE_WARN, "!IOP_RESET failed"); 4494 4495 /* Unwind aac_shutdown() */ 4496 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 4497 acc = softs->sync_ac.slotp->fib_acc_handle; 4498 4499 fibp = softs->sync_ac.slotp->fibp; 4500 pc = (struct aac_pause_command *)&fibp->data[0]; 4501 4502 bzero(pc, sizeof (*pc)); 4503 ddi_put32(acc, &pc->Command, VM_ContainerConfig); 4504 ddi_put32(acc, &pc->Type, CT_PAUSE_IO); 4505 ddi_put32(acc, &pc->Timeout, 1); 4506 ddi_put32(acc, &pc->Min, 1); 4507 ddi_put32(acc, &pc->NoRescan, 1); 4508 4509 (void) aac_sync_fib(softs, ContainerCommand, 4510 AAC_FIB_SIZEOF(struct aac_pause_command)); 4511 aac_sync_fib_slot_release(softs, &softs->sync_ac); 4512 4513 if (aac_check_adapter_health(softs) != 0) 4514 ddi_fm_service_impact(softs->devinfo_p, 4515 DDI_SERVICE_LOST); 4516 else 4517 /* 4518 * IOP reset not supported or IOP not reseted 4519 */ 4520 rval = AAC_IOP_RESET_ABNORMAL; 4521 goto finish; 4522 } 4523 } 4524 4525 /* 4526 * Re-read and renegotiate the FIB parameters, as one of the actions 4527 * that can result from an IOP reset is the running of a new firmware 4528 * image. 4529 */ 4530 if (aac_common_attach(softs) != AACOK) 4531 goto finish; 4532 4533 rval = AAC_IOP_RESET_SUCCEED; 4534 4535 finish: 4536 AAC_ENABLE_INTR(softs); 4537 return (rval); 4538 } 4539 4540 static void 4541 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q, 4542 int throttle) 4543 { 4544 /* 4545 * If the bus is draining/quiesced, no changes to the throttles 4546 * are allowed. All throttles should have been set to 0. 4547 */ 4548 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains) 4549 return; 4550 dvp->throttle[q] = throttle; 4551 } 4552 4553 static void 4554 aac_hold_bus(struct aac_softstate *softs, int iocmds) 4555 { 4556 int i, q; 4557 4558 /* Hold bus by holding every device on the bus */ 4559 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4560 if (iocmds & (1 << q)) { 4561 softs->bus_throttle[q] = 0; 4562 for (i = 0; i < AAC_MAX_LD; i++) 4563 aac_set_throttle(softs, 4564 &softs->containers[i].dev, q, 0); 4565 for (i = 0; i < AAC_MAX_PD(softs); i++) 4566 aac_set_throttle(softs, 4567 &softs->nondasds[i].dev, q, 0); 4568 } 4569 } 4570 } 4571 4572 static void 4573 aac_unhold_bus(struct aac_softstate *softs, int iocmds) 4574 { 4575 int i, q, max_throttle; 4576 4577 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4578 if (iocmds & (1 << q)) { 4579 /* 4580 * Should not unhold AAC_IOCMD_ASYNC bus, if it has been 4581 * quiesced or being drained by possibly some quiesce 4582 * threads. 
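 * The SYNC queue, by contrast, is restored to its full throttle
 * whenever it is requested here.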
4583 */ 4584 if (q == AAC_CMDQ_ASYNC && ((softs->state & 4585 AAC_STATE_QUIESCED) || softs->ndrains)) 4586 continue; 4587 if (q == AAC_CMDQ_ASYNC) 4588 max_throttle = softs->total_slots - 4589 AAC_MGT_SLOT_NUM; 4590 else 4591 max_throttle = softs->total_slots - 1; 4592 softs->bus_throttle[q] = max_throttle; 4593 for (i = 0; i < AAC_MAX_LD; i++) 4594 aac_set_throttle(softs, 4595 &softs->containers[i].dev, 4596 q, max_throttle); 4597 for (i = 0; i < AAC_MAX_PD(softs); i++) 4598 aac_set_throttle(softs, &softs->nondasds[i].dev, 4599 q, max_throttle); 4600 } 4601 } 4602 } 4603 4604 static int 4605 aac_do_reset(struct aac_softstate *softs) 4606 { 4607 int health; 4608 int rval; 4609 4610 softs->state |= AAC_STATE_RESET; 4611 health = aac_check_adapter_health(softs); 4612 4613 /* 4614 * Hold off new io commands and wait all outstanding io 4615 * commands to complete. 4616 */ 4617 if (health == 0) { 4618 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC]; 4619 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC]; 4620 4621 if (sync_cmds == 0 && async_cmds == 0) { 4622 rval = AAC_IOP_RESET_SUCCEED; 4623 goto finish; 4624 } 4625 /* 4626 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds 4627 * to complete the outstanding io commands 4628 */ 4629 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10; 4630 int (*intr_handler)(struct aac_softstate *); 4631 4632 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4633 /* 4634 * Poll the adapter by ourselves in case interrupt is disabled 4635 * and to avoid releasing the io_lock. 4636 */ 4637 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 4638 aac_process_intr_new : aac_process_intr_old; 4639 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] || 4640 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) { 4641 drv_usecwait(100); 4642 (void) intr_handler(softs); 4643 timeout--; 4644 } 4645 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4646 4647 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 && 4648 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) { 4649 /* Cmds drained out */ 4650 rval = AAC_IOP_RESET_SUCCEED; 4651 goto finish; 4652 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds || 4653 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) { 4654 /* Cmds not drained out, adapter overloaded */ 4655 rval = AAC_IOP_RESET_ABNORMAL; 4656 goto finish; 4657 } 4658 } 4659 4660 /* 4661 * If a longer waiting time still can't drain any outstanding io 4662 * commands, do IOP reset. 
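 * If even the IOP reset fails, the adapter is marked dead and will
 * accept no further commands.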
4663 */ 4664 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED) 4665 softs->state |= AAC_STATE_DEAD; 4666 4667 finish: 4668 softs->state &= ~AAC_STATE_RESET; 4669 return (rval); 4670 } 4671 4672 static int 4673 aac_tran_reset(struct scsi_address *ap, int level) 4674 { 4675 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4676 int rval; 4677 4678 DBCALLED(softs, 1); 4679 4680 if (level != RESET_ALL) { 4681 cmn_err(CE_NOTE, "!reset target/lun not supported"); 4682 return (0); 4683 } 4684 4685 mutex_enter(&softs->io_lock); 4686 switch (rval = aac_do_reset(softs)) { 4687 case AAC_IOP_RESET_SUCCEED: 4688 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC, 4689 NULL, CMD_RESET); 4690 aac_start_waiting_io(softs); 4691 break; 4692 case AAC_IOP_RESET_FAILED: 4693 /* Abort IOCTL cmds when adapter is dead */ 4694 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET); 4695 break; 4696 case AAC_IOP_RESET_ABNORMAL: 4697 aac_start_waiting_io(softs); 4698 } 4699 mutex_exit(&softs->io_lock); 4700 4701 aac_drain_comp_q(softs); 4702 return (rval == 0); 4703 } 4704 4705 static int 4706 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 4707 { 4708 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4709 4710 DBCALLED(softs, 1); 4711 4712 mutex_enter(&softs->io_lock); 4713 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED); 4714 mutex_exit(&softs->io_lock); 4715 4716 aac_drain_comp_q(softs); 4717 return (1); 4718 } 4719 4720 void 4721 aac_free_dmamap(struct aac_cmd *acp) 4722 { 4723 /* Free dma mapping */ 4724 if (acp->flags & AAC_CMD_DMA_VALID) { 4725 ASSERT(acp->buf_dma_handle); 4726 (void) ddi_dma_unbind_handle(acp->buf_dma_handle); 4727 acp->flags &= ~AAC_CMD_DMA_VALID; 4728 } 4729 4730 if (acp->abp != NULL) { /* free non-aligned buf DMA */ 4731 ASSERT(acp->buf_dma_handle); 4732 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp) 4733 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr, 4734 (uint8_t *)acp->abp, acp->bp->b_bcount, 4735 DDI_DEV_AUTOINCR); 4736 ddi_dma_mem_free(&acp->abh); 4737 acp->abp = NULL; 4738 } 4739 4740 if (acp->buf_dma_handle) { 4741 ddi_dma_free_handle(&acp->buf_dma_handle); 4742 acp->buf_dma_handle = NULL; 4743 } 4744 } 4745 4746 static void 4747 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 4748 { 4749 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported", 4750 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd); 4751 aac_free_dmamap(acp); 4752 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0); 4753 aac_soft_callback(softs, acp); 4754 } 4755 4756 /* 4757 * Handle command to logical device 4758 */ 4759 static int 4760 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp) 4761 { 4762 struct aac_container *dvp; 4763 struct scsi_pkt *pkt; 4764 union scsi_cdb *cdbp; 4765 struct buf *bp; 4766 int rval; 4767 4768 dvp = (struct aac_container *)acp->dvp; 4769 pkt = acp->pkt; 4770 cdbp = (void *)pkt->pkt_cdbp; 4771 bp = acp->bp; 4772 4773 switch (cdbp->scc_cmd) { 4774 case SCMD_INQUIRY: /* inquiry */ 4775 aac_free_dmamap(acp); 4776 aac_inquiry(softs, pkt, cdbp, bp); 4777 aac_soft_callback(softs, acp); 4778 rval = TRAN_ACCEPT; 4779 break; 4780 4781 case SCMD_READ_CAPACITY: /* read capacity */ 4782 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4783 struct scsi_capacity cap; 4784 uint64_t last_lba; 4785 4786 /* check 64-bit LBA */ 4787 last_lba = dvp->size - 1; 4788 if (last_lba > 0xffffffffull) { 4789 cap.capacity = 0xfffffffful; 4790 } else { 4791 cap.capacity = BE_32(last_lba); 
4792 } 4793 cap.lbasize = BE_32(AAC_SECTOR_SIZE); 4794 4795 aac_free_dmamap(acp); 4796 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4797 bp_mapin(bp); 4798 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8)); 4799 pkt->pkt_state |= STATE_XFERRED_DATA; 4800 } 4801 aac_soft_callback(softs, acp); 4802 rval = TRAN_ACCEPT; 4803 break; 4804 4805 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */ 4806 /* Check if containers need 64-bit LBA support */ 4807 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) { 4808 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4809 struct scsi_capacity_16 cap16; 4810 int cap_len = sizeof (struct scsi_capacity_16); 4811 4812 bzero(&cap16, cap_len); 4813 cap16.sc_capacity = BE_64(dvp->size - 1); 4814 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE); 4815 4816 aac_free_dmamap(acp); 4817 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4818 bp_mapin(bp); 4819 bcopy(&cap16, bp->b_un.b_addr, 4820 min(bp->b_bcount, cap_len)); 4821 pkt->pkt_state |= STATE_XFERRED_DATA; 4822 } 4823 aac_soft_callback(softs, acp); 4824 } else { 4825 aac_unknown_scmd(softs, acp); 4826 } 4827 rval = TRAN_ACCEPT; 4828 break; 4829 4830 case SCMD_READ_G4: /* read_16 */ 4831 case SCMD_WRITE_G4: /* write_16 */ 4832 if (softs->flags & AAC_FLAGS_RAW_IO) { 4833 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 4834 acp->blkno = ((uint64_t) \ 4835 GETG4ADDR(cdbp) << 32) | \ 4836 (uint32_t)GETG4ADDRTL(cdbp); 4837 goto do_io; 4838 } 4839 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported"); 4840 aac_unknown_scmd(softs, acp); 4841 rval = TRAN_ACCEPT; 4842 break; 4843 4844 case SCMD_READ: /* read_6 */ 4845 case SCMD_WRITE: /* write_6 */ 4846 acp->blkno = GETG0ADDR(cdbp); 4847 goto do_io; 4848 4849 case SCMD_READ_G5: /* read_12 */ 4850 case SCMD_WRITE_G5: /* write_12 */ 4851 acp->blkno = GETG5ADDR(cdbp); 4852 goto do_io; 4853 4854 case SCMD_READ_G1: /* read_10 */ 4855 case SCMD_WRITE_G1: /* write_10 */ 4856 acp->blkno = (uint32_t)GETG1ADDR(cdbp); 4857 do_io: 4858 if (acp->flags & AAC_CMD_DMA_VALID) { 4859 uint64_t cnt_size = dvp->size; 4860 4861 /* 4862 * If LBA > array size AND rawio, the 4863 * adapter may hang. So check it before 4864 * sending. 4865 * NOTE: (blkno + blkcnt) may overflow 4866 */ 4867 if ((acp->blkno < cnt_size) && 4868 ((acp->blkno + acp->bcount / 4869 AAC_BLK_SIZE) <= cnt_size)) { 4870 rval = aac_do_io(softs, acp); 4871 } else { 4872 /* 4873 * Request exceeds the capacity of disk, 4874 * set error block number to last LBA 4875 * + 1. 
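 * (i.e. report ILLEGAL REQUEST with ASC 0x21, LBA out of range,
 * presumably carrying cnt_size in the sense information field)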
4876 */ 4877 aac_set_arq_data(pkt, 4878 KEY_ILLEGAL_REQUEST, 0x21, 4879 0x00, cnt_size); 4880 aac_soft_callback(softs, acp); 4881 rval = TRAN_ACCEPT; 4882 } 4883 } else if (acp->bcount == 0) { 4884 /* For 0 length IO, just return ok */ 4885 aac_soft_callback(softs, acp); 4886 rval = TRAN_ACCEPT; 4887 } else { 4888 rval = TRAN_BADPKT; 4889 } 4890 break; 4891 4892 case SCMD_MODE_SENSE: /* mode_sense_6 */ 4893 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */ 4894 int capacity; 4895 4896 aac_free_dmamap(acp); 4897 if (dvp->size > 0xffffffffull) 4898 capacity = 0xfffffffful; /* 64-bit LBA */ 4899 else 4900 capacity = dvp->size; 4901 aac_mode_sense(softs, pkt, cdbp, bp, capacity); 4902 aac_soft_callback(softs, acp); 4903 rval = TRAN_ACCEPT; 4904 break; 4905 } 4906 4907 case SCMD_START_STOP: 4908 if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) { 4909 acp->aac_cmd_fib = aac_cmd_fib_startstop; 4910 acp->ac_comp = aac_startstop_complete; 4911 rval = aac_do_io(softs, acp); 4912 break; 4913 } 4914 /* FALLTHRU */ 4915 case SCMD_TEST_UNIT_READY: 4916 case SCMD_REQUEST_SENSE: 4917 case SCMD_FORMAT: 4918 aac_free_dmamap(acp); 4919 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4920 if (acp->flags & AAC_CMD_BUF_READ) { 4921 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4922 bp_mapin(bp); 4923 bzero(bp->b_un.b_addr, bp->b_bcount); 4924 } 4925 pkt->pkt_state |= STATE_XFERRED_DATA; 4926 } 4927 aac_soft_callback(softs, acp); 4928 rval = TRAN_ACCEPT; 4929 break; 4930 4931 case SCMD_SYNCHRONIZE_CACHE: 4932 acp->flags |= AAC_CMD_NTAG; 4933 acp->aac_cmd_fib = aac_cmd_fib_sync; 4934 acp->ac_comp = aac_synccache_complete; 4935 rval = aac_do_io(softs, acp); 4936 break; 4937 4938 case SCMD_DOORLOCK: 4939 aac_free_dmamap(acp); 4940 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0; 4941 aac_soft_callback(softs, acp); 4942 rval = TRAN_ACCEPT; 4943 break; 4944 4945 default: /* unknown command */ 4946 aac_unknown_scmd(softs, acp); 4947 rval = TRAN_ACCEPT; 4948 break; 4949 } 4950 4951 return (rval); 4952 } 4953 4954 static int 4955 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) 4956 { 4957 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4958 struct aac_cmd *acp = PKT2AC(pkt); 4959 struct aac_device *dvp = acp->dvp; 4960 int rval; 4961 4962 DBCALLED(softs, 2); 4963 4964 /* 4965 * Reinitialize some fields of ac and pkt; the packet may 4966 * have been resubmitted 4967 */ 4968 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \ 4969 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID; 4970 acp->timeout = acp->pkt->pkt_time; 4971 if (pkt->pkt_flags & FLAG_NOINTR) 4972 acp->flags |= AAC_CMD_NO_INTR; 4973 #ifdef DEBUG 4974 acp->fib_flags = AACDB_FLAGS_FIB_SCMD; 4975 #endif 4976 pkt->pkt_reason = CMD_CMPLT; 4977 pkt->pkt_state = 0; 4978 pkt->pkt_statistics = 0; 4979 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */ 4980 4981 if (acp->flags & AAC_CMD_DMA_VALID) { 4982 pkt->pkt_resid = acp->bcount; 4983 /* Consistent packets need to be sync'ed first */ 4984 if ((acp->flags & AAC_CMD_CONSISTENT) && 4985 (acp->flags & AAC_CMD_BUF_WRITE)) 4986 if (aac_dma_sync_ac(acp) != AACOK) { 4987 ddi_fm_service_impact(softs->devinfo_p, 4988 DDI_SERVICE_UNAFFECTED); 4989 return (TRAN_BADPKT); 4990 } 4991 } else { 4992 pkt->pkt_resid = 0; 4993 } 4994 4995 mutex_enter(&softs->io_lock); 4996 AACDB_PRINT_SCMD(softs, acp); 4997 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) && 4998 !(softs->state & AAC_STATE_DEAD)) { 4999 if (dvp->type == AAC_DEV_LD) { 5000 if (ap->a_lun == 0) 5001 rval = 
aac_tran_start_ld(softs, acp); 5002 else 5003 goto error; 5004 } else { 5005 rval = aac_do_io(softs, acp); 5006 } 5007 } else { 5008 error: 5009 #ifdef DEBUG 5010 if (!(softs->state & AAC_STATE_DEAD)) { 5011 AACDB_PRINT_TRAN(softs, 5012 "Cannot send cmd to target t%dL%d: %s", 5013 ap->a_target, ap->a_lun, 5014 "target invalid"); 5015 } else { 5016 AACDB_PRINT(softs, CE_WARN, 5017 "Cannot send cmd to target t%dL%d: %s", 5018 ap->a_target, ap->a_lun, 5019 "adapter dead"); 5020 } 5021 #endif 5022 rval = TRAN_FATAL_ERROR; 5023 } 5024 mutex_exit(&softs->io_lock); 5025 return (rval); 5026 } 5027 5028 static int 5029 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom) 5030 { 5031 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5032 struct aac_device *dvp; 5033 int rval; 5034 5035 DBCALLED(softs, 2); 5036 5037 /* We don't allow inquiring about capabilities for other targets */ 5038 if (cap == NULL || whom == 0) { 5039 AACDB_PRINT(softs, CE_WARN, 5040 "GetCap> %s not supported: whom=%d", cap, whom); 5041 return (-1); 5042 } 5043 5044 mutex_enter(&softs->io_lock); 5045 dvp = AAC_DEV(softs, ap->a_target); 5046 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 5047 mutex_exit(&softs->io_lock); 5048 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap", 5049 ap->a_target, ap->a_lun); 5050 return (-1); 5051 } 5052 5053 switch (scsi_hba_lookup_capstr(cap)) { 5054 case SCSI_CAP_ARQ: /* auto request sense */ 5055 rval = 1; 5056 break; 5057 case SCSI_CAP_UNTAGGED_QING: 5058 case SCSI_CAP_TAGGED_QING: 5059 rval = 1; 5060 break; 5061 case SCSI_CAP_DMA_MAX: 5062 rval = softs->dma_max; 5063 break; 5064 default: 5065 rval = -1; 5066 break; 5067 } 5068 mutex_exit(&softs->io_lock); 5069 5070 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d", 5071 cap, ap->a_target, ap->a_lun, rval); 5072 return (rval); 5073 } 5074 5075 /*ARGSUSED*/ 5076 static int 5077 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 5078 { 5079 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5080 struct aac_device *dvp; 5081 int rval; 5082 5083 DBCALLED(softs, 2); 5084 5085 /* We don't allow inquiring about capabilities for other targets */ 5086 if (cap == NULL || whom == 0) { 5087 AACDB_PRINT(softs, CE_WARN, 5088 "SetCap> %s not supported: whom=%d", cap, whom); 5089 return (-1); 5090 } 5091 5092 mutex_enter(&softs->io_lock); 5093 dvp = AAC_DEV(softs, ap->a_target); 5094 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 5095 mutex_exit(&softs->io_lock); 5096 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap", 5097 ap->a_target, ap->a_lun); 5098 return (-1); 5099 } 5100 5101 switch (scsi_hba_lookup_capstr(cap)) { 5102 case SCSI_CAP_ARQ: 5103 /* Force auto request sense */ 5104 rval = (value == 1) ? 1 : 0; 5105 break; 5106 case SCSI_CAP_UNTAGGED_QING: 5107 case SCSI_CAP_TAGGED_QING: 5108 rval = (value == 1) ? 
1 : 0; 5109 break; 5110 default: 5111 rval = -1; 5112 break; 5113 } 5114 mutex_exit(&softs->io_lock); 5115 5116 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d", 5117 cap, ap->a_target, ap->a_lun, value, rval); 5118 return (rval); 5119 } 5120 5121 static void 5122 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5123 { 5124 struct aac_cmd *acp = PKT2AC(pkt); 5125 5126 DBCALLED(NULL, 2); 5127 5128 if (acp->sgt) { 5129 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 5130 acp->left_cookien); 5131 } 5132 aac_free_dmamap(acp); 5133 ASSERT(acp->slotp == NULL); 5134 scsi_hba_pkt_free(ap, pkt); 5135 } 5136 5137 int 5138 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp, 5139 struct buf *bp, int flags, int (*cb)(), caddr_t arg) 5140 { 5141 int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP; 5142 uint_t oldcookiec; 5143 int bioerr = 0; 5144 int rval; 5145 5146 oldcookiec = acp->left_cookien; 5147 5148 /* Move window to build s/g map */ 5149 if (acp->total_nwin > 0) { 5150 if (++acp->cur_win < acp->total_nwin) { 5151 off_t off; 5152 size_t len; 5153 5154 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win, 5155 &off, &len, &acp->cookie, &acp->left_cookien); 5156 if (rval == DDI_SUCCESS) 5157 goto get_dma_cookies; 5158 AACDB_PRINT(softs, CE_WARN, 5159 "ddi_dma_getwin() fail %d", rval); 5160 return (AACERR); 5161 } 5162 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer"); 5163 return (AACERR); 5164 } 5165 5166 /* We need to transfer data, so we alloc DMA resources for this pkt */ 5167 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) { 5168 uint_t dma_flags = 0; 5169 struct aac_sge *sge; 5170 5171 /* 5172 * We will still use this point to fake some 5173 * infomation in tran_start 5174 */ 5175 acp->bp = bp; 5176 5177 /* Set dma flags */ 5178 if (BUF_IS_READ(bp)) { 5179 dma_flags |= DDI_DMA_READ; 5180 acp->flags |= AAC_CMD_BUF_READ; 5181 } else { 5182 dma_flags |= DDI_DMA_WRITE; 5183 acp->flags |= AAC_CMD_BUF_WRITE; 5184 } 5185 if (flags & PKT_CONSISTENT) 5186 dma_flags |= DDI_DMA_CONSISTENT; 5187 if (flags & PKT_DMA_PARTIAL) 5188 dma_flags |= DDI_DMA_PARTIAL; 5189 5190 /* Alloc buf dma handle */ 5191 if (!acp->buf_dma_handle) { 5192 rval = ddi_dma_alloc_handle(softs->devinfo_p, 5193 &softs->buf_dma_attr, cb, arg, 5194 &acp->buf_dma_handle); 5195 if (rval != DDI_SUCCESS) { 5196 AACDB_PRINT(softs, CE_WARN, 5197 "Can't allocate DMA handle, errno=%d", 5198 rval); 5199 goto error_out; 5200 } 5201 } 5202 5203 /* Bind buf */ 5204 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) { 5205 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle, 5206 bp, dma_flags, cb, arg, &acp->cookie, 5207 &acp->left_cookien); 5208 } else { 5209 size_t bufsz; 5210 5211 AACDB_PRINT_TRAN(softs, 5212 "non-aligned buffer: addr=0x%p, cnt=%lu", 5213 (void *)bp->b_un.b_addr, bp->b_bcount); 5214 if (bp->b_flags & (B_PAGEIO|B_PHYS)) 5215 bp_mapin(bp); 5216 5217 rval = ddi_dma_mem_alloc(acp->buf_dma_handle, 5218 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN), 5219 &softs->acc_attr, DDI_DMA_STREAMING, 5220 cb, arg, &acp->abp, &bufsz, &acp->abh); 5221 5222 if (rval != DDI_SUCCESS) { 5223 AACDB_PRINT(softs, CE_NOTE, 5224 "Cannot alloc DMA to non-aligned buf"); 5225 bioerr = 0; 5226 goto error_out; 5227 } 5228 5229 if (acp->flags & AAC_CMD_BUF_WRITE) 5230 ddi_rep_put8(acp->abh, 5231 (uint8_t *)bp->b_un.b_addr, 5232 (uint8_t *)acp->abp, bp->b_bcount, 5233 DDI_DEV_AUTOINCR); 5234 5235 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle, 5236 NULL, acp->abp, bufsz, dma_flags, cb, 
arg, 5237 &acp->cookie, &acp->left_cookien); 5238 } 5239 5240 switch (rval) { 5241 case DDI_DMA_PARTIAL_MAP: 5242 if (ddi_dma_numwin(acp->buf_dma_handle, 5243 &acp->total_nwin) == DDI_FAILURE) { 5244 AACDB_PRINT(softs, CE_WARN, 5245 "Cannot get number of DMA windows"); 5246 bioerr = 0; 5247 goto error_out; 5248 } 5249 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5250 acp->left_cookien); 5251 acp->cur_win = 0; 5252 break; 5253 5254 case DDI_DMA_MAPPED: 5255 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5256 acp->left_cookien); 5257 acp->cur_win = 0; 5258 acp->total_nwin = 1; 5259 break; 5260 5261 case DDI_DMA_NORESOURCES: 5262 bioerr = 0; 5263 AACDB_PRINT(softs, CE_WARN, 5264 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES"); 5265 goto error_out; 5266 case DDI_DMA_BADATTR: 5267 case DDI_DMA_NOMAPPING: 5268 bioerr = EFAULT; 5269 AACDB_PRINT(softs, CE_WARN, 5270 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING"); 5271 goto error_out; 5272 case DDI_DMA_TOOBIG: 5273 bioerr = EINVAL; 5274 AACDB_PRINT(softs, CE_WARN, 5275 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)", 5276 bp->b_bcount); 5277 goto error_out; 5278 default: 5279 bioerr = EINVAL; 5280 AACDB_PRINT(softs, CE_WARN, 5281 "Cannot bind buf for DMA: %d", rval); 5282 goto error_out; 5283 } 5284 acp->flags |= AAC_CMD_DMA_VALID; 5285 5286 get_dma_cookies: 5287 ASSERT(acp->left_cookien > 0); 5288 if (acp->left_cookien > softs->aac_sg_tablesize) { 5289 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d", 5290 acp->left_cookien); 5291 bioerr = EINVAL; 5292 goto error_out; 5293 } 5294 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) { 5295 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 5296 oldcookiec); 5297 acp->sgt = NULL; 5298 } 5299 if (acp->sgt == NULL) { 5300 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \ 5301 acp->left_cookien, kf); 5302 if (acp->sgt == NULL) { 5303 AACDB_PRINT(softs, CE_WARN, 5304 "sgt kmem_alloc fail"); 5305 bioerr = ENOMEM; 5306 goto error_out; 5307 } 5308 } 5309 5310 sge = &acp->sgt[0]; 5311 sge->bcount = acp->cookie.dmac_size; 5312 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5313 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5314 acp->bcount = acp->cookie.dmac_size; 5315 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) { 5316 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie); 5317 sge->bcount = acp->cookie.dmac_size; 5318 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5319 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5320 acp->bcount += acp->cookie.dmac_size; 5321 } 5322 5323 /* 5324 * Note: the old DMA engine does not correctly handle the 5325 * dma_attr_maxxfer attribute, so we have to enforce 5326 * it ourselves. 5327 */ 5328 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) { 5329 AACDB_PRINT(softs, CE_NOTE, 5330 "large xfer size received %d\n", acp->bcount); 5331 bioerr = EINVAL; 5332 goto error_out; 5333 } 5334 5335 acp->total_xfer += acp->bcount; 5336 5337 if (acp->pkt) { 5338 /* Return remaining byte count */ 5339 if (acp->total_xfer <= bp->b_bcount) { 5340 acp->pkt->pkt_resid = bp->b_bcount - \ 5341 acp->total_xfer; 5342 } else { 5343 /* 5344 * Allocated DMA size is greater than the buf 5345 * size of bp. This is caused by devices like 5346 * tape. We have extra bytes allocated, but 5347 * the packet residual has to stay correct.
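 * (This can happen e.g. when the bind was done on a bounce buffer
 * that was rounded up to AAC_DMA_ALIGN; clamping pkt_resid to 0
 * below keeps the residual sane.)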
5348 */ 5349 acp->pkt->pkt_resid = 0; 5350 } 5351 AACDB_PRINT_TRAN(softs, 5352 "bp=0x%p, xfered=%d/%d, resid=%d", 5353 (void *)bp->b_un.b_addr, (int)acp->total_xfer, 5354 (int)bp->b_bcount, (int)acp->pkt->pkt_resid); 5355 } 5356 } 5357 return (AACOK); 5358 5359 error_out: 5360 bioerror(bp, bioerr); 5361 return (AACERR); 5362 } 5363 5364 static struct scsi_pkt * 5365 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 5366 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags, 5367 int (*callback)(), caddr_t arg) 5368 { 5369 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5370 struct aac_cmd *acp, *new_acp; 5371 5372 DBCALLED(softs, 2); 5373 5374 /* Allocate pkt */ 5375 if (pkt == NULL) { 5376 int slen; 5377 5378 /* Force auto request sense */ 5379 slen = (statuslen > softs->slen) ? statuslen : softs->slen; 5380 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen, 5381 slen, tgtlen, sizeof (struct aac_cmd), callback, arg); 5382 if (pkt == NULL) { 5383 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed"); 5384 return (NULL); 5385 } 5386 acp = new_acp = PKT2AC(pkt); 5387 acp->pkt = pkt; 5388 acp->cmdlen = cmdlen; 5389 5390 if (ap->a_target < AAC_MAX_LD) { 5391 acp->dvp = &softs->containers[ap->a_target].dev; 5392 acp->aac_cmd_fib = softs->aac_cmd_fib; 5393 acp->ac_comp = aac_ld_complete; 5394 } else { 5395 _NOTE(ASSUMING_PROTECTED(softs->nondasds)) 5396 5397 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev; 5398 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi; 5399 acp->ac_comp = aac_pd_complete; 5400 } 5401 } else { 5402 acp = PKT2AC(pkt); 5403 new_acp = NULL; 5404 } 5405 5406 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK) 5407 return (pkt); 5408 5409 if (new_acp) 5410 aac_tran_destroy_pkt(ap, pkt); 5411 return (NULL); 5412 } 5413 5414 /* 5415 * tran_sync_pkt(9E) - explicit DMA synchronization 5416 */ 5417 /*ARGSUSED*/ 5418 static void 5419 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5420 { 5421 struct aac_cmd *acp = PKT2AC(pkt); 5422 5423 DBCALLED(NULL, 2); 5424 5425 if (aac_dma_sync_ac(acp) != AACOK) 5426 ddi_fm_service_impact( 5427 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p, 5428 DDI_SERVICE_UNAFFECTED); 5429 } 5430 5431 /* 5432 * tran_dmafree(9E) - deallocate DMA resources allocated for command 5433 */ 5434 /*ARGSUSED*/ 5435 static void 5436 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 5437 { 5438 struct aac_cmd *acp = PKT2AC(pkt); 5439 5440 DBCALLED(NULL, 2); 5441 5442 aac_free_dmamap(acp); 5443 } 5444 5445 static int 5446 aac_do_quiesce(struct aac_softstate *softs) 5447 { 5448 aac_hold_bus(softs, AAC_IOCMD_ASYNC); 5449 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 5450 aac_start_drain(softs); 5451 do { 5452 if (cv_wait_sig(&softs->drain_cv, 5453 &softs->io_lock) == 0) { 5454 /* Quiesce has been interrupted */ 5455 aac_stop_drain(softs); 5456 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5457 aac_start_waiting_io(softs); 5458 return (AACERR); 5459 } 5460 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]); 5461 aac_stop_drain(softs); 5462 } 5463 5464 softs->state |= AAC_STATE_QUIESCED; 5465 return (AACOK); 5466 } 5467 5468 static int 5469 aac_tran_quiesce(dev_info_t *dip) 5470 { 5471 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5472 int rval; 5473 5474 DBCALLED(softs, 1); 5475 5476 mutex_enter(&softs->io_lock); 5477 if (aac_do_quiesce(softs) == AACOK) 5478 rval = 0; 5479 else 5480 rval = 1; 5481 mutex_exit(&softs->io_lock); 5482 return (rval); 5483 } 5484 5485 static int 5486 aac_do_unquiesce(struct 
aac_softstate *softs) 5487 { 5488 softs->state &= ~AAC_STATE_QUIESCED; 5489 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5490 5491 aac_start_waiting_io(softs); 5492 return (AACOK); 5493 } 5494 5495 static int 5496 aac_tran_unquiesce(dev_info_t *dip) 5497 { 5498 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5499 int rval; 5500 5501 DBCALLED(softs, 1); 5502 5503 mutex_enter(&softs->io_lock); 5504 if (aac_do_unquiesce(softs) == AACOK) 5505 rval = 0; 5506 else 5507 rval = 1; 5508 mutex_exit(&softs->io_lock); 5509 return (rval); 5510 } 5511 5512 static int 5513 aac_hba_setup(struct aac_softstate *softs) 5514 { 5515 scsi_hba_tran_t *hba_tran; 5516 int rval; 5517 5518 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP); 5519 if (hba_tran == NULL) 5520 return (AACERR); 5521 hba_tran->tran_hba_private = softs; 5522 hba_tran->tran_tgt_init = aac_tran_tgt_init; 5523 hba_tran->tran_tgt_free = aac_tran_tgt_free; 5524 hba_tran->tran_tgt_probe = scsi_hba_probe; 5525 hba_tran->tran_start = aac_tran_start; 5526 hba_tran->tran_getcap = aac_tran_getcap; 5527 hba_tran->tran_setcap = aac_tran_setcap; 5528 hba_tran->tran_init_pkt = aac_tran_init_pkt; 5529 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt; 5530 hba_tran->tran_reset = aac_tran_reset; 5531 hba_tran->tran_abort = aac_tran_abort; 5532 hba_tran->tran_sync_pkt = aac_tran_sync_pkt; 5533 hba_tran->tran_dmafree = aac_tran_dmafree; 5534 hba_tran->tran_quiesce = aac_tran_quiesce; 5535 hba_tran->tran_unquiesce = aac_tran_unquiesce; 5536 hba_tran->tran_bus_config = aac_tran_bus_config; 5537 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr, 5538 hba_tran, 0); 5539 if (rval != DDI_SUCCESS) { 5540 scsi_hba_tran_free(hba_tran); 5541 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed"); 5542 return (AACERR); 5543 } 5544 5545 softs->hba_tran = hba_tran; 5546 return (AACOK); 5547 } 5548 5549 /* 5550 * FIB setup operations 5551 */ 5552 5553 /* 5554 * Init FIB header 5555 */ 5556 static void 5557 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_cmd *acp, 5558 uint16_t cmd) 5559 { 5560 struct aac_slot *slotp = acp->slotp; 5561 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5562 struct aac_fib *fibp = slotp->fibp; 5563 uint32_t xfer_state; 5564 5565 xfer_state = 5566 AAC_FIBSTATE_HOSTOWNED | 5567 AAC_FIBSTATE_INITIALISED | 5568 AAC_FIBSTATE_EMPTY | 5569 AAC_FIBSTATE_FAST_RESPONSE | /* enable fast io */ 5570 AAC_FIBSTATE_FROMHOST | 5571 AAC_FIBSTATE_REXPECTED | 5572 AAC_FIBSTATE_NORM; 5573 5574 if (!(acp->flags & AAC_CMD_SYNC)) 5575 xfer_state |= AAC_FIBSTATE_ASYNC; 5576 5577 ddi_put32(acc, &fibp->Header.XferState, xfer_state); 5578 ddi_put16(acc, &fibp->Header.Command, cmd); 5579 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB); 5580 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */ 5581 ddi_put16(acc, &fibp->Header.Size, acp->fib_size); 5582 ddi_put16(acc, &fibp->Header.SenderSize, softs->aac_max_fib_size); 5583 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2)); 5584 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5585 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */ 5586 } 5587 5588 /* 5589 * Init FIB for raw IO command 5590 */ 5591 static void 5592 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp) 5593 { 5594 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5595 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0]; 5596 struct aac_sg_entryraw *sgp; 5597 struct aac_sge *sge; 5598 5599 /* Calculate FIB size */ 5600 
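/*
 * The FIB consists of the header, one aac_raw_io (which presumably
 * already embeds the first SG entry) and one extra aac_sg_entryraw
 * per additional DMA cookie, hence the (left_cookien - 1) below.
 */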
acp->fib_size = sizeof (struct aac_fib_header) + \ 5601 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \ 5602 sizeof (struct aac_sg_entryraw); 5603 5604 aac_cmd_fib_header(softs, acp, RawIo); 5605 5606 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0); 5607 ddi_put16(acc, &io->BpTotal, 0); 5608 ddi_put16(acc, &io->BpComplete, 0); 5609 5610 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno)); 5611 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno)); 5612 ddi_put16(acc, &io->ContainerId, 5613 ((struct aac_container *)acp->dvp)->cid); 5614 5615 /* Fill SG table */ 5616 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien); 5617 ddi_put32(acc, &io->ByteCount, acp->bcount); 5618 5619 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0]; 5620 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5621 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5622 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5623 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5624 sgp->Next = 0; 5625 sgp->Prev = 0; 5626 sgp->Flags = 0; 5627 } 5628 } 5629 5630 /* Init FIB for 64-bit block IO command */ 5631 static void 5632 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp) 5633 { 5634 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5635 struct aac_blockread64 *br = (struct aac_blockread64 *) \ 5636 &acp->slotp->fibp->data[0]; 5637 struct aac_sg_entry64 *sgp; 5638 struct aac_sge *sge; 5639 5640 acp->fib_size = sizeof (struct aac_fib_header) + \ 5641 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \ 5642 sizeof (struct aac_sg_entry64); 5643 5644 aac_cmd_fib_header(softs, acp, ContainerCommand64); 5645 5646 /* 5647 * The definitions for aac_blockread64 and aac_blockwrite64 5648 * are the same. 5649 */ 5650 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5651 ddi_put16(acc, &br->ContainerId, 5652 ((struct aac_container *)acp->dvp)->cid); 5653 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ? 
5654 VM_CtHostRead64 : VM_CtHostWrite64); 5655 ddi_put16(acc, &br->Pad, 0); 5656 ddi_put16(acc, &br->Flags, 0); 5657 5658 /* Fill SG table */ 5659 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien); 5660 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE); 5661 5662 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0]; 5663 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5664 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5665 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5666 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5667 } 5668 } 5669 5670 /* Init FIB for block IO command */ 5671 static void 5672 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp) 5673 { 5674 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5675 struct aac_blockread *br = (struct aac_blockread *) \ 5676 &acp->slotp->fibp->data[0]; 5677 struct aac_sg_entry *sgp; 5678 struct aac_sge *sge = &acp->sgt[0]; 5679 5680 if (acp->flags & AAC_CMD_BUF_READ) { 5681 acp->fib_size = sizeof (struct aac_fib_header) + \ 5682 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \ 5683 sizeof (struct aac_sg_entry); 5684 5685 ddi_put32(acc, &br->Command, VM_CtBlockRead); 5686 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien); 5687 sgp = &br->SgMap.SgEntry[0]; 5688 } else { 5689 struct aac_blockwrite *bw = (struct aac_blockwrite *)br; 5690 5691 acp->fib_size = sizeof (struct aac_fib_header) + \ 5692 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \ 5693 sizeof (struct aac_sg_entry); 5694 5695 ddi_put32(acc, &bw->Command, VM_CtBlockWrite); 5696 ddi_put32(acc, &bw->Stable, CUNSTABLE); 5697 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien); 5698 sgp = &bw->SgMap.SgEntry[0]; 5699 } 5700 aac_cmd_fib_header(softs, acp, ContainerCommand); 5701 5702 /* 5703 * aac_blockread and aac_blockwrite have the similar 5704 * structure head, so use br for bw here 5705 */ 5706 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5707 ddi_put32(acc, &br->ContainerId, 5708 ((struct aac_container *)acp->dvp)->cid); 5709 ddi_put32(acc, &br->ByteCount, acp->bcount); 5710 5711 /* Fill SG table */ 5712 for (sge = &acp->sgt[0]; 5713 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5714 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5715 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5716 } 5717 } 5718 5719 /*ARGSUSED*/ 5720 void 5721 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp) 5722 { 5723 struct aac_slot *slotp = acp->slotp; 5724 struct aac_fib *fibp = slotp->fibp; 5725 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5726 5727 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp, 5728 acp->fib_size, /* only copy data of needed length */ 5729 DDI_DEV_AUTOINCR); 5730 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5731 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2); 5732 } 5733 5734 static void 5735 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp) 5736 { 5737 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5738 struct aac_synchronize_command *sync = 5739 (struct aac_synchronize_command *)&acp->slotp->fibp->data[0]; 5740 5741 acp->fib_size = AAC_FIB_SIZEOF(struct aac_synchronize_command); 5742 5743 aac_cmd_fib_header(softs, acp, ContainerCommand); 5744 ddi_put32(acc, &sync->Command, VM_ContainerConfig); 5745 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE); 5746 ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid); 5747 ddi_put32(acc, &sync->Count, 5748 sizeof 
(((struct aac_synchronize_reply *)0)->Data)); 5749 } 5750 5751 /* 5752 * Start/Stop unit (Power Management) 5753 */ 5754 static void 5755 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp) 5756 { 5757 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5758 struct aac_Container *cmd = 5759 (struct aac_Container *)&acp->slotp->fibp->data[0]; 5760 union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp; 5761 5762 acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container); 5763 5764 aac_cmd_fib_header(softs, acp, ContainerCommand); 5765 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 5766 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 5767 ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT); 5768 ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \ 5769 AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT); 5770 ddi_put32(acc, &cmd->CTCommand.param[1], 5771 ((struct aac_container *)acp->dvp)->cid); 5772 ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1); 5773 } 5774 5775 /* 5776 * Init FIB for pass-through SCMD 5777 */ 5778 static void 5779 aac_cmd_fib_srb(struct aac_cmd *acp) 5780 { 5781 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5782 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5783 uint8_t *cdb; 5784 5785 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi); 5786 ddi_put32(acc, &srb->retry_limit, 0); 5787 ddi_put32(acc, &srb->cdb_size, acp->cmdlen); 5788 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */ 5789 if (acp->fibp == NULL) { 5790 if (acp->flags & AAC_CMD_BUF_READ) 5791 ddi_put32(acc, &srb->flags, SRB_DataIn); 5792 else if (acp->flags & AAC_CMD_BUF_WRITE) 5793 ddi_put32(acc, &srb->flags, SRB_DataOut); 5794 ddi_put32(acc, &srb->channel, 5795 ((struct aac_nondasd *)acp->dvp)->bus); 5796 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid); 5797 ddi_put32(acc, &srb->lun, 0); 5798 cdb = acp->pkt->pkt_cdbp; 5799 } else { 5800 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0]; 5801 5802 ddi_put32(acc, &srb->flags, srb0->flags); 5803 ddi_put32(acc, &srb->channel, srb0->channel); 5804 ddi_put32(acc, &srb->id, srb0->id); 5805 ddi_put32(acc, &srb->lun, srb0->lun); 5806 cdb = srb0->cdb; 5807 } 5808 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR); 5809 } 5810 5811 static void 5812 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp) 5813 { 5814 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5815 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5816 struct aac_sg_entry *sgp; 5817 struct aac_sge *sge; 5818 5819 acp->fib_size = sizeof (struct aac_fib_header) + \ 5820 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5821 acp->left_cookien * sizeof (struct aac_sg_entry); 5822 5823 /* Fill FIB and SRB headers, and copy cdb */ 5824 aac_cmd_fib_header(softs, acp, ScsiPortCommand); 5825 aac_cmd_fib_srb(acp); 5826 5827 /* Fill SG table */ 5828 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5829 ddi_put32(acc, &srb->count, acp->bcount); 5830 5831 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0]; 5832 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5833 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5834 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5835 } 5836 } 5837 5838 static void 5839 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp) 5840 { 5841 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5842 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5843 struct aac_sg_entry64 
*sgp; 5844 struct aac_sge *sge; 5845 5846 acp->fib_size = sizeof (struct aac_fib_header) + \ 5847 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5848 acp->left_cookien * sizeof (struct aac_sg_entry64); 5849 5850 /* Fill FIB and SRB headers, and copy cdb */ 5851 aac_cmd_fib_header(softs, acp, ScsiPortCommandU64); 5852 aac_cmd_fib_srb(acp); 5853 5854 /* Fill SG table */ 5855 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5856 ddi_put32(acc, &srb->count, acp->bcount); 5857 5858 for (sge = &acp->sgt[0], 5859 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0]; 5860 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5861 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5862 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5863 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5864 } 5865 } 5866 5867 static int 5868 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 5869 { 5870 struct aac_slot *slotp; 5871 5872 if (slotp = aac_get_slot(softs)) { 5873 acp->slotp = slotp; 5874 slotp->acp = acp; 5875 acp->aac_cmd_fib(softs, acp); 5876 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, 5877 DDI_DMA_SYNC_FORDEV); 5878 return (AACOK); 5879 } 5880 return (AACERR); 5881 } 5882 5883 static int 5884 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp) 5885 { 5886 struct aac_device *dvp = acp->dvp; 5887 int q = AAC_CMDQ(acp); 5888 5889 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) { 5890 if (dvp) { 5891 if (dvp->ncmds[q] < dvp->throttle[q]) { 5892 if (!(acp->flags & AAC_CMD_NTAG) || 5893 dvp->ncmds[q] == 0) { 5894 return (aac_cmd_slot_bind(softs, acp)); 5895 } 5896 ASSERT(q == AAC_CMDQ_ASYNC); 5897 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC, 5898 AAC_THROTTLE_DRAIN); 5899 } 5900 } else { 5901 return (aac_cmd_slot_bind(softs, acp)); 5902 } 5903 } 5904 return (AACERR); 5905 } 5906 5907 static int 5908 aac_sync_fib_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 5909 { 5910 struct aac_slot *slotp; 5911 5912 while (softs->sync_ac.slotp) 5913 cv_wait(&softs->sync_fib_cv, &softs->io_lock); 5914 5915 if (slotp = aac_get_slot(softs)) { 5916 ASSERT(acp->slotp == NULL); 5917 5918 acp->slotp = slotp; 5919 slotp->acp = acp; 5920 return (AACOK); 5921 } 5922 return (AACERR); 5923 } 5924 5925 static void 5926 aac_sync_fib_slot_release(struct aac_softstate *softs, struct aac_cmd *acp) 5927 { 5928 ASSERT(acp->slotp); 5929 5930 aac_release_slot(softs, acp->slotp); 5931 acp->slotp->acp = NULL; 5932 acp->slotp = NULL; 5933 5934 cv_signal(&softs->sync_fib_cv); 5935 } 5936 5937 static void 5938 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp) 5939 { 5940 struct aac_slot *slotp = acp->slotp; 5941 int q = AAC_CMDQ(acp); 5942 int rval; 5943 5944 /* Set ac and pkt */ 5945 if (acp->pkt) { /* ac from ioctl has no pkt */ 5946 acp->pkt->pkt_state |= 5947 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD; 5948 } 5949 if (acp->timeout) /* 0 indicates no timeout */ 5950 acp->timeout += aac_timebase + aac_tick; 5951 5952 if (acp->dvp) 5953 acp->dvp->ncmds[q]++; 5954 softs->bus_ncmds[q]++; 5955 aac_cmd_enqueue(&softs->q_busy, acp); 5956 5957 AACDB_PRINT_FIB(softs, slotp); 5958 5959 if (softs->flags & AAC_FLAGS_NEW_COMM) { 5960 rval = aac_send_command(softs, slotp); 5961 } else { 5962 /* 5963 * If fib can not be enqueued, the adapter is in an abnormal 5964 * state, there will be no interrupt to us. 
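 * The failure path below backs the command out: STATE_SENT_CMD is
 * cleared, the packet reason becomes CMD_INCOMPLETE, aac_end_io()
 * retires the command and, unless this is polled or no-callback IO,
 * a soft interrupt is triggered to deliver the completion.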
5965 */ 5966 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q, 5967 slotp->fib_phyaddr, acp->fib_size); 5968 } 5969 5970 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) 5971 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 5972 5973 /* 5974 * NOTE: We send command only when slots availabe, so should never 5975 * reach here. 5976 */ 5977 if (rval != AACOK) { 5978 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed"); 5979 if (acp->pkt) { 5980 acp->pkt->pkt_state &= ~STATE_SENT_CMD; 5981 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0); 5982 } 5983 aac_end_io(softs, acp); 5984 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB))) 5985 ddi_trigger_softintr(softs->softint_id); 5986 } 5987 } 5988 5989 static void 5990 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q) 5991 { 5992 struct aac_cmd *acp, *next_acp; 5993 5994 /* Serve as many waiting io's as possible */ 5995 for (acp = q->q_head; acp; acp = next_acp) { 5996 next_acp = acp->next; 5997 if (aac_bind_io(softs, acp) == AACOK) { 5998 aac_cmd_delete(q, acp); 5999 aac_start_io(softs, acp); 6000 } 6001 if (softs->free_io_slot_head == NULL) 6002 break; 6003 } 6004 } 6005 6006 static void 6007 aac_start_waiting_io(struct aac_softstate *softs) 6008 { 6009 /* 6010 * Sync FIB io is served before async FIB io so that io requests 6011 * sent by interactive userland commands get responded asap. 6012 */ 6013 if (softs->q_wait[AAC_CMDQ_SYNC].q_head) 6014 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]); 6015 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head) 6016 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]); 6017 } 6018 6019 static void 6020 aac_drain_comp_q(struct aac_softstate *softs) 6021 { 6022 struct aac_cmd *acp; 6023 struct scsi_pkt *pkt; 6024 6025 /*CONSTCOND*/ 6026 while (1) { 6027 mutex_enter(&softs->q_comp_mutex); 6028 acp = aac_cmd_dequeue(&softs->q_comp); 6029 mutex_exit(&softs->q_comp_mutex); 6030 if (acp != NULL) { 6031 ASSERT(acp->pkt != NULL); 6032 pkt = acp->pkt; 6033 6034 if (pkt->pkt_reason == CMD_CMPLT) { 6035 /* 6036 * Consistent packets need to be sync'ed first 6037 */ 6038 if ((acp->flags & AAC_CMD_CONSISTENT) && 6039 (acp->flags & AAC_CMD_BUF_READ)) { 6040 if (aac_dma_sync_ac(acp) != AACOK) { 6041 ddi_fm_service_impact( 6042 softs->devinfo_p, 6043 DDI_SERVICE_UNAFFECTED); 6044 pkt->pkt_reason = CMD_TRAN_ERR; 6045 pkt->pkt_statistics = 0; 6046 } 6047 } 6048 if ((aac_check_acc_handle(softs-> \ 6049 comm_space_acc_handle) != DDI_SUCCESS) || 6050 (aac_check_acc_handle(softs-> \ 6051 pci_mem_handle) != DDI_SUCCESS)) { 6052 ddi_fm_service_impact(softs->devinfo_p, 6053 DDI_SERVICE_UNAFFECTED); 6054 ddi_fm_acc_err_clear(softs-> \ 6055 pci_mem_handle, DDI_FME_VER0); 6056 pkt->pkt_reason = CMD_TRAN_ERR; 6057 pkt->pkt_statistics = 0; 6058 } 6059 if (aac_check_dma_handle(softs-> \ 6060 comm_space_dma_handle) != DDI_SUCCESS) { 6061 ddi_fm_service_impact(softs->devinfo_p, 6062 DDI_SERVICE_UNAFFECTED); 6063 pkt->pkt_reason = CMD_TRAN_ERR; 6064 pkt->pkt_statistics = 0; 6065 } 6066 } 6067 scsi_hba_pkt_comp(pkt); 6068 } else { 6069 break; 6070 } 6071 } 6072 } 6073 6074 static int 6075 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp) 6076 { 6077 size_t rlen; 6078 ddi_dma_cookie_t cookie; 6079 uint_t cookien; 6080 6081 /* Allocate FIB dma resource */ 6082 if (ddi_dma_alloc_handle( 6083 softs->devinfo_p, 6084 &softs->addr_dma_attr, 6085 DDI_DMA_SLEEP, 6086 NULL, 6087 &slotp->fib_dma_handle) != DDI_SUCCESS) { 6088 AACDB_PRINT(softs, CE_WARN, 6089 "Cannot alloc dma handle for 
slot fib area"); 6090 goto error; 6091 } 6092 if (ddi_dma_mem_alloc( 6093 slotp->fib_dma_handle, 6094 softs->aac_max_fib_size, 6095 &softs->acc_attr, 6096 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 6097 DDI_DMA_SLEEP, 6098 NULL, 6099 (caddr_t *)&slotp->fibp, 6100 &rlen, 6101 &slotp->fib_acc_handle) != DDI_SUCCESS) { 6102 AACDB_PRINT(softs, CE_WARN, 6103 "Cannot alloc mem for slot fib area"); 6104 goto error; 6105 } 6106 if (ddi_dma_addr_bind_handle( 6107 slotp->fib_dma_handle, 6108 NULL, 6109 (caddr_t)slotp->fibp, 6110 softs->aac_max_fib_size, 6111 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 6112 DDI_DMA_SLEEP, 6113 NULL, 6114 &cookie, 6115 &cookien) != DDI_DMA_MAPPED) { 6116 AACDB_PRINT(softs, CE_WARN, 6117 "dma bind failed for slot fib area"); 6118 goto error; 6119 } 6120 6121 /* Check dma handles allocated in fib attach */ 6122 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) { 6123 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 6124 goto error; 6125 } 6126 6127 /* Check acc handles allocated in fib attach */ 6128 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) { 6129 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 6130 goto error; 6131 } 6132 6133 slotp->fib_phyaddr = cookie.dmac_laddress; 6134 return (AACOK); 6135 6136 error: 6137 if (slotp->fib_acc_handle) { 6138 ddi_dma_mem_free(&slotp->fib_acc_handle); 6139 slotp->fib_acc_handle = NULL; 6140 } 6141 if (slotp->fib_dma_handle) { 6142 ddi_dma_free_handle(&slotp->fib_dma_handle); 6143 slotp->fib_dma_handle = NULL; 6144 } 6145 return (AACERR); 6146 } 6147 6148 static void 6149 aac_free_fib(struct aac_slot *slotp) 6150 { 6151 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle); 6152 ddi_dma_mem_free(&slotp->fib_acc_handle); 6153 slotp->fib_acc_handle = NULL; 6154 ddi_dma_free_handle(&slotp->fib_dma_handle); 6155 slotp->fib_dma_handle = NULL; 6156 slotp->fib_phyaddr = 0; 6157 } 6158 6159 static void 6160 aac_alloc_fibs(struct aac_softstate *softs) 6161 { 6162 int i; 6163 struct aac_slot *slotp; 6164 6165 for (i = 0; i < softs->total_slots && 6166 softs->total_fibs < softs->total_slots; i++) { 6167 slotp = &(softs->io_slot[i]); 6168 if (slotp->fib_phyaddr) 6169 continue; 6170 if (aac_alloc_fib(softs, slotp) != AACOK) 6171 break; 6172 6173 /* Insert the slot to the free slot list */ 6174 aac_release_slot(softs, slotp); 6175 softs->total_fibs++; 6176 } 6177 } 6178 6179 static void 6180 aac_destroy_fibs(struct aac_softstate *softs) 6181 { 6182 struct aac_slot *slotp; 6183 6184 while ((slotp = softs->free_io_slot_head) != NULL) { 6185 ASSERT(slotp->fib_phyaddr); 6186 softs->free_io_slot_head = slotp->next; 6187 aac_free_fib(slotp); 6188 ASSERT(slotp->index == (slotp - softs->io_slot)); 6189 softs->total_fibs--; 6190 } 6191 ASSERT(softs->total_fibs == 0); 6192 } 6193 6194 static int 6195 aac_create_slots(struct aac_softstate *softs) 6196 { 6197 int i; 6198 6199 softs->total_slots = softs->aac_max_fibs; 6200 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \ 6201 softs->total_slots, KM_SLEEP); 6202 if (softs->io_slot == NULL) { 6203 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot"); 6204 return (AACERR); 6205 } 6206 for (i = 0; i < softs->total_slots; i++) 6207 softs->io_slot[i].index = i; 6208 softs->free_io_slot_head = NULL; 6209 softs->total_fibs = 0; 6210 return (AACOK); 6211 } 6212 6213 static void 6214 aac_destroy_slots(struct aac_softstate *softs) 6215 { 6216 ASSERT(softs->free_io_slot_head == NULL); 6217 6218 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \ 6219 softs->total_slots); 
6220 softs->io_slot = NULL; 6221 softs->total_slots = 0; 6222 } 6223 6224 struct aac_slot * 6225 aac_get_slot(struct aac_softstate *softs) 6226 { 6227 struct aac_slot *slotp; 6228 6229 if ((slotp = softs->free_io_slot_head) != NULL) { 6230 softs->free_io_slot_head = slotp->next; 6231 slotp->next = NULL; 6232 } 6233 return (slotp); 6234 } 6235 6236 static void 6237 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp) 6238 { 6239 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots)); 6240 ASSERT(slotp == &softs->io_slot[slotp->index]); 6241 6242 slotp->acp = NULL; 6243 slotp->next = softs->free_io_slot_head; 6244 softs->free_io_slot_head = slotp; 6245 } 6246 6247 int 6248 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp) 6249 { 6250 if (aac_bind_io(softs, acp) == AACOK) 6251 aac_start_io(softs, acp); 6252 else 6253 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp); 6254 6255 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) 6256 return (TRAN_ACCEPT); 6257 /* 6258 * Because sync FIB is always 512 bytes and used for critical 6259 * functions, async FIB is used for poll IO. 6260 */ 6261 if (acp->flags & AAC_CMD_NO_INTR) { 6262 if (aac_do_poll_io(softs, acp) == AACOK) 6263 return (TRAN_ACCEPT); 6264 } else { 6265 if (aac_do_sync_io(softs, acp) == AACOK) 6266 return (TRAN_ACCEPT); 6267 } 6268 return (TRAN_BADPKT); 6269 } 6270 6271 static int 6272 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp) 6273 { 6274 int (*intr_handler)(struct aac_softstate *); 6275 6276 /* 6277 * Interrupt is disabled, we have to poll the adapter by ourselves. 6278 */ 6279 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 6280 aac_process_intr_new : aac_process_intr_old; 6281 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) { 6282 int i = AAC_POLL_TIME * 1000; 6283 6284 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i); 6285 if (i == 0) 6286 aac_cmd_timeout(softs, acp); 6287 } 6288 6289 ddi_trigger_softintr(softs->softint_id); 6290 6291 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR)) 6292 return (AACOK); 6293 return (AACERR); 6294 } 6295 6296 static int 6297 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp) 6298 { 6299 ASSERT(softs && acp); 6300 6301 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) 6302 cv_wait(&softs->event, &softs->io_lock); 6303 6304 if (acp->flags & AAC_CMD_CMPLT) 6305 return (AACOK); 6306 return (AACERR); 6307 } 6308 6309 static int 6310 aac_dma_sync_ac(struct aac_cmd *acp) 6311 { 6312 if (acp->buf_dma_handle) { 6313 if (acp->flags & AAC_CMD_BUF_WRITE) { 6314 if (acp->abp != NULL) 6315 ddi_rep_put8(acp->abh, 6316 (uint8_t *)acp->bp->b_un.b_addr, 6317 (uint8_t *)acp->abp, acp->bp->b_bcount, 6318 DDI_DEV_AUTOINCR); 6319 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6320 DDI_DMA_SYNC_FORDEV); 6321 } else { 6322 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6323 DDI_DMA_SYNC_FORCPU); 6324 if (aac_check_dma_handle(acp->buf_dma_handle) != 6325 DDI_SUCCESS) 6326 return (AACERR); 6327 if (acp->abp != NULL) 6328 ddi_rep_get8(acp->abh, 6329 (uint8_t *)acp->bp->b_un.b_addr, 6330 (uint8_t *)acp->abp, acp->bp->b_bcount, 6331 DDI_DEV_AUTOINCR); 6332 } 6333 } 6334 return (AACOK); 6335 } 6336 6337 /* 6338 * Copy AIF from adapter to the empty AIF slot and inform AIF threads 6339 */ 6340 static void 6341 aac_save_aif(struct aac_softstate *softs, ddi_acc_handle_t acc, 6342 struct aac_fib *fibp0, int fib_size0) 6343 { 6344 struct aac_fib *fibp; /* FIB in AIF queue */ 6345 int fib_size; 6346 
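	/*
	 * fib_size0 is the size of the incoming FIB as supplied by the
	 * caller; it is clamped to AAC_FIB_SIZE below so the copy never
	 * overruns the fixed-size slot in softs->aifq.
	 */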
uint16_t fib_command; 6347 int current, next; 6348 6349 /* Ignore non AIF messages */ 6350 fib_command = ddi_get16(acc, &fibp0->Header.Command); 6351 if (fib_command != AifRequest) { 6352 cmn_err(CE_WARN, "!Unknown command from controller"); 6353 return; 6354 } 6355 6356 mutex_enter(&softs->aifq_mutex); 6357 6358 /* Save AIF */ 6359 fibp = &softs->aifq[softs->aifq_idx].d; 6360 fib_size = (fib_size0 > AAC_FIB_SIZE) ? AAC_FIB_SIZE : fib_size0; 6361 ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0, fib_size, 6362 DDI_DEV_AUTOINCR); 6363 6364 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 6365 ddi_fm_service_impact(softs->devinfo_p, 6366 DDI_SERVICE_UNAFFECTED); 6367 mutex_exit(&softs->aifq_mutex); 6368 return; 6369 } 6370 6371 AACDB_PRINT_AIF(softs, (struct aac_aif_command *)&fibp->data[0]); 6372 6373 /* Modify AIF contexts */ 6374 current = softs->aifq_idx; 6375 next = (current + 1) % AAC_AIFQ_LENGTH; 6376 if (next == 0) { 6377 struct aac_fib_context *ctx_p; 6378 6379 softs->aifq_wrap = 1; 6380 for (ctx_p = softs->fibctx_p; ctx_p; ctx_p = ctx_p->next) { 6381 if (next == ctx_p->ctx_idx) { 6382 ctx_p->ctx_flags |= AAC_CTXFLAG_FILLED; 6383 } else if (current == ctx_p->ctx_idx && 6384 (ctx_p->ctx_flags & AAC_CTXFLAG_FILLED)) { 6385 ctx_p->ctx_idx = next; 6386 ctx_p->ctx_overrun++; 6387 } 6388 } 6389 } 6390 softs->aifq_idx = next; 6391 6392 /* Wakeup AIF threads */ 6393 cv_broadcast(&softs->aifq_cv); 6394 mutex_exit(&softs->aifq_mutex); 6395 6396 /* Wakeup event thread to handle aif */ 6397 aac_event_disp(softs, AAC_EVENT_AIF); 6398 } 6399 6400 static int 6401 aac_return_aif_common(struct aac_softstate *softs, struct aac_fib_context *ctx, 6402 struct aac_fib **fibpp) 6403 { 6404 int current; 6405 6406 current = ctx->ctx_idx; 6407 if (current == softs->aifq_idx && 6408 !(ctx->ctx_flags & AAC_CTXFLAG_FILLED)) 6409 return (EAGAIN); /* Empty */ 6410 6411 *fibpp = &softs->aifq[current].d; 6412 6413 ctx->ctx_flags &= ~AAC_CTXFLAG_FILLED; 6414 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 6415 return (0); 6416 } 6417 6418 int 6419 aac_return_aif(struct aac_softstate *softs, struct aac_fib_context *ctx, 6420 struct aac_fib **fibpp) 6421 { 6422 int rval; 6423 6424 mutex_enter(&softs->aifq_mutex); 6425 rval = aac_return_aif_common(softs, ctx, fibpp); 6426 mutex_exit(&softs->aifq_mutex); 6427 return (rval); 6428 } 6429 6430 int 6431 aac_return_aif_wait(struct aac_softstate *softs, struct aac_fib_context *ctx, 6432 struct aac_fib **fibpp) 6433 { 6434 int rval; 6435 6436 mutex_enter(&softs->aifq_mutex); 6437 rval = aac_return_aif_common(softs, ctx, fibpp); 6438 if (rval == EAGAIN) { 6439 AACDB_PRINT(softs, CE_NOTE, "Waiting for AIF"); 6440 rval = cv_wait_sig(&softs->aifq_cv, &softs->aifq_mutex); 6441 } 6442 mutex_exit(&softs->aifq_mutex); 6443 return ((rval > 0) ? 0 : EINTR); 6444 } 6445 6446 /* 6447 * The following function comes from Adaptec: 6448 * 6449 * When driver sees a particular event that means containers are changed, it 6450 * will rescan containers. However a change may not be complete until some 6451 * other event is received. For example, creating or deleting an array will 6452 * incur as many as six AifEnConfigChange events which would generate six 6453 * container rescans. To diminish rescans, driver set a flag to wait for 6454 * another particular event. When sees that events come in, it will do rescan. 
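 * Concretely, devcfg_wait_on records the AIF event type that marks the
 * change as complete; aac_handle_aif() triggers a container rescan only
 * when an event of that type arrives (or when a rescan is forced
 * explicitly, e.g. on CT_PUP_MISSING_DRIVE).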
6455 */ 6456 static int 6457 aac_handle_aif(struct aac_softstate *softs, struct aac_aif_command *aif) 6458 { 6459 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 6460 int en_type; 6461 int devcfg_needed; 6462 int cid; 6463 uint32_t bus_id, tgt_id; 6464 enum aac_cfg_event event = AAC_CFG_NULL_EXIST; 6465 6466 devcfg_needed = 0; 6467 en_type = LE_32((uint32_t)aif->data.EN.type); 6468 6469 switch (LE_32((uint32_t)aif->command)) { 6470 case AifCmdDriverNotify: { 6471 cid = LE_32(aif->data.EN.data.ECC.container[0]); 6472 6473 switch (en_type) { 6474 case AifDenMorphComplete: 6475 case AifDenVolumeExtendComplete: 6476 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev)) 6477 softs->devcfg_wait_on = AifEnConfigChange; 6478 break; 6479 } 6480 if (softs->devcfg_wait_on == en_type) 6481 devcfg_needed = 1; 6482 break; 6483 } 6484 6485 case AifCmdEventNotify: 6486 cid = LE_32(aif->data.EN.data.ECC.container[0]); 6487 switch (en_type) { 6488 case AifEnAddContainer: 6489 case AifEnDeleteContainer: 6490 softs->devcfg_wait_on = AifEnConfigChange; 6491 break; 6492 case AifEnContainerChange: 6493 if (!softs->devcfg_wait_on) 6494 softs->devcfg_wait_on = AifEnConfigChange; 6495 break; 6496 case AifEnContainerEvent: 6497 if (ddi_get32(acc, &aif-> \ 6498 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE) 6499 devcfg_needed = 1; 6500 break; 6501 case AifEnAddJBOD: 6502 if (!(softs->flags & AAC_FLAGS_JBOD)) 6503 return (AACERR); 6504 event = AAC_CFG_ADD; 6505 bus_id = (cid >> 24) & 0xf; 6506 tgt_id = cid & 0xffff; 6507 break; 6508 case AifEnDeleteJBOD: 6509 if (!(softs->flags & AAC_FLAGS_JBOD)) 6510 return (AACERR); 6511 event = AAC_CFG_DELETE; 6512 bus_id = (cid >> 24) & 0xf; 6513 tgt_id = cid & 0xffff; 6514 break; 6515 } 6516 if (softs->devcfg_wait_on == en_type) 6517 devcfg_needed = 1; 6518 break; 6519 6520 case AifCmdJobProgress: 6521 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) { 6522 int pr_status; 6523 uint32_t pr_ftick, pr_ctick; 6524 6525 pr_status = LE_32((uint32_t)aif->data.PR[0].status); 6526 pr_ctick = LE_32(aif->data.PR[0].currentTick); 6527 pr_ftick = LE_32(aif->data.PR[0].finalTick); 6528 6529 if ((pr_ctick == pr_ftick) || 6530 (pr_status == AifJobStsSuccess)) 6531 softs->devcfg_wait_on = AifEnContainerChange; 6532 else if ((pr_ctick == 0) && 6533 (pr_status == AifJobStsRunning)) 6534 softs->devcfg_wait_on = AifEnContainerChange; 6535 } 6536 break; 6537 } 6538 6539 if (devcfg_needed) { 6540 softs->devcfg_wait_on = 0; 6541 (void) aac_probe_containers(softs); 6542 } 6543 6544 if (event != AAC_CFG_NULL_EXIST) { 6545 ASSERT(en_type == AifEnAddJBOD || en_type == AifEnDeleteJBOD); 6546 (void) aac_probe_jbod(softs, 6547 AAC_P2VTGT(softs, bus_id, tgt_id), event); 6548 } 6549 return (AACOK); 6550 } 6551 6552 6553 /* 6554 * Check and handle AIF events 6555 */ 6556 static void 6557 aac_aif_event(struct aac_softstate *softs) 6558 { 6559 struct aac_fib *fibp; 6560 6561 /*CONSTCOND*/ 6562 while (1) { 6563 if (aac_return_aif(softs, &softs->aifctx, &fibp) != 0) 6564 break; /* No more AIFs to handle, end loop */ 6565 6566 /* AIF overrun, array create/delete may missed. 
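 * The overrun counter is simply cleared here; no attempt is made to
 * recover any dropped AIFs.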
*/ 6567 if (softs->aifctx.ctx_overrun) { 6568 softs->aifctx.ctx_overrun = 0; 6569 } 6570 6571 /* AIF received, handle it */ 6572 struct aac_aif_command *aifp = 6573 (struct aac_aif_command *)&fibp->data[0]; 6574 uint32_t aif_command = LE_32((uint32_t)aifp->command); 6575 6576 if (aif_command == AifCmdDriverNotify || 6577 aif_command == AifCmdEventNotify || 6578 aif_command == AifCmdJobProgress) 6579 (void) aac_handle_aif(softs, aifp); 6580 } 6581 } 6582 6583 /* 6584 * Timeout recovery 6585 */ 6586 /*ARGSUSED*/ 6587 static void 6588 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp) 6589 { 6590 #ifdef DEBUG 6591 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT; 6592 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp); 6593 AACDB_PRINT_FIB(softs, acp->slotp); 6594 #endif 6595 6596 /* 6597 * Besides the firmware in unhealthy state, an overloaded 6598 * adapter may also incur pkt timeout. 6599 * There is a chance for an adapter with a slower IOP to take 6600 * longer than 60 seconds to process the commands, such as when 6601 * to perform IOs. So the adapter is doing a build on a RAID-5 6602 * while being required longer completion times should be 6603 * tolerated. 6604 */ 6605 switch (aac_do_reset(softs)) { 6606 case AAC_IOP_RESET_SUCCEED: 6607 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET); 6608 aac_start_waiting_io(softs); 6609 break; 6610 case AAC_IOP_RESET_FAILED: 6611 /* Abort all waiting cmds when adapter is dead */ 6612 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT); 6613 break; 6614 case AAC_IOP_RESET_ABNORMAL: 6615 aac_start_waiting_io(softs); 6616 } 6617 } 6618 6619 /* 6620 * The following function comes from Adaptec: 6621 * 6622 * Time sync. command added to synchronize time with firmware every 30 6623 * minutes (required for correct AIF timestamps etc.) 6624 */ 6625 static void 6626 aac_sync_tick(struct aac_softstate *softs) 6627 { 6628 ddi_acc_handle_t acc; 6629 int rval; 6630 6631 mutex_enter(&softs->time_mutex); 6632 ASSERT(softs->time_sync <= softs->timebase); 6633 softs->time_sync = 0; 6634 mutex_exit(&softs->time_mutex); 6635 6636 /* Time sync. with firmware every AAC_SYNC_TICK */ 6637 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 6638 acc = softs->sync_ac.slotp->fib_acc_handle; 6639 6640 ddi_put32(acc, (void *)&softs->sync_ac.slotp->fibp->data[0], 6641 ddi_get_time()); 6642 rval = aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t)); 6643 aac_sync_fib_slot_release(softs, &softs->sync_ac); 6644 6645 mutex_enter(&softs->time_mutex); 6646 softs->time_sync = softs->timebase; 6647 if (rval != AACOK) 6648 /* retry shortly */ 6649 softs->time_sync += aac_tick << 1; 6650 else 6651 softs->time_sync += AAC_SYNC_TICK; 6652 mutex_exit(&softs->time_mutex); 6653 } 6654 6655 /* 6656 * Timeout checking and handling 6657 */ 6658 static void 6659 aac_daemon(struct aac_softstate *softs) 6660 { 6661 int time_out; /* set if timeout happened */ 6662 int time_adjust; 6663 uint32_t softs_timebase; 6664 6665 mutex_enter(&softs->time_mutex); 6666 ASSERT(softs->time_out <= softs->timebase); 6667 softs->time_out = 0; 6668 softs_timebase = softs->timebase; 6669 mutex_exit(&softs->time_mutex); 6670 6671 /* Check slots for timeout pkts */ 6672 time_adjust = 0; 6673 do { 6674 struct aac_cmd *acp; 6675 6676 time_out = 0; 6677 for (acp = softs->q_busy.q_head; acp; acp = acp->next) { 6678 if (acp->timeout == 0) 6679 continue; 6680 6681 /* 6682 * If timeout happened, update outstanding cmds 6683 * to be checked later again. 
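 * Once one command has timed out and aac_cmd_timeout() has run,
 * time_adjust pushes the deadline of every remaining busy command
 * forward so that a single recovery pass does not cascade into a
 * storm of timeouts.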
6684 */ 6685 if (time_adjust) { 6686 acp->timeout += time_adjust; 6687 continue; 6688 } 6689 6690 if (acp->timeout <= softs_timebase) { 6691 aac_cmd_timeout(softs, acp); 6692 time_out = 1; 6693 time_adjust = aac_tick * drv_usectohz(1000000); 6694 break; /* timeout happened */ 6695 } else { 6696 break; /* no timeout */ 6697 } 6698 } 6699 } while (time_out); 6700 6701 mutex_enter(&softs->time_mutex); 6702 softs->time_out = softs->timebase + aac_tick; 6703 mutex_exit(&softs->time_mutex); 6704 } 6705 6706 /* 6707 * The event thread handles various tasks serially for the other parts of 6708 * the driver, so that they can run fast. 6709 */ 6710 static void 6711 aac_event_thread(struct aac_softstate *softs) 6712 { 6713 int run = 1; 6714 6715 DBCALLED(softs, 1); 6716 6717 mutex_enter(&softs->ev_lock); 6718 while (run) { 6719 int events; 6720 6721 if ((events = softs->events) == 0) { 6722 cv_wait(&softs->event_disp_cv, &softs->ev_lock); 6723 events = softs->events; 6724 } 6725 softs->events = 0; 6726 mutex_exit(&softs->ev_lock); 6727 6728 mutex_enter(&softs->io_lock); 6729 if ((softs->state & AAC_STATE_RUN) && 6730 (softs->state & AAC_STATE_DEAD) == 0) { 6731 if (events & AAC_EVENT_TIMEOUT) 6732 aac_daemon(softs); 6733 if (events & AAC_EVENT_SYNCTICK) 6734 aac_sync_tick(softs); 6735 if (events & AAC_EVENT_AIF) 6736 aac_aif_event(softs); 6737 } else { 6738 run = 0; 6739 } 6740 mutex_exit(&softs->io_lock); 6741 6742 mutex_enter(&softs->ev_lock); 6743 } 6744 6745 cv_signal(&softs->event_wait_cv); 6746 mutex_exit(&softs->ev_lock); 6747 } 6748 6749 /* 6750 * Internal timer. It is only responsbile for time counting and report time 6751 * related events. Events handling is done by aac_event_thread(), so that 6752 * the timer itself could be as precise as possible. 
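 * aac_timer() reschedules itself with timeout(9F) every aac_tick
 * seconds for as long as timeout_id stays non-zero; once timeout_id
 * is cleared the callback simply returns and the timer stops.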
6753 */ 6754 static void 6755 aac_timer(void *arg) 6756 { 6757 struct aac_softstate *softs = arg; 6758 int events = 0; 6759 6760 mutex_enter(&softs->time_mutex); 6761 6762 /* If timer is being stopped, exit */ 6763 if (softs->timeout_id) { 6764 softs->timeout_id = timeout(aac_timer, (void *)softs, 6765 (aac_tick * drv_usectohz(1000000))); 6766 } else { 6767 mutex_exit(&softs->time_mutex); 6768 return; 6769 } 6770 6771 /* Time counting */ 6772 softs->timebase += aac_tick; 6773 6774 /* Check time related events */ 6775 if (softs->time_out && softs->time_out <= softs->timebase) 6776 events |= AAC_EVENT_TIMEOUT; 6777 if (softs->time_sync && softs->time_sync <= softs->timebase) 6778 events |= AAC_EVENT_SYNCTICK; 6779 6780 mutex_exit(&softs->time_mutex); 6781 6782 if (events) 6783 aac_event_disp(softs, events); 6784 } 6785 6786 /* 6787 * Dispatch events to daemon thread for handling 6788 */ 6789 static void 6790 aac_event_disp(struct aac_softstate *softs, int events) 6791 { 6792 mutex_enter(&softs->ev_lock); 6793 softs->events |= events; 6794 cv_broadcast(&softs->event_disp_cv); 6795 mutex_exit(&softs->ev_lock); 6796 } 6797 6798 /* 6799 * Architecture dependent functions 6800 */ 6801 static int 6802 aac_rx_get_fwstatus(struct aac_softstate *softs) 6803 { 6804 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6805 } 6806 6807 static int 6808 aac_rx_get_mailbox(struct aac_softstate *softs, int mb) 6809 { 6810 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4)); 6811 } 6812 6813 static void 6814 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6815 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6816 { 6817 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd); 6818 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0); 6819 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1); 6820 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2); 6821 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3); 6822 } 6823 6824 static int 6825 aac_rkt_get_fwstatus(struct aac_softstate *softs) 6826 { 6827 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6828 } 6829 6830 static int 6831 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb) 6832 { 6833 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4)); 6834 } 6835 6836 static void 6837 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6838 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6839 { 6840 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd); 6841 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0); 6842 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1); 6843 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2); 6844 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3); 6845 } 6846 6847 /* 6848 * cb_ops functions 6849 */ 6850 static int 6851 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred) 6852 { 6853 struct aac_softstate *softs; 6854 int minor0, minor; 6855 int instance; 6856 6857 DBCALLED(NULL, 2); 6858 6859 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6860 return (EINVAL); 6861 6862 minor0 = getminor(*devp); 6863 minor = AAC_SCSA_MINOR(minor0); 6864 6865 if (AAC_IS_SCSA_NODE(minor)) 6866 return (scsi_hba_open(devp, flag, otyp, cred)); 6867 6868 instance = MINOR2INST(minor0); 6869 if (instance >= AAC_MAX_ADAPTERS) 6870 return (ENXIO); 6871 6872 softs = ddi_get_soft_state(aac_softstatep, instance); 6873 if (softs == NULL) 6874 return (ENXIO); 6875 6876 return (0); 6877 } 6878 6879 /*ARGSUSED*/ 6880 static int 6881 aac_close(dev_t dev, int flag, int otyp, cred_t *cred) 6882 { 6883 int minor0, minor; 6884 int instance; 6885 6886 DBCALLED(NULL, 2); 6887 6888 if (otyp != 
OTYP_BLK && otyp != OTYP_CHR) 6889 return (EINVAL); 6890 6891 minor0 = getminor(dev); 6892 minor = AAC_SCSA_MINOR(minor0); 6893 6894 if (AAC_IS_SCSA_NODE(minor)) 6895 return (scsi_hba_close(dev, flag, otyp, cred)); 6896 6897 instance = MINOR2INST(minor0); 6898 if (instance >= AAC_MAX_ADAPTERS) 6899 return (ENXIO); 6900 6901 return (0); 6902 } 6903 6904 static int 6905 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, 6906 int *rval_p) 6907 { 6908 struct aac_softstate *softs; 6909 int minor0, minor; 6910 int instance; 6911 6912 DBCALLED(NULL, 2); 6913 6914 if (drv_priv(cred_p) != 0) 6915 return (EPERM); 6916 6917 minor0 = getminor(dev); 6918 minor = AAC_SCSA_MINOR(minor0); 6919 6920 if (AAC_IS_SCSA_NODE(minor)) 6921 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p)); 6922 6923 instance = MINOR2INST(minor0); 6924 if (instance < AAC_MAX_ADAPTERS) { 6925 softs = ddi_get_soft_state(aac_softstatep, instance); 6926 return (aac_do_ioctl(softs, dev, cmd, arg, flag)); 6927 } 6928 return (ENXIO); 6929 } 6930 6931 /* 6932 * The IO fault service error handling callback function 6933 */ 6934 /*ARGSUSED*/ 6935 static int 6936 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6937 { 6938 /* 6939 * as the driver can always deal with an error in any dma or 6940 * access handle, we can just return the fme_status value. 6941 */ 6942 pci_ereport_post(dip, err, NULL); 6943 return (err->fme_status); 6944 } 6945 6946 /* 6947 * aac_fm_init - initialize fma capabilities and register with IO 6948 * fault services. 6949 */ 6950 static void 6951 aac_fm_init(struct aac_softstate *softs) 6952 { 6953 /* 6954 * Need to change iblock to priority for new MSI intr 6955 */ 6956 ddi_iblock_cookie_t fm_ibc; 6957 6958 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p, 6959 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 6960 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 6961 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 6962 6963 /* Only register with IO Fault Services if we have some capability */ 6964 if (softs->fm_capabilities) { 6965 /* Adjust access and dma attributes for FMA */ 6966 softs->reg_attr.devacc_attr_access = DDI_FLAGERR_ACC; 6967 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6968 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6969 6970 /* 6971 * Register capabilities with IO Fault Services. 6972 * fm_capabilities will be updated to indicate 6973 * capabilities actually supported (not requested.) 6974 */ 6975 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc); 6976 6977 /* 6978 * Initialize pci ereport capabilities if ereport 6979 * capable (should always be.) 6980 */ 6981 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6982 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6983 pci_ereport_setup(softs->devinfo_p); 6984 } 6985 6986 /* 6987 * Register error callback if error callback capable. 6988 */ 6989 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6990 ddi_fm_handler_register(softs->devinfo_p, 6991 aac_fm_error_cb, (void *) softs); 6992 } 6993 } 6994 } 6995 6996 /* 6997 * aac_fm_fini - Releases fma capabilities and un-registers with IO 6998 * fault services. 6999 */ 7000 static void 7001 aac_fm_fini(struct aac_softstate *softs) 7002 { 7003 /* Only unregister FMA capabilities if registered */ 7004 if (softs->fm_capabilities) { 7005 /* 7006 * Un-register error callback if error callback capable. 
7007 */ 7008 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 7009 ddi_fm_handler_unregister(softs->devinfo_p); 7010 } 7011 7012 /* 7013 * Release any resources allocated by pci_ereport_setup() 7014 */ 7015 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 7016 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 7017 pci_ereport_teardown(softs->devinfo_p); 7018 } 7019 7020 /* Unregister from IO Fault Services */ 7021 ddi_fm_fini(softs->devinfo_p); 7022 7023 /* Adjust access and dma attributes for FMA */ 7024 softs->reg_attr.devacc_attr_access = DDI_DEFAULT_ACC; 7025 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 7026 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 7027 } 7028 } 7029 7030 int 7031 aac_check_acc_handle(ddi_acc_handle_t handle) 7032 { 7033 ddi_fm_error_t de; 7034 7035 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 7036 return (de.fme_status); 7037 } 7038 7039 int 7040 aac_check_dma_handle(ddi_dma_handle_t handle) 7041 { 7042 ddi_fm_error_t de; 7043 7044 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 7045 return (de.fme_status); 7046 } 7047 7048 void 7049 aac_fm_ereport(struct aac_softstate *softs, char *detail) 7050 { 7051 uint64_t ena; 7052 char buf[FM_MAX_CLASS]; 7053 7054 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 7055 ena = fm_ena_generate(0, FM_ENA_FMT1); 7056 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) { 7057 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP, 7058 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 7059 } 7060 } 7061 7062 /* 7063 * Autoconfiguration support 7064 */ 7065 static int 7066 aac_parse_devname(char *devnm, int *tgt, int *lun) 7067 { 7068 char devbuf[SCSI_MAXNAMELEN]; 7069 char *addr; 7070 char *p, *tp, *lp; 7071 long num; 7072 7073 /* Parse dev name and address */ 7074 (void) strcpy(devbuf, devnm); 7075 addr = ""; 7076 for (p = devbuf; *p != '\0'; p++) { 7077 if (*p == '@') { 7078 addr = p + 1; 7079 *p = '\0'; 7080 } else if (*p == ':') { 7081 *p = '\0'; 7082 break; 7083 } 7084 } 7085 7086 /* Parse taget and lun */ 7087 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 7088 if (*p == ',') { 7089 lp = p + 1; 7090 *p = '\0'; 7091 break; 7092 } 7093 } 7094 if (tgt && tp) { 7095 if (ddi_strtol(tp, NULL, 0x10, &num)) 7096 return (AACERR); 7097 *tgt = (int)num; 7098 } 7099 if (lun && lp) { 7100 if (ddi_strtol(lp, NULL, 0x10, &num)) 7101 return (AACERR); 7102 *lun = (int)num; 7103 } 7104 return (AACOK); 7105 } 7106 7107 static dev_info_t * 7108 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun) 7109 { 7110 dev_info_t *child = NULL; 7111 char addr[SCSI_MAXNAMELEN]; 7112 char tmp[MAXNAMELEN]; 7113 7114 if (tgt < AAC_MAX_LD) { 7115 if (lun == 0) { 7116 struct aac_device *dvp = &softs->containers[tgt].dev; 7117 7118 child = dvp->dip; 7119 } 7120 } else { 7121 (void) sprintf(addr, "%x,%x", tgt, lun); 7122 for (child = ddi_get_child(softs->devinfo_p); 7123 child; child = ddi_get_next_sibling(child)) { 7124 /* We don't care about non-persistent node */ 7125 if (ndi_dev_is_persistent_node(child) == 0) 7126 continue; 7127 7128 if (aac_name_node(child, tmp, MAXNAMELEN) != 7129 DDI_SUCCESS) 7130 continue; 7131 if (strcmp(addr, tmp) == 0) 7132 break; 7133 } 7134 } 7135 return (child); 7136 } 7137 7138 static int 7139 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd, 7140 dev_info_t **dipp) 7141 { 7142 char *nodename = NULL; 7143 char **compatible = NULL; 7144 int ncompatible = 0; 7145 char *childname; 7146 dev_info_t *ldip = NULL; 7147 int tgt = 
sd->sd_address.a_target; 7148 int lun = sd->sd_address.a_lun; 7149 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 7150 int rval; 7151 7152 DBCALLED(softs, 2); 7153 7154 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 7155 NULL, &nodename, &compatible, &ncompatible); 7156 if (nodename == NULL) { 7157 AACDB_PRINT(softs, CE_WARN, 7158 "found no comptible driver for t%dL%d", tgt, lun); 7159 rval = NDI_FAILURE; 7160 goto finish; 7161 } 7162 childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename; 7163 7164 /* Create dev node */ 7165 rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID, 7166 &ldip); 7167 if (rval == NDI_SUCCESS) { 7168 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) 7169 != DDI_PROP_SUCCESS) { 7170 AACDB_PRINT(softs, CE_WARN, "unable to create " 7171 "property for t%dL%d (target)", tgt, lun); 7172 rval = NDI_FAILURE; 7173 goto finish; 7174 } 7175 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) 7176 != DDI_PROP_SUCCESS) { 7177 AACDB_PRINT(softs, CE_WARN, "unable to create " 7178 "property for t%dL%d (lun)", tgt, lun); 7179 rval = NDI_FAILURE; 7180 goto finish; 7181 } 7182 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 7183 "compatible", compatible, ncompatible) 7184 != DDI_PROP_SUCCESS) { 7185 AACDB_PRINT(softs, CE_WARN, "unable to create " 7186 "property for t%dL%d (compatible)", tgt, lun); 7187 rval = NDI_FAILURE; 7188 goto finish; 7189 } 7190 7191 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 7192 if (rval != NDI_SUCCESS) { 7193 AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d", 7194 tgt, lun); 7195 ndi_prop_remove_all(ldip); 7196 (void) ndi_devi_free(ldip); 7197 } 7198 } 7199 finish: 7200 if (dipp) 7201 *dipp = ldip; 7202 7203 scsi_hba_nodename_compatible_free(nodename, compatible); 7204 return (rval); 7205 } 7206 7207 /*ARGSUSED*/ 7208 static int 7209 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd) 7210 { 7211 int tgt = sd->sd_address.a_target; 7212 int lun = sd->sd_address.a_lun; 7213 7214 DBCALLED(softs, 2); 7215 7216 if (tgt < AAC_MAX_LD) { 7217 enum aac_cfg_event event; 7218 7219 if (lun == 0) { 7220 mutex_enter(&softs->io_lock); 7221 event = aac_probe_container(softs, tgt); 7222 mutex_exit(&softs->io_lock); 7223 if ((event != AAC_CFG_NULL_NOEXIST) && 7224 (event != AAC_CFG_DELETE)) { 7225 if (scsi_hba_probe(sd, NULL) == 7226 SCSIPROBE_EXISTS) 7227 return (NDI_SUCCESS); 7228 } 7229 } 7230 return (NDI_FAILURE); 7231 } else { 7232 int dtype; 7233 int qual; /* device qualifier */ 7234 7235 if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS) 7236 return (NDI_FAILURE); 7237 7238 dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 7239 qual = dtype >> 5; 7240 7241 AACDB_PRINT(softs, CE_NOTE, 7242 "Phys. 
device found: tgt %d dtype %d: %s", 7243 tgt, dtype, sd->sd_inq->inq_vid); 7244 7245 /* Only non-DASD and JBOD mode DASD are allowed exposed */ 7246 if (dtype == DTYPE_RODIRECT /* CDROM */ || 7247 dtype == DTYPE_SEQUENTIAL /* TAPE */ || 7248 dtype == DTYPE_ESI /* SES */) { 7249 if (!(softs->flags & AAC_FLAGS_NONDASD)) 7250 return (NDI_FAILURE); 7251 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt); 7252 7253 } else if (dtype == DTYPE_DIRECT) { 7254 if (!(softs->flags & AAC_FLAGS_JBOD) || qual != 0) 7255 return (NDI_FAILURE); 7256 AACDB_PRINT(softs, CE_NOTE, "JBOD DASD %d found", tgt); 7257 } 7258 7259 mutex_enter(&softs->io_lock); 7260 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID; 7261 mutex_exit(&softs->io_lock); 7262 return (NDI_SUCCESS); 7263 } 7264 } 7265 7266 static int 7267 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun, 7268 dev_info_t **ldip) 7269 { 7270 struct scsi_device sd; 7271 dev_info_t *child; 7272 int rval; 7273 7274 DBCALLED(softs, 2); 7275 7276 if ((child = aac_find_child(softs, tgt, lun)) != NULL) { 7277 if (ldip) 7278 *ldip = child; 7279 return (NDI_SUCCESS); 7280 } 7281 7282 bzero(&sd, sizeof (struct scsi_device)); 7283 sd.sd_address.a_hba_tran = softs->hba_tran; 7284 sd.sd_address.a_target = (uint16_t)tgt; 7285 sd.sd_address.a_lun = (uint8_t)lun; 7286 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS) 7287 rval = aac_config_child(softs, &sd, ldip); 7288 /* scsi_unprobe is blank now. Free buffer manually */ 7289 if (sd.sd_inq) { 7290 kmem_free(sd.sd_inq, SUN_INQSIZE); 7291 sd.sd_inq = (struct scsi_inquiry *)NULL; 7292 } 7293 return (rval); 7294 } 7295 7296 static int 7297 aac_config_tgt(struct aac_softstate *softs, int tgt) 7298 { 7299 struct scsi_address ap; 7300 struct buf *bp = NULL; 7301 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE; 7302 int list_len = 0; 7303 int lun_total = 0; 7304 dev_info_t *ldip; 7305 int i; 7306 7307 ap.a_hba_tran = softs->hba_tran; 7308 ap.a_target = (uint16_t)tgt; 7309 ap.a_lun = 0; 7310 7311 for (i = 0; i < 2; i++) { 7312 struct scsi_pkt *pkt; 7313 uchar_t *cdb; 7314 uchar_t *p; 7315 uint32_t data; 7316 7317 if (bp == NULL) { 7318 if ((bp = scsi_alloc_consistent_buf(&ap, NULL, 7319 buf_len, B_READ, NULL_FUNC, NULL)) == NULL) 7320 return (AACERR); 7321 } 7322 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5, 7323 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, 7324 NULL, NULL)) == NULL) { 7325 scsi_free_consistent_buf(bp); 7326 return (AACERR); 7327 } 7328 cdb = pkt->pkt_cdbp; 7329 bzero(cdb, CDB_GROUP5); 7330 cdb[0] = SCMD_REPORT_LUNS; 7331 7332 /* Convert buffer len from local to LE_32 */ 7333 data = buf_len; 7334 for (p = &cdb[9]; p > &cdb[5]; p--) { 7335 *p = data & 0xff; 7336 data >>= 8; 7337 } 7338 7339 if (scsi_poll(pkt) < 0 || 7340 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) { 7341 scsi_destroy_pkt(pkt); 7342 break; 7343 } 7344 7345 /* Convert list_len from LE_32 to local */ 7346 for (p = (uchar_t *)bp->b_un.b_addr; 7347 p < (uchar_t *)bp->b_un.b_addr + 4; p++) { 7348 data <<= 8; 7349 data |= *p; 7350 } 7351 list_len = data; 7352 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) { 7353 scsi_free_consistent_buf(bp); 7354 bp = NULL; 7355 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE; 7356 } 7357 scsi_destroy_pkt(pkt); 7358 } 7359 if (i >= 2) { 7360 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr + 7361 AAC_SCSI_RPTLUNS_HEAD_SIZE); 7362 7363 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) { 7364 uint16_t lun; 7365 7366 /* Determine report luns 
addressing type */ 7367 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) { 7368 /* 7369 * Vendors in the field have been found to be 7370 * concatenating bus/target/lun to equal the 7371 * complete lun value instead of switching to 7372 * flat space addressing 7373 */ 7374 case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL: 7375 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT: 7376 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE: 7377 lun = ((buf[0] & 0x3f) << 8) | buf[1]; 7378 if (lun > UINT8_MAX) { 7379 AACDB_PRINT(softs, CE_WARN, 7380 "abnormal lun number: %d", lun); 7381 break; 7382 } 7383 if (aac_config_lun(softs, tgt, lun, &ldip) == 7384 NDI_SUCCESS) 7385 lun_total++; 7386 break; 7387 } 7388 7389 buf += AAC_SCSI_RPTLUNS_ADDR_SIZE; 7390 } 7391 } else { 7392 /* The target may do not support SCMD_REPORT_LUNS. */ 7393 if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS) 7394 lun_total++; 7395 } 7396 scsi_free_consistent_buf(bp); 7397 return (lun_total); 7398 } 7399 7400 static void 7401 aac_devcfg(struct aac_softstate *softs, int tgt, int en) 7402 { 7403 struct aac_device *dvp; 7404 7405 mutex_enter(&softs->io_lock); 7406 dvp = AAC_DEV(softs, tgt); 7407 if (en) 7408 dvp->flags |= AAC_DFLAG_CONFIGURING; 7409 else 7410 dvp->flags &= ~AAC_DFLAG_CONFIGURING; 7411 mutex_exit(&softs->io_lock); 7412 } 7413 7414 static int 7415 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op, 7416 void *arg, dev_info_t **childp) 7417 { 7418 struct aac_softstate *softs; 7419 int circ = 0; 7420 int rval = NDI_FAILURE; 7421 7422 if ((softs = ddi_get_soft_state(aac_softstatep, 7423 ddi_get_instance(parent))) == NULL) 7424 return (NDI_FAILURE); 7425 7426 /* Commands for bus config should be blocked as the bus is quiesced */ 7427 mutex_enter(&softs->io_lock); 7428 if (softs->state & AAC_STATE_QUIESCED) { 7429 AACDB_PRINT(softs, CE_NOTE, 7430 "bus_config aborted because bus is quiesced"); 7431 mutex_exit(&softs->io_lock); 7432 return (NDI_FAILURE); 7433 } 7434 mutex_exit(&softs->io_lock); 7435 7436 DBCALLED(softs, 1); 7437 7438 /* Hold the nexus across the bus_config */ 7439 ndi_devi_enter(parent, &circ); 7440 switch (op) { 7441 case BUS_CONFIG_ONE: { 7442 int tgt, lun; 7443 7444 if (aac_parse_devname(arg, &tgt, &lun) != AACOK) { 7445 rval = NDI_FAILURE; 7446 break; 7447 } 7448 if (tgt >= AAC_MAX_LD) { 7449 if (tgt >= AAC_MAX_DEV(softs)) { 7450 rval = NDI_FAILURE; 7451 break; 7452 } 7453 } 7454 7455 AAC_DEVCFG_BEGIN(softs, tgt); 7456 rval = aac_config_lun(softs, tgt, lun, childp); 7457 AAC_DEVCFG_END(softs, tgt); 7458 break; 7459 } 7460 7461 case BUS_CONFIG_DRIVER: 7462 case BUS_CONFIG_ALL: { 7463 uint32_t bus, tgt; 7464 int index, total; 7465 7466 for (tgt = 0; tgt < AAC_MAX_LD; tgt++) { 7467 AAC_DEVCFG_BEGIN(softs, tgt); 7468 (void) aac_config_lun(softs, tgt, 0, NULL); 7469 AAC_DEVCFG_END(softs, tgt); 7470 } 7471 7472 /* Config the non-DASD devices connected to the card */ 7473 total = 0; 7474 index = AAC_MAX_LD; 7475 for (bus = 0; bus < softs->bus_max; bus++) { 7476 AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus); 7477 for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) { 7478 AAC_DEVCFG_BEGIN(softs, index); 7479 if (aac_config_tgt(softs, index)) 7480 total++; 7481 AAC_DEVCFG_END(softs, index); 7482 } 7483 } 7484 AACDB_PRINT(softs, CE_CONT, 7485 "?Total %d phys. 
device(s) found", total); 7486 rval = NDI_SUCCESS; 7487 break; 7488 } 7489 } 7490 7491 if (rval == NDI_SUCCESS) 7492 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 7493 ndi_devi_exit(parent, circ); 7494 return (rval); 7495 } 7496 7497 /*ARGSUSED*/ 7498 static int 7499 aac_handle_dr(struct aac_softstate *softs, int tgt, int lun, int event) 7500 { 7501 struct aac_device *dvp; 7502 dev_info_t *dip; 7503 int valid; 7504 int circ1 = 0; 7505 7506 DBCALLED(softs, 1); 7507 7508 /* Hold the nexus across the bus_config */ 7509 dvp = AAC_DEV(softs, tgt); 7510 valid = AAC_DEV_IS_VALID(dvp); 7511 dip = dvp->dip; 7512 if (!(softs->state & AAC_STATE_RUN)) 7513 return (AACERR); 7514 mutex_exit(&softs->io_lock); 7515 7516 switch (event) { 7517 case AAC_CFG_ADD: 7518 case AAC_CFG_DELETE: 7519 /* Device onlined */ 7520 if (dip == NULL && valid) { 7521 ndi_devi_enter(softs->devinfo_p, &circ1); 7522 (void) aac_config_lun(softs, tgt, 0, NULL); 7523 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined", 7524 softs->instance, tgt, lun); 7525 ndi_devi_exit(softs->devinfo_p, circ1); 7526 } 7527 /* Device offlined */ 7528 if (dip && !valid) { 7529 mutex_enter(&softs->io_lock); 7530 (void) aac_do_reset(softs); 7531 mutex_exit(&softs->io_lock); 7532 7533 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 7534 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined", 7535 softs->instance, tgt, lun); 7536 } 7537 break; 7538 } 7539 7540 mutex_enter(&softs->io_lock); 7541 return (AACOK); 7542 } 7543 7544 #ifdef DEBUG 7545 7546 /* -------------------------debug aid functions-------------------------- */ 7547 7548 #define AAC_FIB_CMD_KEY_STRINGS \ 7549 TestCommandResponse, "TestCommandResponse", \ 7550 TestAdapterCommand, "TestAdapterCommand", \ 7551 LastTestCommand, "LastTestCommand", \ 7552 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \ 7553 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \ 7554 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \ 7555 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \ 7556 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \ 7557 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \ 7558 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \ 7559 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \ 7560 InterfaceShutdown, "InterfaceShutdown", \ 7561 DmaCommandFib, "DmaCommandFib", \ 7562 StartProfile, "StartProfile", \ 7563 TermProfile, "TermProfile", \ 7564 SpeedTest, "SpeedTest", \ 7565 TakeABreakPt, "TakeABreakPt", \ 7566 RequestPerfData, "RequestPerfData", \ 7567 SetInterruptDefTimer, "SetInterruptDefTimer", \ 7568 SetInterruptDefCount, "SetInterruptDefCount", \ 7569 GetInterruptDefStatus, "GetInterruptDefStatus", \ 7570 LastCommCommand, "LastCommCommand", \ 7571 NuFileSystem, "NuFileSystem", \ 7572 UFS, "UFS", \ 7573 HostFileSystem, "HostFileSystem", \ 7574 LastFileSystemCommand, "LastFileSystemCommand", \ 7575 ContainerCommand, "ContainerCommand", \ 7576 ContainerCommand64, "ContainerCommand64", \ 7577 ClusterCommand, "ClusterCommand", \ 7578 ScsiPortCommand, "ScsiPortCommand", \ 7579 ScsiPortCommandU64, "ScsiPortCommandU64", \ 7580 AifRequest, "AifRequest", \ 7581 CheckRevision, "CheckRevision", \ 7582 FsaHostShutdown, "FsaHostShutdown", \ 7583 RequestAdapterInfo, "RequestAdapterInfo", \ 7584 IsAdapterPaused, "IsAdapterPaused", \ 7585 SendHostTime, "SendHostTime", \ 7586 LastMiscCommand, "LastMiscCommand" 7587 7588 #define AAC_CTVM_SUBCMD_KEY_STRINGS \ 7589 VM_Null, "VM_Null", \ 7590 VM_NameServe, "VM_NameServe", \ 7591 VM_ContainerConfig, 
"VM_ContainerConfig", \ 7592 VM_Ioctl, "VM_Ioctl", \ 7593 VM_FilesystemIoctl, "VM_FilesystemIoctl", \ 7594 VM_CloseAll, "VM_CloseAll", \ 7595 VM_CtBlockRead, "VM_CtBlockRead", \ 7596 VM_CtBlockWrite, "VM_CtBlockWrite", \ 7597 VM_SliceBlockRead, "VM_SliceBlockRead", \ 7598 VM_SliceBlockWrite, "VM_SliceBlockWrite", \ 7599 VM_DriveBlockRead, "VM_DriveBlockRead", \ 7600 VM_DriveBlockWrite, "VM_DriveBlockWrite", \ 7601 VM_EnclosureMgt, "VM_EnclosureMgt", \ 7602 VM_Unused, "VM_Unused", \ 7603 VM_CtBlockVerify, "VM_CtBlockVerify", \ 7604 VM_CtPerf, "VM_CtPerf", \ 7605 VM_CtBlockRead64, "VM_CtBlockRead64", \ 7606 VM_CtBlockWrite64, "VM_CtBlockWrite64", \ 7607 VM_CtBlockVerify64, "VM_CtBlockVerify64", \ 7608 VM_CtHostRead64, "VM_CtHostRead64", \ 7609 VM_CtHostWrite64, "VM_CtHostWrite64", \ 7610 VM_NameServe64, "VM_NameServe64" 7611 7612 #define AAC_CT_SUBCMD_KEY_STRINGS \ 7613 CT_Null, "CT_Null", \ 7614 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \ 7615 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \ 7616 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \ 7617 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \ 7618 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \ 7619 CT_WRITE_MBR, "CT_WRITE_MBR", \ 7620 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \ 7621 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \ 7622 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \ 7623 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \ 7624 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \ 7625 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \ 7626 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \ 7627 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \ 7628 CT_READ_MBR, "CT_READ_MBR", \ 7629 CT_READ_PARTITION, "CT_READ_PARTITION", \ 7630 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \ 7631 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \ 7632 CT_SLICE_SIZE, "CT_SLICE_SIZE", \ 7633 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \ 7634 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \ 7635 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \ 7636 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \ 7637 CT_UNMIRROR, "CT_UNMIRROR", \ 7638 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \ 7639 CT_GEN_MIRROR, "CT_GEN_MIRROR", \ 7640 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \ 7641 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \ 7642 CT_MOVE2, "CT_MOVE2", \ 7643 CT_SPLIT, "CT_SPLIT", \ 7644 CT_SPLIT2, "CT_SPLIT2", \ 7645 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \ 7646 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \ 7647 CT_RECONFIG, "CT_RECONFIG", \ 7648 CT_BREAK2, "CT_BREAK2", \ 7649 CT_BREAK, "CT_BREAK", \ 7650 CT_MERGE2, "CT_MERGE2", \ 7651 CT_MERGE, "CT_MERGE", \ 7652 CT_FORCE_ERROR, "CT_FORCE_ERROR", \ 7653 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \ 7654 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \ 7655 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \ 7656 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \ 7657 CT_VOLUME_ADD, "CT_VOLUME_ADD", \ 7658 CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \ 7659 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \ 7660 CT_COPY_STATUS, "CT_COPY_STATUS", \ 7661 CT_COPY, "CT_COPY", \ 7662 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \ 7663 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \ 7664 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \ 7665 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \ 7666 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \ 7667 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \ 7668 CT_SET, "CT_SET", \ 7669 CT_GET, "CT_GET", \ 7670 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \ 7671 CT_GET_DELAY, "CT_GET_DELAY", \ 7672 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \ 7673 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", 
\ 7674 CT_SCRUB, "CT_SCRUB", \ 7675 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \ 7676 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \ 7677 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \ 7678 CT_PAUSE_IO, "CT_PAUSE_IO", \ 7679 CT_RELEASE_IO, "CT_RELEASE_IO", \ 7680 CT_SCRUB2, "CT_SCRUB2", \ 7681 CT_MCHECK, "CT_MCHECK", \ 7682 CT_CORRUPT, "CT_CORRUPT", \ 7683 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \ 7684 CT_PROMOTE, "CT_PROMOTE", \ 7685 CT_SET_DEAD, "CT_SET_DEAD", \ 7686 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \ 7687 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \ 7688 CT_GET_PARAM, "CT_GET_PARAM", \ 7689 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \ 7690 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \ 7691 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \ 7692 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \ 7693 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \ 7694 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \ 7695 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \ 7696 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \ 7697 CT_STOP_DATA, "CT_STOP_DATA", \ 7698 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \ 7699 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \ 7700 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \ 7701 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \ 7702 CT_GET_TIME, "CT_GET_TIME", \ 7703 CT_READ_DATA, "CT_READ_DATA", \ 7704 CT_CTR, "CT_CTR", \ 7705 CT_CTL, "CT_CTL", \ 7706 CT_DRAINIO, "CT_DRAINIO", \ 7707 CT_RELEASEIO, "CT_RELEASEIO", \ 7708 CT_GET_NVRAM, "CT_GET_NVRAM", \ 7709 CT_GET_MEMORY, "CT_GET_MEMORY", \ 7710 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \ 7711 CT_ADD_LEVEL, "CT_ADD_LEVEL", \ 7712 CT_NV_ZERO, "CT_NV_ZERO", \ 7713 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \ 7714 CT_THROTTLE_ON, "CT_THROTTLE_ON", \ 7715 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \ 7716 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \ 7717 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \ 7718 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \ 7719 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \ 7720 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \ 7721 CT_MONITOR, "CT_MONITOR", \ 7722 CT_GEN_MORPH, "CT_GEN_MORPH", \ 7723 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \ 7724 CT_CACHE_SET, "CT_CACHE_SET", \ 7725 CT_CACHE_STAT, "CT_CACHE_STAT", \ 7726 CT_TRACE_START, "CT_TRACE_START", \ 7727 CT_TRACE_STOP, "CT_TRACE_STOP", \ 7728 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \ 7729 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \ 7730 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \ 7731 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \ 7732 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \ 7733 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \ 7734 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \ 7735 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \ 7736 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \ 7737 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \ 7738 CT_STOP_DUMPS, "CT_STOP_DUMPS", \ 7739 CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \ 7740 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \ 7741 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \ 7742 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \ 7743 CT_READ_NAME, "CT_READ_NAME", \ 7744 CT_WRITE_NAME, "CT_WRITE_NAME", \ 7745 CT_TOSS_CACHE, "CT_TOSS_CACHE", \ 7746 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \ 7747 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \ 7748 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \ 7749 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \ 7750 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \ 7751 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \ 7752 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \ 7753 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \ 7754 
CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \ 7755 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \ 7756 CT_FLUSH, "CT_FLUSH", \ 7757 CT_REBUILD, "CT_REBUILD", \ 7758 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \ 7759 CT_RESTART, "CT_RESTART", \ 7760 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \ 7761 CT_TRACE_FLAG, "CT_TRACE_FLAG", \ 7762 CT_RESTART_MORPH, "CT_RESTART_MORPH", \ 7763 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \ 7764 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \ 7765 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \ 7766 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \ 7767 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \ 7768 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \ 7769 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \ 7770 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \ 7771 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \ 7772 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \ 7773 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \ 7774 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \ 7775 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \ 7776 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \ 7777 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \ 7778 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \ 7779 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \ 7780 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \ 7781 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \ 7782 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \ 7783 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \ 7784 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \ 7785 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \ 7786 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \ 7787 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \ 7788 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \ 7789 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \ 7790 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \ 7791 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \ 7792 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \ 7793 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \ 7794 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \ 7795 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \ 7796 CT_IS_CONTAINER_MEATADATA_STANDARD, \ 7797 "CT_IS_CONTAINER_MEATADATA_STANDARD", \ 7798 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \ 7799 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \ 7800 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \ 7801 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \ 7802 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \ 7803 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \ 7804 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \ 7805 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \ 7806 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \ 7807 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \ 7808 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \ 7809 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \ 7810 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \ 7811 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \ 7812 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \ 7813 CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \ 7814 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \ 7815 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \ 7816 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE" 7817 7818 #define AAC_CL_SUBCMD_KEY_STRINGS \ 7819 CL_NULL, "CL_NULL", \ 7820 DS_INIT, "DS_INIT", \ 7821 DS_RESCAN, "DS_RESCAN", \ 7822 DS_CREATE, "DS_CREATE", \ 7823 DS_DELETE, "DS_DELETE", \ 7824 DS_ADD_DISK, "DS_ADD_DISK", \ 7825 DS_REMOVE_DISK, "DS_REMOVE_DISK", \ 7826 DS_MOVE_DISK, "DS_MOVE_DISK", \ 7827 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \ 7828 
DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \ 7829 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \ 7830 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \ 7831 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \ 7832 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \ 7833 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \ 7834 DS_GET_DRIVES, "DS_GET_DRIVES", \ 7835 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \ 7836 DS_ONLINE, "DS_ONLINE", \ 7837 DS_OFFLINE, "DS_OFFLINE", \ 7838 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \ 7839 DS_FSAPRINT, "DS_FSAPRINT", \ 7840 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \ 7841 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \ 7842 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \ 7843 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \ 7844 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \ 7845 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \ 7846 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \ 7847 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \ 7848 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \ 7849 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \ 7850 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \ 7851 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \ 7852 CC_GET_BUSINFO, "CC_GET_BUSINFO", \ 7853 CC_GET_PORTINFO, "CC_GET_PORTINFO", \ 7854 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \ 7855 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \ 7856 CQ_QUORUM_OP, "CQ_QUORUM_OP" 7857 7858 #define AAC_AIF_SUBCMD_KEY_STRINGS \ 7859 AifCmdEventNotify, "AifCmdEventNotify", \ 7860 AifCmdJobProgress, "AifCmdJobProgress", \ 7861 AifCmdAPIReport, "AifCmdAPIReport", \ 7862 AifCmdDriverNotify, "AifCmdDriverNotify", \ 7863 AifReqJobList, "AifReqJobList", \ 7864 AifReqJobsForCtr, "AifReqJobsForCtr", \ 7865 AifReqJobsForScsi, "AifReqJobsForScsi", \ 7866 AifReqJobReport, "AifReqJobReport", \ 7867 AifReqTerminateJob, "AifReqTerminateJob", \ 7868 AifReqSuspendJob, "AifReqSuspendJob", \ 7869 AifReqResumeJob, "AifReqResumeJob", \ 7870 AifReqSendAPIReport, "AifReqSendAPIReport", \ 7871 AifReqAPIJobStart, "AifReqAPIJobStart", \ 7872 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \ 7873 AifReqAPIJobFinish, "AifReqAPIJobFinish" 7874 7875 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \ 7876 Reserved_IOCTL, "Reserved_IOCTL", \ 7877 GetDeviceHandle, "GetDeviceHandle", \ 7878 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \ 7879 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \ 7880 RescanBus, "RescanBus", \ 7881 GetDeviceProbeInfo, "GetDeviceProbeInfo", \ 7882 GetDeviceCapacity, "GetDeviceCapacity", \ 7883 GetContainerProbeInfo, "GetContainerProbeInfo", \ 7884 GetRequestedMemorySize, "GetRequestedMemorySize", \ 7885 GetBusInfo, "GetBusInfo", \ 7886 GetVendorSpecific, "GetVendorSpecific", \ 7887 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \ 7888 EnhancedGetBusInfo, "EnhancedGetBusInfo", \ 7889 SetupExtendedCounters, "SetupExtendedCounters", \ 7890 GetPerformanceCounters, "GetPerformanceCounters", \ 7891 ResetPerformanceCounters, "ResetPerformanceCounters", \ 7892 ReadModePage, "ReadModePage", \ 7893 WriteModePage, "WriteModePage", \ 7894 ReadDriveParameter, "ReadDriveParameter", \ 7895 WriteDriveParameter, "WriteDriveParameter", \ 7896 ResetAdapter, "ResetAdapter", \ 7897 ResetBus, "ResetBus", \ 7898 ResetBusDevice, "ResetBusDevice", \ 7899 ExecuteSrb, "ExecuteSrb", \ 7900 Create_IO_Task, "Create_IO_Task", \ 7901 Delete_IO_Task, "Delete_IO_Task", \ 7902 Get_IO_Task_Info, "Get_IO_Task_Info", \ 7903 Check_Task_Progress, "Check_Task_Progress", \ 
7904 InjectError, "InjectError", \ 7905 GetDeviceDefectCounts, "GetDeviceDefectCounts", \ 7906 GetDeviceDefectInfo, "GetDeviceDefectInfo", \ 7907 GetDeviceStatus, "GetDeviceStatus", \ 7908 ClearDeviceStatus, "ClearDeviceStatus", \ 7909 DiskSpinControl, "DiskSpinControl", \ 7910 DiskSmartControl, "DiskSmartControl", \ 7911 WriteSame, "WriteSame", \ 7912 ReadWriteLong, "ReadWriteLong", \ 7913 FormatUnit, "FormatUnit", \ 7914 TargetDeviceControl, "TargetDeviceControl", \ 7915 TargetChannelControl, "TargetChannelControl", \ 7916 FlashNewCode, "FlashNewCode", \ 7917 DiskCheck, "DiskCheck", \ 7918 RequestSense, "RequestSense", \ 7919 DiskPERControl, "DiskPERControl", \ 7920 Read10, "Read10", \ 7921 Write10, "Write10" 7922 7923 #define AAC_AIFEN_KEY_STRINGS \ 7924 AifEnGeneric, "Generic", \ 7925 AifEnTaskComplete, "TaskComplete", \ 7926 AifEnConfigChange, "Config change", \ 7927 AifEnContainerChange, "Container change", \ 7928 AifEnDeviceFailure, "device failed", \ 7929 AifEnMirrorFailover, "Mirror failover", \ 7930 AifEnContainerEvent, "container event", \ 7931 AifEnFileSystemChange, "File system changed", \ 7932 AifEnConfigPause, "Container pause event", \ 7933 AifEnConfigResume, "Container resume event", \ 7934 AifEnFailoverChange, "Failover space assignment changed", \ 7935 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \ 7936 AifEnEnclosureManagement, "Enclosure management event", \ 7937 AifEnBatteryEvent, "battery event", \ 7938 AifEnAddContainer, "Add container", \ 7939 AifEnDeleteContainer, "Delete container", \ 7940 AifEnSMARTEvent, "SMART Event", \ 7941 AifEnBatteryNeedsRecond, "battery needs reconditioning", \ 7942 AifEnClusterEvent, "cluster event", \ 7943 AifEnDiskSetEvent, "disk set event occurred", \ 7944 AifDenMorphComplete, "morph operation completed", \ 7945 AifDenVolumeExtendComplete, "VolumeExtendComplete" 7946 7947 struct aac_key_strings { 7948 int key; 7949 char *message; 7950 }; 7951 7952 extern struct scsi_key_strings scsi_cmds[]; 7953 7954 static struct aac_key_strings aac_fib_cmds[] = { 7955 AAC_FIB_CMD_KEY_STRINGS, 7956 -1, NULL 7957 }; 7958 7959 static struct aac_key_strings aac_ctvm_subcmds[] = { 7960 AAC_CTVM_SUBCMD_KEY_STRINGS, 7961 -1, NULL 7962 }; 7963 7964 static struct aac_key_strings aac_ct_subcmds[] = { 7965 AAC_CT_SUBCMD_KEY_STRINGS, 7966 -1, NULL 7967 }; 7968 7969 static struct aac_key_strings aac_cl_subcmds[] = { 7970 AAC_CL_SUBCMD_KEY_STRINGS, 7971 -1, NULL 7972 }; 7973 7974 static struct aac_key_strings aac_aif_subcmds[] = { 7975 AAC_AIF_SUBCMD_KEY_STRINGS, 7976 -1, NULL 7977 }; 7978 7979 static struct aac_key_strings aac_ioctl_subcmds[] = { 7980 AAC_IOCTL_SUBCMD_KEY_STRINGS, 7981 -1, NULL 7982 }; 7983 7984 static struct aac_key_strings aac_aifens[] = { 7985 AAC_AIFEN_KEY_STRINGS, 7986 -1, NULL 7987 }; 7988 7989 /* 7990 * The following function comes from Adaptec: 7991 * 7992 * Get the firmware print buffer parameters from the firmware; 7993 * if the command was successful, map in the address.
7994 */ 7995 static int 7996 aac_get_fw_debug_buffer(struct aac_softstate *softs) 7997 { 7998 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP, 7999 0, 0, 0, 0, NULL) == AACOK) { 8000 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1); 8001 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2); 8002 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3); 8003 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4); 8004 8005 if (mondrv_buf_size) { 8006 uint32_t offset = mondrv_buf_paddrl - \ 8007 softs->pci_mem_base_paddr; 8008 8009 /* 8010 * See if the address is already mapped in, and 8011 * if so set it up from the base address 8012 */ 8013 if ((mondrv_buf_paddrh == 0) && 8014 (offset + mondrv_buf_size < softs->map_size)) { 8015 mutex_enter(&aac_prt_mutex); 8016 softs->debug_buf_offset = offset; 8017 softs->debug_header_size = mondrv_hdr_size; 8018 softs->debug_buf_size = mondrv_buf_size; 8019 softs->debug_fw_flags = 0; 8020 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 8021 mutex_exit(&aac_prt_mutex); 8022 8023 return (AACOK); 8024 } 8025 } 8026 } 8027 return (AACERR); 8028 } 8029 8030 int 8031 aac_dbflag_on(struct aac_softstate *softs, int flag) 8032 { 8033 int debug_flags = softs ? softs->debug_flags : aac_debug_flags; 8034 8035 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \ 8036 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag)); 8037 } 8038 8039 static void 8040 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader) 8041 { 8042 if (noheader) { 8043 if (sl) { 8044 aac_fmt[0] = sl; 8045 cmn_err(lev, aac_fmt, aac_prt_buf); 8046 } else { 8047 cmn_err(lev, &aac_fmt[1], aac_prt_buf); 8048 } 8049 } else { 8050 if (sl) { 8051 aac_fmt_header[0] = sl; 8052 cmn_err(lev, aac_fmt_header, 8053 softs->vendor_name, softs->instance, 8054 aac_prt_buf); 8055 } else { 8056 cmn_err(lev, &aac_fmt_header[1], 8057 softs->vendor_name, softs->instance, 8058 aac_prt_buf); 8059 } 8060 } 8061 } 8062 8063 /* 8064 * The following function comes from Adaptec: 8065 * 8066 * Format and print out the data passed in to UART or console 8067 * as specified by debug flags. 8068 */ 8069 void 8070 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...) 8071 { 8072 va_list args; 8073 char sl; /* system log character */ 8074 8075 mutex_enter(&aac_prt_mutex); 8076 /* Set up parameters and call sprintf function to format the data */ 8077 if (strchr("^!?", fmt[0]) == NULL) { 8078 sl = 0; 8079 } else { 8080 sl = fmt[0]; 8081 fmt++; 8082 } 8083 va_start(args, fmt); 8084 (void) vsprintf(aac_prt_buf, fmt, args); 8085 va_end(args); 8086 8087 /* Make sure the softs structure has been passed in for this section */ 8088 if (softs) { 8089 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) && 8090 /* If we are set up for a Firmware print */ 8091 (softs->debug_buf_size)) { 8092 uint32_t count, i; 8093 8094 /* Make sure the string size is within boundaries */ 8095 count = strlen(aac_prt_buf); 8096 if (count > softs->debug_buf_size) 8097 count = (uint16_t)softs->debug_buf_size; 8098 8099 /* 8100 * Wait for no more than AAC_PRINT_TIMEOUT for the 8101 * previous message length to clear (the handshake). 8102 */ 8103 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) { 8104 if (!PCI_MEM_GET32(softs, 8105 softs->debug_buf_offset + \ 8106 AAC_FW_DBG_STRLEN_OFFSET)) 8107 break; 8108 8109 drv_usecwait(1000); 8110 } 8111 8112 /* 8113 * If the length is clear, copy over the message, the 8114 * flags, and the length. Make sure the length is the 8115 * last because that is the signal for the Firmware to 8116 * pick it up. 
8117 */ 8118 if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \ 8119 AAC_FW_DBG_STRLEN_OFFSET)) { 8120 PCI_MEM_REP_PUT8(softs, 8121 softs->debug_buf_offset + \ 8122 softs->debug_header_size, 8123 aac_prt_buf, count); 8124 PCI_MEM_PUT32(softs, 8125 softs->debug_buf_offset + \ 8126 AAC_FW_DBG_FLAGS_OFFSET, 8127 softs->debug_fw_flags); 8128 PCI_MEM_PUT32(softs, 8129 softs->debug_buf_offset + \ 8130 AAC_FW_DBG_STRLEN_OFFSET, count); 8131 } else { 8132 cmn_err(CE_WARN, "UART output fail"); 8133 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 8134 } 8135 } 8136 8137 /* 8138 * If the Kernel Debug Print flag is set, send it off 8139 * to the Kernel Debugger 8140 */ 8141 if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT) 8142 aac_cmn_err(softs, lev, sl, 8143 (softs->debug_flags & AACDB_FLAGS_NO_HEADERS)); 8144 } else { 8145 /* Driver not initialized yet, no firmware or header output */ 8146 if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT) 8147 aac_cmn_err(softs, lev, sl, 1); 8148 } 8149 mutex_exit(&aac_prt_mutex); 8150 } 8151 8152 /* 8153 * Translate command number to description string 8154 */ 8155 static char * 8156 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist) 8157 { 8158 int i; 8159 8160 for (i = 0; cmdlist[i].key != -1; i++) { 8161 if (cmd == cmdlist[i].key) 8162 return (cmdlist[i].message); 8163 } 8164 return (NULL); 8165 } 8166 8167 static void 8168 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 8169 { 8170 struct scsi_pkt *pkt = acp->pkt; 8171 struct scsi_address *ap = &pkt->pkt_address; 8172 int is_pd = 0; 8173 int ctl = ddi_get_instance(softs->devinfo_p); 8174 int tgt = ap->a_target; 8175 int lun = ap->a_lun; 8176 union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp; 8177 uchar_t cmd = cdbp->scc_cmd; 8178 char *desc; 8179 8180 if (tgt >= AAC_MAX_LD) { 8181 is_pd = 1; 8182 ctl = ((struct aac_nondasd *)acp->dvp)->bus; 8183 tgt = ((struct aac_nondasd *)acp->dvp)->tid; 8184 lun = 0; 8185 } 8186 8187 if ((desc = aac_cmd_name(cmd, 8188 (struct aac_key_strings *)scsi_cmds)) == NULL) { 8189 aac_printf(softs, CE_NOTE, 8190 "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s", 8191 cmd, ctl, tgt, lun, is_pd ? "(pd)" : ""); 8192 return; 8193 } 8194 8195 switch (cmd) { 8196 case SCMD_READ: 8197 case SCMD_WRITE: 8198 aac_printf(softs, CE_NOTE, 8199 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 8200 desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp), 8201 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 8202 ctl, tgt, lun, is_pd ? "(pd)" : ""); 8203 break; 8204 case SCMD_READ_G1: 8205 case SCMD_WRITE_G1: 8206 aac_printf(softs, CE_NOTE, 8207 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 8208 desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp), 8209 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 8210 ctl, tgt, lun, is_pd ? "(pd)" : ""); 8211 break; 8212 case SCMD_READ_G4: 8213 case SCMD_WRITE_G4: 8214 aac_printf(softs, CE_NOTE, 8215 "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s", 8216 desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp), 8217 GETG4COUNT(cdbp), 8218 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 8219 ctl, tgt, lun, is_pd ? "(pd)" : ""); 8220 break; 8221 case SCMD_READ_G5: 8222 case SCMD_WRITE_G5: 8223 aac_printf(softs, CE_NOTE, 8224 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 8225 desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp), 8226 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 8227 ctl, tgt, lun, is_pd ? "(pd)" : ""); 8228 break; 8229 default: 8230 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s", 8231 desc, ctl, tgt, lun, is_pd ? 
"(pd)" : ""); 8232 } 8233 } 8234 8235 void 8236 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp) 8237 { 8238 struct aac_cmd *acp = slotp->acp; 8239 struct aac_fib *fibp = slotp->fibp; 8240 ddi_acc_handle_t acc = slotp->fib_acc_handle; 8241 uint16_t fib_size; 8242 uint32_t fib_cmd, sub_cmd; 8243 char *cmdstr, *subcmdstr; 8244 char *caller; 8245 int i; 8246 8247 if (acp) { 8248 if (!(softs->debug_fib_flags & acp->fib_flags)) 8249 return; 8250 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD) 8251 caller = "SCMD"; 8252 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL) 8253 caller = "IOCTL"; 8254 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB) 8255 caller = "SRB"; 8256 else 8257 return; 8258 } else { 8259 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC)) 8260 return; 8261 caller = "SYNC"; 8262 } 8263 8264 fib_cmd = ddi_get16(acc, &fibp->Header.Command); 8265 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds); 8266 sub_cmd = (uint32_t)-1; 8267 subcmdstr = NULL; 8268 8269 /* Print FIB header */ 8270 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) { 8271 aac_printf(softs, CE_NOTE, "FIB> from %s", caller); 8272 aac_printf(softs, CE_NOTE, " XferState %d", 8273 ddi_get32(acc, &fibp->Header.XferState)); 8274 aac_printf(softs, CE_NOTE, " Command %d", 8275 ddi_get16(acc, &fibp->Header.Command)); 8276 aac_printf(softs, CE_NOTE, " StructType %d", 8277 ddi_get8(acc, &fibp->Header.StructType)); 8278 aac_printf(softs, CE_NOTE, " Flags 0x%x", 8279 ddi_get8(acc, &fibp->Header.Flags)); 8280 aac_printf(softs, CE_NOTE, " Size %d", 8281 ddi_get16(acc, &fibp->Header.Size)); 8282 aac_printf(softs, CE_NOTE, " SenderSize %d", 8283 ddi_get16(acc, &fibp->Header.SenderSize)); 8284 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x", 8285 ddi_get32(acc, &fibp->Header.SenderFibAddress)); 8286 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x", 8287 ddi_get32(acc, &fibp->Header.ReceiverFibAddress)); 8288 aac_printf(softs, CE_NOTE, " SenderData 0x%x", 8289 ddi_get32(acc, &fibp->Header.SenderData)); 8290 } 8291 8292 /* Print FIB data */ 8293 switch (fib_cmd) { 8294 case ContainerCommand: 8295 sub_cmd = ddi_get32(acc, 8296 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0])); 8297 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds); 8298 if (subcmdstr == NULL) 8299 break; 8300 8301 switch (sub_cmd) { 8302 case VM_ContainerConfig: { 8303 struct aac_Container *pContainer = 8304 (struct aac_Container *)fibp->data; 8305 8306 fib_cmd = sub_cmd; 8307 cmdstr = subcmdstr; 8308 sub_cmd = (uint32_t)-1; 8309 subcmdstr = NULL; 8310 8311 sub_cmd = ddi_get32(acc, 8312 &pContainer->CTCommand.command); 8313 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds); 8314 if (subcmdstr == NULL) 8315 break; 8316 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)", 8317 subcmdstr, 8318 ddi_get32(acc, &pContainer->CTCommand.param[0]), 8319 ddi_get32(acc, &pContainer->CTCommand.param[1]), 8320 ddi_get32(acc, &pContainer->CTCommand.param[2])); 8321 return; 8322 } 8323 8324 case VM_Ioctl: 8325 fib_cmd = sub_cmd; 8326 cmdstr = subcmdstr; 8327 sub_cmd = (uint32_t)-1; 8328 subcmdstr = NULL; 8329 8330 sub_cmd = ddi_get32(acc, 8331 (void *)&(((uint32_t *)(void *)&fibp->data[0])[4])); 8332 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds); 8333 break; 8334 8335 case VM_CtBlockRead: 8336 case VM_CtBlockWrite: { 8337 struct aac_blockread *br = 8338 (struct aac_blockread *)fibp->data; 8339 struct aac_sg_table *sg = &br->SgMap; 8340 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 8341 8342 aac_printf(softs, CE_NOTE, 8343 "FIB> %s Container %d 0x%x/%d", 
subcmdstr, 8344 ddi_get32(acc, &br->ContainerId), 8345 ddi_get32(acc, &br->BlockNumber), 8346 ddi_get32(acc, &br->ByteCount)); 8347 for (i = 0; i < sgcount; i++) 8348 aac_printf(softs, CE_NOTE, 8349 " %d: 0x%08x/%d", i, 8350 ddi_get32(acc, &sg->SgEntry[i].SgAddress), 8351 ddi_get32(acc, &sg->SgEntry[i]. \ 8352 SgByteCount)); 8353 return; 8354 } 8355 } 8356 break; 8357 8358 case ContainerCommand64: { 8359 struct aac_blockread64 *br = 8360 (struct aac_blockread64 *)fibp->data; 8361 struct aac_sg_table64 *sg = &br->SgMap64; 8362 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 8363 uint64_t sgaddr; 8364 8365 sub_cmd = br->Command; 8366 subcmdstr = NULL; 8367 if (sub_cmd == VM_CtHostRead64) 8368 subcmdstr = "VM_CtHostRead64"; 8369 else if (sub_cmd == VM_CtHostWrite64) 8370 subcmdstr = "VM_CtHostWrite64"; 8371 else 8372 break; 8373 8374 aac_printf(softs, CE_NOTE, 8375 "FIB> %s Container %d 0x%x/%d", subcmdstr, 8376 ddi_get16(acc, &br->ContainerId), 8377 ddi_get32(acc, &br->BlockNumber), 8378 ddi_get16(acc, &br->SectorCount)); 8379 for (i = 0; i < sgcount; i++) { 8380 sgaddr = ddi_get64(acc, 8381 &sg->SgEntry64[i].SgAddress); 8382 aac_printf(softs, CE_NOTE, 8383 " %d: 0x%08x.%08x/%d", i, 8384 AAC_MS32(sgaddr), AAC_LS32(sgaddr), 8385 ddi_get32(acc, &sg->SgEntry64[i]. \ 8386 SgByteCount)); 8387 } 8388 return; 8389 } 8390 8391 case RawIo: { 8392 struct aac_raw_io *io = (struct aac_raw_io *)fibp->data; 8393 struct aac_sg_tableraw *sg = &io->SgMapRaw; 8394 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 8395 uint64_t sgaddr; 8396 8397 aac_printf(softs, CE_NOTE, 8398 "FIB> RawIo Container %d 0x%llx/%d 0x%x", 8399 ddi_get16(acc, &io->ContainerId), 8400 ddi_get64(acc, &io->BlockNumber), 8401 ddi_get32(acc, &io->ByteCount), 8402 ddi_get16(acc, &io->Flags)); 8403 for (i = 0; i < sgcount; i++) { 8404 sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress); 8405 aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i, 8406 AAC_MS32(sgaddr), AAC_LS32(sgaddr), 8407 ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount)); 8408 } 8409 return; 8410 } 8411 8412 case ClusterCommand: 8413 sub_cmd = ddi_get32(acc, 8414 (void *)&(((uint32_t *)(void *)fibp->data)[0])); 8415 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds); 8416 break; 8417 8418 case AifRequest: 8419 sub_cmd = ddi_get32(acc, 8420 (void *)&(((uint32_t *)(void *)fibp->data)[0])); 8421 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds); 8422 break; 8423 8424 default: 8425 break; 8426 } 8427 8428 fib_size = ddi_get16(acc, &(fibp->Header.Size)); 8429 if (subcmdstr) 8430 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 8431 subcmdstr, fib_size); 8432 else if (cmdstr && sub_cmd == (uint32_t)-1) 8433 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 8434 cmdstr, fib_size); 8435 else if (cmdstr) 8436 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d", 8437 cmdstr, sub_cmd, fib_size); 8438 else 8439 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d", 8440 fib_cmd, fib_size); 8441 } 8442 8443 static void 8444 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif) 8445 { 8446 int aif_command; 8447 uint32_t aif_seqnumber; 8448 int aif_en_type; 8449 char *str; 8450 8451 aif_command = LE_32(aif->command); 8452 aif_seqnumber = LE_32(aif->seqNumber); 8453 aif_en_type = LE_32(aif->data.EN.type); 8454 8455 switch (aif_command) { 8456 case AifCmdEventNotify: 8457 str = aac_cmd_name(aif_en_type, aac_aifens); 8458 if (str) 8459 aac_printf(softs, CE_NOTE, "AIF! %s", str); 8460 else 8461 aac_printf(softs, CE_NOTE, "AIF! 
Unknown(0x%x)", 8462 aif_en_type); 8463 break; 8464 8465 case AifCmdJobProgress: 8466 switch (LE_32(aif->data.PR[0].status)) { 8467 case AifJobStsSuccess: 8468 str = "success"; break; 8469 case AifJobStsFinished: 8470 str = "finished"; break; 8471 case AifJobStsAborted: 8472 str = "aborted"; break; 8473 case AifJobStsFailed: 8474 str = "failed"; break; 8475 case AifJobStsSuspended: 8476 str = "suspended"; break; 8477 case AifJobStsRunning: 8478 str = "running"; break; 8479 default: 8480 str = "unknown"; break; 8481 } 8482 aac_printf(softs, CE_NOTE, 8483 "AIF! JobProgress (%d) - %s (%d, %d)", 8484 aif_seqnumber, str, 8485 LE_32(aif->data.PR[0].currentTick), 8486 LE_32(aif->data.PR[0].finalTick)); 8487 break; 8488 8489 case AifCmdAPIReport: 8490 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)", 8491 aif_seqnumber); 8492 break; 8493 8494 case AifCmdDriverNotify: 8495 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)", 8496 aif_seqnumber); 8497 break; 8498 8499 default: 8500 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)", 8501 aif_command, aif_seqnumber); 8502 break; 8503 } 8504 } 8505 8506 #endif /* DEBUG */ 8507
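/*
 * Editor's illustrative sketch, not part of the original driver: the
 * SCMD_REPORT_LUNS parsing earlier in this listing accepts the peripheral,
 * flat-space and logical-unit addressing methods and decodes the first-level
 * LUN from the low 14 bits of the first two bytes of each 8-byte entry.  The
 * helper name below is hypothetical and only restates that decode; for
 * example, an entry beginning 0x40 0x05 is flat-space addressing with LUN 5.
 */
#if 0
static uint16_t
aac_rptluns_lun_sketch(const uint8_t *ent)
{
	/* ent[0] bits 7-6: addressing method; bits 5-0: LUN bits 13-8 */
	return ((uint16_t)(((ent[0] & 0x3f) << 8) | ent[1]));
}
#endif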
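/*
 * Editor's illustrative sketch, not part of the original driver: the firmware
 * print buffer handshake used by aac_printf() above.  The string-length word
 * acts as the mailbox flag -- zero means the firmware has consumed the
 * previous message, and writing a non-zero length hands the new message over,
 * which is why the length is written last.  aac_fw_print_sketch() is a
 * hypothetical helper that only restates the order of operations with the
 * same accessors; the real code also falls back to kernel printing on
 * timeout.
 */
#if 0
static int
aac_fw_print_sketch(struct aac_softstate *softs, char *msg, uint32_t len)
{
	int i;

	/* 1. Wait for the previous length word to be cleared by firmware */
	for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
		if (PCI_MEM_GET32(softs, softs->debug_buf_offset +
		    AAC_FW_DBG_STRLEN_OFFSET) == 0)
			break;
		drv_usecwait(1000);
	}
	if (i == AAC_PRINT_TIMEOUT)
		return (AACERR);

	/* 2. Copy the message text in right after the buffer header */
	PCI_MEM_REP_PUT8(softs, softs->debug_buf_offset +
	    softs->debug_header_size, msg, len);
	/* 3. Write the flags next ... */
	PCI_MEM_PUT32(softs, softs->debug_buf_offset +
	    AAC_FW_DBG_FLAGS_OFFSET, softs->debug_fw_flags);
	/* 4. ... and the length last, which is the "go" signal */
	PCI_MEM_PUT32(softs, softs->debug_buf_offset +
	    AAC_FW_DBG_STRLEN_OFFSET, len);
	return (AACOK);
}
#endif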
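/*
 * Editor's illustrative sketch, not part of the original driver: each of the
 * aac_key_strings tables above is terminated by a key of -1, and
 * aac_cmd_name() walks the table until it reaches that sentinel, returning
 * NULL for an unknown key.  The table and keys below are hypothetical.
 */
#if 0
static struct aac_key_strings example_cmds[] = {
	0x01, "ExampleCmdOne",
	0x02, "ExampleCmdTwo",
	-1, NULL
};
/* aac_cmd_name(0x02, example_cmds) returns "ExampleCmdTwo" */
/* aac_cmd_name(0x7f, example_cmds) returns NULL */
#endif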