1 /*- 2 * 3 * Copyright (c) 1997-2006 by Matthew Jacob 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice immediately at the beginning of the file, without modification, 11 * this list of conditions, and the following disclaimer. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 */ 28 /* 29 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 30 * FreeBSD Version. 31 */ 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/module.h> 39 #if __FreeBSD_version >= 700000 40 #include <sys/linker.h> 41 #include <sys/firmware.h> 42 #endif 43 #include <sys/bus.h> 44 #if __FreeBSD_version < 500000 45 #include <pci/pcireg.h> 46 #include <pci/pcivar.h> 47 #include <machine/bus_memio.h> 48 #include <machine/bus_pio.h> 49 #else 50 #include <sys/stdint.h> 51 #include <dev/pci/pcireg.h> 52 #include <dev/pci/pcivar.h> 53 #endif 54 #include <machine/bus.h> 55 #include <machine/resource.h> 56 #include <sys/rman.h> 57 #include <sys/malloc.h> 58 59 #include <dev/isp/isp_freebsd.h> 60 61 #if __FreeBSD_version < 500000 62 #define BUS_PROBE_DEFAULT 0 63 #endif 64 65 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); 66 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); 67 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); 68 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); 69 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); 70 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); 71 static int 72 isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 73 static int 74 isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 75 static int 76 isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 77 static int isp_pci_mbxdma(ispsoftc_t *); 78 static int 79 isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t); 80 static void 81 isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t); 82 83 84 static void isp_pci_reset1(ispsoftc_t *); 85 static void isp_pci_dumpregs(ispsoftc_t *, const char *); 86 87 static struct ispmdvec mdvec = { 88 isp_pci_rd_isr, 89 isp_pci_rd_reg, 90 isp_pci_wr_reg, 91 isp_pci_mbxdma, 92 isp_pci_dmasetup, 93 isp_pci_dmateardown, 94 NULL, 95 isp_pci_reset1, 96 isp_pci_dumpregs, 97 NULL, 98 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 99 }; 100 101 static struct ispmdvec mdvec_1080 = { 102 isp_pci_rd_isr, 103 isp_pci_rd_reg_1080, 104 isp_pci_wr_reg_1080, 105 isp_pci_mbxdma, 106 isp_pci_dmasetup, 107 isp_pci_dmateardown, 108 NULL, 109 isp_pci_reset1, 110 isp_pci_dumpregs, 111 NULL, 112 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 113 }; 114 115 static struct ispmdvec mdvec_12160 = { 116 isp_pci_rd_isr, 117 isp_pci_rd_reg_1080, 118 isp_pci_wr_reg_1080, 119 isp_pci_mbxdma, 120 isp_pci_dmasetup, 121 isp_pci_dmateardown, 122 NULL, 123 isp_pci_reset1, 124 isp_pci_dumpregs, 125 NULL, 126 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 127 }; 128 129 static struct ispmdvec mdvec_2100 = { 130 isp_pci_rd_isr, 131 isp_pci_rd_reg, 132 isp_pci_wr_reg, 133 isp_pci_mbxdma, 134 isp_pci_dmasetup, 135 isp_pci_dmateardown, 136 NULL, 137 isp_pci_reset1, 138 isp_pci_dumpregs 139 }; 140 141 static struct ispmdvec mdvec_2200 = { 142 isp_pci_rd_isr, 143 isp_pci_rd_reg, 144 isp_pci_wr_reg, 145 isp_pci_mbxdma, 146 isp_pci_dmasetup, 147 isp_pci_dmateardown, 148 NULL, 149 isp_pci_reset1, 150 isp_pci_dumpregs 151 }; 152 153 static struct ispmdvec mdvec_2300 = { 154 isp_pci_rd_isr_2300, 155 isp_pci_rd_reg, 156 isp_pci_wr_reg, 157 isp_pci_mbxdma, 158 isp_pci_dmasetup, 159 isp_pci_dmateardown, 160 NULL, 161 isp_pci_reset1, 162 isp_pci_dumpregs 163 }; 164 165 static struct ispmdvec mdvec_2400 = { 166 isp_pci_rd_isr_2400, 167 isp_pci_rd_reg_2400, 168 isp_pci_wr_reg_2400, 169 isp_pci_mbxdma, 170 isp_pci_dmasetup, 171 isp_pci_dmateardown, 172 NULL, 173 isp_pci_reset1, 174 NULL 175 }; 176 177 #ifndef PCIM_CMD_INVEN 178 #define PCIM_CMD_INVEN 0x10 179 #endif 180 #ifndef PCIM_CMD_BUSMASTEREN 181 #define PCIM_CMD_BUSMASTEREN 0x0004 182 #endif 183 #ifndef PCIM_CMD_PERRESPEN 184 #define PCIM_CMD_PERRESPEN 0x0040 185 #endif 186 #ifndef PCIM_CMD_SEREN 187 #define PCIM_CMD_SEREN 0x0100 188 #endif 189 #ifndef PCIM_CMD_INTX_DISABLE 190 #define PCIM_CMD_INTX_DISABLE 0x0400 191 #endif 192 193 #ifndef PCIR_COMMAND 194 #define PCIR_COMMAND 0x04 195 #endif 196 197 #ifndef PCIR_CACHELNSZ 198 #define PCIR_CACHELNSZ 0x0c 199 #endif 200 201 #ifndef PCIR_LATTIMER 202 #define PCIR_LATTIMER 0x0d 203 #endif 204 205 #ifndef PCIR_ROMADDR 206 #define PCIR_ROMADDR 0x30 207 #endif 208 209 #ifndef PCI_VENDOR_QLOGIC 210 #define PCI_VENDOR_QLOGIC 0x1077 211 #endif 212 213 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 214 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 215 #endif 216 217 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 218 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 219 #endif 220 221 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 222 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 223 #endif 224 225 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 226 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 227 #endif 228 229 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 230 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 231 #endif 232 233 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 234 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 235 #endif 236 237 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 238 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 239 #endif 240 241 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 242 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 243 #endif 244 245 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 246 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 247 #endif 248 249 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 250 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 251 #endif 252 253 #ifndef PCI_PRODUCT_QLOGIC_ISP2322 254 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 255 #endif 256 257 #ifndef PCI_PRODUCT_QLOGIC_ISP2422 258 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 259 #endif 260 261 #ifndef PCI_PRODUCT_QLOGIC_ISP6312 262 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 263 #endif 264 265 #ifndef PCI_PRODUCT_QLOGIC_ISP6322 266 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 267 #endif 268 269 270 #define PCI_QLOGIC_ISP1020 \ 271 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 272 273 #define PCI_QLOGIC_ISP1080 \ 274 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 275 276 #define PCI_QLOGIC_ISP10160 \ 277 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 278 279 #define PCI_QLOGIC_ISP12160 \ 280 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 281 282 #define PCI_QLOGIC_ISP1240 \ 283 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 284 285 #define PCI_QLOGIC_ISP1280 \ 286 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 287 288 #define PCI_QLOGIC_ISP2100 \ 289 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 290 291 #define PCI_QLOGIC_ISP2200 \ 292 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 293 294 #define PCI_QLOGIC_ISP2300 \ 295 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 296 297 #define PCI_QLOGIC_ISP2312 \ 298 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 299 300 #define PCI_QLOGIC_ISP2322 \ 301 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 302 303 #define PCI_QLOGIC_ISP2422 \ 304 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 305 306 #define PCI_QLOGIC_ISP6312 \ 307 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 308 309 #define PCI_QLOGIC_ISP6322 \ 310 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) 311 312 /* 313 * Odd case for some AMI raid cards... We need to *not* attach to this. 314 */ 315 #define AMI_RAID_SUBVENDOR_ID 0x101e 316 317 #define IO_MAP_REG 0x10 318 #define MEM_MAP_REG 0x14 319 320 #define PCI_DFLT_LTNCY 0x40 321 #define PCI_DFLT_LNSZ 0x10 322 323 static int isp_pci_probe (device_t); 324 static int isp_pci_attach (device_t); 325 static int isp_pci_detach (device_t); 326 327 328 struct isp_pcisoftc { 329 ispsoftc_t pci_isp; 330 device_t pci_dev; 331 struct resource * pci_reg; 332 bus_space_tag_t pci_st; 333 bus_space_handle_t pci_sh; 334 void * ih; 335 int16_t pci_poff[_NREG_BLKS]; 336 bus_dma_tag_t dmat; 337 bus_dmamap_t *dmaps; 338 }; 339 340 341 static device_method_t isp_pci_methods[] = { 342 /* Device interface */ 343 DEVMETHOD(device_probe, isp_pci_probe), 344 DEVMETHOD(device_attach, isp_pci_attach), 345 DEVMETHOD(device_detach, isp_pci_detach), 346 { 0, 0 } 347 }; 348 static void isp_pci_intr(void *); 349 350 static driver_t isp_pci_driver = { 351 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 352 }; 353 static devclass_t isp_devclass; 354 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 355 #if __FreeBSD_version >= 700000 356 MODULE_DEPEND(isp, ispfw, 1, 1, 1); 357 MODULE_DEPEND(isp, firmware, 1, 1, 1); 358 #else 359 extern ispfwfunc *isp_get_firmware_p; 360 #endif 361 362 static int 363 isp_pci_probe(device_t dev) 364 { 365 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 366 case PCI_QLOGIC_ISP1020: 367 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 368 break; 369 case PCI_QLOGIC_ISP1080: 370 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 371 break; 372 case PCI_QLOGIC_ISP1240: 373 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 374 break; 375 case PCI_QLOGIC_ISP1280: 376 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 377 break; 378 case PCI_QLOGIC_ISP10160: 379 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 380 break; 381 case PCI_QLOGIC_ISP12160: 382 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 383 return (ENXIO); 384 } 385 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 386 break; 387 case PCI_QLOGIC_ISP2100: 388 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 389 break; 390 case PCI_QLOGIC_ISP2200: 391 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 392 break; 393 case PCI_QLOGIC_ISP2300: 394 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 395 break; 396 case PCI_QLOGIC_ISP2312: 397 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 398 break; 399 case PCI_QLOGIC_ISP2322: 400 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 401 break; 402 case PCI_QLOGIC_ISP2422: 403 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 404 break; 405 case PCI_QLOGIC_ISP6312: 406 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 407 break; 408 case PCI_QLOGIC_ISP6322: 409 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); 410 break; 411 default: 412 return (ENXIO); 413 } 414 if (isp_announced == 0 && bootverbose) { 415 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 416 "Core Version %d.%d\n", 417 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 418 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 419 isp_announced++; 420 } 421 /* 422 * XXXX: Here is where we might load the f/w module 423 * XXXX: (or increase a reference count to it). 424 */ 425 return (BUS_PROBE_DEFAULT); 426 } 427 428 #if __FreeBSD_version < 500000 429 static void 430 isp_get_options(device_t dev, ispsoftc_t *isp) 431 { 432 uint64_t wwn; 433 int bitmap, unit; 434 435 unit = device_get_unit(dev); 436 if (getenv_int("isp_disable", &bitmap)) { 437 if (bitmap & (1 << unit)) { 438 isp->isp_osinfo.disabled = 1; 439 return; 440 } 441 } 442 443 if (getenv_int("isp_no_fwload", &bitmap)) { 444 if (bitmap & (1 << unit)) 445 isp->isp_confopts |= ISP_CFG_NORELOAD; 446 } 447 if (getenv_int("isp_fwload", &bitmap)) { 448 if (bitmap & (1 << unit)) 449 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 450 } 451 if (getenv_int("isp_no_nvram", &bitmap)) { 452 if (bitmap & (1 << unit)) 453 isp->isp_confopts |= ISP_CFG_NONVRAM; 454 } 455 if (getenv_int("isp_nvram", &bitmap)) { 456 if (bitmap & (1 << unit)) 457 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 458 } 459 if (getenv_int("isp_fcduplex", &bitmap)) { 460 if (bitmap & (1 << unit)) 461 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 462 } 463 if (getenv_int("isp_no_fcduplex", &bitmap)) { 464 if (bitmap & (1 << unit)) 465 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 466 } 467 if (getenv_int("isp_nport", &bitmap)) { 468 if (bitmap & (1 << unit)) 469 isp->isp_confopts |= ISP_CFG_NPORT; 470 } 471 472 /* 473 * Because the resource_*_value functions can neither return 474 * 64 bit integer values, nor can they be directly coerced 475 * to interpret the right hand side of the assignment as 476 * you want them to interpret it, we have to force WWN 477 * hint replacement to specify WWN strings with a leading 478 * 'w' (e..g w50000000aaaa0001). Sigh. 479 */ 480 if (getenv_quad("isp_portwwn", &wwn)) { 481 isp->isp_osinfo.default_port_wwn = wwn; 482 isp->isp_confopts |= ISP_CFG_OWNWWPN; 483 } 484 if (isp->isp_osinfo.default_port_wwn == 0) { 485 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 486 } 487 488 if (getenv_quad("isp_nodewwn", &wwn)) { 489 isp->isp_osinfo.default_node_wwn = wwn; 490 isp->isp_confopts |= ISP_CFG_OWNWWNN; 491 } 492 if (isp->isp_osinfo.default_node_wwn == 0) { 493 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 494 } 495 496 bitmap = 0; 497 (void) getenv_int("isp_debug", &bitmap); 498 if (bitmap) { 499 isp->isp_dblev = bitmap; 500 } else { 501 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 502 } 503 if (bootverbose) { 504 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 505 } 506 507 bitmap = 0; 508 (void) getenv_int("isp_fabric_hysteresis", &bitmap); 509 if (bitmap >= 0 && bitmap < 256) { 510 isp->isp_osinfo.hysteresis = bitmap; 511 } else { 512 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 513 } 514 515 bitmap = 0; 516 (void) getenv_int("isp_loop_down_limit", &bitmap); 517 if (bitmap >= 0 && bitmap < 0xffff) { 518 isp->isp_osinfo.loop_down_limit = bitmap; 519 } else { 520 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 521 } 522 523 524 #ifdef ISP_FW_CRASH_DUMP 525 bitmap = 0; 526 if (getenv_int("isp_fw_dump_enable", &bitmap)) { 527 if (bitmap & (1 << unit) { 528 size_t amt = 0; 529 if (IS_2200(isp)) { 530 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 531 } else if (IS_23XX(isp)) { 532 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 533 } 534 if (amt) { 535 FCPARAM(isp)->isp_dump_data = 536 malloc(amt, M_DEVBUF, M_WAITOK); 537 memset(FCPARAM(isp)->isp_dump_data, 0, amt); 538 } else { 539 device_printf(dev, 540 "f/w crash dumps not supported for card\n"); 541 } 542 } 543 } 544 #endif 545 bitmap = 0; 546 if (getenv_int("role", &bitmap)) { 547 isp->isp_role = bitmap; 548 } else { 549 isp->isp_role = ISP_DEFAULT_ROLES; 550 } 551 } 552 553 static void 554 isp_get_pci_options(device_t dev, int *m1, int *m2) 555 { 556 int bitmap; 557 int unit = device_get_unit(dev); 558 559 *m1 = PCIM_CMD_MEMEN; 560 *m2 = PCIM_CMD_PORTEN; 561 if (getenv_int("isp_mem_map", &bitmap)) { 562 if (bitmap & (1 << unit)) { 563 *m1 = PCIM_CMD_MEMEN; 564 *m2 = PCIM_CMD_PORTEN; 565 } 566 } 567 bitmap = 0; 568 if (getenv_int("isp_io_map", &bitmap)) { 569 if (bitmap & (1 << unit)) { 570 *m1 = PCIM_CMD_PORTEN; 571 *m2 = PCIM_CMD_MEMEN; 572 } 573 } 574 } 575 #else 576 static void 577 isp_get_options(device_t dev, ispsoftc_t *isp) 578 { 579 int tval; 580 const char *sptr; 581 /* 582 * Figure out if we're supposed to skip this one. 583 */ 584 585 tval = 0; 586 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 587 "disable", &tval) == 0 && tval) { 588 device_printf(dev, "disabled at user request\n"); 589 isp->isp_osinfo.disabled = 1; 590 return; 591 } 592 593 tval = -1; 594 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 595 "role", &tval) == 0 && tval != -1) { 596 tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 597 isp->isp_role = tval; 598 device_printf(dev, "setting role to 0x%x\n", isp->isp_role); 599 } else { 600 #ifdef ISP_TARGET_MODE 601 isp->isp_role = ISP_ROLE_TARGET; 602 #else 603 isp->isp_role = ISP_DEFAULT_ROLES; 604 #endif 605 } 606 607 tval = 0; 608 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 609 "fwload_disable", &tval) == 0 && tval != 0) { 610 isp->isp_confopts |= ISP_CFG_NORELOAD; 611 } 612 tval = 0; 613 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 614 "ignore_nvram", &tval) == 0 && tval != 0) { 615 isp->isp_confopts |= ISP_CFG_NONVRAM; 616 } 617 tval = 0; 618 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 619 "fullduplex", &tval) == 0 && tval != 0) { 620 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 621 } 622 #ifdef ISP_FW_CRASH_DUMP 623 tval = 0; 624 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 625 "fw_dump_enable", &tval) == 0 && tval != 0) { 626 size_t amt = 0; 627 if (IS_2200(isp)) { 628 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 629 } else if (IS_23XX(isp)) { 630 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 631 } 632 if (amt) { 633 FCPARAM(isp)->isp_dump_data = 634 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 635 } else { 636 device_printf(dev, 637 "f/w crash dumps not supported for this model\n"); 638 } 639 } 640 #endif 641 642 sptr = 0; 643 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 644 "topology", (const char **) &sptr) == 0 && sptr != 0) { 645 if (strcmp(sptr, "lport") == 0) { 646 isp->isp_confopts |= ISP_CFG_LPORT; 647 } else if (strcmp(sptr, "nport") == 0) { 648 isp->isp_confopts |= ISP_CFG_NPORT; 649 } else if (strcmp(sptr, "lport-only") == 0) { 650 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 651 } else if (strcmp(sptr, "nport-only") == 0) { 652 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 653 } 654 } 655 656 /* 657 * Because the resource_*_value functions can neither return 658 * 64 bit integer values, nor can they be directly coerced 659 * to interpret the right hand side of the assignment as 660 * you want them to interpret it, we have to force WWN 661 * hint replacement to specify WWN strings with a leading 662 * 'w' (e..g w50000000aaaa0001). Sigh. 663 */ 664 sptr = 0; 665 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 666 "portwwn", (const char **) &sptr); 667 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 668 char *eptr = 0; 669 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 670 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 671 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 672 isp->isp_osinfo.default_port_wwn = 0; 673 } else { 674 isp->isp_confopts |= ISP_CFG_OWNWWPN; 675 } 676 } 677 if (isp->isp_osinfo.default_port_wwn == 0) { 678 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 679 } 680 681 sptr = 0; 682 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 683 "nodewwn", (const char **) &sptr); 684 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 685 char *eptr = 0; 686 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 687 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 688 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 689 isp->isp_osinfo.default_node_wwn = 0; 690 } else { 691 isp->isp_confopts |= ISP_CFG_OWNWWNN; 692 } 693 } 694 if (isp->isp_osinfo.default_node_wwn == 0) { 695 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 696 } 697 698 isp->isp_osinfo.default_id = -1; 699 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 700 "iid", &tval) == 0) { 701 isp->isp_osinfo.default_id = tval; 702 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 703 } 704 if (isp->isp_osinfo.default_id == -1) { 705 if (IS_FC(isp)) { 706 isp->isp_osinfo.default_id = 109; 707 } else { 708 isp->isp_osinfo.default_id = 7; 709 } 710 } 711 712 /* 713 * Set up logging levels. 714 */ 715 tval = 0; 716 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 717 "debug", &tval); 718 if (tval) { 719 isp->isp_dblev = tval; 720 } else { 721 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 722 } 723 if (bootverbose) { 724 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 725 } 726 727 tval = 0; 728 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 729 "hysteresis", &tval); 730 if (tval >= 0 && tval < 256) { 731 isp->isp_osinfo.hysteresis = tval; 732 } else { 733 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 734 } 735 736 tval = 0; 737 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 738 "loop_down_limit", &tval); 739 if (tval >= 0 && tval < 0xffff) { 740 isp->isp_osinfo.loop_down_limit = tval; 741 } else { 742 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 743 } 744 745 } 746 747 static void 748 isp_get_pci_options(device_t dev, int *m1, int *m2) 749 { 750 int tval; 751 /* 752 * Which we should try first - memory mapping or i/o mapping? 753 * 754 * We used to try memory first followed by i/o on alpha, otherwise 755 * the reverse, but we should just try memory first all the time now. 756 */ 757 *m1 = PCIM_CMD_MEMEN; 758 *m2 = PCIM_CMD_PORTEN; 759 760 tval = 0; 761 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 762 "prefer_iomap", &tval) == 0 && tval != 0) { 763 *m1 = PCIM_CMD_PORTEN; 764 *m2 = PCIM_CMD_MEMEN; 765 } 766 tval = 0; 767 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 768 "prefer_memmap", &tval) == 0 && tval != 0) { 769 *m1 = PCIM_CMD_MEMEN; 770 *m2 = PCIM_CMD_PORTEN; 771 } 772 } 773 #endif 774 775 static int 776 isp_pci_attach(device_t dev) 777 { 778 struct resource *regs, *irq; 779 int rtp, rgd, iqd, m1, m2; 780 uint32_t data, cmd, linesz, psize, basetype; 781 struct isp_pcisoftc *pcs; 782 ispsoftc_t *isp = NULL; 783 struct ispmdvec *mdvp; 784 #if __FreeBSD_version >= 500000 785 int locksetup = 0; 786 #endif 787 788 pcs = device_get_softc(dev); 789 if (pcs == NULL) { 790 device_printf(dev, "cannot get softc\n"); 791 return (ENOMEM); 792 } 793 memset(pcs, 0, sizeof (*pcs)); 794 pcs->pci_dev = dev; 795 isp = &pcs->pci_isp; 796 797 /* 798 * Get Generic Options 799 */ 800 isp_get_options(dev, isp); 801 802 /* 803 * Check to see if options have us disabled 804 */ 805 if (isp->isp_osinfo.disabled) { 806 /* 807 * But return zero to preserve unit numbering 808 */ 809 return (0); 810 } 811 812 /* 813 * Get PCI options- which in this case are just mapping preferences. 814 */ 815 isp_get_pci_options(dev, &m1, &m2); 816 817 818 linesz = PCI_DFLT_LNSZ; 819 irq = regs = NULL; 820 rgd = rtp = iqd = 0; 821 822 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 823 if (cmd & m1) { 824 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 825 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 826 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 827 } 828 if (regs == NULL && (cmd & m2)) { 829 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 830 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 831 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 832 } 833 if (regs == NULL) { 834 device_printf(dev, "unable to map any ports\n"); 835 goto bad; 836 } 837 if (bootverbose) { 838 device_printf(dev, "using %s space register mapping\n", 839 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 840 } 841 pcs->pci_dev = dev; 842 pcs->pci_reg = regs; 843 pcs->pci_st = rman_get_bustag(regs); 844 pcs->pci_sh = rman_get_bushandle(regs); 845 846 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 847 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 848 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 849 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 850 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 851 mdvp = &mdvec; 852 basetype = ISP_HA_SCSI_UNKNOWN; 853 psize = sizeof (sdparam); 854 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 855 mdvp = &mdvec; 856 basetype = ISP_HA_SCSI_UNKNOWN; 857 psize = sizeof (sdparam); 858 } 859 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 860 mdvp = &mdvec_1080; 861 basetype = ISP_HA_SCSI_1080; 862 psize = sizeof (sdparam); 863 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 864 ISP1080_DMA_REGS_OFF; 865 } 866 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 867 mdvp = &mdvec_1080; 868 basetype = ISP_HA_SCSI_1240; 869 psize = 2 * sizeof (sdparam); 870 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 871 ISP1080_DMA_REGS_OFF; 872 } 873 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 874 mdvp = &mdvec_1080; 875 basetype = ISP_HA_SCSI_1280; 876 psize = 2 * sizeof (sdparam); 877 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 878 ISP1080_DMA_REGS_OFF; 879 } 880 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 881 mdvp = &mdvec_12160; 882 basetype = ISP_HA_SCSI_10160; 883 psize = sizeof (sdparam); 884 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 885 ISP1080_DMA_REGS_OFF; 886 } 887 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 888 mdvp = &mdvec_12160; 889 basetype = ISP_HA_SCSI_12160; 890 psize = 2 * sizeof (sdparam); 891 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 892 ISP1080_DMA_REGS_OFF; 893 } 894 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 895 mdvp = &mdvec_2100; 896 basetype = ISP_HA_FC_2100; 897 psize = sizeof (fcparam); 898 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 899 PCI_MBOX_REGS2100_OFF; 900 if (pci_get_revid(dev) < 3) { 901 /* 902 * XXX: Need to get the actual revision 903 * XXX: number of the 2100 FB. At any rate, 904 * XXX: lower cache line size for early revision 905 * XXX; boards. 906 */ 907 linesz = 1; 908 } 909 } 910 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 911 mdvp = &mdvec_2200; 912 basetype = ISP_HA_FC_2200; 913 psize = sizeof (fcparam); 914 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 915 PCI_MBOX_REGS2100_OFF; 916 } 917 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 918 mdvp = &mdvec_2300; 919 basetype = ISP_HA_FC_2300; 920 psize = sizeof (fcparam); 921 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 922 PCI_MBOX_REGS2300_OFF; 923 } 924 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 925 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 926 mdvp = &mdvec_2300; 927 basetype = ISP_HA_FC_2312; 928 psize = sizeof (fcparam); 929 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 930 PCI_MBOX_REGS2300_OFF; 931 } 932 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 || 933 pci_get_devid(dev) == PCI_QLOGIC_ISP6322) { 934 mdvp = &mdvec_2300; 935 basetype = ISP_HA_FC_2322; 936 psize = sizeof (fcparam); 937 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 938 PCI_MBOX_REGS2300_OFF; 939 } 940 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) { 941 mdvp = &mdvec_2400; 942 basetype = ISP_HA_FC_2400; 943 psize = sizeof (fcparam); 944 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 945 PCI_MBOX_REGS2400_OFF; 946 } 947 isp = &pcs->pci_isp; 948 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 949 if (isp->isp_param == NULL) { 950 device_printf(dev, "cannot allocate parameter data\n"); 951 goto bad; 952 } 953 isp->isp_mdvec = mdvp; 954 isp->isp_type = basetype; 955 isp->isp_revision = pci_get_revid(dev); 956 isp->isp_dev = dev; 957 958 #if __FreeBSD_version >= 700000 959 /* 960 * Try and find firmware for this device. 961 */ 962 { 963 char fwname[32]; 964 unsigned int did = pci_get_device(dev); 965 966 /* 967 * Map a few pci ids to fw names 968 */ 969 switch (did) { 970 case PCI_PRODUCT_QLOGIC_ISP1020: 971 did = 0x1040; 972 break; 973 case PCI_PRODUCT_QLOGIC_ISP1240: 974 did = 0x1080; 975 break; 976 case PCI_PRODUCT_QLOGIC_ISP10160: 977 case PCI_PRODUCT_QLOGIC_ISP12160: 978 did = 0x12160; 979 break; 980 case PCI_PRODUCT_QLOGIC_ISP6312: 981 case PCI_PRODUCT_QLOGIC_ISP2312: 982 did = 0x2300; 983 break; 984 case PCI_PRODUCT_QLOGIC_ISP6322: 985 did = 0x2322; 986 break; 987 case PCI_PRODUCT_QLOGIC_ISP2422: 988 did = 0x2400; 989 break; 990 default: 991 break; 992 } 993 994 isp->isp_osinfo.fw = NULL; 995 if (isp->isp_role & ISP_ROLE_TARGET) { 996 snprintf(fwname, sizeof (fwname), "isp_%04x_it", did); 997 isp->isp_osinfo.fw = firmware_get(fwname); 998 } 999 if (isp->isp_osinfo.fw == NULL) { 1000 snprintf(fwname, sizeof (fwname), "isp_%04x", did); 1001 isp->isp_osinfo.fw = firmware_get(fwname); 1002 } 1003 if (isp->isp_osinfo.fw != NULL) { 1004 union { 1005 const void *fred; 1006 uint16_t *bob; 1007 } u; 1008 u.fred = isp->isp_osinfo.fw->data; 1009 isp->isp_mdvec->dv_ispfw = u.bob; 1010 } 1011 } 1012 #else 1013 if (isp_get_firmware_p) { 1014 int device = (int) pci_get_device(dev); 1015 #ifdef ISP_TARGET_MODE 1016 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 1017 #else 1018 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 1019 #endif 1020 } 1021 #endif 1022 1023 /* 1024 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 1025 * are set. 1026 */ 1027 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 1028 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 1029 1030 if (IS_2300(isp)) { /* per QLogic errata */ 1031 cmd &= ~PCIM_CMD_INVEN; 1032 } 1033 1034 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 1035 cmd &= ~PCIM_CMD_INTX_DISABLE; 1036 } 1037 1038 if (IS_24XX(isp)) { 1039 int reg; 1040 1041 cmd &= ~PCIM_CMD_INTX_DISABLE; 1042 1043 /* 1044 * Is this a PCI-X card? If so, set max read byte count. 1045 */ 1046 if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { 1047 uint16_t pxcmd; 1048 reg += 2; 1049 1050 pxcmd = pci_read_config(dev, reg, 2); 1051 pxcmd &= ~0xc; 1052 pxcmd |= 0x8; 1053 pci_write_config(dev, reg, 2, pxcmd); 1054 } 1055 1056 /* 1057 * Is this a PCI Express card? If so, set max read byte count. 1058 */ 1059 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 1060 uint16_t pectl; 1061 1062 reg += 0x8; 1063 pectl = pci_read_config(dev, reg, 2); 1064 pectl &= ~0x7000; 1065 pectl |= 0x4000; 1066 pci_write_config(dev, reg, 2, pectl); 1067 } 1068 } 1069 1070 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 1071 1072 /* 1073 * Make sure the Cache Line Size register is set sensibly. 1074 */ 1075 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 1076 if (data != linesz) { 1077 data = PCI_DFLT_LNSZ; 1078 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 1079 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 1080 } 1081 1082 /* 1083 * Make sure the Latency Timer is sane. 1084 */ 1085 data = pci_read_config(dev, PCIR_LATTIMER, 1); 1086 if (data < PCI_DFLT_LTNCY) { 1087 data = PCI_DFLT_LTNCY; 1088 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 1089 pci_write_config(dev, PCIR_LATTIMER, data, 1); 1090 } 1091 1092 /* 1093 * Make sure we've disabled the ROM. 1094 */ 1095 data = pci_read_config(dev, PCIR_ROMADDR, 4); 1096 data &= ~1; 1097 pci_write_config(dev, PCIR_ROMADDR, data, 4); 1098 1099 iqd = 0; 1100 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 1101 RF_ACTIVE | RF_SHAREABLE); 1102 if (irq == NULL) { 1103 device_printf(dev, "could not allocate interrupt\n"); 1104 goto bad; 1105 } 1106 1107 #if __FreeBSD_version >= 500000 1108 /* Make sure the lock is set up. */ 1109 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 1110 locksetup++; 1111 #endif 1112 1113 if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) { 1114 device_printf(dev, "could not setup interrupt\n"); 1115 goto bad; 1116 } 1117 1118 /* 1119 * Last minute checks... 1120 */ 1121 if (IS_23XX(isp) || IS_24XX(isp)) { 1122 isp->isp_port = pci_get_function(dev); 1123 } 1124 1125 if (IS_23XX(isp)) { 1126 /* 1127 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 1128 */ 1129 isp->isp_touched = 1; 1130 } 1131 1132 /* 1133 * Make sure we're in reset state. 1134 */ 1135 ISP_LOCK(isp); 1136 isp_reset(isp); 1137 if (isp->isp_state != ISP_RESETSTATE) { 1138 ISP_UNLOCK(isp); 1139 goto bad; 1140 } 1141 isp_init(isp); 1142 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 1143 isp_uninit(isp); 1144 ISP_UNLOCK(isp); 1145 goto bad; 1146 } 1147 isp_attach(isp); 1148 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 1149 isp_uninit(isp); 1150 ISP_UNLOCK(isp); 1151 goto bad; 1152 } 1153 /* 1154 * XXXX: Here is where we might unload the f/w module 1155 * XXXX: (or decrease the reference count to it). 1156 */ 1157 ISP_UNLOCK(isp); 1158 1159 return (0); 1160 1161 bad: 1162 1163 if (pcs && pcs->ih) { 1164 (void) bus_teardown_intr(dev, irq, pcs->ih); 1165 } 1166 1167 #if __FreeBSD_version >= 500000 1168 if (locksetup && isp) { 1169 mtx_destroy(&isp->isp_osinfo.lock); 1170 } 1171 #endif 1172 1173 if (irq) { 1174 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 1175 } 1176 1177 1178 if (regs) { 1179 (void) bus_release_resource(dev, rtp, rgd, regs); 1180 } 1181 1182 if (pcs) { 1183 if (pcs->pci_isp.isp_param) { 1184 #ifdef ISP_FW_CRASH_DUMP 1185 if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) { 1186 free(FCPARAM(isp)->isp_dump_data, M_DEVBUF); 1187 } 1188 #endif 1189 free(pcs->pci_isp.isp_param, M_DEVBUF); 1190 } 1191 } 1192 1193 /* 1194 * XXXX: Here is where we might unload the f/w module 1195 * XXXX: (or decrease the reference count to it). 1196 */ 1197 return (ENXIO); 1198 } 1199 1200 static int 1201 isp_pci_detach(device_t dev) 1202 { 1203 struct isp_pcisoftc *pcs; 1204 ispsoftc_t *isp; 1205 1206 pcs = device_get_softc(dev); 1207 if (pcs == NULL) { 1208 return (ENXIO); 1209 } 1210 isp = (ispsoftc_t *) pcs; 1211 ISP_DISABLE_INTS(isp); 1212 return (0); 1213 } 1214 1215 static void 1216 isp_pci_intr(void *arg) 1217 { 1218 ispsoftc_t *isp = arg; 1219 uint32_t isr; 1220 uint16_t sema, mbox; 1221 1222 ISP_LOCK(isp); 1223 isp->isp_intcnt++; 1224 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 1225 isp->isp_intbogus++; 1226 } else { 1227 isp_intr(isp, isr, sema, mbox); 1228 } 1229 ISP_UNLOCK(isp); 1230 } 1231 1232 1233 #define IspVirt2Off(a, x) \ 1234 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 1235 _BLK_REG_SHFT] + ((x) & 0xfff)) 1236 1237 #define BXR2(pcs, off) \ 1238 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 1239 #define BXW2(pcs, off, v) \ 1240 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 1241 #define BXR4(pcs, off) \ 1242 bus_space_read_4(pcs->pci_st, pcs->pci_sh, off) 1243 #define BXW4(pcs, off, v) \ 1244 bus_space_write_4(pcs->pci_st, pcs->pci_sh, off, v) 1245 1246 1247 static __inline int 1248 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp) 1249 { 1250 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1251 uint32_t val0, val1; 1252 int i = 0; 1253 1254 do { 1255 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 1256 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 1257 } while (val0 != val1 && ++i < 1000); 1258 if (val0 != val1) { 1259 return (1); 1260 } 1261 *rp = val0; 1262 return (0); 1263 } 1264 1265 static int 1266 isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, 1267 uint16_t *semap, uint16_t *mbp) 1268 { 1269 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1270 uint16_t isr, sema; 1271 1272 if (IS_2100(isp)) { 1273 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 1274 return (0); 1275 } 1276 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 1277 return (0); 1278 } 1279 } else { 1280 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 1281 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 1282 } 1283 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 1284 isr &= INT_PENDING_MASK(isp); 1285 sema &= BIU_SEMA_LOCK; 1286 if (isr == 0 && sema == 0) { 1287 return (0); 1288 } 1289 *isrp = isr; 1290 if ((*semap = sema) != 0) { 1291 if (IS_2100(isp)) { 1292 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 1293 return (0); 1294 } 1295 } else { 1296 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 1297 } 1298 } 1299 return (1); 1300 } 1301 1302 static int 1303 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, 1304 uint16_t *semap, uint16_t *mbox0p) 1305 { 1306 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1307 uint32_t hccr; 1308 uint32_t r2hisr; 1309 1310 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 1311 *isrp = 0; 1312 return (0); 1313 } 1314 r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU_R2HSTSLO)); 1315 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1316 if ((r2hisr & BIU_R2HST_INTR) == 0) { 1317 *isrp = 0; 1318 return (0); 1319 } 1320 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 1321 case ISPR2HST_ROM_MBX_OK: 1322 case ISPR2HST_ROM_MBX_FAIL: 1323 case ISPR2HST_MBX_OK: 1324 case ISPR2HST_MBX_FAIL: 1325 case ISPR2HST_ASYNC_EVENT: 1326 *isrp = r2hisr & 0xffff; 1327 *mbox0p = (r2hisr >> 16); 1328 *semap = 1; 1329 return (1); 1330 case ISPR2HST_RIO_16: 1331 *isrp = r2hisr & 0xffff; 1332 *mbox0p = ASYNC_RIO1; 1333 *semap = 1; 1334 return (1); 1335 case ISPR2HST_FPOST: 1336 *isrp = r2hisr & 0xffff; 1337 *mbox0p = ASYNC_CMD_CMPLT; 1338 *semap = 1; 1339 return (1); 1340 case ISPR2HST_FPOST_CTIO: 1341 *isrp = r2hisr & 0xffff; 1342 *mbox0p = ASYNC_CTIO_DONE; 1343 *semap = 1; 1344 return (1); 1345 case ISPR2HST_RSPQ_UPDATE: 1346 *isrp = r2hisr & 0xffff; 1347 *mbox0p = 0; 1348 *semap = 0; 1349 return (1); 1350 default: 1351 hccr = ISP_READ(isp, HCCR); 1352 if (hccr & HCCR_PAUSE) { 1353 ISP_WRITE(isp, HCCR, HCCR_RESET); 1354 isp_prt(isp, ISP_LOGERR, 1355 "RISC paused at interrupt (%x->%x\n", hccr, 1356 ISP_READ(isp, HCCR)); 1357 } else { 1358 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", 1359 r2hisr); 1360 } 1361 return (0); 1362 } 1363 } 1364 1365 static int 1366 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, 1367 uint16_t *semap, uint16_t *mbox0p) 1368 { 1369 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1370 uint32_t r2hisr; 1371 1372 r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO)); 1373 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1374 if ((r2hisr & BIU2400_R2HST_INTR) == 0) { 1375 *isrp = 0; 1376 return (0); 1377 } 1378 switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) { 1379 case ISP2400R2HST_ROM_MBX_OK: 1380 case ISP2400R2HST_ROM_MBX_FAIL: 1381 case ISP2400R2HST_MBX_OK: 1382 case ISP2400R2HST_MBX_FAIL: 1383 case ISP2400R2HST_ASYNC_EVENT: 1384 *isrp = r2hisr & 0xffff; 1385 *mbox0p = (r2hisr >> 16); 1386 *semap = 1; 1387 return (1); 1388 case ISP2400R2HST_RSPQ_UPDATE: 1389 case ISP2400R2HST_ATIO_RSPQ_UPDATE: 1390 case ISP2400R2HST_ATIO_RQST_UPDATE: 1391 *isrp = r2hisr & 0xffff; 1392 *mbox0p = 0; 1393 *semap = 0; 1394 return (1); 1395 default: 1396 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 1397 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); 1398 return (0); 1399 } 1400 } 1401 1402 static uint32_t 1403 isp_pci_rd_reg(ispsoftc_t *isp, int regoff) 1404 { 1405 uint32_t rv; 1406 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1407 int oldconf = 0; 1408 1409 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1410 /* 1411 * We will assume that someone has paused the RISC processor. 1412 */ 1413 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1414 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1415 oldconf | BIU_PCI_CONF1_SXP); 1416 } 1417 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1418 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1419 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1420 } 1421 return (rv); 1422 } 1423 1424 static void 1425 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) 1426 { 1427 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1428 int oldconf = 0; 1429 volatile int junk; 1430 1431 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1432 /* 1433 * We will assume that someone has paused the RISC processor. 1434 */ 1435 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1436 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1437 oldconf | BIU_PCI_CONF1_SXP); 1438 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1439 } 1440 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1441 junk = BXR2(pcs, IspVirt2Off(isp, regoff)); 1442 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1443 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1444 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1445 } 1446 } 1447 1448 static uint32_t 1449 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) 1450 { 1451 uint32_t rv, oc = 0; 1452 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1453 1454 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1455 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1456 uint32_t tc; 1457 /* 1458 * We will assume that someone has paused the RISC processor. 1459 */ 1460 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1461 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1462 if (regoff & SXP_BANK1_SELECT) 1463 tc |= BIU_PCI1080_CONF1_SXP1; 1464 else 1465 tc |= BIU_PCI1080_CONF1_SXP0; 1466 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1467 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1468 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1469 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1470 oc | BIU_PCI1080_CONF1_DMA); 1471 } 1472 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1473 if (oc) { 1474 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1475 } 1476 return (rv); 1477 } 1478 1479 static void 1480 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) 1481 { 1482 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1483 int oc = 0; 1484 volatile int junk; 1485 1486 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1487 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1488 uint32_t tc; 1489 /* 1490 * We will assume that someone has paused the RISC processor. 1491 */ 1492 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1493 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1494 if (regoff & SXP_BANK1_SELECT) 1495 tc |= BIU_PCI1080_CONF1_SXP1; 1496 else 1497 tc |= BIU_PCI1080_CONF1_SXP0; 1498 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1499 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1500 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1501 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1502 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1503 oc | BIU_PCI1080_CONF1_DMA); 1504 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1505 } 1506 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1507 junk = BXR2(pcs, IspVirt2Off(isp, regoff)); 1508 if (oc) { 1509 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1510 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1511 } 1512 } 1513 1514 static uint32_t 1515 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) 1516 { 1517 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1518 uint32_t rv; 1519 int block = regoff & _BLK_REG_MASK; 1520 1521 switch (block) { 1522 case BIU_BLOCK: 1523 break; 1524 case MBOX_BLOCK: 1525 return (BXR2(pcs, IspVirt2Off(pcs, regoff))); 1526 case SXP_BLOCK: 1527 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff); 1528 return (0xffffffff); 1529 case RISC_BLOCK: 1530 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff); 1531 return (0xffffffff); 1532 case DMA_BLOCK: 1533 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff); 1534 return (0xffffffff); 1535 default: 1536 isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff); 1537 return (0xffffffff); 1538 } 1539 1540 1541 switch (regoff) { 1542 case BIU2400_FLASH_ADDR: 1543 case BIU2400_FLASH_DATA: 1544 case BIU2400_ICR: 1545 case BIU2400_ISR: 1546 case BIU2400_CSR: 1547 case BIU2400_REQINP: 1548 case BIU2400_REQOUTP: 1549 case BIU2400_RSPINP: 1550 case BIU2400_RSPOUTP: 1551 case BIU2400_PRI_RQINP: 1552 case BIU2400_PRI_RSPINP: 1553 case BIU2400_ATIO_RSPINP: 1554 case BIU2400_ATIO_REQINP: 1555 case BIU2400_HCCR: 1556 case BIU2400_GPIOD: 1557 case BIU2400_GPIOE: 1558 case BIU2400_HSEMA: 1559 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)); 1560 break; 1561 case BIU2400_R2HSTSLO: 1562 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)); 1563 break; 1564 case BIU2400_R2HSTSHI: 1565 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16; 1566 break; 1567 default: 1568 isp_prt(isp, ISP_LOGERR, 1569 "isp_pci_rd_reg_2400: unknown offset %x", regoff); 1570 rv = 0xffffffff; 1571 break; 1572 } 1573 return (rv); 1574 } 1575 1576 static void 1577 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) 1578 { 1579 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1580 int block = regoff & _BLK_REG_MASK; 1581 volatile int junk; 1582 1583 switch (block) { 1584 case BIU_BLOCK: 1585 break; 1586 case MBOX_BLOCK: 1587 BXW2(pcs, IspVirt2Off(pcs, regoff), val); 1588 junk = BXR2(pcs, IspVirt2Off(pcs, regoff)); 1589 return; 1590 case SXP_BLOCK: 1591 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff); 1592 return; 1593 case RISC_BLOCK: 1594 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff); 1595 return; 1596 case DMA_BLOCK: 1597 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff); 1598 return; 1599 default: 1600 isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x", 1601 regoff); 1602 break; 1603 } 1604 1605 switch (regoff) { 1606 case BIU2400_FLASH_ADDR: 1607 case BIU2400_FLASH_DATA: 1608 case BIU2400_ICR: 1609 case BIU2400_ISR: 1610 case BIU2400_CSR: 1611 case BIU2400_REQINP: 1612 case BIU2400_REQOUTP: 1613 case BIU2400_RSPINP: 1614 case BIU2400_RSPOUTP: 1615 case BIU2400_PRI_RQINP: 1616 case BIU2400_PRI_RSPINP: 1617 case BIU2400_ATIO_RSPINP: 1618 case BIU2400_ATIO_REQINP: 1619 case BIU2400_HCCR: 1620 case BIU2400_GPIOD: 1621 case BIU2400_GPIOE: 1622 case BIU2400_HSEMA: 1623 BXW4(pcs, IspVirt2Off(pcs, regoff), val); 1624 junk = BXR4(pcs, IspVirt2Off(pcs, regoff)); 1625 break; 1626 default: 1627 isp_prt(isp, ISP_LOGERR, 1628 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff); 1629 break; 1630 } 1631 } 1632 1633 1634 struct imush { 1635 ispsoftc_t *isp; 1636 int error; 1637 }; 1638 1639 static void imc(void *, bus_dma_segment_t *, int, int); 1640 1641 static void 1642 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1643 { 1644 struct imush *imushp = (struct imush *) arg; 1645 if (error) { 1646 imushp->error = error; 1647 } else { 1648 ispsoftc_t *isp =imushp->isp; 1649 bus_addr_t addr = segs->ds_addr; 1650 1651 isp->isp_rquest_dma = addr; 1652 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1653 isp->isp_result_dma = addr; 1654 if (IS_FC(isp)) { 1655 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1656 FCPARAM(isp)->isp_scdma = addr; 1657 } 1658 } 1659 } 1660 1661 /* 1662 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1663 */ 1664 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1665 1666 #if __FreeBSD_version < 500000 1667 #define BUS_DMA_ROOTARG NULL 1668 #define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ 1669 bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) 1670 #elif __FreeBSD_version < 700020 1671 #define BUS_DMA_ROOTARG NULL 1672 #define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ 1673 bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \ 1674 busdma_lock_mutex, &Giant, z) 1675 #else 1676 #define BUS_DMA_ROOTARG bus_get_dma_tag(pcs->pci_dev) 1677 #define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ 1678 bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \ 1679 busdma_lock_mutex, &Giant, z) 1680 #endif 1681 1682 static int 1683 isp_pci_mbxdma(ispsoftc_t *isp) 1684 { 1685 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1686 caddr_t base; 1687 uint32_t len; 1688 int i, error, ns; 1689 bus_size_t slim; /* segment size */ 1690 bus_addr_t llim; /* low limit of unavailable dma */ 1691 bus_addr_t hlim; /* high limit of unavailable dma */ 1692 struct imush im; 1693 1694 /* 1695 * Already been here? If so, leave... 1696 */ 1697 if (isp->isp_rquest) { 1698 return (0); 1699 } 1700 1701 if (isp->isp_maxcmds == 0) { 1702 isp_prt(isp, ISP_LOGERR, "maxcmds not set"); 1703 return (1); 1704 } 1705 1706 hlim = BUS_SPACE_MAXADDR; 1707 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1708 slim = (bus_size_t) (1ULL << 32); 1709 llim = BUS_SPACE_MAXADDR; 1710 } else { 1711 llim = BUS_SPACE_MAXADDR_32BIT; 1712 slim = (1 << 24); 1713 } 1714 1715 /* 1716 * XXX: We don't really support 64 bit target mode for parallel scsi yet 1717 */ 1718 #ifdef ISP_TARGET_MODE 1719 if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) { 1720 isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet"); 1721 return (1); 1722 } 1723 #endif 1724 1725 ISP_UNLOCK(isp); 1726 if (isp_dma_tag_create(BUS_DMA_ROOTARG, 1, slim, llim, 1727 hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1728 &pcs->dmat)) { 1729 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1730 ISP_LOCK(isp); 1731 return (1); 1732 } 1733 1734 1735 len = sizeof (XS_T **) * isp->isp_maxcmds; 1736 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1737 if (isp->isp_xflist == NULL) { 1738 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1739 ISP_LOCK(isp); 1740 return (1); 1741 } 1742 #ifdef ISP_TARGET_MODE 1743 len = sizeof (void **) * isp->isp_maxcmds; 1744 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1745 if (isp->isp_tgtlist == NULL) { 1746 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1747 ISP_LOCK(isp); 1748 return (1); 1749 } 1750 #endif 1751 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1752 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1753 if (pcs->dmaps == NULL) { 1754 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1755 free(isp->isp_xflist, M_DEVBUF); 1756 #ifdef ISP_TARGET_MODE 1757 free(isp->isp_tgtlist, M_DEVBUF); 1758 #endif 1759 ISP_LOCK(isp); 1760 return (1); 1761 } 1762 1763 /* 1764 * Allocate and map the request, result queues, plus FC scratch area. 1765 */ 1766 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1767 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1768 if (IS_FC(isp)) { 1769 len += ISP2100_SCRLEN; 1770 } 1771 1772 ns = (len / PAGE_SIZE) + 1; 1773 /* 1774 * Create a tag for the control spaces- force it to within 32 bits. 1775 */ 1776 if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim, 1777 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1778 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) { 1779 isp_prt(isp, ISP_LOGERR, 1780 "cannot create a dma tag for control spaces"); 1781 free(pcs->dmaps, M_DEVBUF); 1782 free(isp->isp_xflist, M_DEVBUF); 1783 #ifdef ISP_TARGET_MODE 1784 free(isp->isp_tgtlist, M_DEVBUF); 1785 #endif 1786 ISP_LOCK(isp); 1787 return (1); 1788 } 1789 1790 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1791 &isp->isp_cdmap) != 0) { 1792 isp_prt(isp, ISP_LOGERR, 1793 "cannot allocate %d bytes of CCB memory", len); 1794 bus_dma_tag_destroy(isp->isp_cdmat); 1795 free(isp->isp_xflist, M_DEVBUF); 1796 #ifdef ISP_TARGET_MODE 1797 free(isp->isp_tgtlist, M_DEVBUF); 1798 #endif 1799 free(pcs->dmaps, M_DEVBUF); 1800 ISP_LOCK(isp); 1801 return (1); 1802 } 1803 1804 for (i = 0; i < isp->isp_maxcmds; i++) { 1805 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1806 if (error) { 1807 isp_prt(isp, ISP_LOGERR, 1808 "error %d creating per-cmd DMA maps", error); 1809 while (--i >= 0) { 1810 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1811 } 1812 goto bad; 1813 } 1814 } 1815 1816 im.isp = isp; 1817 im.error = 0; 1818 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1819 if (im.error) { 1820 isp_prt(isp, ISP_LOGERR, 1821 "error %d loading dma map for control areas", im.error); 1822 goto bad; 1823 } 1824 1825 isp->isp_rquest = base; 1826 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1827 isp->isp_result = base; 1828 if (IS_FC(isp)) { 1829 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1830 FCPARAM(isp)->isp_scratch = base; 1831 } 1832 ISP_LOCK(isp); 1833 return (0); 1834 1835 bad: 1836 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1837 bus_dma_tag_destroy(isp->isp_cdmat); 1838 free(isp->isp_xflist, M_DEVBUF); 1839 #ifdef ISP_TARGET_MODE 1840 free(isp->isp_tgtlist, M_DEVBUF); 1841 #endif 1842 free(pcs->dmaps, M_DEVBUF); 1843 ISP_LOCK(isp); 1844 isp->isp_rquest = NULL; 1845 return (1); 1846 } 1847 1848 typedef struct { 1849 ispsoftc_t *isp; 1850 void *cmd_token; 1851 void *rq; 1852 uint32_t *nxtip; 1853 uint32_t optr; 1854 int error; 1855 } mush_t; 1856 1857 #define MUSHERR_NOQENTRIES -2 1858 1859 #ifdef ISP_TARGET_MODE 1860 /* 1861 * We need to handle DMA for target mode differently from initiator mode. 1862 * 1863 * DMA mapping and construction and submission of CTIO Request Entries 1864 * and rendevous for completion are very tightly coupled because we start 1865 * out by knowing (per platform) how much data we have to move, but we 1866 * don't know, up front, how many DMA mapping segments will have to be used 1867 * cover that data, so we don't know how many CTIO Request Entries we 1868 * will end up using. Further, for performance reasons we may want to 1869 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1870 * 1871 * The standard vector still goes through isp_pci_dmasetup, but the callback 1872 * for the DMA mapping routines comes here instead with the whole transfer 1873 * mapped and a pointer to a partially filled in already allocated request 1874 * queue entry. We finish the job. 1875 */ 1876 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1877 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1878 1879 #define STATUS_WITH_DATA 1 1880 1881 static void 1882 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1883 { 1884 mush_t *mp; 1885 struct ccb_scsiio *csio; 1886 ispsoftc_t *isp; 1887 struct isp_pcisoftc *pcs; 1888 bus_dmamap_t *dp; 1889 ct_entry_t *cto, *qe; 1890 uint8_t scsi_status; 1891 uint32_t curi, nxti, handle; 1892 uint32_t sflags; 1893 int32_t resid; 1894 int nth_ctio, nctios, send_status; 1895 1896 mp = (mush_t *) arg; 1897 if (error) { 1898 mp->error = error; 1899 return; 1900 } 1901 1902 isp = mp->isp; 1903 csio = mp->cmd_token; 1904 cto = mp->rq; 1905 curi = isp->isp_reqidx; 1906 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1907 1908 cto->ct_xfrlen = 0; 1909 cto->ct_seg_count = 0; 1910 cto->ct_header.rqs_entry_count = 1; 1911 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1912 1913 if (nseg == 0) { 1914 cto->ct_header.rqs_seqno = 1; 1915 isp_prt(isp, ISP_LOGTDEBUG1, 1916 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1917 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1918 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1919 cto->ct_scsi_status, cto->ct_resid); 1920 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1921 isp_put_ctio(isp, cto, qe); 1922 return; 1923 } 1924 1925 nctios = nseg / ISP_RQDSEG; 1926 if (nseg % ISP_RQDSEG) { 1927 nctios++; 1928 } 1929 1930 /* 1931 * Save syshandle, and potentially any SCSI status, which we'll 1932 * reinsert on the last CTIO we're going to send. 1933 */ 1934 1935 handle = cto->ct_syshandle; 1936 cto->ct_syshandle = 0; 1937 cto->ct_header.rqs_seqno = 0; 1938 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1939 1940 if (send_status) { 1941 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1942 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1943 /* 1944 * Preserve residual. 1945 */ 1946 resid = cto->ct_resid; 1947 1948 /* 1949 * Save actual SCSI status. 1950 */ 1951 scsi_status = cto->ct_scsi_status; 1952 1953 #ifndef STATUS_WITH_DATA 1954 sflags |= CT_NO_DATA; 1955 /* 1956 * We can't do a status at the same time as a data CTIO, so 1957 * we need to synthesize an extra CTIO at this level. 1958 */ 1959 nctios++; 1960 #endif 1961 } else { 1962 sflags = scsi_status = resid = 0; 1963 } 1964 1965 cto->ct_resid = 0; 1966 cto->ct_scsi_status = 0; 1967 1968 pcs = (struct isp_pcisoftc *)isp; 1969 dp = &pcs->dmaps[isp_handle_index(handle)]; 1970 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1971 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1972 } else { 1973 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1974 } 1975 1976 nxti = *mp->nxtip; 1977 1978 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1979 int seglim; 1980 1981 seglim = nseg; 1982 if (seglim) { 1983 int seg; 1984 1985 if (seglim > ISP_RQDSEG) 1986 seglim = ISP_RQDSEG; 1987 1988 for (seg = 0; seg < seglim; seg++, nseg--) { 1989 /* 1990 * Unlike normal initiator commands, we don't 1991 * do any swizzling here. 1992 */ 1993 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1994 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1995 cto->ct_xfrlen += dm_segs->ds_len; 1996 dm_segs++; 1997 } 1998 cto->ct_seg_count = seg; 1999 } else { 2000 /* 2001 * This case should only happen when we're sending an 2002 * extra CTIO with final status. 2003 */ 2004 if (send_status == 0) { 2005 isp_prt(isp, ISP_LOGWARN, 2006 "tdma_mk ran out of segments"); 2007 mp->error = EINVAL; 2008 return; 2009 } 2010 } 2011 2012 /* 2013 * At this point, the fields ct_lun, ct_iid, ct_tagval, 2014 * ct_tagtype, and ct_timeout have been carried over 2015 * unchanged from what our caller had set. 2016 * 2017 * The dataseg fields and the seg_count fields we just got 2018 * through setting. The data direction we've preserved all 2019 * along and only clear it if we're now sending status. 2020 */ 2021 2022 if (nth_ctio == nctios - 1) { 2023 /* 2024 * We're the last in a sequence of CTIOs, so mark 2025 * this CTIO and save the handle to the CCB such that 2026 * when this CTIO completes we can free dma resources 2027 * and do whatever else we need to do to finish the 2028 * rest of the command. We *don't* give this to the 2029 * firmware to work on- the caller will do that. 2030 */ 2031 2032 cto->ct_syshandle = handle; 2033 cto->ct_header.rqs_seqno = 1; 2034 2035 if (send_status) { 2036 cto->ct_scsi_status = scsi_status; 2037 cto->ct_flags |= sflags; 2038 cto->ct_resid = resid; 2039 } 2040 if (send_status) { 2041 isp_prt(isp, ISP_LOGTDEBUG1, 2042 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 2043 "scsi status %x resid %d", 2044 cto->ct_fwhandle, csio->ccb_h.target_lun, 2045 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 2046 cto->ct_scsi_status, cto->ct_resid); 2047 } else { 2048 isp_prt(isp, ISP_LOGTDEBUG1, 2049 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 2050 cto->ct_fwhandle, csio->ccb_h.target_lun, 2051 cto->ct_iid, cto->ct_tag_val, 2052 cto->ct_flags); 2053 } 2054 isp_put_ctio(isp, cto, qe); 2055 ISP_TDQE(isp, "last tdma_mk", curi, cto); 2056 if (nctios > 1) { 2057 MEMORYBARRIER(isp, SYNC_REQUEST, 2058 curi, QENTRY_LEN); 2059 } 2060 } else { 2061 ct_entry_t *oqe = qe; 2062 2063 /* 2064 * Make sure syshandle fields are clean 2065 */ 2066 cto->ct_syshandle = 0; 2067 cto->ct_header.rqs_seqno = 0; 2068 2069 isp_prt(isp, ISP_LOGTDEBUG1, 2070 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 2071 cto->ct_fwhandle, csio->ccb_h.target_lun, 2072 cto->ct_iid, cto->ct_flags); 2073 2074 /* 2075 * Get a new CTIO 2076 */ 2077 qe = (ct_entry_t *) 2078 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2079 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 2080 if (nxti == mp->optr) { 2081 isp_prt(isp, ISP_LOGTDEBUG0, 2082 "Queue Overflow in tdma_mk"); 2083 mp->error = MUSHERR_NOQENTRIES; 2084 return; 2085 } 2086 2087 /* 2088 * Now that we're done with the old CTIO, 2089 * flush it out to the request queue. 2090 */ 2091 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 2092 isp_put_ctio(isp, cto, oqe); 2093 if (nth_ctio != 0) { 2094 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 2095 QENTRY_LEN); 2096 } 2097 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 2098 2099 /* 2100 * Reset some fields in the CTIO so we can reuse 2101 * for the next one we'll flush to the request 2102 * queue. 2103 */ 2104 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 2105 cto->ct_header.rqs_entry_count = 1; 2106 cto->ct_header.rqs_flags = 0; 2107 cto->ct_status = 0; 2108 cto->ct_scsi_status = 0; 2109 cto->ct_xfrlen = 0; 2110 cto->ct_resid = 0; 2111 cto->ct_seg_count = 0; 2112 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 2113 } 2114 } 2115 *mp->nxtip = nxti; 2116 } 2117 2118 /* 2119 * We don't have to do multiple CTIOs here. Instead, we can just do 2120 * continuation segments as needed. This greatly simplifies the code 2121 * improves performance. 2122 */ 2123 2124 static void 2125 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2126 { 2127 mush_t *mp; 2128 struct ccb_scsiio *csio; 2129 ispsoftc_t *isp; 2130 ct2_entry_t *cto, *qe; 2131 uint32_t curi, nxti; 2132 ispds_t *ds; 2133 ispds64_t *ds64; 2134 int segcnt, seglim; 2135 2136 mp = (mush_t *) arg; 2137 if (error) { 2138 mp->error = error; 2139 return; 2140 } 2141 2142 isp = mp->isp; 2143 csio = mp->cmd_token; 2144 cto = mp->rq; 2145 2146 curi = isp->isp_reqidx; 2147 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 2148 2149 if (nseg == 0) { 2150 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 2151 isp_prt(isp, ISP_LOGWARN, 2152 "dma2_tgt_fc, a status CTIO2 without MODE1 " 2153 "set (0x%x)", cto->ct_flags); 2154 mp->error = EINVAL; 2155 return; 2156 } 2157 /* 2158 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 2159 * flags to NO DATA and clear relative offset flags. 2160 * We preserve the ct_resid and the response area. 2161 */ 2162 cto->ct_header.rqs_seqno = 1; 2163 cto->ct_seg_count = 0; 2164 cto->ct_reloff = 0; 2165 isp_prt(isp, ISP_LOGTDEBUG1, 2166 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 2167 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 2168 cto->ct_iid, cto->ct_flags, cto->ct_status, 2169 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 2170 if (FCPARAM(isp)->isp_2klogin) { 2171 isp_put_ctio2e(isp, 2172 (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2173 } else { 2174 isp_put_ctio2(isp, cto, qe); 2175 } 2176 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 2177 return; 2178 } 2179 2180 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 2181 isp_prt(isp, ISP_LOGERR, 2182 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 2183 "(0x%x)", cto->ct_flags); 2184 mp->error = EINVAL; 2185 return; 2186 } 2187 2188 2189 nxti = *mp->nxtip; 2190 2191 /* 2192 * Check to see if we need to DAC addressing or not. 2193 * 2194 * Any address that's over the 4GB boundary causes this 2195 * to happen. 2196 */ 2197 segcnt = nseg; 2198 if (sizeof (bus_addr_t) > 4) { 2199 for (segcnt = 0; segcnt < nseg; segcnt++) { 2200 uint64_t addr = dm_segs[segcnt].ds_addr; 2201 if (addr >= 0x100000000LL) { 2202 break; 2203 } 2204 } 2205 } 2206 if (segcnt != nseg) { 2207 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3; 2208 seglim = ISP_RQDSEG_T3; 2209 ds64 = &cto->rsp.m0.u.ct_dataseg64[0]; 2210 ds = NULL; 2211 } else { 2212 seglim = ISP_RQDSEG_T2; 2213 ds64 = NULL; 2214 ds = &cto->rsp.m0.u.ct_dataseg[0]; 2215 } 2216 cto->ct_seg_count = 0; 2217 2218 /* 2219 * Set up the CTIO2 data segments. 2220 */ 2221 for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg; 2222 cto->ct_seg_count++, segcnt++) { 2223 if (ds64) { 2224 ds64->ds_basehi = 2225 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2226 ds64->ds_base = dm_segs[segcnt].ds_addr; 2227 ds64->ds_count = dm_segs[segcnt].ds_len; 2228 ds64++; 2229 } else { 2230 ds->ds_base = dm_segs[segcnt].ds_addr; 2231 ds->ds_count = dm_segs[segcnt].ds_len; 2232 ds++; 2233 } 2234 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2235 #if __FreeBSD_version < 500000 2236 isp_prt(isp, ISP_LOGTDEBUG1, 2237 "isp_send_ctio2: ent0[%d]0x%llx:%llu", 2238 cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr, 2239 (uint64_t)dm_segs[segcnt].ds_len); 2240 #else 2241 isp_prt(isp, ISP_LOGTDEBUG1, 2242 "isp_send_ctio2: ent0[%d]0x%jx:%ju", 2243 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr, 2244 (uintmax_t)dm_segs[segcnt].ds_len); 2245 #endif 2246 } 2247 2248 while (segcnt < nseg) { 2249 uint32_t curip; 2250 int seg; 2251 ispcontreq_t local, *crq = &local, *qep; 2252 2253 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2254 curip = nxti; 2255 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 2256 if (nxti == mp->optr) { 2257 ISP_UNLOCK(isp); 2258 isp_prt(isp, ISP_LOGTDEBUG0, 2259 "tdma_mkfc: request queue overflow"); 2260 mp->error = MUSHERR_NOQENTRIES; 2261 return; 2262 } 2263 cto->ct_header.rqs_entry_count++; 2264 MEMZERO((void *)crq, sizeof (*crq)); 2265 crq->req_header.rqs_entry_count = 1; 2266 if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { 2267 seglim = ISP_CDSEG64; 2268 ds = NULL; 2269 ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0]; 2270 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2271 } else { 2272 seglim = ISP_CDSEG; 2273 ds = &crq->req_dataseg[0]; 2274 ds64 = NULL; 2275 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2276 } 2277 for (seg = 0; segcnt < nseg && seg < seglim; 2278 segcnt++, seg++) { 2279 if (ds64) { 2280 ds64->ds_basehi = 2281 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2282 ds64->ds_base = dm_segs[segcnt].ds_addr; 2283 ds64->ds_count = dm_segs[segcnt].ds_len; 2284 ds64++; 2285 } else { 2286 ds->ds_base = dm_segs[segcnt].ds_addr; 2287 ds->ds_count = dm_segs[segcnt].ds_len; 2288 ds++; 2289 } 2290 #if __FreeBSD_version < 500000 2291 isp_prt(isp, ISP_LOGTDEBUG1, 2292 "isp_send_ctio2: ent%d[%d]%llx:%llu", 2293 cto->ct_header.rqs_entry_count-1, seg, 2294 (uint64_t)dm_segs[segcnt].ds_addr, 2295 (uint64_t)dm_segs[segcnt].ds_len); 2296 #else 2297 isp_prt(isp, ISP_LOGTDEBUG1, 2298 "isp_send_ctio2: ent%d[%d]%jx:%ju", 2299 cto->ct_header.rqs_entry_count-1, seg, 2300 (uintmax_t)dm_segs[segcnt].ds_addr, 2301 (uintmax_t)dm_segs[segcnt].ds_len); 2302 #endif 2303 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2304 cto->ct_seg_count++; 2305 } 2306 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 2307 isp_put_cont_req(isp, crq, qep); 2308 ISP_TDQE(isp, "cont entry", curi, qep); 2309 } 2310 2311 /* 2312 * No do final twiddling for the CTIO itself. 2313 */ 2314 cto->ct_header.rqs_seqno = 1; 2315 isp_prt(isp, ISP_LOGTDEBUG1, 2316 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 2317 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 2318 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 2319 cto->ct_resid); 2320 if (FCPARAM(isp)->isp_2klogin) { 2321 isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2322 } else { 2323 isp_put_ctio2(isp, cto, qe); 2324 } 2325 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 2326 *mp->nxtip = nxti; 2327 } 2328 #endif 2329 2330 static void dma_2400(void *, bus_dma_segment_t *, int, int); 2331 static void dma2_a64(void *, bus_dma_segment_t *, int, int); 2332 static void dma2(void *, bus_dma_segment_t *, int, int); 2333 2334 static void 2335 dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2336 { 2337 mush_t *mp; 2338 ispsoftc_t *isp; 2339 struct ccb_scsiio *csio; 2340 struct isp_pcisoftc *pcs; 2341 bus_dmamap_t *dp; 2342 bus_dma_segment_t *eseg; 2343 ispreqt7_t *rq; 2344 int seglim, datalen; 2345 uint32_t nxti; 2346 2347 mp = (mush_t *) arg; 2348 if (error) { 2349 mp->error = error; 2350 return; 2351 } 2352 2353 if (nseg < 1) { 2354 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2355 mp->error = EFAULT; 2356 return; 2357 } 2358 2359 csio = mp->cmd_token; 2360 isp = mp->isp; 2361 rq = mp->rq; 2362 pcs = (struct isp_pcisoftc *)mp->isp; 2363 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2364 nxti = *mp->nxtip; 2365 2366 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2367 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2368 } else { 2369 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2370 } 2371 datalen = XS_XFRLEN(csio); 2372 2373 /* 2374 * We're passed an initial partially filled in entry that 2375 * has most fields filled in except for data transfer 2376 * related values. 2377 * 2378 * Our job is to fill in the initial request queue entry and 2379 * then to start allocating and filling in continuation entries 2380 * until we've covered the entire transfer. 2381 */ 2382 2383 rq->req_header.rqs_entry_type = RQSTYPE_T7RQS; 2384 rq->req_dl = datalen; 2385 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2386 rq->req_alen_datadir = 0x2; 2387 } else { 2388 rq->req_alen_datadir = 0x1; 2389 } 2390 2391 eseg = dm_segs + nseg; 2392 2393 rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr); 2394 rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr); 2395 rq->req_dataseg.ds_count = dm_segs->ds_len; 2396 2397 datalen -= dm_segs->ds_len; 2398 2399 dm_segs++; 2400 rq->req_seg_count++; 2401 2402 while (datalen > 0 && dm_segs != eseg) { 2403 uint32_t onxti; 2404 ispcontreq64_t local, *crq = &local, *cqe; 2405 2406 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2407 onxti = nxti; 2408 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2409 if (nxti == mp->optr) { 2410 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2411 mp->error = MUSHERR_NOQENTRIES; 2412 return; 2413 } 2414 rq->req_header.rqs_entry_count++; 2415 MEMZERO((void *)crq, sizeof (*crq)); 2416 crq->req_header.rqs_entry_count = 1; 2417 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2418 2419 seglim = 0; 2420 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2421 crq->req_dataseg[seglim].ds_base = 2422 DMA_LO32(dm_segs->ds_addr); 2423 crq->req_dataseg[seglim].ds_basehi = 2424 DMA_HI32(dm_segs->ds_addr); 2425 crq->req_dataseg[seglim].ds_count = 2426 dm_segs->ds_len; 2427 rq->req_seg_count++; 2428 dm_segs++; 2429 seglim++; 2430 datalen -= dm_segs->ds_len; 2431 } 2432 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2433 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2434 } 2435 isp_put_cont64_req(isp, crq, cqe); 2436 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2437 } 2438 *mp->nxtip = nxti; 2439 } 2440 2441 static void 2442 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2443 { 2444 mush_t *mp; 2445 ispsoftc_t *isp; 2446 struct ccb_scsiio *csio; 2447 struct isp_pcisoftc *pcs; 2448 bus_dmamap_t *dp; 2449 bus_dma_segment_t *eseg; 2450 ispreq64_t *rq; 2451 int seglim, datalen; 2452 uint32_t nxti; 2453 2454 mp = (mush_t *) arg; 2455 if (error) { 2456 mp->error = error; 2457 return; 2458 } 2459 2460 if (nseg < 1) { 2461 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2462 mp->error = EFAULT; 2463 return; 2464 } 2465 csio = mp->cmd_token; 2466 isp = mp->isp; 2467 rq = mp->rq; 2468 pcs = (struct isp_pcisoftc *)mp->isp; 2469 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2470 nxti = *mp->nxtip; 2471 2472 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2473 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2474 } else { 2475 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2476 } 2477 datalen = XS_XFRLEN(csio); 2478 2479 /* 2480 * We're passed an initial partially filled in entry that 2481 * has most fields filled in except for data transfer 2482 * related values. 2483 * 2484 * Our job is to fill in the initial request queue entry and 2485 * then to start allocating and filling in continuation entries 2486 * until we've covered the entire transfer. 2487 */ 2488 2489 if (IS_FC(isp)) { 2490 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 2491 seglim = ISP_RQDSEG_T3; 2492 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 2493 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2494 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2495 } else { 2496 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2497 } 2498 } else { 2499 rq->req_header.rqs_entry_type = RQSTYPE_A64; 2500 if (csio->cdb_len > 12) { 2501 seglim = 0; 2502 } else { 2503 seglim = ISP_RQDSEG_A64; 2504 } 2505 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2506 rq->req_flags |= REQFLAG_DATA_IN; 2507 } else { 2508 rq->req_flags |= REQFLAG_DATA_OUT; 2509 } 2510 } 2511 2512 eseg = dm_segs + nseg; 2513 2514 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2515 if (IS_FC(isp)) { 2516 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 2517 rq3->req_dataseg[rq3->req_seg_count].ds_base = 2518 DMA_LO32(dm_segs->ds_addr); 2519 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 2520 DMA_HI32(dm_segs->ds_addr); 2521 rq3->req_dataseg[rq3->req_seg_count].ds_count = 2522 dm_segs->ds_len; 2523 } else { 2524 rq->req_dataseg[rq->req_seg_count].ds_base = 2525 DMA_LO32(dm_segs->ds_addr); 2526 rq->req_dataseg[rq->req_seg_count].ds_basehi = 2527 DMA_HI32(dm_segs->ds_addr); 2528 rq->req_dataseg[rq->req_seg_count].ds_count = 2529 dm_segs->ds_len; 2530 } 2531 datalen -= dm_segs->ds_len; 2532 rq->req_seg_count++; 2533 dm_segs++; 2534 } 2535 2536 while (datalen > 0 && dm_segs != eseg) { 2537 uint32_t onxti; 2538 ispcontreq64_t local, *crq = &local, *cqe; 2539 2540 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2541 onxti = nxti; 2542 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2543 if (nxti == mp->optr) { 2544 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2545 mp->error = MUSHERR_NOQENTRIES; 2546 return; 2547 } 2548 rq->req_header.rqs_entry_count++; 2549 MEMZERO((void *)crq, sizeof (*crq)); 2550 crq->req_header.rqs_entry_count = 1; 2551 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2552 2553 seglim = 0; 2554 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2555 crq->req_dataseg[seglim].ds_base = 2556 DMA_LO32(dm_segs->ds_addr); 2557 crq->req_dataseg[seglim].ds_basehi = 2558 DMA_HI32(dm_segs->ds_addr); 2559 crq->req_dataseg[seglim].ds_count = 2560 dm_segs->ds_len; 2561 rq->req_seg_count++; 2562 dm_segs++; 2563 seglim++; 2564 datalen -= dm_segs->ds_len; 2565 } 2566 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2567 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2568 } 2569 isp_put_cont64_req(isp, crq, cqe); 2570 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2571 } 2572 *mp->nxtip = nxti; 2573 } 2574 2575 static void 2576 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2577 { 2578 mush_t *mp; 2579 ispsoftc_t *isp; 2580 struct ccb_scsiio *csio; 2581 struct isp_pcisoftc *pcs; 2582 bus_dmamap_t *dp; 2583 bus_dma_segment_t *eseg; 2584 ispreq_t *rq; 2585 int seglim, datalen; 2586 uint32_t nxti; 2587 2588 mp = (mush_t *) arg; 2589 if (error) { 2590 mp->error = error; 2591 return; 2592 } 2593 2594 if (nseg < 1) { 2595 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2596 mp->error = EFAULT; 2597 return; 2598 } 2599 csio = mp->cmd_token; 2600 isp = mp->isp; 2601 rq = mp->rq; 2602 pcs = (struct isp_pcisoftc *)mp->isp; 2603 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2604 nxti = *mp->nxtip; 2605 2606 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2607 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2608 } else { 2609 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2610 } 2611 2612 datalen = XS_XFRLEN(csio); 2613 2614 /* 2615 * We're passed an initial partially filled in entry that 2616 * has most fields filled in except for data transfer 2617 * related values. 2618 * 2619 * Our job is to fill in the initial request queue entry and 2620 * then to start allocating and filling in continuation entries 2621 * until we've covered the entire transfer. 2622 */ 2623 2624 if (IS_FC(isp)) { 2625 seglim = ISP_RQDSEG_T2; 2626 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 2627 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2628 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2629 } else { 2630 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2631 } 2632 } else { 2633 if (csio->cdb_len > 12) { 2634 seglim = 0; 2635 } else { 2636 seglim = ISP_RQDSEG; 2637 } 2638 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2639 rq->req_flags |= REQFLAG_DATA_IN; 2640 } else { 2641 rq->req_flags |= REQFLAG_DATA_OUT; 2642 } 2643 } 2644 2645 eseg = dm_segs + nseg; 2646 2647 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2648 if (IS_FC(isp)) { 2649 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 2650 rq2->req_dataseg[rq2->req_seg_count].ds_base = 2651 DMA_LO32(dm_segs->ds_addr); 2652 rq2->req_dataseg[rq2->req_seg_count].ds_count = 2653 dm_segs->ds_len; 2654 } else { 2655 rq->req_dataseg[rq->req_seg_count].ds_base = 2656 DMA_LO32(dm_segs->ds_addr); 2657 rq->req_dataseg[rq->req_seg_count].ds_count = 2658 dm_segs->ds_len; 2659 } 2660 datalen -= dm_segs->ds_len; 2661 rq->req_seg_count++; 2662 dm_segs++; 2663 } 2664 2665 while (datalen > 0 && dm_segs != eseg) { 2666 uint32_t onxti; 2667 ispcontreq_t local, *crq = &local, *cqe; 2668 2669 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2670 onxti = nxti; 2671 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2672 if (nxti == mp->optr) { 2673 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2674 mp->error = MUSHERR_NOQENTRIES; 2675 return; 2676 } 2677 rq->req_header.rqs_entry_count++; 2678 MEMZERO((void *)crq, sizeof (*crq)); 2679 crq->req_header.rqs_entry_count = 1; 2680 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2681 2682 seglim = 0; 2683 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 2684 crq->req_dataseg[seglim].ds_base = 2685 DMA_LO32(dm_segs->ds_addr); 2686 crq->req_dataseg[seglim].ds_count = 2687 dm_segs->ds_len; 2688 rq->req_seg_count++; 2689 dm_segs++; 2690 seglim++; 2691 datalen -= dm_segs->ds_len; 2692 } 2693 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2694 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2695 } 2696 isp_put_cont_req(isp, crq, cqe); 2697 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2698 } 2699 *mp->nxtip = nxti; 2700 } 2701 2702 /* 2703 * We enter with ISP_LOCK held 2704 */ 2705 static int 2706 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq, 2707 uint32_t *nxtip, uint32_t optr) 2708 { 2709 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2710 ispreq_t *qep; 2711 bus_dmamap_t *dp = NULL; 2712 mush_t mush, *mp; 2713 void (*eptr)(void *, bus_dma_segment_t *, int, int); 2714 2715 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 2716 #ifdef ISP_TARGET_MODE 2717 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 2718 if (IS_FC(isp)) { 2719 eptr = tdma_mkfc; 2720 } else { 2721 eptr = tdma_mk; 2722 } 2723 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2724 (csio->dxfer_len == 0)) { 2725 mp = &mush; 2726 mp->isp = isp; 2727 mp->cmd_token = csio; 2728 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 2729 mp->nxtip = nxtip; 2730 mp->optr = optr; 2731 mp->error = 0; 2732 ISPLOCK_2_CAMLOCK(isp); 2733 (*eptr)(mp, NULL, 0, 0); 2734 CAMLOCK_2_ISPLOCK(isp); 2735 goto mbxsync; 2736 } 2737 } else 2738 #endif 2739 if (IS_24XX(isp)) { 2740 eptr = dma_2400; 2741 } else if (sizeof (bus_addr_t) > 4) { 2742 eptr = dma2_a64; 2743 } else { 2744 eptr = dma2; 2745 } 2746 2747 2748 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2749 (csio->dxfer_len == 0)) { 2750 rq->req_seg_count = 1; 2751 goto mbxsync; 2752 } 2753 2754 /* 2755 * Do a virtual grapevine step to collect info for 2756 * the callback dma allocation that we have to use... 2757 */ 2758 mp = &mush; 2759 mp->isp = isp; 2760 mp->cmd_token = csio; 2761 mp->rq = rq; 2762 mp->nxtip = nxtip; 2763 mp->optr = optr; 2764 mp->error = 0; 2765 2766 ISPLOCK_2_CAMLOCK(isp); 2767 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 2768 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 2769 int error, s; 2770 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2771 s = splsoftvm(); 2772 error = bus_dmamap_load(pcs->dmat, *dp, 2773 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 2774 if (error == EINPROGRESS) { 2775 bus_dmamap_unload(pcs->dmat, *dp); 2776 mp->error = EINVAL; 2777 isp_prt(isp, ISP_LOGERR, 2778 "deferred dma allocation not supported"); 2779 } else if (error && mp->error == 0) { 2780 #ifdef DIAGNOSTIC 2781 isp_prt(isp, ISP_LOGERR, 2782 "error %d in dma mapping code", error); 2783 #endif 2784 mp->error = error; 2785 } 2786 splx(s); 2787 } else { 2788 /* Pointer to physical buffer */ 2789 struct bus_dma_segment seg; 2790 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 2791 seg.ds_len = csio->dxfer_len; 2792 (*eptr)(mp, &seg, 1, 0); 2793 } 2794 } else { 2795 struct bus_dma_segment *segs; 2796 2797 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2798 isp_prt(isp, ISP_LOGERR, 2799 "Physical segment pointers unsupported"); 2800 mp->error = EINVAL; 2801 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2802 isp_prt(isp, ISP_LOGERR, 2803 "Virtual segment addresses unsupported"); 2804 mp->error = EINVAL; 2805 } else { 2806 /* Just use the segments provided */ 2807 segs = (struct bus_dma_segment *) csio->data_ptr; 2808 (*eptr)(mp, segs, csio->sglist_cnt, 0); 2809 } 2810 } 2811 CAMLOCK_2_ISPLOCK(isp); 2812 if (mp->error) { 2813 int retval = CMD_COMPLETE; 2814 if (mp->error == MUSHERR_NOQENTRIES) { 2815 retval = CMD_EAGAIN; 2816 } else if (mp->error == EFBIG) { 2817 XS_SETERR(csio, CAM_REQ_TOO_BIG); 2818 } else if (mp->error == EINVAL) { 2819 XS_SETERR(csio, CAM_REQ_INVALID); 2820 } else { 2821 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 2822 } 2823 return (retval); 2824 } 2825 mbxsync: 2826 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2827 isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq); 2828 } 2829 switch (rq->req_header.rqs_entry_type) { 2830 case RQSTYPE_REQUEST: 2831 isp_put_request(isp, rq, qep); 2832 break; 2833 case RQSTYPE_CMDONLY: 2834 isp_put_extended_request(isp, (ispextreq_t *)rq, 2835 (ispextreq_t *)qep); 2836 break; 2837 case RQSTYPE_T2RQS: 2838 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2839 break; 2840 case RQSTYPE_A64: 2841 case RQSTYPE_T3RQS: 2842 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2843 break; 2844 case RQSTYPE_T7RQS: 2845 isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep); 2846 break; 2847 } 2848 return (CMD_QUEUED); 2849 } 2850 2851 static void 2852 isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle) 2853 { 2854 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2855 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 2856 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2857 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 2858 } else { 2859 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 2860 } 2861 bus_dmamap_unload(pcs->dmat, *dp); 2862 } 2863 2864 2865 static void 2866 isp_pci_reset1(ispsoftc_t *isp) 2867 { 2868 if (!IS_24XX(isp)) { 2869 /* Make sure the BIOS is disabled */ 2870 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2871 } 2872 /* and enable interrupts */ 2873 ISP_ENABLE_INTS(isp); 2874 } 2875 2876 static void 2877 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) 2878 { 2879 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2880 if (msg) 2881 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2882 else 2883 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2884 if (IS_SCSI(isp)) 2885 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2886 else 2887 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2888 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2889 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2890 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2891 2892 2893 if (IS_SCSI(isp)) { 2894 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2895 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2896 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2897 ISP_READ(isp, CDMA_FIFO_STS)); 2898 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2899 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2900 ISP_READ(isp, DDMA_FIFO_STS)); 2901 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2902 ISP_READ(isp, SXP_INTERRUPT), 2903 ISP_READ(isp, SXP_GROSS_ERR), 2904 ISP_READ(isp, SXP_PINS_CTRL)); 2905 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2906 } 2907 printf(" mbox regs: %x %x %x %x %x\n", 2908 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2909 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2910 ISP_READ(isp, OUTMAILBOX4)); 2911 printf(" PCI Status Command/Status=%x\n", 2912 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2913 } 2914