/*-
 *
 * Copyright (c) 1997-2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#if __FreeBSD_version >= 700000
#include <sys/linker.h>
#include <sys/firmware.h>
#endif
#include <sys/bus.h>
#if __FreeBSD_version < 500000
#include <pci/pcireg.h>
#include <pci/pcivar.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#else
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

#if __FreeBSD_version < 500000
#define	BUS_PROBE_DEFAULT	0
#endif

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static int
isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int
isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int
isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int
isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t);
static void
isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t);


static void isp_pci_reset0(ispsoftc_t *);
static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef	PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE		0x0400
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6322
#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif


#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
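
/*
 * Illustrative note (an editorial addition): each composite ID above packs
 * the product ID into the upper 16 bits and the QLogic vendor ID into the
 * lower 16 bits, e.g.
 *
 *	PCI_QLOGIC_ISP2300 == (0x2300 << 16) | 0x1077 == 0x23001077
 *
 * which matches the value isp_pci_probe() assembles from pci_get_device()
 * and pci_get_vendor(), so a single 32-bit compare identifies a card.
 */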

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


struct isp_pcisoftc {
	ispsoftc_t			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	DEVMETHOD(device_detach,	isp_pci_detach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
#if __FreeBSD_version < 700000
extern ispfwfunc *isp_get_firmware_p;
#endif

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}
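
/*
 * Illustrative note (an assumption, not taken from this file): the
 * firmware images that the attach code looks up with firmware_get()
 * below are supplied by the ispfw(4) module set, which can be preloaded
 * from the loader, e.g. in /boot/loader.conf:
 *
 *	ispfw_load="YES"
 */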

#if __FreeBSD_version < 500000
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
	uint64_t wwn;
	int bitmap, unit;

	callout_handle_init(&isp->isp_osinfo.ldt);
	callout_handle_init(&isp->isp_osinfo.gdt);

	unit = device_get_unit(dev);
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			isp->isp_osinfo.disabled = 1;
			return;
		}
	}

	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
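	/*
	 * Illustrative example (an assumption, not from this file): the
	 * getenv_int() tunables above are per-unit bit masks taken from
	 * the kernel environment, so a /boot/loader.conf line such as
	 *
	 *	isp_disable="0x2"
	 *
	 * would disable isp1 (bit 1) while leaving isp0 alone.
	 */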
	if (getenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (getenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	bitmap = 0;
	(void) getenv_int("isp_debug", &bitmap);
	if (bitmap) {
		isp->isp_dblev = bitmap;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}

	bitmap = 0;
	(void) getenv_int("isp_fabric_hysteresis", &bitmap);
	if (bitmap >= 0 && bitmap < 256) {
		isp->isp_osinfo.hysteresis = bitmap;
	} else {
		isp->isp_osinfo.hysteresis = isp_fabric_hysteresis;
	}

	bitmap = 0;
	(void) getenv_int("isp_loop_down_limit", &bitmap);
	if (bitmap >= 0 && bitmap < 0xffff) {
		isp->isp_osinfo.loop_down_limit = bitmap;
	} else {
		isp->isp_osinfo.loop_down_limit = isp_loop_down_limit;
	}

	bitmap = 0;
	(void) getenv_int("isp_gone_device_time", &bitmap);
	if (bitmap >= 0 && bitmap < 0xffff) {
		isp->isp_osinfo.gone_device_time = bitmap;
	} else {
		isp->isp_osinfo.gone_device_time = isp_gone_device_time;
	}


#ifdef ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    malloc(amt, M_DEVBUF, M_WAITOK);
				memset(FCPARAM(isp)->isp_dump_data, 0, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif
	bitmap = 0;
	if (getenv_int("role", &bitmap)) {
		isp->isp_role = bitmap;
	} else {
		isp->isp_role = ISP_DEFAULT_ROLES;
	}
}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int bitmap;
	int unit = device_get_unit(dev);

	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			*m1 = PCIM_CMD_MEMEN;
			*m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			*m1 = PCIM_CMD_PORTEN;
			*m2 = PCIM_CMD_MEMEN;
		}
	}
}
#else
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
	int tval;
	const char *sptr;

	callout_handle_init(&isp->isp_osinfo.ldt);
	callout_handle_init(&isp->isp_osinfo.gdt);

	/*
	 * Figure out if we're supposed to skip this one.
	 */

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "disabled at user request\n");
		isp->isp_osinfo.disabled = 1;
		return;
	}

	tval = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &tval) == 0 && tval != -1) {
		tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
		isp->isp_role = tval;
		device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
	} else {
#ifdef ISP_TARGET_MODE
		isp->isp_role = ISP_ROLE_TARGET;
#else
		isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		if (IS_FC(isp)) {
			isp->isp_osinfo.default_id = 109;
		} else {
			isp->isp_osinfo.default_id = 7;
		}
	}

	/*
	 * Set up logging levels.
	 */
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}

	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "hysteresis", &tval);
	if (tval >= 0 && tval < 256) {
		isp->isp_osinfo.hysteresis = tval;
	} else {
		isp->isp_osinfo.hysteresis = isp_fabric_hysteresis;
	}

	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "loop_down_limit", &tval);
	if (tval >= 0 && tval < 0xffff) {
		isp->isp_osinfo.loop_down_limit = tval;
	} else {
		isp->isp_osinfo.loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "gone_device_time", &tval);
	if (tval >= 0 && tval < 0xffff) {
		isp->isp_osinfo.gone_device_time = tval;
	} else {
		isp->isp_osinfo.gone_device_time = isp_gone_device_time;
	}
}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int tval;
	/*
	 * Which should we try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
	 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}
#endif

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int rtp, rgd, iqd, m1, m2;
	uint32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp = NULL;
	struct ispmdvec *mdvp;
#if __FreeBSD_version >= 500000
	int locksetup = 0;
#endif

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));
	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;

	/*
	 * Set and Get Generic Options
	 */
	isp_get_options(dev, isp);

	/*
	 * Check to see if options have us disabled
	 */
	if (isp->isp_osinfo.disabled) {
		/*
		 * But return zero to preserve unit numbering
		 */
		return (0);
	}

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2322;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) {
		mdvp = &mdvec_2400;
		basetype = ISP_HA_FC_2400;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2400_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_dev = dev;

#if __FreeBSD_version >= 700000
	/*
	 * Try and find firmware for this device.
	 */
	{
		char fwname[32];
		unsigned int did = pci_get_device(dev);

		/*
		 * Map a few pci ids to fw names
		 */
		switch (did) {
		case PCI_PRODUCT_QLOGIC_ISP1020:
			did = 0x1040;
			break;
		case PCI_PRODUCT_QLOGIC_ISP1240:
			did = 0x1080;
			break;
		case PCI_PRODUCT_QLOGIC_ISP10160:
		case PCI_PRODUCT_QLOGIC_ISP12160:
			did = 0x12160;
			break;
		case PCI_PRODUCT_QLOGIC_ISP6312:
		case PCI_PRODUCT_QLOGIC_ISP2312:
			did = 0x2300;
			break;
		case PCI_PRODUCT_QLOGIC_ISP6322:
			did = 0x2322;
			break;
		case PCI_PRODUCT_QLOGIC_ISP2422:
			did = 0x2400;
			break;
		default:
			break;
		}

		isp->isp_osinfo.fw = NULL;
		if (isp->isp_role & ISP_ROLE_TARGET) {
			snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
			isp->isp_osinfo.fw = firmware_get(fwname);
		}
		if (isp->isp_osinfo.fw == NULL) {
			snprintf(fwname, sizeof (fwname), "isp_%04x", did);
			isp->isp_osinfo.fw = firmware_get(fwname);
		}
		if (isp->isp_osinfo.fw != NULL) {
			union {
				const void *fred;
				uint16_t *bob;
			} u;
			u.fred = isp->isp_osinfo.fw->data;
			isp->isp_mdvec->dv_ispfw = u.bob;
		}
	}
#else
	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}
#endif

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;

	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}

	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

#ifdef	WE_KNEW_WHAT_WE_WERE_DOING
	if (IS_24XX(isp)) {
		int reg;

		cmd &= ~PCIM_CMD_INTX_DISABLE;

		/*
		 * Is this a PCI-X card? If so, set max read byte count.
		 */
		if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
			uint16_t pxcmd;
			reg += 2;

			pxcmd = pci_read_config(dev, reg, 2);
			pxcmd &= ~0xc;
			pxcmd |= 0x8;
			pci_write_config(dev, reg, 2, pxcmd);
		}

		/*
		 * Is this a PCI Express card? If so, set max read byte count.
		 */
		if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
			uint16_t pectl;

			reg += 0x8;
			pectl = pci_read_config(dev, reg, 2);
			pectl &= ~0x7000;
			pectl |= 0x4000;
			pci_write_config(dev, reg, 2, pectl);
		}
	}
#else
	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
#endif

	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

#if __FreeBSD_version >= 500000
	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;
#endif

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);

	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

#if __FreeBSD_version >= 500000
	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
#endif

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}


	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param) {
#ifdef ISP_FW_CRASH_DUMP
			if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
				free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
			}
#endif
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		}
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		return (ENXIO);
	}
	isp = (ispsoftc_t *) pcs;
	ISP_DISABLE_INTS(isp);
	return (0);
}

static void
isp_pci_intr(void *arg)
{
	ispsoftc_t *isp = arg;
	uint32_t isr;
	uint16_t sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
#define	BXR4(pcs, off)		\
	bus_space_read_4(pcs->pci_st, pcs->pci_sh, off)
#define	BXW4(pcs, off, v)	\
	bus_space_write_4(pcs->pci_st, pcs->pci_sh, off, v)

static __inline int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t hccr;
	uint32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR,
			    "RISC paused at interrupt (%x->%x)", hccr,
			    ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n",
			    r2hisr);
		}
		return (0);
	}
}

static int
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t r2hisr;

	r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
	case ISP2400R2HST_ROM_MBX_OK:
	case ISP2400R2HST_ROM_MBX_FAIL:
	case ISP2400R2HST_MBX_OK:
	case ISP2400R2HST_MBX_FAIL:
	case ISP2400R2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISP2400R2HST_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RQST_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		return (0);
	}
}

static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;
	volatile int junk;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		if (IS_2100(isp)) {
			junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		}
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (IS_2100(isp)) {
		junk = BXR2(pcs, IspVirt2Off(isp, regoff));
	}
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
		if (IS_2100(isp)) {
			junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		}
	}
}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;
	volatile int junk;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	junk = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(pcs, IspVirt2Off(pcs, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}


	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_RQINP:
	case BIU2400_PRI_RSPINP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_REQINP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int block = regoff & _BLK_REG_MASK;
	volatile int junk;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(pcs, IspVirt2Off(pcs, regoff), val);
		junk = BXR2(pcs, IspVirt2Off(pcs, regoff));
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
		    regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_RQINP:
	case BIU2400_PRI_RSPINP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_REQINP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(pcs, IspVirt2Off(pcs, regoff), val);
		junk = BXR4(pcs, IspVirt2Off(pcs, regoff));
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
		break;
	}
}


struct imush {
	ispsoftc_t *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		ispsoftc_t *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	uint32_t len;
	int i, error, ns;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	if (isp->isp_maxcmds == 0) {
		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
		return (1);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = (bus_size_t) (1ULL << 32);
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1 << 24);
	}

	/*
	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
	 */
#ifdef ISP_TARGET_MODE
	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
		return (1);
	}
#endif

	ISP_UNLOCK(isp);
	if (isp_dma_tag_create(BUS_DMA_ROOTARG(pcs->pci_dev), 1, slim, llim,
	    hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
	    &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
#ifdef ISP_TARGET_MODE
	len = sizeof (void **) * isp->isp_maxcmds;
	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		ISP_LOCK(isp);
		return (1);
	}
#endif
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}
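	/*
	 * Illustrative layout (an editorial sketch, not from the original
	 * source): imc() above carves the single control-space allocation
	 * made below into, in order,
	 *
	 *	base                                     request queue
	 *	+ ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp))  result queue
	 *	+ ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp))  FC scratch (FC cards)
	 */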
request, result queues, plus FC scratch area. 1777 */ 1778 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1779 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1780 if (IS_FC(isp)) { 1781 len += ISP2100_SCRLEN; 1782 } 1783 1784 ns = (len / PAGE_SIZE) + 1; 1785 /* 1786 * Create a tag for the control spaces- force it to within 32 bits. 1787 */ 1788 if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim, 1789 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1790 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) { 1791 isp_prt(isp, ISP_LOGERR, 1792 "cannot create a dma tag for control spaces"); 1793 free(pcs->dmaps, M_DEVBUF); 1794 free(isp->isp_xflist, M_DEVBUF); 1795 #ifdef ISP_TARGET_MODE 1796 free(isp->isp_tgtlist, M_DEVBUF); 1797 #endif 1798 ISP_LOCK(isp); 1799 return (1); 1800 } 1801 1802 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1803 &isp->isp_cdmap) != 0) { 1804 isp_prt(isp, ISP_LOGERR, 1805 "cannot allocate %d bytes of CCB memory", len); 1806 bus_dma_tag_destroy(isp->isp_cdmat); 1807 free(isp->isp_xflist, M_DEVBUF); 1808 #ifdef ISP_TARGET_MODE 1809 free(isp->isp_tgtlist, M_DEVBUF); 1810 #endif 1811 free(pcs->dmaps, M_DEVBUF); 1812 ISP_LOCK(isp); 1813 return (1); 1814 } 1815 1816 for (i = 0; i < isp->isp_maxcmds; i++) { 1817 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1818 if (error) { 1819 isp_prt(isp, ISP_LOGERR, 1820 "error %d creating per-cmd DMA maps", error); 1821 while (--i >= 0) { 1822 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1823 } 1824 goto bad; 1825 } 1826 } 1827 1828 im.isp = isp; 1829 im.error = 0; 1830 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1831 if (im.error) { 1832 isp_prt(isp, ISP_LOGERR, 1833 "error %d loading dma map for control areas", im.error); 1834 goto bad; 1835 } 1836 1837 isp->isp_rquest = base; 1838 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1839 isp->isp_result = base; 1840 if (IS_FC(isp)) { 1841 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1842 FCPARAM(isp)->isp_scratch = base; 1843 } 1844 ISP_LOCK(isp); 1845 return (0); 1846 1847 bad: 1848 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1849 bus_dma_tag_destroy(isp->isp_cdmat); 1850 free(isp->isp_xflist, M_DEVBUF); 1851 #ifdef ISP_TARGET_MODE 1852 free(isp->isp_tgtlist, M_DEVBUF); 1853 #endif 1854 free(pcs->dmaps, M_DEVBUF); 1855 ISP_LOCK(isp); 1856 isp->isp_rquest = NULL; 1857 return (1); 1858 } 1859 1860 typedef struct { 1861 ispsoftc_t *isp; 1862 void *cmd_token; 1863 void *rq; 1864 uint32_t *nxtip; 1865 uint32_t optr; 1866 int error; 1867 } mush_t; 1868 1869 #define MUSHERR_NOQENTRIES -2 1870 1871 #ifdef ISP_TARGET_MODE 1872 /* 1873 * We need to handle DMA for target mode differently from initiator mode. 1874 * 1875 * DMA mapping and construction and submission of CTIO Request Entries 1876 * and rendevous for completion are very tightly coupled because we start 1877 * out by knowing (per platform) how much data we have to move, but we 1878 * don't know, up front, how many DMA mapping segments will have to be used 1879 * cover that data, so we don't know how many CTIO Request Entries we 1880 * will end up using. Further, for performance reasons we may want to 1881 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1882 * 1883 * The standard vector still goes through isp_pci_dmasetup, but the callback 1884 * for the DMA mapping routines comes here instead with the whole transfer 1885 * mapped and a pointer to a partially filled in already allocated request 1886 * queue entry. We finish the job. 
1887 */ 1888 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1889 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1890 1891 #define STATUS_WITH_DATA 1 1892 1893 static void 1894 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1895 { 1896 mush_t *mp; 1897 struct ccb_scsiio *csio; 1898 ispsoftc_t *isp; 1899 struct isp_pcisoftc *pcs; 1900 bus_dmamap_t *dp; 1901 ct_entry_t *cto, *qe; 1902 uint8_t scsi_status; 1903 uint32_t curi, nxti, handle; 1904 uint32_t sflags; 1905 int32_t resid; 1906 int nth_ctio, nctios, send_status; 1907 1908 mp = (mush_t *) arg; 1909 if (error) { 1910 mp->error = error; 1911 return; 1912 } 1913 1914 isp = mp->isp; 1915 csio = mp->cmd_token; 1916 cto = mp->rq; 1917 curi = isp->isp_reqidx; 1918 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1919 1920 cto->ct_xfrlen = 0; 1921 cto->ct_seg_count = 0; 1922 cto->ct_header.rqs_entry_count = 1; 1923 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1924 1925 if (nseg == 0) { 1926 cto->ct_header.rqs_seqno = 1; 1927 isp_prt(isp, ISP_LOGTDEBUG1, 1928 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1929 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1930 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1931 cto->ct_scsi_status, cto->ct_resid); 1932 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1933 isp_put_ctio(isp, cto, qe); 1934 return; 1935 } 1936 1937 nctios = nseg / ISP_RQDSEG; 1938 if (nseg % ISP_RQDSEG) { 1939 nctios++; 1940 } 1941 1942 /* 1943 * Save syshandle, and potentially any SCSI status, which we'll 1944 * reinsert on the last CTIO we're going to send. 1945 */ 1946 1947 handle = cto->ct_syshandle; 1948 cto->ct_syshandle = 0; 1949 cto->ct_header.rqs_seqno = 0; 1950 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1951 1952 if (send_status) { 1953 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1954 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1955 /* 1956 * Preserve residual. 1957 */ 1958 resid = cto->ct_resid; 1959 1960 /* 1961 * Save actual SCSI status. 1962 */ 1963 scsi_status = cto->ct_scsi_status; 1964 1965 #ifndef STATUS_WITH_DATA 1966 sflags |= CT_NO_DATA; 1967 /* 1968 * We can't do a status at the same time as a data CTIO, so 1969 * we need to synthesize an extra CTIO at this level. 1970 */ 1971 nctios++; 1972 #endif 1973 } else { 1974 sflags = scsi_status = resid = 0; 1975 } 1976 1977 cto->ct_resid = 0; 1978 cto->ct_scsi_status = 0; 1979 1980 pcs = (struct isp_pcisoftc *)isp; 1981 dp = &pcs->dmaps[isp_handle_index(handle)]; 1982 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1983 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1984 } else { 1985 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1986 } 1987 1988 nxti = *mp->nxtip; 1989 1990 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1991 int seglim; 1992 1993 seglim = nseg; 1994 if (seglim) { 1995 int seg; 1996 1997 if (seglim > ISP_RQDSEG) 1998 seglim = ISP_RQDSEG; 1999 2000 for (seg = 0; seg < seglim; seg++, nseg--) { 2001 /* 2002 * Unlike normal initiator commands, we don't 2003 * do any swizzling here. 2004 */ 2005 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 2006 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 2007 cto->ct_xfrlen += dm_segs->ds_len; 2008 dm_segs++; 2009 } 2010 cto->ct_seg_count = seg; 2011 } else { 2012 /* 2013 * This case should only happen when we're sending an 2014 * extra CTIO with final status. 
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean.
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO.
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * it for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code and
 * improves performance.
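 *
 * To put rough numbers on it (for illustration only): a transfer that maps
 * to three times ISP_RQDSEG segments costs three full CTIOs on a parallel
 * SCSI chip, while the Fibre Channel path below emits a single CTIO2 and
 * chains the overflow segments into RQSTYPE_DATASEG or RQSTYPE_A64_CONT
 * continuation entries instead.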
 */
static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	ct2_entry_t *cto, *qe;
	uint32_t curi, nxti;
	ispds_t *ds;
	ispds64_t *ds64;
	int segcnt, seglim;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		if (FCPARAM(isp)->isp_2klogin) {
			isp_put_ctio2e(isp,
			    (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
		} else {
			isp_put_ctio2(isp, cto, qe);
		}
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nxti = *mp->nxtip;

	/*
	 * Check to see whether we need DAC addressing or not.
	 *
	 * Any address over the 4GB boundary causes this to happen.
	 */
	segcnt = nseg;
	if (sizeof (bus_addr_t) > 4) {
		for (segcnt = 0; segcnt < nseg; segcnt++) {
			uint64_t addr = dm_segs[segcnt].ds_addr;
			if (addr >= 0x100000000LL) {
				break;
			}
		}
	}
	if (segcnt != nseg) {
		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
		seglim = ISP_RQDSEG_T3;
		ds64 = &cto->rsp.m0.u.ct_dataseg64[0];
		ds = NULL;
	} else {
		seglim = ISP_RQDSEG_T2;
		ds64 = NULL;
		ds = &cto->rsp.m0.u.ct_dataseg[0];
	}
	cto->ct_seg_count = 0;

	/*
	 * Set up the CTIO2 data segments.
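	 *
	 * (Illustration: if even one segment lies at or above 0x100000000,
	 * the scan above stopped early, segcnt != nseg, and the entry was
	 * promoted to a CTIO3 using the 64-bit ds64 descriptors; otherwise
	 * the 32-bit ds descriptors suffice.)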
	 */
	for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		if (ds64) {
			ds64->ds_basehi =
			    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
			ds64->ds_base = dm_segs[segcnt].ds_addr;
			ds64->ds_count = dm_segs[segcnt].ds_len;
			ds64++;
		} else {
			ds->ds_base = dm_segs[segcnt].ds_addr;
			ds->ds_count = dm_segs[segcnt].ds_len;
			ds++;
		}
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
#if __FreeBSD_version < 500000
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%llx:%llu",
		    cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
		    (uint64_t)dm_segs[segcnt].ds_len);
#else
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
		    (uintmax_t)dm_segs[segcnt].ds_len);
#endif
	}

	while (segcnt < nseg) {
		uint32_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
			seglim = ISP_CDSEG64;
			ds = NULL;
			ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
		} else {
			seglim = ISP_CDSEG;
			ds = &crq->req_dataseg[0];
			ds64 = NULL;
			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		}
		for (seg = 0; segcnt < nseg && seg < seglim;
		    segcnt++, seg++) {
			if (ds64) {
				ds64->ds_basehi =
				    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
				ds64->ds_base = dm_segs[segcnt].ds_addr;
				ds64->ds_count = dm_segs[segcnt].ds_len;
				ds64++;
			} else {
				ds->ds_base = dm_segs[segcnt].ds_addr;
				ds->ds_count = dm_segs[segcnt].ds_len;
				ds++;
			}
#if __FreeBSD_version < 500000
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%llx:%llu",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (uint64_t)dm_segs[segcnt].ds_addr,
			    (uint64_t)dm_segs[segcnt].ds_len);
#else
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%jx:%ju",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (uintmax_t)dm_segs[segcnt].ds_addr,
			    (uintmax_t)dm_segs[segcnt].ds_len);
#endif
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do the final twiddling for the CTIO itself.
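	 *
	 * (Setting rqs_seqno to 1 below is what flags this CTIO2 as the
	 * entry that completes the sequence; the continuation entries
	 * queued above are accounted for by ct_header.rqs_entry_count.)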
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	if (FCPARAM(isp)->isp_2klogin) {
		isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
	} else {
		isp_put_ctio2(isp, cto, qe);
	}
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma_2400(void *, bus_dma_segment_t *, int, int);
static void dma2_a64(void *, bus_dma_segment_t *, int, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreqt7_t *rq;
	int seglim, datalen;
	uint32_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}

	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
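	 *
	 * (For the 24XX that works out as: the first segment rides in the
	 * T7 request entry itself, and each RQSTYPE_A64_CONT continuation
	 * entry holds up to ISP_CDSEG64 more. A transfer mapped into
	 * 1 + 2 * ISP_CDSEG64 segments, say, would cost the T7 entry plus
	 * two continuations.)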
	 */
	rq->req_header.rqs_entry_type = RQSTYPE_T7RQS;
	rq->req_dl = datalen;
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		rq->req_alen_datadir = 0x2;
	} else {
		rq->req_alen_datadir = 0x1;
	}

	eseg = dm_segs + nseg;

	rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr);
	rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr);
	rq->req_dataseg.ds_count = dm_segs->ds_len;

	datalen -= dm_segs->ds_len;

	dm_segs++;
	rq->req_seg_count++;

	while (datalen > 0 && dm_segs != eseg) {
		uint32_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		if (isp->isp_dblev & ISP_LOGDEBUG1) {
			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static void
dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	uint32_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
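	 *
	 * (On this 64-bit path the entry type becomes RQSTYPE_T3RQS for
	 * Fibre Channel and RQSTYPE_A64 for parallel SCSI; note that an
	 * A64 request carrying a CDB longer than 12 bytes has no room for
	 * inline data segments, so seglim is forced to 0 and every segment
	 * spills into continuation entries.)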
	 */
	if (IS_FC(isp)) {
		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
		seglim = ISP_RQDSEG_T3;
		((ispreqt3_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		rq->req_header.rqs_entry_type = RQSTYPE_A64;
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG_A64;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
			rq3->req_dataseg[rq3->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		uint32_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		if (isp->isp_dblev & ISP_LOGDEBUG1) {
			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	uint32_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		uint32_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		if (isp->isp_dblev & ISP_LOGDEBUG1) {
			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

/*
 * We enter with ISP_LOCK held.
 */
static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    uint32_t *nxtip, uint32_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			ISPLOCK_2_CAMLOCK(isp);
			(*eptr)(mp, NULL, 0, 0);
			CAMLOCK_2_ISPLOCK(isp);
			goto mbxsync;
		}
	} else
#endif
	if (IS_24XX(isp)) {
		eptr = dma_2400;
	} else if (sizeof (bus_addr_t) > 4) {
		eptr = dma2_a64;
	} else {
		eptr = dma2;
	}

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	ISPLOCK_2_CAMLOCK(isp);
	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	CAMLOCK_2_ISPLOCK(isp);
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	if (isp->isp_dblev & ISP_LOGDEBUG1) {
		isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq);
	}
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	case RQSTYPE_A64:
	case RQSTYPE_T3RQS:
		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
		break;
	case RQSTYPE_T7RQS:
		isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}
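/*
 * A caller of isp_pci_dmasetup() is expected to dispatch on its return
 * value roughly as follows (an illustrative sketch only; the real caller
 * lives in the platform-independent isp code):
 *
 *	switch (isp_pci_dmasetup(isp, csio, rq, &nxti, optr)) {
 *	case CMD_QUEUED:	advance the request index, notify the chip
 *	case CMD_EAGAIN:	requeue; the request queue was full
 *	case CMD_COMPLETE:	fail the command with the XS_SETERR status
 *	}
 */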
static void
isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}

static void
isp_pci_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_pci_reset1(ispsoftc_t *isp)
{
	if (!IS_24XX(isp)) {
		/* Make sure the BIOS is disabled */
		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	}
	/* and enable interrupts */
	ISP_ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}