1 /*- 2 * Copyright (c) 1997-2006 by Matthew Jacob 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 /* 27 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 28 * FreeBSD Version. 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/kernel.h> 36 #include <sys/module.h> 37 #if __FreeBSD_version >= 700000 38 #include <sys/linker.h> 39 #include <sys/firmware.h> 40 #endif 41 #include <sys/bus.h> 42 #if __FreeBSD_version < 500000 43 #include <pci/pcireg.h> 44 #include <pci/pcivar.h> 45 #include <machine/bus_memio.h> 46 #include <machine/bus_pio.h> 47 #else 48 #include <sys/stdint.h> 49 #include <dev/pci/pcireg.h> 50 #include <dev/pci/pcivar.h> 51 #endif 52 #include <machine/bus.h> 53 #include <machine/resource.h> 54 #include <sys/rman.h> 55 #include <sys/malloc.h> 56 57 #include <dev/isp/isp_freebsd.h> 58 59 #if __FreeBSD_version < 500000 60 #define BUS_PROBE_DEFAULT 0 61 #endif 62 63 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); 64 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); 65 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); 66 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); 67 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); 68 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); 69 static int 70 isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 71 static int 72 isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 73 static int 74 isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 75 static int isp_pci_mbxdma(ispsoftc_t *); 76 static int 77 isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t); 78 79 80 static void isp_pci_reset0(ispsoftc_t *); 81 static void isp_pci_reset1(ispsoftc_t *); 82 static void isp_pci_dumpregs(ispsoftc_t *, const char *); 83 84 static struct ispmdvec mdvec = { 85 isp_pci_rd_isr, 86 isp_pci_rd_reg, 87 isp_pci_wr_reg, 88 isp_pci_mbxdma, 89 isp_pci_dmasetup, 90 isp_common_dmateardown, 91 isp_pci_reset0, 92 isp_pci_reset1, 93 isp_pci_dumpregs, 94 NULL, 95 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 96 }; 97 98 static struct ispmdvec mdvec_1080 = { 99 isp_pci_rd_isr, 100 isp_pci_rd_reg_1080, 101 isp_pci_wr_reg_1080, 102 isp_pci_mbxdma, 103 isp_pci_dmasetup, 104 isp_common_dmateardown, 105 isp_pci_reset0, 106 isp_pci_reset1, 107 isp_pci_dumpregs, 108 NULL, 109 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 110 }; 111 112 static struct ispmdvec mdvec_12160 = { 113 isp_pci_rd_isr, 114 isp_pci_rd_reg_1080, 115 isp_pci_wr_reg_1080, 116 isp_pci_mbxdma, 117 isp_pci_dmasetup, 118 isp_common_dmateardown, 119 isp_pci_reset0, 120 isp_pci_reset1, 121 isp_pci_dumpregs, 122 NULL, 123 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 124 }; 125 126 static struct ispmdvec mdvec_2100 = { 127 isp_pci_rd_isr, 128 isp_pci_rd_reg, 129 isp_pci_wr_reg, 130 isp_pci_mbxdma, 131 isp_pci_dmasetup, 132 isp_common_dmateardown, 133 isp_pci_reset0, 134 isp_pci_reset1, 135 isp_pci_dumpregs 136 }; 137 138 static struct ispmdvec mdvec_2200 = { 139 isp_pci_rd_isr, 140 isp_pci_rd_reg, 141 isp_pci_wr_reg, 142 isp_pci_mbxdma, 143 isp_pci_dmasetup, 144 isp_common_dmateardown, 145 isp_pci_reset0, 146 isp_pci_reset1, 147 isp_pci_dumpregs 148 }; 149 150 static struct ispmdvec mdvec_2300 = { 151 isp_pci_rd_isr_2300, 152 isp_pci_rd_reg, 153 isp_pci_wr_reg, 154 isp_pci_mbxdma, 155 isp_pci_dmasetup, 156 isp_common_dmateardown, 157 isp_pci_reset0, 158 isp_pci_reset1, 159 isp_pci_dumpregs 160 }; 161 162 static struct ispmdvec mdvec_2400 = { 163 isp_pci_rd_isr_2400, 164 isp_pci_rd_reg_2400, 165 isp_pci_wr_reg_2400, 166 isp_pci_mbxdma, 167 isp_pci_dmasetup, 168 isp_common_dmateardown, 169 isp_pci_reset0, 170 isp_pci_reset1, 171 NULL 172 }; 173 174 #ifndef PCIM_CMD_INVEN 175 #define PCIM_CMD_INVEN 0x10 176 #endif 177 #ifndef PCIM_CMD_BUSMASTEREN 178 #define PCIM_CMD_BUSMASTEREN 0x0004 179 #endif 180 #ifndef PCIM_CMD_PERRESPEN 181 #define PCIM_CMD_PERRESPEN 0x0040 182 #endif 183 #ifndef PCIM_CMD_SEREN 184 #define PCIM_CMD_SEREN 0x0100 185 #endif 186 #ifndef PCIM_CMD_INTX_DISABLE 187 #define PCIM_CMD_INTX_DISABLE 0x0400 188 #endif 189 190 #ifndef PCIR_COMMAND 191 #define PCIR_COMMAND 0x04 192 #endif 193 194 #ifndef PCIR_CACHELNSZ 195 #define PCIR_CACHELNSZ 0x0c 196 #endif 197 198 #ifndef PCIR_LATTIMER 199 #define PCIR_LATTIMER 0x0d 200 #endif 201 202 #ifndef PCIR_ROMADDR 203 #define PCIR_ROMADDR 0x30 204 #endif 205 206 #ifndef PCI_VENDOR_QLOGIC 207 #define PCI_VENDOR_QLOGIC 0x1077 208 #endif 209 210 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 211 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 212 #endif 213 214 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 215 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 216 #endif 217 218 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 219 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 220 #endif 221 222 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 223 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 224 #endif 225 226 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 227 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 228 #endif 229 230 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 231 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 232 #endif 233 234 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 235 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 236 #endif 237 238 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 239 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 240 #endif 241 242 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 243 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 244 #endif 245 246 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 247 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 248 #endif 249 250 #ifndef PCI_PRODUCT_QLOGIC_ISP2322 251 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 252 #endif 253 254 #ifndef PCI_PRODUCT_QLOGIC_ISP2422 255 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 256 #endif 257 258 #ifndef PCI_PRODUCT_QLOGIC_ISP2432 259 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432 260 #endif 261 262 #ifndef PCI_PRODUCT_QLOGIC_ISP6312 263 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 264 #endif 265 266 #ifndef PCI_PRODUCT_QLOGIC_ISP6322 267 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 268 #endif 269 270 271 #define PCI_QLOGIC_ISP1020 \ 272 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 273 274 #define PCI_QLOGIC_ISP1080 \ 275 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 276 277 #define PCI_QLOGIC_ISP10160 \ 278 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 279 280 #define PCI_QLOGIC_ISP12160 \ 281 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 282 283 #define PCI_QLOGIC_ISP1240 \ 284 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 285 286 #define PCI_QLOGIC_ISP1280 \ 287 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 288 289 #define PCI_QLOGIC_ISP2100 \ 290 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 291 292 #define PCI_QLOGIC_ISP2200 \ 293 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 294 295 #define PCI_QLOGIC_ISP2300 \ 296 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 297 298 #define PCI_QLOGIC_ISP2312 \ 299 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 300 301 #define PCI_QLOGIC_ISP2322 \ 302 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 303 304 #define PCI_QLOGIC_ISP2422 \ 305 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 306 307 #define PCI_QLOGIC_ISP2432 \ 308 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC) 309 310 #define PCI_QLOGIC_ISP6312 \ 311 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 312 313 #define PCI_QLOGIC_ISP6322 \ 314 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) 315 316 /* 317 * Odd case for some AMI raid cards... We need to *not* attach to this. 318 */ 319 #define AMI_RAID_SUBVENDOR_ID 0x101e 320 321 #define IO_MAP_REG 0x10 322 #define MEM_MAP_REG 0x14 323 324 #define PCI_DFLT_LTNCY 0x40 325 #define PCI_DFLT_LNSZ 0x10 326 327 static int isp_pci_probe (device_t); 328 static int isp_pci_attach (device_t); 329 static int isp_pci_detach (device_t); 330 331 332 #define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev 333 struct isp_pcisoftc { 334 ispsoftc_t pci_isp; 335 device_t pci_dev; 336 struct resource * pci_reg; 337 void * ih; 338 int16_t pci_poff[_NREG_BLKS]; 339 bus_dma_tag_t dmat; 340 #if __FreeBSD_version > 700025 341 int msicount; 342 #endif 343 }; 344 345 346 static device_method_t isp_pci_methods[] = { 347 /* Device interface */ 348 DEVMETHOD(device_probe, isp_pci_probe), 349 DEVMETHOD(device_attach, isp_pci_attach), 350 DEVMETHOD(device_detach, isp_pci_detach), 351 { 0, 0 } 352 }; 353 354 static driver_t isp_pci_driver = { 355 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 356 }; 357 static devclass_t isp_devclass; 358 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 359 #if __FreeBSD_version < 700000 360 extern ispfwfunc *isp_get_firmware_p; 361 #endif 362 363 static int 364 isp_pci_probe(device_t dev) 365 { 366 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 367 case PCI_QLOGIC_ISP1020: 368 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 369 break; 370 case PCI_QLOGIC_ISP1080: 371 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 372 break; 373 case PCI_QLOGIC_ISP1240: 374 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 375 break; 376 case PCI_QLOGIC_ISP1280: 377 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 378 break; 379 case PCI_QLOGIC_ISP10160: 380 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 381 break; 382 case PCI_QLOGIC_ISP12160: 383 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 384 return (ENXIO); 385 } 386 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 387 break; 388 case PCI_QLOGIC_ISP2100: 389 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 390 break; 391 case PCI_QLOGIC_ISP2200: 392 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 393 break; 394 case PCI_QLOGIC_ISP2300: 395 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 396 break; 397 case PCI_QLOGIC_ISP2312: 398 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 399 break; 400 case PCI_QLOGIC_ISP2322: 401 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 402 break; 403 case PCI_QLOGIC_ISP2422: 404 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 405 break; 406 case PCI_QLOGIC_ISP2432: 407 device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter"); 408 break; 409 case PCI_QLOGIC_ISP6312: 410 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 411 break; 412 case PCI_QLOGIC_ISP6322: 413 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); 414 break; 415 default: 416 return (ENXIO); 417 } 418 if (isp_announced == 0 && bootverbose) { 419 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 420 "Core Version %d.%d\n", 421 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 422 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 423 isp_announced++; 424 } 425 /* 426 * XXXX: Here is where we might load the f/w module 427 * XXXX: (or increase a reference count to it). 428 */ 429 return (BUS_PROBE_DEFAULT); 430 } 431 432 #if __FreeBSD_version < 500000 433 static void 434 isp_get_generic_options(device_t dev, ispsoftc_t *isp) 435 { 436 int bitmap, unit; 437 438 unit = device_get_unit(dev); 439 if (getenv_int("isp_disable", &bitmap)) { 440 if (bitmap & (1 << unit)) { 441 isp->isp_osinfo.disabled = 1; 442 return; 443 } 444 } 445 if (getenv_int("isp_no_fwload", &bitmap)) { 446 if (bitmap & (1 << unit)) 447 isp->isp_confopts |= ISP_CFG_NORELOAD; 448 } 449 if (getenv_int("isp_fwload", &bitmap)) { 450 if (bitmap & (1 << unit)) 451 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 452 } 453 if (getenv_int("isp_no_nvram", &bitmap)) { 454 if (bitmap & (1 << unit)) 455 isp->isp_confopts |= ISP_CFG_NONVRAM; 456 } 457 if (getenv_int("isp_nvram", &bitmap)) { 458 if (bitmap & (1 << unit)) 459 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 460 } 461 462 bitmap = 0; 463 (void) getenv_int("isp_debug", &bitmap); 464 if (bitmap) { 465 isp->isp_dblev = bitmap; 466 } else { 467 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 468 } 469 if (bootverbose) { 470 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 471 } 472 473 bitmap = 0; 474 if (getenv_int("role", &bitmap)) { 475 isp->isp_role = bitmap; 476 } else { 477 isp->isp_role = ISP_DEFAULT_ROLES; 478 } 479 480 } 481 482 static void 483 isp_get_pci_options(device_t dev, int *m1, int *m2) 484 { 485 int bitmap; 486 int unit = device_get_unit(dev); 487 488 *m1 = PCIM_CMD_MEMEN; 489 *m2 = PCIM_CMD_PORTEN; 490 if (getenv_int("isp_mem_map", &bitmap)) { 491 if (bitmap & (1 << unit)) { 492 *m1 = PCIM_CMD_MEMEN; 493 *m2 = PCIM_CMD_PORTEN; 494 } 495 } 496 bitmap = 0; 497 if (getenv_int("isp_io_map", &bitmap)) { 498 if (bitmap & (1 << unit)) { 499 *m1 = PCIM_CMD_PORTEN; 500 *m2 = PCIM_CMD_MEMEN; 501 } 502 } 503 } 504 505 static void 506 isp_get_specific_options(device_t dev, ispsoftc_t *isp) 507 { 508 uint64_t wwn; 509 int bitmap; 510 int unit = device_get_unit(dev); 511 512 513 if (IS_SCSI(isp)) { 514 return; 515 } 516 517 if (getenv_int("isp_fcduplex", &bitmap)) { 518 if (bitmap & (1 << unit)) 519 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 520 } 521 if (getenv_int("isp_no_fcduplex", &bitmap)) { 522 if (bitmap & (1 << unit)) 523 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 524 } 525 if (getenv_int("isp_nport", &bitmap)) { 526 if (bitmap & (1 << unit)) 527 isp->isp_confopts |= ISP_CFG_NPORT; 528 } 529 530 /* 531 * Because the resource_*_value functions can neither return 532 * 64 bit integer values, nor can they be directly coerced 533 * to interpret the right hand side of the assignment as 534 * you want them to interpret it, we have to force WWN 535 * hint replacement to specify WWN strings with a leading 536 * 'w' (e..g w50000000aaaa0001). Sigh. 537 */ 538 if (getenv_quad("isp_portwwn", &wwn)) { 539 isp->isp_osinfo.default_port_wwn = wwn; 540 isp->isp_confopts |= ISP_CFG_OWNWWPN; 541 } 542 if (isp->isp_osinfo.default_port_wwn == 0) { 543 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 544 } 545 546 if (getenv_quad("isp_nodewwn", &wwn)) { 547 isp->isp_osinfo.default_node_wwn = wwn; 548 isp->isp_confopts |= ISP_CFG_OWNWWNN; 549 } 550 if (isp->isp_osinfo.default_node_wwn == 0) { 551 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 552 } 553 554 bitmap = 0; 555 (void) getenv_int("isp_fabric_hysteresis", &bitmap); 556 if (bitmap >= 0 && bitmap < 256) { 557 isp->isp_osinfo.hysteresis = bitmap; 558 } else { 559 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 560 } 561 562 bitmap = 0; 563 (void) getenv_int("isp_loop_down_limit", &bitmap); 564 if (bitmap >= 0 && bitmap < 0xffff) { 565 isp->isp_osinfo.loop_down_limit = bitmap; 566 } else { 567 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 568 } 569 570 bitmap = 0; 571 (void) getenv_int("isp_gone_device_time", &bitmap); 572 if (bitmap >= 0 && bitmap < 0xffff) { 573 isp->isp_osinfo.gone_device_time = bitmap; 574 } else { 575 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 576 } 577 #ifdef ISP_FW_CRASH_DUMP 578 bitmap = 0; 579 if (getenv_int("isp_fw_dump_enable", &bitmap)) { 580 if (bitmap & (1 << unit) { 581 size_t amt = 0; 582 if (IS_2200(isp)) { 583 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 584 } else if (IS_23XX(isp)) { 585 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 586 } 587 if (amt) { 588 FCPARAM(isp)->isp_dump_data = 589 malloc(amt, M_DEVBUF, M_WAITOK); 590 memset(FCPARAM(isp)->isp_dump_data, 0, amt); 591 } else { 592 device_printf(dev, 593 "f/w crash dumps not supported for card\n"); 594 } 595 } 596 } 597 #endif 598 } 599 #else 600 static void 601 isp_get_generic_options(device_t dev, ispsoftc_t *isp) 602 { 603 int tval; 604 605 /* 606 * Figure out if we're supposed to skip this one. 607 */ 608 tval = 0; 609 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 610 "disable", &tval) == 0 && tval) { 611 device_printf(dev, "disabled at user request\n"); 612 isp->isp_osinfo.disabled = 1; 613 return; 614 } 615 616 tval = -1; 617 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 618 "role", &tval) == 0 && tval != -1) { 619 tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 620 isp->isp_role = tval; 621 device_printf(dev, "setting role to 0x%x\n", isp->isp_role); 622 } else { 623 #ifdef ISP_TARGET_MODE 624 isp->isp_role = ISP_ROLE_TARGET; 625 #else 626 isp->isp_role = ISP_DEFAULT_ROLES; 627 #endif 628 } 629 630 tval = 0; 631 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 632 "fwload_disable", &tval) == 0 && tval != 0) { 633 isp->isp_confopts |= ISP_CFG_NORELOAD; 634 } 635 tval = 0; 636 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 637 "ignore_nvram", &tval) == 0 && tval != 0) { 638 isp->isp_confopts |= ISP_CFG_NONVRAM; 639 } 640 641 tval = 0; 642 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 643 "debug", &tval); 644 if (tval) { 645 isp->isp_dblev = tval; 646 } else { 647 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 648 } 649 if (bootverbose) { 650 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 651 } 652 653 } 654 655 static void 656 isp_get_pci_options(device_t dev, int *m1, int *m2) 657 { 658 int tval; 659 /* 660 * Which we should try first - memory mapping or i/o mapping? 661 * 662 * We used to try memory first followed by i/o on alpha, otherwise 663 * the reverse, but we should just try memory first all the time now. 664 */ 665 *m1 = PCIM_CMD_MEMEN; 666 *m2 = PCIM_CMD_PORTEN; 667 668 tval = 0; 669 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 670 "prefer_iomap", &tval) == 0 && tval != 0) { 671 *m1 = PCIM_CMD_PORTEN; 672 *m2 = PCIM_CMD_MEMEN; 673 } 674 tval = 0; 675 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 676 "prefer_memmap", &tval) == 0 && tval != 0) { 677 *m1 = PCIM_CMD_MEMEN; 678 *m2 = PCIM_CMD_PORTEN; 679 } 680 } 681 682 static void 683 isp_get_specific_options(device_t dev, ispsoftc_t *isp) 684 { 685 const char *sptr; 686 int tval; 687 688 isp->isp_osinfo.default_id = -1; 689 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 690 "iid", &tval) == 0) { 691 isp->isp_osinfo.default_id = tval; 692 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 693 } 694 if (isp->isp_osinfo.default_id == -1) { 695 if (IS_FC(isp)) { 696 isp->isp_osinfo.default_id = 109; 697 } else { 698 isp->isp_osinfo.default_id = 7; 699 } 700 } 701 702 if (IS_SCSI(isp)) { 703 return; 704 } 705 706 tval = 0; 707 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 708 "fullduplex", &tval) == 0 && tval != 0) { 709 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 710 } 711 #ifdef ISP_FW_CRASH_DUMP 712 tval = 0; 713 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 714 "fw_dump_enable", &tval) == 0 && tval != 0) { 715 size_t amt = 0; 716 if (IS_2200(isp)) { 717 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 718 } else if (IS_23XX(isp)) { 719 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 720 } 721 if (amt) { 722 FCPARAM(isp)->isp_dump_data = 723 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 724 } else { 725 device_printf(dev, 726 "f/w crash dumps not supported for this model\n"); 727 } 728 } 729 #endif 730 sptr = 0; 731 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 732 "topology", (const char **) &sptr) == 0 && sptr != 0) { 733 if (strcmp(sptr, "lport") == 0) { 734 isp->isp_confopts |= ISP_CFG_LPORT; 735 } else if (strcmp(sptr, "nport") == 0) { 736 isp->isp_confopts |= ISP_CFG_NPORT; 737 } else if (strcmp(sptr, "lport-only") == 0) { 738 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 739 } else if (strcmp(sptr, "nport-only") == 0) { 740 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 741 } 742 } 743 744 /* 745 * Because the resource_*_value functions can neither return 746 * 64 bit integer values, nor can they be directly coerced 747 * to interpret the right hand side of the assignment as 748 * you want them to interpret it, we have to force WWN 749 * hint replacement to specify WWN strings with a leading 750 * 'w' (e..g w50000000aaaa0001). Sigh. 751 */ 752 sptr = 0; 753 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 754 "portwwn", (const char **) &sptr); 755 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 756 char *eptr = 0; 757 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 758 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 759 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 760 isp->isp_osinfo.default_port_wwn = 0; 761 } else { 762 isp->isp_confopts |= ISP_CFG_OWNWWPN; 763 } 764 } 765 if (isp->isp_osinfo.default_port_wwn == 0) { 766 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 767 } 768 769 sptr = 0; 770 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 771 "nodewwn", (const char **) &sptr); 772 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 773 char *eptr = 0; 774 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 775 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 776 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 777 isp->isp_osinfo.default_node_wwn = 0; 778 } else { 779 isp->isp_confopts |= ISP_CFG_OWNWWNN; 780 } 781 } 782 if (isp->isp_osinfo.default_node_wwn == 0) { 783 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 784 } 785 786 787 tval = 0; 788 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 789 "hysteresis", &tval); 790 if (tval >= 0 && tval < 256) { 791 isp->isp_osinfo.hysteresis = tval; 792 } else { 793 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 794 } 795 796 tval = -1; 797 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 798 "loop_down_limit", &tval); 799 if (tval >= 0 && tval < 0xffff) { 800 isp->isp_osinfo.loop_down_limit = tval; 801 } else { 802 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 803 } 804 805 tval = -1; 806 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 807 "gone_device_time", &tval); 808 if (tval >= 0 && tval < 0xffff) { 809 isp->isp_osinfo.gone_device_time = tval; 810 } else { 811 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 812 } 813 } 814 #endif 815 816 static int 817 isp_pci_attach(device_t dev) 818 { 819 struct resource *regs, *irq; 820 int rtp, rgd, iqd, m1, m2; 821 uint32_t data, cmd, linesz, psize, basetype; 822 struct isp_pcisoftc *pcs; 823 ispsoftc_t *isp = NULL; 824 struct ispmdvec *mdvp; 825 #if __FreeBSD_version >= 500000 826 int locksetup = 0; 827 #endif 828 829 pcs = device_get_softc(dev); 830 if (pcs == NULL) { 831 device_printf(dev, "cannot get softc\n"); 832 return (ENOMEM); 833 } 834 memset(pcs, 0, sizeof (*pcs)); 835 pcs->pci_dev = dev; 836 isp = &pcs->pci_isp; 837 838 /* 839 * Get Generic Options 840 */ 841 isp_get_generic_options(dev, isp); 842 843 /* 844 * Check to see if options have us disabled 845 */ 846 if (isp->isp_osinfo.disabled) { 847 /* 848 * But return zero to preserve unit numbering 849 */ 850 return (0); 851 } 852 853 /* 854 * Get PCI options- which in this case are just mapping preferences. 855 */ 856 isp_get_pci_options(dev, &m1, &m2); 857 858 linesz = PCI_DFLT_LNSZ; 859 irq = regs = NULL; 860 rgd = rtp = iqd = 0; 861 862 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 863 if (cmd & m1) { 864 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 865 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 866 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 867 } 868 if (regs == NULL && (cmd & m2)) { 869 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 870 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 871 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 872 } 873 if (regs == NULL) { 874 device_printf(dev, "unable to map any ports\n"); 875 goto bad; 876 } 877 if (bootverbose) { 878 device_printf(dev, "using %s space register mapping\n", 879 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 880 } 881 pcs->pci_dev = dev; 882 pcs->pci_reg = regs; 883 isp->isp_bus_tag = rman_get_bustag(regs); 884 isp->isp_bus_handle = rman_get_bushandle(regs); 885 886 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 887 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 888 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 889 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 890 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 891 mdvp = &mdvec; 892 basetype = ISP_HA_SCSI_UNKNOWN; 893 psize = sizeof (sdparam); 894 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 895 mdvp = &mdvec; 896 basetype = ISP_HA_SCSI_UNKNOWN; 897 psize = sizeof (sdparam); 898 } 899 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 900 mdvp = &mdvec_1080; 901 basetype = ISP_HA_SCSI_1080; 902 psize = sizeof (sdparam); 903 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 904 ISP1080_DMA_REGS_OFF; 905 } 906 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 907 mdvp = &mdvec_1080; 908 basetype = ISP_HA_SCSI_1240; 909 psize = 2 * sizeof (sdparam); 910 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 911 ISP1080_DMA_REGS_OFF; 912 } 913 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 914 mdvp = &mdvec_1080; 915 basetype = ISP_HA_SCSI_1280; 916 psize = 2 * sizeof (sdparam); 917 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 918 ISP1080_DMA_REGS_OFF; 919 } 920 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 921 mdvp = &mdvec_12160; 922 basetype = ISP_HA_SCSI_10160; 923 psize = sizeof (sdparam); 924 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 925 ISP1080_DMA_REGS_OFF; 926 } 927 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 928 mdvp = &mdvec_12160; 929 basetype = ISP_HA_SCSI_12160; 930 psize = 2 * sizeof (sdparam); 931 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 932 ISP1080_DMA_REGS_OFF; 933 } 934 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 935 mdvp = &mdvec_2100; 936 basetype = ISP_HA_FC_2100; 937 psize = sizeof (fcparam); 938 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 939 PCI_MBOX_REGS2100_OFF; 940 if (pci_get_revid(dev) < 3) { 941 /* 942 * XXX: Need to get the actual revision 943 * XXX: number of the 2100 FB. At any rate, 944 * XXX: lower cache line size for early revision 945 * XXX; boards. 946 */ 947 linesz = 1; 948 } 949 } 950 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 951 mdvp = &mdvec_2200; 952 basetype = ISP_HA_FC_2200; 953 psize = sizeof (fcparam); 954 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 955 PCI_MBOX_REGS2100_OFF; 956 } 957 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 958 mdvp = &mdvec_2300; 959 basetype = ISP_HA_FC_2300; 960 psize = sizeof (fcparam); 961 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 962 PCI_MBOX_REGS2300_OFF; 963 } 964 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 965 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 966 mdvp = &mdvec_2300; 967 basetype = ISP_HA_FC_2312; 968 psize = sizeof (fcparam); 969 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 970 PCI_MBOX_REGS2300_OFF; 971 } 972 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 || 973 pci_get_devid(dev) == PCI_QLOGIC_ISP6322) { 974 mdvp = &mdvec_2300; 975 basetype = ISP_HA_FC_2322; 976 psize = sizeof (fcparam); 977 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 978 PCI_MBOX_REGS2300_OFF; 979 } 980 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422 || 981 pci_get_devid(dev) == PCI_QLOGIC_ISP2432) { 982 mdvp = &mdvec_2400; 983 basetype = ISP_HA_FC_2400; 984 psize = sizeof (fcparam); 985 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 986 PCI_MBOX_REGS2400_OFF; 987 } 988 isp = &pcs->pci_isp; 989 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 990 if (isp->isp_param == NULL) { 991 device_printf(dev, "cannot allocate parameter data\n"); 992 goto bad; 993 } 994 isp->isp_mdvec = mdvp; 995 isp->isp_type = basetype; 996 isp->isp_revision = pci_get_revid(dev); 997 isp->isp_dev = dev; 998 999 /* 1000 * Now that we know who we are (roughly) get/set specific options 1001 */ 1002 isp_get_specific_options(dev, isp); 1003 1004 #if __FreeBSD_version >= 700000 1005 /* 1006 * Try and find firmware for this device. 1007 */ 1008 { 1009 char fwname[32]; 1010 unsigned int did = pci_get_device(dev); 1011 1012 /* 1013 * Map a few pci ids to fw names 1014 */ 1015 switch (did) { 1016 case PCI_PRODUCT_QLOGIC_ISP1020: 1017 did = 0x1040; 1018 break; 1019 case PCI_PRODUCT_QLOGIC_ISP1240: 1020 did = 0x1080; 1021 break; 1022 case PCI_PRODUCT_QLOGIC_ISP10160: 1023 case PCI_PRODUCT_QLOGIC_ISP12160: 1024 did = 0x12160; 1025 break; 1026 case PCI_PRODUCT_QLOGIC_ISP6312: 1027 case PCI_PRODUCT_QLOGIC_ISP2312: 1028 did = 0x2300; 1029 break; 1030 case PCI_PRODUCT_QLOGIC_ISP6322: 1031 did = 0x2322; 1032 break; 1033 case PCI_PRODUCT_QLOGIC_ISP2422: 1034 case PCI_PRODUCT_QLOGIC_ISP2432: 1035 did = 0x2400; 1036 break; 1037 default: 1038 break; 1039 } 1040 1041 isp->isp_osinfo.fw = NULL; 1042 if (isp->isp_role & ISP_ROLE_TARGET) { 1043 snprintf(fwname, sizeof (fwname), "isp_%04x_it", did); 1044 isp->isp_osinfo.fw = firmware_get(fwname); 1045 } 1046 if (isp->isp_osinfo.fw == NULL) { 1047 snprintf(fwname, sizeof (fwname), "isp_%04x", did); 1048 isp->isp_osinfo.fw = firmware_get(fwname); 1049 } 1050 if (isp->isp_osinfo.fw != NULL) { 1051 isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data; 1052 } 1053 } 1054 #else 1055 if (isp_get_firmware_p) { 1056 int device = (int) pci_get_device(dev); 1057 #ifdef ISP_TARGET_MODE 1058 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 1059 #else 1060 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 1061 #endif 1062 } 1063 #endif 1064 1065 /* 1066 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 1067 * are set. 1068 */ 1069 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 1070 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 1071 1072 if (IS_2300(isp)) { /* per QLogic errata */ 1073 cmd &= ~PCIM_CMD_INVEN; 1074 } 1075 1076 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 1077 cmd &= ~PCIM_CMD_INTX_DISABLE; 1078 } 1079 1080 #ifdef WE_KNEW_WHAT_WE_WERE_DOING 1081 if (IS_24XX(isp)) { 1082 int reg; 1083 1084 cmd &= ~PCIM_CMD_INTX_DISABLE; 1085 1086 /* 1087 * Is this a PCI-X card? If so, set max read byte count. 1088 */ 1089 if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { 1090 uint16_t pxcmd; 1091 reg += 2; 1092 1093 pxcmd = pci_read_config(dev, reg, 2); 1094 pxcmd &= ~0xc; 1095 pxcmd |= 0x8; 1096 pci_write_config(dev, reg, 2, pxcmd); 1097 } 1098 1099 /* 1100 * Is this a PCI Express card? If so, set max read byte count. 1101 */ 1102 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 1103 uint16_t pectl; 1104 1105 reg += 0x8; 1106 pectl = pci_read_config(dev, reg, 2); 1107 pectl &= ~0x7000; 1108 pectl |= 0x4000; 1109 pci_write_config(dev, reg, 2, pectl); 1110 } 1111 } 1112 #else 1113 if (IS_24XX(isp)) { 1114 cmd &= ~PCIM_CMD_INTX_DISABLE; 1115 } 1116 #endif 1117 1118 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 1119 1120 /* 1121 * Make sure the Cache Line Size register is set sensibly. 1122 */ 1123 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 1124 if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) { 1125 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d from %d", 1126 linesz, data); 1127 data = linesz; 1128 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 1129 } 1130 1131 /* 1132 * Make sure the Latency Timer is sane. 1133 */ 1134 data = pci_read_config(dev, PCIR_LATTIMER, 1); 1135 if (data < PCI_DFLT_LTNCY) { 1136 data = PCI_DFLT_LTNCY; 1137 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 1138 pci_write_config(dev, PCIR_LATTIMER, data, 1); 1139 } 1140 1141 /* 1142 * Make sure we've disabled the ROM. 1143 */ 1144 data = pci_read_config(dev, PCIR_ROMADDR, 4); 1145 data &= ~1; 1146 pci_write_config(dev, PCIR_ROMADDR, data, 4); 1147 #if __FreeBSD_version > 700025 1148 if (IS_24XX(isp) || IS_2322(isp)) { 1149 pcs->msicount = pci_msi_count(dev); 1150 if (pcs->msicount > 1) { 1151 pcs->msicount = 1; 1152 } 1153 if (pci_alloc_msi(dev, &pcs->msicount) == 0) { 1154 iqd = 1; 1155 } else { 1156 iqd = 0; 1157 } 1158 } 1159 #else 1160 iqd = 0; 1161 #endif 1162 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 1163 RF_ACTIVE | RF_SHAREABLE); 1164 if (irq == NULL) { 1165 device_printf(dev, "could not allocate interrupt\n"); 1166 goto bad; 1167 } 1168 1169 #if __FreeBSD_version >= 500000 1170 /* Make sure the lock is set up. */ 1171 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 1172 locksetup++; 1173 #endif 1174 1175 if (isp_setup_intr(dev, irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, 1176 &pcs->ih)) { 1177 device_printf(dev, "could not setup interrupt\n"); 1178 goto bad; 1179 } 1180 1181 /* 1182 * Last minute checks... 1183 */ 1184 if (IS_23XX(isp) || IS_24XX(isp)) { 1185 isp->isp_port = pci_get_function(dev); 1186 } 1187 1188 if (IS_23XX(isp)) { 1189 /* 1190 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 1191 */ 1192 isp->isp_touched = 1; 1193 } 1194 1195 /* 1196 * Make sure we're in reset state. 1197 */ 1198 ISP_LOCK(isp); 1199 isp_reset(isp); 1200 if (isp->isp_state != ISP_RESETSTATE) { 1201 ISP_UNLOCK(isp); 1202 goto bad; 1203 } 1204 isp_init(isp); 1205 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 1206 isp_uninit(isp); 1207 ISP_UNLOCK(isp); 1208 goto bad; 1209 } 1210 isp_attach(isp); 1211 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 1212 isp_uninit(isp); 1213 ISP_UNLOCK(isp); 1214 goto bad; 1215 } 1216 ISP_UNLOCK(isp); 1217 return (0); 1218 1219 bad: 1220 if (pcs && pcs->ih) { 1221 (void) bus_teardown_intr(dev, irq, pcs->ih); 1222 } 1223 #if __FreeBSD_version >= 500000 1224 if (locksetup && isp) { 1225 mtx_destroy(&isp->isp_osinfo.lock); 1226 } 1227 #endif 1228 if (irq) { 1229 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 1230 } 1231 #if __FreeBSD_version > 700025 1232 if (pcs && pcs->msicount) { 1233 pci_release_msi(dev); 1234 } 1235 #endif 1236 if (regs) { 1237 (void) bus_release_resource(dev, rtp, rgd, regs); 1238 } 1239 if (pcs) { 1240 if (pcs->pci_isp.isp_param) { 1241 #ifdef ISP_FW_CRASH_DUMP 1242 if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) { 1243 free(FCPARAM(isp)->isp_dump_data, M_DEVBUF); 1244 } 1245 #endif 1246 free(pcs->pci_isp.isp_param, M_DEVBUF); 1247 } 1248 } 1249 return (ENXIO); 1250 } 1251 1252 static int 1253 isp_pci_detach(device_t dev) 1254 { 1255 struct isp_pcisoftc *pcs; 1256 ispsoftc_t *isp; 1257 1258 pcs = device_get_softc(dev); 1259 if (pcs == NULL) { 1260 return (ENXIO); 1261 } 1262 isp = (ispsoftc_t *) pcs; 1263 ISP_DISABLE_INTS(isp); 1264 return (0); 1265 } 1266 1267 #define IspVirt2Off(a, x) \ 1268 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 1269 _BLK_REG_SHFT] + ((x) & 0xfff)) 1270 1271 #define BXR2(isp, off) \ 1272 bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off) 1273 #define BXW2(isp, off, v) \ 1274 bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v) 1275 #define BXR4(isp, off) \ 1276 bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off) 1277 #define BXW4(isp, off, v) \ 1278 bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v) 1279 1280 1281 static __inline int 1282 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp) 1283 { 1284 uint32_t val0, val1; 1285 int i = 0; 1286 1287 do { 1288 val0 = BXR2(isp, IspVirt2Off(isp, off)); 1289 val1 = BXR2(isp, IspVirt2Off(isp, off)); 1290 } while (val0 != val1 && ++i < 1000); 1291 if (val0 != val1) { 1292 return (1); 1293 } 1294 *rp = val0; 1295 return (0); 1296 } 1297 1298 static int 1299 isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbp) 1300 { 1301 uint16_t isr, sema; 1302 1303 if (IS_2100(isp)) { 1304 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 1305 return (0); 1306 } 1307 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 1308 return (0); 1309 } 1310 } else { 1311 isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR)); 1312 sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA)); 1313 } 1314 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 1315 isr &= INT_PENDING_MASK(isp); 1316 sema &= BIU_SEMA_LOCK; 1317 if (isr == 0 && sema == 0) { 1318 return (0); 1319 } 1320 *isrp = isr; 1321 if ((*semap = sema) != 0) { 1322 if (IS_2100(isp)) { 1323 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 1324 return (0); 1325 } 1326 } else { 1327 *mbp = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0)); 1328 } 1329 } 1330 return (1); 1331 } 1332 1333 static int 1334 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, 1335 uint16_t *semap, uint16_t *mbox0p) 1336 { 1337 uint32_t hccr; 1338 uint32_t r2hisr; 1339 1340 if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 1341 *isrp = 0; 1342 return (0); 1343 } 1344 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO)); 1345 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1346 if ((r2hisr & BIU_R2HST_INTR) == 0) { 1347 *isrp = 0; 1348 return (0); 1349 } 1350 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 1351 case ISPR2HST_ROM_MBX_OK: 1352 case ISPR2HST_ROM_MBX_FAIL: 1353 case ISPR2HST_MBX_OK: 1354 case ISPR2HST_MBX_FAIL: 1355 case ISPR2HST_ASYNC_EVENT: 1356 *isrp = r2hisr & 0xffff; 1357 *mbox0p = (r2hisr >> 16); 1358 *semap = 1; 1359 return (1); 1360 case ISPR2HST_RIO_16: 1361 *isrp = r2hisr & 0xffff; 1362 *mbox0p = ASYNC_RIO1; 1363 *semap = 1; 1364 return (1); 1365 case ISPR2HST_FPOST: 1366 *isrp = r2hisr & 0xffff; 1367 *mbox0p = ASYNC_CMD_CMPLT; 1368 *semap = 1; 1369 return (1); 1370 case ISPR2HST_FPOST_CTIO: 1371 *isrp = r2hisr & 0xffff; 1372 *mbox0p = ASYNC_CTIO_DONE; 1373 *semap = 1; 1374 return (1); 1375 case ISPR2HST_RSPQ_UPDATE: 1376 *isrp = r2hisr & 0xffff; 1377 *mbox0p = 0; 1378 *semap = 0; 1379 return (1); 1380 default: 1381 hccr = ISP_READ(isp, HCCR); 1382 if (hccr & HCCR_PAUSE) { 1383 ISP_WRITE(isp, HCCR, HCCR_RESET); 1384 isp_prt(isp, ISP_LOGERR, 1385 "RISC paused at interrupt (%x->%x)", hccr, 1386 ISP_READ(isp, HCCR)); 1387 ISP_WRITE(isp, BIU_ICR, 0); 1388 } else { 1389 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", 1390 r2hisr); 1391 } 1392 return (0); 1393 } 1394 } 1395 1396 static int 1397 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, 1398 uint16_t *semap, uint16_t *mbox0p) 1399 { 1400 uint32_t r2hisr; 1401 1402 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO)); 1403 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1404 if ((r2hisr & BIU2400_R2HST_INTR) == 0) { 1405 *isrp = 0; 1406 return (0); 1407 } 1408 switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) { 1409 case ISP2400R2HST_ROM_MBX_OK: 1410 case ISP2400R2HST_ROM_MBX_FAIL: 1411 case ISP2400R2HST_MBX_OK: 1412 case ISP2400R2HST_MBX_FAIL: 1413 case ISP2400R2HST_ASYNC_EVENT: 1414 *isrp = r2hisr & 0xffff; 1415 *mbox0p = (r2hisr >> 16); 1416 *semap = 1; 1417 return (1); 1418 case ISP2400R2HST_RSPQ_UPDATE: 1419 case ISP2400R2HST_ATIO_RSPQ_UPDATE: 1420 case ISP2400R2HST_ATIO_RQST_UPDATE: 1421 *isrp = r2hisr & 0xffff; 1422 *mbox0p = 0; 1423 *semap = 0; 1424 return (1); 1425 default: 1426 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 1427 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); 1428 return (0); 1429 } 1430 } 1431 1432 static uint32_t 1433 isp_pci_rd_reg(ispsoftc_t *isp, int regoff) 1434 { 1435 uint16_t rv; 1436 int oldconf = 0; 1437 1438 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1439 /* 1440 * We will assume that someone has paused the RISC processor. 1441 */ 1442 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1443 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1444 oldconf | BIU_PCI_CONF1_SXP); 1445 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1446 } 1447 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1448 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1449 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1450 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1451 } 1452 return (rv); 1453 } 1454 1455 static void 1456 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) 1457 { 1458 int oldconf = 0; 1459 1460 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1461 /* 1462 * We will assume that someone has paused the RISC processor. 1463 */ 1464 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1465 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1466 oldconf | BIU_PCI_CONF1_SXP); 1467 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1468 } 1469 BXW2(isp, IspVirt2Off(isp, regoff), val); 1470 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); 1471 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1472 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1473 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1474 } 1475 1476 } 1477 1478 static uint32_t 1479 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) 1480 { 1481 uint32_t rv, oc = 0; 1482 1483 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1484 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1485 uint32_t tc; 1486 /* 1487 * We will assume that someone has paused the RISC processor. 1488 */ 1489 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1490 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1491 if (regoff & SXP_BANK1_SELECT) 1492 tc |= BIU_PCI1080_CONF1_SXP1; 1493 else 1494 tc |= BIU_PCI1080_CONF1_SXP0; 1495 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1496 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1497 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1498 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1499 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1500 oc | BIU_PCI1080_CONF1_DMA); 1501 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1502 } 1503 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1504 if (oc) { 1505 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1506 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1507 } 1508 return (rv); 1509 } 1510 1511 static void 1512 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) 1513 { 1514 int oc = 0; 1515 1516 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1517 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1518 uint32_t tc; 1519 /* 1520 * We will assume that someone has paused the RISC processor. 1521 */ 1522 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1523 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1524 if (regoff & SXP_BANK1_SELECT) 1525 tc |= BIU_PCI1080_CONF1_SXP1; 1526 else 1527 tc |= BIU_PCI1080_CONF1_SXP0; 1528 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1529 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1530 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1531 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1532 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1533 oc | BIU_PCI1080_CONF1_DMA); 1534 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1535 } 1536 BXW2(isp, IspVirt2Off(isp, regoff), val); 1537 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); 1538 if (oc) { 1539 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1540 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1541 } 1542 } 1543 1544 static uint32_t 1545 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) 1546 { 1547 uint32_t rv; 1548 int block = regoff & _BLK_REG_MASK; 1549 1550 switch (block) { 1551 case BIU_BLOCK: 1552 break; 1553 case MBOX_BLOCK: 1554 return (BXR2(isp, IspVirt2Off(isp, regoff))); 1555 case SXP_BLOCK: 1556 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff); 1557 return (0xffffffff); 1558 case RISC_BLOCK: 1559 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff); 1560 return (0xffffffff); 1561 case DMA_BLOCK: 1562 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff); 1563 return (0xffffffff); 1564 default: 1565 isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff); 1566 return (0xffffffff); 1567 } 1568 1569 1570 switch (regoff) { 1571 case BIU2400_FLASH_ADDR: 1572 case BIU2400_FLASH_DATA: 1573 case BIU2400_ICR: 1574 case BIU2400_ISR: 1575 case BIU2400_CSR: 1576 case BIU2400_REQINP: 1577 case BIU2400_REQOUTP: 1578 case BIU2400_RSPINP: 1579 case BIU2400_RSPOUTP: 1580 case BIU2400_PRI_RQINP: 1581 case BIU2400_PRI_RSPINP: 1582 case BIU2400_ATIO_RSPINP: 1583 case BIU2400_ATIO_REQINP: 1584 case BIU2400_HCCR: 1585 case BIU2400_GPIOD: 1586 case BIU2400_GPIOE: 1587 case BIU2400_HSEMA: 1588 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1589 break; 1590 case BIU2400_R2HSTSLO: 1591 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1592 break; 1593 case BIU2400_R2HSTSHI: 1594 rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16; 1595 break; 1596 default: 1597 isp_prt(isp, ISP_LOGERR, 1598 "isp_pci_rd_reg_2400: unknown offset %x", regoff); 1599 rv = 0xffffffff; 1600 break; 1601 } 1602 return (rv); 1603 } 1604 1605 static void 1606 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) 1607 { 1608 int block = regoff & _BLK_REG_MASK; 1609 1610 switch (block) { 1611 case BIU_BLOCK: 1612 break; 1613 case MBOX_BLOCK: 1614 BXW2(isp, IspVirt2Off(isp, regoff), val); 1615 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); 1616 return; 1617 case SXP_BLOCK: 1618 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff); 1619 return; 1620 case RISC_BLOCK: 1621 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff); 1622 return; 1623 case DMA_BLOCK: 1624 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff); 1625 return; 1626 default: 1627 isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x", 1628 regoff); 1629 break; 1630 } 1631 1632 switch (regoff) { 1633 case BIU2400_FLASH_ADDR: 1634 case BIU2400_FLASH_DATA: 1635 case BIU2400_ICR: 1636 case BIU2400_ISR: 1637 case BIU2400_CSR: 1638 case BIU2400_REQINP: 1639 case BIU2400_REQOUTP: 1640 case BIU2400_RSPINP: 1641 case BIU2400_RSPOUTP: 1642 case BIU2400_PRI_RQINP: 1643 case BIU2400_PRI_RSPINP: 1644 case BIU2400_ATIO_RSPINP: 1645 case BIU2400_ATIO_REQINP: 1646 case BIU2400_HCCR: 1647 case BIU2400_GPIOD: 1648 case BIU2400_GPIOE: 1649 case BIU2400_HSEMA: 1650 BXW4(isp, IspVirt2Off(isp, regoff), val); 1651 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4); 1652 break; 1653 default: 1654 isp_prt(isp, ISP_LOGERR, 1655 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff); 1656 break; 1657 } 1658 } 1659 1660 1661 struct imush { 1662 ispsoftc_t *isp; 1663 int error; 1664 }; 1665 1666 static void imc(void *, bus_dma_segment_t *, int, int); 1667 1668 static void 1669 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1670 { 1671 struct imush *imushp = (struct imush *) arg; 1672 if (error) { 1673 imushp->error = error; 1674 } else { 1675 ispsoftc_t *isp =imushp->isp; 1676 bus_addr_t addr = segs->ds_addr; 1677 1678 isp->isp_rquest_dma = addr; 1679 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1680 isp->isp_result_dma = addr; 1681 if (IS_FC(isp)) { 1682 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1683 FCPARAM(isp)->isp_scdma = addr; 1684 } 1685 } 1686 } 1687 1688 static int 1689 isp_pci_mbxdma(ispsoftc_t *isp) 1690 { 1691 caddr_t base; 1692 uint32_t len; 1693 int i, error, ns; 1694 bus_size_t slim; /* segment size */ 1695 bus_addr_t llim; /* low limit of unavailable dma */ 1696 bus_addr_t hlim; /* high limit of unavailable dma */ 1697 struct imush im; 1698 1699 /* 1700 * Already been here? If so, leave... 1701 */ 1702 if (isp->isp_rquest) { 1703 return (0); 1704 } 1705 ISP_UNLOCK(isp); 1706 1707 if (isp->isp_maxcmds == 0) { 1708 isp_prt(isp, ISP_LOGERR, "maxcmds not set"); 1709 ISP_LOCK(isp); 1710 return (1); 1711 } 1712 1713 hlim = BUS_SPACE_MAXADDR; 1714 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1715 if (sizeof (bus_size_t) > 4) { 1716 slim = (bus_size_t) (1ULL << 32); 1717 } else { 1718 slim = (bus_size_t) (1UL << 31); 1719 } 1720 llim = BUS_SPACE_MAXADDR; 1721 } else { 1722 llim = BUS_SPACE_MAXADDR_32BIT; 1723 slim = (1UL << 24); 1724 } 1725 1726 len = isp->isp_maxcmds * sizeof (struct isp_pcmd); 1727 isp->isp_osinfo.pcmd_pool = 1728 (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1729 if (isp->isp_osinfo.pcmd_pool == NULL) { 1730 isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds"); 1731 ISP_LOCK(isp); 1732 return (1); 1733 } 1734 1735 /* 1736 * XXX: We don't really support 64 bit target mode for parallel scsi yet 1737 */ 1738 #ifdef ISP_TARGET_MODE 1739 if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) { 1740 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1741 ISP_LOCK(isp); 1742 isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet"); 1743 return (1); 1744 } 1745 #endif 1746 1747 if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, 1748 slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, 1749 slim, 0, &isp->isp_osinfo.dmat)) { 1750 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1751 ISP_LOCK(isp); 1752 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1753 return (1); 1754 } 1755 1756 1757 len = sizeof (XS_T **) * isp->isp_maxcmds; 1758 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1759 if (isp->isp_xflist == NULL) { 1760 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1761 ISP_LOCK(isp); 1762 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1763 return (1); 1764 } 1765 #ifdef ISP_TARGET_MODE 1766 len = sizeof (void **) * isp->isp_maxcmds; 1767 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1768 if (isp->isp_tgtlist == NULL) { 1769 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1770 free(isp->isp_xflist, M_DEVBUF); 1771 ISP_LOCK(isp); 1772 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1773 return (1); 1774 } 1775 #endif 1776 1777 /* 1778 * Allocate and map the request, result queues, plus FC scratch area. 1779 */ 1780 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1781 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1782 if (IS_FC(isp)) { 1783 len += ISP2100_SCRLEN; 1784 } 1785 1786 ns = (len / PAGE_SIZE) + 1; 1787 /* 1788 * Create a tag for the control spaces- force it to within 32 bits. 1789 */ 1790 if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, 1791 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1792 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) { 1793 isp_prt(isp, ISP_LOGERR, 1794 "cannot create a dma tag for control spaces"); 1795 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1796 free(isp->isp_xflist, M_DEVBUF); 1797 #ifdef ISP_TARGET_MODE 1798 free(isp->isp_tgtlist, M_DEVBUF); 1799 #endif 1800 ISP_LOCK(isp); 1801 return (1); 1802 } 1803 1804 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1805 &isp->isp_cdmap) != 0) { 1806 isp_prt(isp, ISP_LOGERR, 1807 "cannot allocate %d bytes of CCB memory", len); 1808 bus_dma_tag_destroy(isp->isp_cdmat); 1809 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1810 free(isp->isp_xflist, M_DEVBUF); 1811 #ifdef ISP_TARGET_MODE 1812 free(isp->isp_tgtlist, M_DEVBUF); 1813 #endif 1814 ISP_LOCK(isp); 1815 return (1); 1816 } 1817 1818 for (i = 0; i < isp->isp_maxcmds; i++) { 1819 struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i]; 1820 error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap); 1821 if (error) { 1822 isp_prt(isp, ISP_LOGERR, 1823 "error %d creating per-cmd DMA maps", error); 1824 while (--i >= 0) { 1825 bus_dmamap_destroy(isp->isp_osinfo.dmat, 1826 isp->isp_osinfo.pcmd_pool[i].dmap); 1827 } 1828 goto bad; 1829 } 1830 isp_callout_init(&pcmd->wdog); 1831 if (i == isp->isp_maxcmds-1) { 1832 pcmd->next = NULL; 1833 } else { 1834 pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1]; 1835 } 1836 } 1837 isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0]; 1838 1839 im.isp = isp; 1840 im.error = 0; 1841 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1842 if (im.error) { 1843 isp_prt(isp, ISP_LOGERR, 1844 "error %d loading dma map for control areas", im.error); 1845 goto bad; 1846 } 1847 1848 isp->isp_rquest = base; 1849 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1850 isp->isp_result = base; 1851 if (IS_FC(isp)) { 1852 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1853 FCPARAM(isp)->isp_scratch = base; 1854 } 1855 ISP_LOCK(isp); 1856 return (0); 1857 1858 bad: 1859 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1860 bus_dma_tag_destroy(isp->isp_cdmat); 1861 free(isp->isp_xflist, M_DEVBUF); 1862 #ifdef ISP_TARGET_MODE 1863 free(isp->isp_tgtlist, M_DEVBUF); 1864 #endif 1865 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1866 isp->isp_rquest = NULL; 1867 ISP_LOCK(isp); 1868 return (1); 1869 } 1870 1871 typedef struct { 1872 ispsoftc_t *isp; 1873 void *cmd_token; 1874 void *rq; 1875 uint32_t *nxtip; 1876 uint32_t optr; 1877 int error; 1878 } mush_t; 1879 1880 #define MUSHERR_NOQENTRIES -2 1881 1882 #ifdef ISP_TARGET_MODE 1883 /* 1884 * We need to handle DMA for target mode differently from initiator mode. 1885 * 1886 * DMA mapping and construction and submission of CTIO Request Entries 1887 * and rendevous for completion are very tightly coupled because we start 1888 * out by knowing (per platform) how much data we have to move, but we 1889 * don't know, up front, how many DMA mapping segments will have to be used 1890 * cover that data, so we don't know how many CTIO Request Entries we 1891 * will end up using. Further, for performance reasons we may want to 1892 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1893 * 1894 * The standard vector still goes through isp_pci_dmasetup, but the callback 1895 * for the DMA mapping routines comes here instead with the whole transfer 1896 * mapped and a pointer to a partially filled in already allocated request 1897 * queue entry. We finish the job. 1898 */ 1899 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1900 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1901 1902 #define STATUS_WITH_DATA 1 1903 1904 static void 1905 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1906 { 1907 mush_t *mp; 1908 struct ccb_scsiio *csio; 1909 ispsoftc_t *isp; 1910 ct_entry_t *cto, *qe; 1911 uint8_t scsi_status; 1912 uint32_t curi, nxti, handle; 1913 uint32_t sflags; 1914 int32_t resid; 1915 int nth_ctio, nctios, send_status; 1916 1917 mp = (mush_t *) arg; 1918 if (error) { 1919 mp->error = error; 1920 return; 1921 } 1922 1923 isp = mp->isp; 1924 csio = mp->cmd_token; 1925 cto = mp->rq; 1926 curi = isp->isp_reqidx; 1927 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1928 1929 cto->ct_xfrlen = 0; 1930 cto->ct_seg_count = 0; 1931 cto->ct_header.rqs_entry_count = 1; 1932 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1933 1934 if (nseg == 0) { 1935 cto->ct_header.rqs_seqno = 1; 1936 isp_prt(isp, ISP_LOGTDEBUG1, 1937 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1938 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1939 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1940 cto->ct_scsi_status, cto->ct_resid); 1941 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1942 isp_put_ctio(isp, cto, qe); 1943 return; 1944 } 1945 1946 nctios = nseg / ISP_RQDSEG; 1947 if (nseg % ISP_RQDSEG) { 1948 nctios++; 1949 } 1950 1951 /* 1952 * Save syshandle, and potentially any SCSI status, which we'll 1953 * reinsert on the last CTIO we're going to send. 1954 */ 1955 1956 handle = cto->ct_syshandle; 1957 cto->ct_syshandle = 0; 1958 cto->ct_header.rqs_seqno = 0; 1959 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1960 1961 if (send_status) { 1962 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1963 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1964 /* 1965 * Preserve residual. 1966 */ 1967 resid = cto->ct_resid; 1968 1969 /* 1970 * Save actual SCSI status. 1971 */ 1972 scsi_status = cto->ct_scsi_status; 1973 1974 #ifndef STATUS_WITH_DATA 1975 sflags |= CT_NO_DATA; 1976 /* 1977 * We can't do a status at the same time as a data CTIO, so 1978 * we need to synthesize an extra CTIO at this level. 1979 */ 1980 nctios++; 1981 #endif 1982 } else { 1983 sflags = scsi_status = resid = 0; 1984 } 1985 1986 cto->ct_resid = 0; 1987 cto->ct_scsi_status = 0; 1988 1989 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1990 bus_dmamap_sync(isp->isp_osinfo.dmat, 1991 PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); 1992 } else { 1993 bus_dmamap_sync(isp->isp_osinfo.dmat, 1994 PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); 1995 } 1996 1997 nxti = *mp->nxtip; 1998 1999 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 2000 int seglim; 2001 2002 seglim = nseg; 2003 if (seglim) { 2004 int seg; 2005 2006 if (seglim > ISP_RQDSEG) 2007 seglim = ISP_RQDSEG; 2008 2009 for (seg = 0; seg < seglim; seg++, nseg--) { 2010 /* 2011 * Unlike normal initiator commands, we don't 2012 * do any swizzling here. 2013 */ 2014 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 2015 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 2016 cto->ct_xfrlen += dm_segs->ds_len; 2017 dm_segs++; 2018 } 2019 cto->ct_seg_count = seg; 2020 } else { 2021 /* 2022 * This case should only happen when we're sending an 2023 * extra CTIO with final status. 2024 */ 2025 if (send_status == 0) { 2026 isp_prt(isp, ISP_LOGWARN, 2027 "tdma_mk ran out of segments"); 2028 mp->error = EINVAL; 2029 return; 2030 } 2031 } 2032 2033 /* 2034 * At this point, the fields ct_lun, ct_iid, ct_tagval, 2035 * ct_tagtype, and ct_timeout have been carried over 2036 * unchanged from what our caller had set. 2037 * 2038 * The dataseg fields and the seg_count fields we just got 2039 * through setting. The data direction we've preserved all 2040 * along and only clear it if we're now sending status. 2041 */ 2042 2043 if (nth_ctio == nctios - 1) { 2044 /* 2045 * We're the last in a sequence of CTIOs, so mark 2046 * this CTIO and save the handle to the CCB such that 2047 * when this CTIO completes we can free dma resources 2048 * and do whatever else we need to do to finish the 2049 * rest of the command. We *don't* give this to the 2050 * firmware to work on- the caller will do that. 2051 */ 2052 2053 cto->ct_syshandle = handle; 2054 cto->ct_header.rqs_seqno = 1; 2055 2056 if (send_status) { 2057 cto->ct_scsi_status = scsi_status; 2058 cto->ct_flags |= sflags; 2059 cto->ct_resid = resid; 2060 } 2061 if (send_status) { 2062 isp_prt(isp, ISP_LOGTDEBUG1, 2063 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 2064 "scsi status %x resid %d", 2065 cto->ct_fwhandle, csio->ccb_h.target_lun, 2066 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 2067 cto->ct_scsi_status, cto->ct_resid); 2068 } else { 2069 isp_prt(isp, ISP_LOGTDEBUG1, 2070 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 2071 cto->ct_fwhandle, csio->ccb_h.target_lun, 2072 cto->ct_iid, cto->ct_tag_val, 2073 cto->ct_flags); 2074 } 2075 isp_put_ctio(isp, cto, qe); 2076 ISP_TDQE(isp, "last tdma_mk", curi, cto); 2077 if (nctios > 1) { 2078 MEMORYBARRIER(isp, SYNC_REQUEST, 2079 curi, QENTRY_LEN); 2080 } 2081 } else { 2082 ct_entry_t *oqe = qe; 2083 2084 /* 2085 * Make sure syshandle fields are clean 2086 */ 2087 cto->ct_syshandle = 0; 2088 cto->ct_header.rqs_seqno = 0; 2089 2090 isp_prt(isp, ISP_LOGTDEBUG1, 2091 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 2092 cto->ct_fwhandle, csio->ccb_h.target_lun, 2093 cto->ct_iid, cto->ct_flags); 2094 2095 /* 2096 * Get a new CTIO 2097 */ 2098 qe = (ct_entry_t *) 2099 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2100 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 2101 if (nxti == mp->optr) { 2102 isp_prt(isp, ISP_LOGTDEBUG0, 2103 "Queue Overflow in tdma_mk"); 2104 mp->error = MUSHERR_NOQENTRIES; 2105 return; 2106 } 2107 2108 /* 2109 * Now that we're done with the old CTIO, 2110 * flush it out to the request queue. 2111 */ 2112 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 2113 isp_put_ctio(isp, cto, oqe); 2114 if (nth_ctio != 0) { 2115 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 2116 QENTRY_LEN); 2117 } 2118 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 2119 2120 /* 2121 * Reset some fields in the CTIO so we can reuse 2122 * for the next one we'll flush to the request 2123 * queue. 2124 */ 2125 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 2126 cto->ct_header.rqs_entry_count = 1; 2127 cto->ct_header.rqs_flags = 0; 2128 cto->ct_status = 0; 2129 cto->ct_scsi_status = 0; 2130 cto->ct_xfrlen = 0; 2131 cto->ct_resid = 0; 2132 cto->ct_seg_count = 0; 2133 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 2134 } 2135 } 2136 *mp->nxtip = nxti; 2137 } 2138 2139 /* 2140 * We don't have to do multiple CTIOs here. Instead, we can just do 2141 * continuation segments as needed. This greatly simplifies the code 2142 * improves performance. 2143 */ 2144 2145 static void 2146 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2147 { 2148 mush_t *mp; 2149 struct ccb_scsiio *csio; 2150 ispsoftc_t *isp; 2151 ct2_entry_t *cto, *qe; 2152 uint32_t curi, nxti; 2153 ispds_t *ds; 2154 ispds64_t *ds64; 2155 int segcnt, seglim; 2156 2157 mp = (mush_t *) arg; 2158 if (error) { 2159 mp->error = error; 2160 return; 2161 } 2162 2163 isp = mp->isp; 2164 csio = mp->cmd_token; 2165 cto = mp->rq; 2166 2167 curi = isp->isp_reqidx; 2168 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 2169 2170 if (nseg == 0) { 2171 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 2172 isp_prt(isp, ISP_LOGWARN, 2173 "dma2_tgt_fc, a status CTIO2 without MODE1 " 2174 "set (0x%x)", cto->ct_flags); 2175 mp->error = EINVAL; 2176 return; 2177 } 2178 /* 2179 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 2180 * flags to NO DATA and clear relative offset flags. 2181 * We preserve the ct_resid and the response area. 2182 */ 2183 cto->ct_header.rqs_seqno = 1; 2184 cto->ct_seg_count = 0; 2185 cto->ct_reloff = 0; 2186 isp_prt(isp, ISP_LOGTDEBUG1, 2187 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 2188 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 2189 cto->ct_iid, cto->ct_flags, cto->ct_status, 2190 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 2191 if (FCPARAM(isp)->isp_2klogin) { 2192 isp_put_ctio2e(isp, 2193 (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2194 } else { 2195 isp_put_ctio2(isp, cto, qe); 2196 } 2197 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 2198 return; 2199 } 2200 2201 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 2202 isp_prt(isp, ISP_LOGERR, 2203 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 2204 "(0x%x)", cto->ct_flags); 2205 mp->error = EINVAL; 2206 return; 2207 } 2208 2209 2210 nxti = *mp->nxtip; 2211 2212 /* 2213 * Check to see if we need to DAC addressing or not. 2214 * 2215 * Any address that's over the 4GB boundary causes this 2216 * to happen. 2217 */ 2218 segcnt = nseg; 2219 if (sizeof (bus_addr_t) > 4) { 2220 for (segcnt = 0; segcnt < nseg; segcnt++) { 2221 uint64_t addr = dm_segs[segcnt].ds_addr; 2222 if (addr >= 0x100000000LL) { 2223 break; 2224 } 2225 } 2226 } 2227 if (segcnt != nseg) { 2228 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3; 2229 seglim = ISP_RQDSEG_T3; 2230 ds64 = &cto->rsp.m0.u.ct_dataseg64[0]; 2231 ds = NULL; 2232 } else { 2233 seglim = ISP_RQDSEG_T2; 2234 ds64 = NULL; 2235 ds = &cto->rsp.m0.u.ct_dataseg[0]; 2236 } 2237 cto->ct_seg_count = 0; 2238 2239 /* 2240 * Set up the CTIO2 data segments. 2241 */ 2242 for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg; 2243 cto->ct_seg_count++, segcnt++) { 2244 if (ds64) { 2245 ds64->ds_basehi = 2246 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2247 ds64->ds_base = dm_segs[segcnt].ds_addr; 2248 ds64->ds_count = dm_segs[segcnt].ds_len; 2249 ds64++; 2250 } else { 2251 ds->ds_base = dm_segs[segcnt].ds_addr; 2252 ds->ds_count = dm_segs[segcnt].ds_len; 2253 ds++; 2254 } 2255 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2256 #if __FreeBSD_version < 500000 2257 isp_prt(isp, ISP_LOGTDEBUG1, 2258 "isp_send_ctio2: ent0[%d]0x%llx:%llu", 2259 cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr, 2260 (uint64_t)dm_segs[segcnt].ds_len); 2261 #else 2262 isp_prt(isp, ISP_LOGTDEBUG1, 2263 "isp_send_ctio2: ent0[%d]0x%jx:%ju", 2264 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr, 2265 (uintmax_t)dm_segs[segcnt].ds_len); 2266 #endif 2267 } 2268 2269 while (segcnt < nseg) { 2270 uint32_t curip; 2271 int seg; 2272 ispcontreq_t local, *crq = &local, *qep; 2273 2274 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2275 curip = nxti; 2276 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 2277 if (nxti == mp->optr) { 2278 isp_prt(isp, ISP_LOGTDEBUG0, 2279 "tdma_mkfc: request queue overflow"); 2280 mp->error = MUSHERR_NOQENTRIES; 2281 return; 2282 } 2283 cto->ct_header.rqs_entry_count++; 2284 MEMZERO((void *)crq, sizeof (*crq)); 2285 crq->req_header.rqs_entry_count = 1; 2286 if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { 2287 seglim = ISP_CDSEG64; 2288 ds = NULL; 2289 ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0]; 2290 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2291 } else { 2292 seglim = ISP_CDSEG; 2293 ds = &crq->req_dataseg[0]; 2294 ds64 = NULL; 2295 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2296 } 2297 for (seg = 0; segcnt < nseg && seg < seglim; 2298 segcnt++, seg++) { 2299 if (ds64) { 2300 ds64->ds_basehi = 2301 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2302 ds64->ds_base = dm_segs[segcnt].ds_addr; 2303 ds64->ds_count = dm_segs[segcnt].ds_len; 2304 ds64++; 2305 } else { 2306 ds->ds_base = dm_segs[segcnt].ds_addr; 2307 ds->ds_count = dm_segs[segcnt].ds_len; 2308 ds++; 2309 } 2310 #if __FreeBSD_version < 500000 2311 isp_prt(isp, ISP_LOGTDEBUG1, 2312 "isp_send_ctio2: ent%d[%d]%llx:%llu", 2313 cto->ct_header.rqs_entry_count-1, seg, 2314 (uint64_t)dm_segs[segcnt].ds_addr, 2315 (uint64_t)dm_segs[segcnt].ds_len); 2316 #else 2317 isp_prt(isp, ISP_LOGTDEBUG1, 2318 "isp_send_ctio2: ent%d[%d]%jx:%ju", 2319 cto->ct_header.rqs_entry_count-1, seg, 2320 (uintmax_t)dm_segs[segcnt].ds_addr, 2321 (uintmax_t)dm_segs[segcnt].ds_len); 2322 #endif 2323 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2324 cto->ct_seg_count++; 2325 } 2326 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 2327 isp_put_cont_req(isp, crq, qep); 2328 ISP_TDQE(isp, "cont entry", curi, qep); 2329 } 2330 2331 /* 2332 * No do final twiddling for the CTIO itself. 2333 */ 2334 cto->ct_header.rqs_seqno = 1; 2335 isp_prt(isp, ISP_LOGTDEBUG1, 2336 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 2337 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 2338 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 2339 cto->ct_resid); 2340 if (FCPARAM(isp)->isp_2klogin) { 2341 isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2342 } else { 2343 isp_put_ctio2(isp, cto, qe); 2344 } 2345 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 2346 *mp->nxtip = nxti; 2347 } 2348 #endif 2349 2350 static void dma_2400(void *, bus_dma_segment_t *, int, int); 2351 static void dma2_a64(void *, bus_dma_segment_t *, int, int); 2352 static void dma2(void *, bus_dma_segment_t *, int, int); 2353 2354 static void 2355 dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2356 { 2357 mush_t *mp; 2358 ispsoftc_t *isp; 2359 struct ccb_scsiio *csio; 2360 bus_dma_segment_t *eseg; 2361 ispreqt7_t *rq; 2362 int seglim, datalen; 2363 uint32_t nxti; 2364 2365 mp = (mush_t *) arg; 2366 if (error) { 2367 mp->error = error; 2368 return; 2369 } 2370 2371 if (nseg < 1) { 2372 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2373 mp->error = EFAULT; 2374 return; 2375 } 2376 2377 csio = mp->cmd_token; 2378 isp = mp->isp; 2379 rq = mp->rq; 2380 nxti = *mp->nxtip; 2381 2382 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2383 bus_dmamap_sync(isp->isp_osinfo.dmat, 2384 PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); 2385 } else { 2386 bus_dmamap_sync(isp->isp_osinfo.dmat, 2387 PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); 2388 } 2389 datalen = XS_XFRLEN(csio); 2390 2391 /* 2392 * We're passed an initial partially filled in entry that 2393 * has most fields filled in except for data transfer 2394 * related values. 2395 * 2396 * Our job is to fill in the initial request queue entry and 2397 * then to start allocating and filling in continuation entries 2398 * until we've covered the entire transfer. 2399 */ 2400 2401 rq->req_header.rqs_entry_type = RQSTYPE_T7RQS; 2402 rq->req_dl = datalen; 2403 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2404 rq->req_alen_datadir = 0x2; 2405 } else { 2406 rq->req_alen_datadir = 0x1; 2407 } 2408 2409 eseg = dm_segs + nseg; 2410 2411 rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr); 2412 rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr); 2413 rq->req_dataseg.ds_count = dm_segs->ds_len; 2414 2415 datalen -= dm_segs->ds_len; 2416 2417 dm_segs++; 2418 rq->req_seg_count++; 2419 2420 while (datalen > 0 && dm_segs != eseg) { 2421 uint32_t onxti; 2422 ispcontreq64_t local, *crq = &local, *cqe; 2423 2424 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2425 onxti = nxti; 2426 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2427 if (nxti == mp->optr) { 2428 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2429 mp->error = MUSHERR_NOQENTRIES; 2430 return; 2431 } 2432 rq->req_header.rqs_entry_count++; 2433 MEMZERO((void *)crq, sizeof (*crq)); 2434 crq->req_header.rqs_entry_count = 1; 2435 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2436 2437 seglim = 0; 2438 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2439 crq->req_dataseg[seglim].ds_base = 2440 DMA_LO32(dm_segs->ds_addr); 2441 crq->req_dataseg[seglim].ds_basehi = 2442 DMA_HI32(dm_segs->ds_addr); 2443 crq->req_dataseg[seglim].ds_count = 2444 dm_segs->ds_len; 2445 rq->req_seg_count++; 2446 dm_segs++; 2447 seglim++; 2448 datalen -= dm_segs->ds_len; 2449 } 2450 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2451 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2452 } 2453 isp_put_cont64_req(isp, crq, cqe); 2454 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2455 } 2456 *mp->nxtip = nxti; 2457 } 2458 2459 static void 2460 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2461 { 2462 mush_t *mp; 2463 ispsoftc_t *isp; 2464 struct ccb_scsiio *csio; 2465 bus_dma_segment_t *eseg; 2466 ispreq64_t *rq; 2467 int seglim, datalen; 2468 uint32_t nxti; 2469 2470 mp = (mush_t *) arg; 2471 if (error) { 2472 mp->error = error; 2473 return; 2474 } 2475 2476 if (nseg < 1) { 2477 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2478 mp->error = EFAULT; 2479 return; 2480 } 2481 csio = mp->cmd_token; 2482 isp = mp->isp; 2483 rq = mp->rq; 2484 nxti = *mp->nxtip; 2485 2486 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2487 bus_dmamap_sync(isp->isp_osinfo.dmat, 2488 PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); 2489 } else { 2490 bus_dmamap_sync(isp->isp_osinfo.dmat, 2491 PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); 2492 } 2493 datalen = XS_XFRLEN(csio); 2494 2495 /* 2496 * We're passed an initial partially filled in entry that 2497 * has most fields filled in except for data transfer 2498 * related values. 2499 * 2500 * Our job is to fill in the initial request queue entry and 2501 * then to start allocating and filling in continuation entries 2502 * until we've covered the entire transfer. 2503 */ 2504 2505 if (IS_FC(isp)) { 2506 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 2507 seglim = ISP_RQDSEG_T3; 2508 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 2509 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2510 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2511 } else { 2512 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2513 } 2514 } else { 2515 rq->req_header.rqs_entry_type = RQSTYPE_A64; 2516 if (csio->cdb_len > 12) { 2517 seglim = 0; 2518 } else { 2519 seglim = ISP_RQDSEG_A64; 2520 } 2521 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2522 rq->req_flags |= REQFLAG_DATA_IN; 2523 } else { 2524 rq->req_flags |= REQFLAG_DATA_OUT; 2525 } 2526 } 2527 2528 eseg = dm_segs + nseg; 2529 2530 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2531 if (IS_FC(isp)) { 2532 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 2533 rq3->req_dataseg[rq3->req_seg_count].ds_base = 2534 DMA_LO32(dm_segs->ds_addr); 2535 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 2536 DMA_HI32(dm_segs->ds_addr); 2537 rq3->req_dataseg[rq3->req_seg_count].ds_count = 2538 dm_segs->ds_len; 2539 } else { 2540 rq->req_dataseg[rq->req_seg_count].ds_base = 2541 DMA_LO32(dm_segs->ds_addr); 2542 rq->req_dataseg[rq->req_seg_count].ds_basehi = 2543 DMA_HI32(dm_segs->ds_addr); 2544 rq->req_dataseg[rq->req_seg_count].ds_count = 2545 dm_segs->ds_len; 2546 } 2547 datalen -= dm_segs->ds_len; 2548 rq->req_seg_count++; 2549 dm_segs++; 2550 } 2551 2552 while (datalen > 0 && dm_segs != eseg) { 2553 uint32_t onxti; 2554 ispcontreq64_t local, *crq = &local, *cqe; 2555 2556 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2557 onxti = nxti; 2558 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2559 if (nxti == mp->optr) { 2560 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2561 mp->error = MUSHERR_NOQENTRIES; 2562 return; 2563 } 2564 rq->req_header.rqs_entry_count++; 2565 MEMZERO((void *)crq, sizeof (*crq)); 2566 crq->req_header.rqs_entry_count = 1; 2567 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2568 2569 seglim = 0; 2570 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2571 crq->req_dataseg[seglim].ds_base = 2572 DMA_LO32(dm_segs->ds_addr); 2573 crq->req_dataseg[seglim].ds_basehi = 2574 DMA_HI32(dm_segs->ds_addr); 2575 crq->req_dataseg[seglim].ds_count = 2576 dm_segs->ds_len; 2577 rq->req_seg_count++; 2578 dm_segs++; 2579 seglim++; 2580 datalen -= dm_segs->ds_len; 2581 } 2582 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2583 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2584 } 2585 isp_put_cont64_req(isp, crq, cqe); 2586 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2587 } 2588 *mp->nxtip = nxti; 2589 } 2590 2591 static void 2592 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2593 { 2594 mush_t *mp; 2595 ispsoftc_t *isp; 2596 struct ccb_scsiio *csio; 2597 bus_dma_segment_t *eseg; 2598 ispreq_t *rq; 2599 int seglim, datalen; 2600 uint32_t nxti; 2601 2602 mp = (mush_t *) arg; 2603 if (error) { 2604 mp->error = error; 2605 return; 2606 } 2607 2608 if (nseg < 1) { 2609 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2610 mp->error = EFAULT; 2611 return; 2612 } 2613 csio = mp->cmd_token; 2614 isp = mp->isp; 2615 rq = mp->rq; 2616 nxti = *mp->nxtip; 2617 2618 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2619 bus_dmamap_sync(isp->isp_osinfo.dmat, 2620 PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); 2621 } else { 2622 bus_dmamap_sync(isp->isp_osinfo.dmat, 2623 PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); 2624 } 2625 2626 datalen = XS_XFRLEN(csio); 2627 2628 /* 2629 * We're passed an initial partially filled in entry that 2630 * has most fields filled in except for data transfer 2631 * related values. 2632 * 2633 * Our job is to fill in the initial request queue entry and 2634 * then to start allocating and filling in continuation entries 2635 * until we've covered the entire transfer. 2636 */ 2637 2638 if (IS_FC(isp)) { 2639 seglim = ISP_RQDSEG_T2; 2640 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 2641 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2642 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2643 } else { 2644 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2645 } 2646 } else { 2647 if (csio->cdb_len > 12) { 2648 seglim = 0; 2649 } else { 2650 seglim = ISP_RQDSEG; 2651 } 2652 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2653 rq->req_flags |= REQFLAG_DATA_IN; 2654 } else { 2655 rq->req_flags |= REQFLAG_DATA_OUT; 2656 } 2657 } 2658 2659 eseg = dm_segs + nseg; 2660 2661 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2662 if (IS_FC(isp)) { 2663 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 2664 rq2->req_dataseg[rq2->req_seg_count].ds_base = 2665 DMA_LO32(dm_segs->ds_addr); 2666 rq2->req_dataseg[rq2->req_seg_count].ds_count = 2667 dm_segs->ds_len; 2668 } else { 2669 rq->req_dataseg[rq->req_seg_count].ds_base = 2670 DMA_LO32(dm_segs->ds_addr); 2671 rq->req_dataseg[rq->req_seg_count].ds_count = 2672 dm_segs->ds_len; 2673 } 2674 datalen -= dm_segs->ds_len; 2675 rq->req_seg_count++; 2676 dm_segs++; 2677 } 2678 2679 while (datalen > 0 && dm_segs != eseg) { 2680 uint32_t onxti; 2681 ispcontreq_t local, *crq = &local, *cqe; 2682 2683 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2684 onxti = nxti; 2685 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2686 if (nxti == mp->optr) { 2687 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2688 mp->error = MUSHERR_NOQENTRIES; 2689 return; 2690 } 2691 rq->req_header.rqs_entry_count++; 2692 MEMZERO((void *)crq, sizeof (*crq)); 2693 crq->req_header.rqs_entry_count = 1; 2694 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2695 2696 seglim = 0; 2697 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 2698 crq->req_dataseg[seglim].ds_base = 2699 DMA_LO32(dm_segs->ds_addr); 2700 crq->req_dataseg[seglim].ds_count = 2701 dm_segs->ds_len; 2702 rq->req_seg_count++; 2703 dm_segs++; 2704 seglim++; 2705 datalen -= dm_segs->ds_len; 2706 } 2707 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2708 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2709 } 2710 isp_put_cont_req(isp, crq, cqe); 2711 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2712 } 2713 *mp->nxtip = nxti; 2714 } 2715 2716 /* 2717 */ 2718 static int 2719 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq, 2720 uint32_t *nxtip, uint32_t optr) 2721 { 2722 ispreq_t *qep; 2723 mush_t mush, *mp; 2724 void (*eptr)(void *, bus_dma_segment_t *, int, int); 2725 2726 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 2727 #ifdef ISP_TARGET_MODE 2728 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 2729 if (IS_FC(isp)) { 2730 eptr = tdma_mkfc; 2731 } else { 2732 eptr = tdma_mk; 2733 } 2734 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2735 (csio->dxfer_len == 0)) { 2736 mp = &mush; 2737 mp->isp = isp; 2738 mp->cmd_token = csio; 2739 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 2740 mp->nxtip = nxtip; 2741 mp->optr = optr; 2742 mp->error = 0; 2743 (*eptr)(mp, NULL, 0, 0); 2744 goto mbxsync; 2745 } 2746 } else 2747 #endif 2748 if (IS_24XX(isp)) { 2749 eptr = dma_2400; 2750 } else if (sizeof (bus_addr_t) > 4) { 2751 eptr = dma2_a64; 2752 } else { 2753 eptr = dma2; 2754 } 2755 2756 2757 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2758 (csio->dxfer_len == 0)) { 2759 rq->req_seg_count = 1; 2760 goto mbxsync; 2761 } 2762 2763 /* 2764 * Do a virtual grapevine step to collect info for 2765 * the callback dma allocation that we have to use... 2766 */ 2767 mp = &mush; 2768 mp->isp = isp; 2769 mp->cmd_token = csio; 2770 mp->rq = rq; 2771 mp->nxtip = nxtip; 2772 mp->optr = optr; 2773 mp->error = 0; 2774 2775 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 2776 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 2777 int error; 2778 #if __FreeBSD_version < 500000 2779 int s = splsoftvm(); 2780 #endif 2781 error = bus_dmamap_load(isp->isp_osinfo.dmat, 2782 PISP_PCMD(csio)->dmap, csio->data_ptr, 2783 csio->dxfer_len, eptr, mp, 0); 2784 #if __FreeBSD_version < 500000 2785 splx(s); 2786 #endif 2787 if (error == EINPROGRESS) { 2788 bus_dmamap_unload(isp->isp_osinfo.dmat, 2789 PISP_PCMD(csio)->dmap); 2790 mp->error = EINVAL; 2791 isp_prt(isp, ISP_LOGERR, 2792 "deferred dma allocation not supported"); 2793 } else if (error && mp->error == 0) { 2794 #ifdef DIAGNOSTIC 2795 isp_prt(isp, ISP_LOGERR, 2796 "error %d in dma mapping code", error); 2797 #endif 2798 mp->error = error; 2799 } 2800 } else { 2801 /* Pointer to physical buffer */ 2802 struct bus_dma_segment seg; 2803 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 2804 seg.ds_len = csio->dxfer_len; 2805 (*eptr)(mp, &seg, 1, 0); 2806 } 2807 } else { 2808 struct bus_dma_segment *segs; 2809 2810 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2811 isp_prt(isp, ISP_LOGERR, 2812 "Physical segment pointers unsupported"); 2813 mp->error = EINVAL; 2814 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2815 isp_prt(isp, ISP_LOGERR, 2816 "Virtual segment addresses unsupported"); 2817 mp->error = EINVAL; 2818 } else { 2819 /* Just use the segments provided */ 2820 segs = (struct bus_dma_segment *) csio->data_ptr; 2821 (*eptr)(mp, segs, csio->sglist_cnt, 0); 2822 } 2823 } 2824 if (mp->error) { 2825 int retval = CMD_COMPLETE; 2826 if (mp->error == MUSHERR_NOQENTRIES) { 2827 retval = CMD_EAGAIN; 2828 } else if (mp->error == EFBIG) { 2829 XS_SETERR(csio, CAM_REQ_TOO_BIG); 2830 } else if (mp->error == EINVAL) { 2831 XS_SETERR(csio, CAM_REQ_INVALID); 2832 } else { 2833 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 2834 } 2835 return (retval); 2836 } 2837 mbxsync: 2838 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2839 isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq); 2840 } 2841 switch (rq->req_header.rqs_entry_type) { 2842 case RQSTYPE_REQUEST: 2843 isp_put_request(isp, rq, qep); 2844 break; 2845 case RQSTYPE_CMDONLY: 2846 isp_put_extended_request(isp, (ispextreq_t *)rq, 2847 (ispextreq_t *)qep); 2848 break; 2849 case RQSTYPE_T2RQS: 2850 if (FCPARAM(isp)->isp_2klogin) { 2851 isp_put_request_t2e(isp, 2852 (ispreqt2e_t *) rq, (ispreqt2e_t *) qep); 2853 } else { 2854 isp_put_request_t2(isp, 2855 (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2856 } 2857 break; 2858 case RQSTYPE_T3RQS: 2859 if (FCPARAM(isp)->isp_2klogin) { 2860 isp_put_request_t3e(isp, 2861 (ispreqt3e_t *) rq, (ispreqt3e_t *) qep); 2862 break; 2863 } 2864 /* FALLTHROUGH */ 2865 case RQSTYPE_A64: 2866 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2867 break; 2868 case RQSTYPE_T7RQS: 2869 isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep); 2870 break; 2871 } 2872 return (CMD_QUEUED); 2873 } 2874 2875 static void 2876 isp_pci_reset0(ispsoftc_t *isp) 2877 { 2878 ISP_DISABLE_INTS(isp); 2879 } 2880 2881 static void 2882 isp_pci_reset1(ispsoftc_t *isp) 2883 { 2884 if (!IS_24XX(isp)) { 2885 /* Make sure the BIOS is disabled */ 2886 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2887 } 2888 /* and enable interrupts */ 2889 ISP_ENABLE_INTS(isp); 2890 } 2891 2892 static void 2893 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) 2894 { 2895 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2896 if (msg) 2897 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2898 else 2899 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2900 if (IS_SCSI(isp)) 2901 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2902 else 2903 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2904 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2905 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2906 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2907 2908 2909 if (IS_SCSI(isp)) { 2910 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2911 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2912 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2913 ISP_READ(isp, CDMA_FIFO_STS)); 2914 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2915 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2916 ISP_READ(isp, DDMA_FIFO_STS)); 2917 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2918 ISP_READ(isp, SXP_INTERRUPT), 2919 ISP_READ(isp, SXP_GROSS_ERR), 2920 ISP_READ(isp, SXP_PINS_CTRL)); 2921 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2922 } 2923 printf(" mbox regs: %x %x %x %x %x\n", 2924 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2925 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2926 ISP_READ(isp, OUTMAILBOX4)); 2927 printf(" PCI Status Command/Status=%x\n", 2928 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2929 } 2930