/*-
 *
 * Copyright (c) 1997-2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#if __FreeBSD_version >= 700000
#include <sys/linker.h>
#include <sys/firmware.h>
#endif
#include <sys/bus.h>
#if __FreeBSD_version < 500000
#include <pci/pcireg.h>
#include <pci/pcivar.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#else
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

#if __FreeBSD_version < 500000
#define	BUS_PROBE_DEFAULT	0
#endif

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static int
isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int
isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int
isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int
isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t);
static void
isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t);


static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
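
/*
 * Field order in the vectors above and below follows the isp(4) core's
 * struct ispmdvec (a sketch, not normative): ISR reader, register
 * reader/writer, mailbox/queue DMA setup, per-command DMA setup and
 * teardown, reset hooks, register dump, firmware image pointer
 * (filled in at attach time), and BIU CONF1 flags applied at reset.
 * Trailing members left out of an initializer (as in mdvec_2100) are
 * zero-filled by C.
 */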

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	NULL
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef	PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE		0x0400
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6322
#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif

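/*
 * The composite IDs below pack the product ID into the upper 16 bits
 * and the QLogic vendor ID into the lower 16 bits - the same layout
 * that pci_get_devid() returns, and that isp_pci_probe() reassembles
 * by hand from pci_get_device()/pci_get_vendor().
 */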
#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


struct isp_pcisoftc {
	ispsoftc_t			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	DEVMETHOD(device_detach,	isp_pci_detach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
#if __FreeBSD_version >= 700000
MODULE_DEPEND(isp, ispfw, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
#else
extern ispfwfunc *isp_get_firmware_p;
#endif

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

#if __FreeBSD_version < 500000
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
	uint64_t wwn;
	int bitmap, unit;

	callout_handle_init(&isp->isp_osinfo.ldt);
	callout_handle_init(&isp->isp_osinfo.gdt);

	unit = device_get_unit(dev);
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			isp->isp_osinfo.disabled = 1;
			return;
		}
	}

	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	if (getenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (getenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	bitmap = 0;
	(void) getenv_int("isp_debug", &bitmap);
	if (bitmap) {
		isp->isp_dblev = bitmap;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}

	bitmap = 0;
	(void) getenv_int("isp_fabric_hysteresis", &bitmap);
	if (bitmap >= 0 && bitmap < 256) {
		isp->isp_osinfo.hysteresis = bitmap;
	} else {
		isp->isp_osinfo.hysteresis = isp_fabric_hysteresis;
	}

	bitmap = 0;
	(void) getenv_int("isp_loop_down_limit", &bitmap);
	if (bitmap >= 0 && bitmap < 0xffff) {
		isp->isp_osinfo.loop_down_limit = bitmap;
	} else {
		isp->isp_osinfo.loop_down_limit = isp_loop_down_limit;
	}

	bitmap = 0;
	(void) getenv_int("isp_gone_device_time", &bitmap);
	if (bitmap >= 0 && bitmap < 0xffff) {
		isp->isp_osinfo.gone_device_time = bitmap;
	} else {
		isp->isp_osinfo.gone_device_time = isp_gone_device_time;
	}


#ifdef ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    malloc(amt, M_DEVBUF, M_WAITOK);
				memset(FCPARAM(isp)->isp_dump_data, 0, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif
	bitmap = 0;
	if (getenv_int("role", &bitmap)) {
		isp->isp_role = bitmap;
	} else {
		isp->isp_role = ISP_DEFAULT_ROLES;
	}
}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int bitmap;
	int unit = device_get_unit(dev);

	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			*m1 = PCIM_CMD_MEMEN;
			*m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			*m1 = PCIM_CMD_PORTEN;
			*m2 = PCIM_CMD_MEMEN;
		}
	}
}
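
/*
 * Note: the getenv_int() tunables above are per-unit bitmaps, e.g.
 * setting isp_no_fwload=0x4 in the loader environment applies
 * ISP_CFG_NORELOAD to unit 2 only (the (1 << unit) test).
 */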
#else
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
	int tval;
	const char *sptr;

	callout_handle_init(&isp->isp_osinfo.ldt);
	callout_handle_init(&isp->isp_osinfo.gdt);

	/*
	 * Figure out if we're supposed to skip this one.
	 */

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "disabled at user request\n");
		isp->isp_osinfo.disabled = 1;
		return;
	}

	tval = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &tval) == 0 && tval != -1) {
		tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
		isp->isp_role = tval;
		device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
	} else {
#ifdef ISP_TARGET_MODE
		isp->isp_role = ISP_ROLE_TARGET;
#else
		isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
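
	/*
	 * For example, in /boot/device.hints:
	 *
	 *	hint.isp.0.portwwn="w50000000aaaa0001"
	 */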
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		if (IS_FC(isp)) {
			isp->isp_osinfo.default_id = 109;
		} else {
			isp->isp_osinfo.default_id = 7;
		}
	}

	/*
	 * Set up logging levels.
	 */
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}

	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "hysteresis", &tval);
	if (tval >= 0 && tval < 256) {
		isp->isp_osinfo.hysteresis = tval;
	} else {
		isp->isp_osinfo.hysteresis = isp_fabric_hysteresis;
	}

	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "loop_down_limit", &tval);
	if (tval >= 0 && tval < 0xffff) {
		isp->isp_osinfo.loop_down_limit = tval;
	} else {
		isp->isp_osinfo.loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "gone_device_time", &tval);
	if (tval >= 0 && tval < 0xffff) {
		isp->isp_osinfo.gone_device_time = tval;
	} else {
		isp->isp_osinfo.gone_device_time = isp_gone_device_time;
	}
}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int tval;
	/*
	 * Which should we try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
	 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}
#endif

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int rtp, rgd, iqd, m1, m2;
	uint32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp = NULL;
	struct ispmdvec *mdvp;
#if __FreeBSD_version >= 500000
	int locksetup = 0;
#endif

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));
	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;

	/*
	 * Set and Get Generic Options
	 */
	isp_get_options(dev, isp);

	/*
	 * Check to see if options have us disabled
	 */
	if (isp->isp_osinfo.disabled) {
		/*
		 * But return zero to preserve unit numbering
		 */
		return (0);
	}

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2322;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) {
		mdvp = &mdvec_2400;
		basetype = ISP_HA_FC_2400;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2400_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_dev = dev;

#if __FreeBSD_version >= 700000
	/*
	 * Try and find firmware for this device.
	 */
	{
		char fwname[32];
		unsigned int did = pci_get_device(dev);

		/*
		 * Map a few pci ids to fw names
		 */
		switch (did) {
		case PCI_PRODUCT_QLOGIC_ISP1020:
			did = 0x1040;
			break;
		case PCI_PRODUCT_QLOGIC_ISP1240:
			did = 0x1080;
			break;
		case PCI_PRODUCT_QLOGIC_ISP10160:
		case PCI_PRODUCT_QLOGIC_ISP12160:
			did = 0x12160;
			break;
		case PCI_PRODUCT_QLOGIC_ISP6312:
		case PCI_PRODUCT_QLOGIC_ISP2312:
			did = 0x2300;
			break;
		case PCI_PRODUCT_QLOGIC_ISP6322:
			did = 0x2322;
			break;
		case PCI_PRODUCT_QLOGIC_ISP2422:
			did = 0x2400;
			break;
		default:
			break;
		}

		isp->isp_osinfo.fw = NULL;
		if (isp->isp_role & ISP_ROLE_TARGET) {
			snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
			isp->isp_osinfo.fw = firmware_get(fwname);
		}
		if (isp->isp_osinfo.fw == NULL) {
			snprintf(fwname, sizeof (fwname), "isp_%04x", did);
			isp->isp_osinfo.fw = firmware_get(fwname);
		}
		if (isp->isp_osinfo.fw != NULL) {
			union {
				const void *fred;
				uint16_t *bob;
			} u;
			u.fred = isp->isp_osinfo.fw->data;
			isp->isp_mdvec->dv_ispfw = u.bob;
		}
	}
#else
	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}
#endif
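
	/*
	 * The device-id remapping above reflects ispfw(4) image naming:
	 * firmware images are registered as isp_<chip>, with an _it
	 * suffix for target-mode capable builds (e.g. "isp_2300_it"
	 * also serves the 2312 and 6312).
	 */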

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;

	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}

	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

	if (IS_24XX(isp)) {
		int reg;

		cmd &= ~PCIM_CMD_INTX_DISABLE;
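
		/*
		 * In both cases below the goal is a 2048 byte maximum
		 * read: 0x8 in bits 3:2 of the PCI-X command register
		 * selects a 2048 byte maximum memory read byte count,
		 * and 0x4000 in bits 14:12 of the PCI Express device
		 * control register selects a 2048 byte maximum read
		 * request size.
		 */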

		/*
		 * Is this a PCI-X card? If so, set max read byte count.
		 */
		if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
			uint16_t pxcmd;
			reg += 2;

			pxcmd = pci_read_config(dev, reg, 2);
			pxcmd &= ~0xc;
			pxcmd |= 0x8;
			pci_write_config(dev, reg, 2, pxcmd);
		}

		/*
		 * Is this a PCI Express card? If so, set max read byte count.
		 */
		if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
			uint16_t pectl;

			reg += 0x8;
			pectl = pci_read_config(dev, reg, 2);
			pectl &= ~0x7000;
			pectl |= 0x4000;
			pci_write_config(dev, reg, 2, pectl);
		}
	}

	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

#if __FreeBSD_version >= 500000
	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;
#endif

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);

	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

#if __FreeBSD_version >= 500000
	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
#endif

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}


	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param) {
#ifdef ISP_FW_CRASH_DUMP
			if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
				free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
			}
#endif
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		}
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		return (ENXIO);
	}
	isp = (ispsoftc_t *) pcs;
	ISP_DISABLE_INTS(isp);
	return (0);
}

static void
isp_pci_intr(void *arg)
{
	ispsoftc_t *isp = arg;
	uint32_t isr;
	uint16_t sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
#define	BXR4(pcs, off)		\
	bus_space_read_4(pcs->pci_st, pcs->pci_sh, off)
#define	BXW4(pcs, off, v)	\
	bus_space_write_4(pcs->pci_st, pcs->pci_sh, off, v)


static __inline int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}
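
/*
 * The 23XX and 24XX parts report interrupts through a single
 * RISC-to-host status register rather than the BIU_ISR/BIU_SEMA
 * pair, so the routines below decode that status word and
 * synthesize the sema/mbox values the core expects.
 */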
static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t hccr;
	uint32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR,
			    "RISC paused at interrupt (%x->%x)\n", hccr,
			    ISP_READ(isp, HCCR));
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n",
			    r2hisr);
		}
		return (0);
	}
}

static int
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t r2hisr;

	r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
	case ISP2400R2HST_ROM_MBX_OK:
	case ISP2400R2HST_ROM_MBX_FAIL:
	case ISP2400R2HST_MBX_OK:
	case ISP2400R2HST_MBX_FAIL:
	case ISP2400R2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISP2400R2HST_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RQST_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		return (0);
	}
}
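
/*
 * SXP (and, on the 1080 family, DMA) registers are banked behind
 * select bits in BIU_CONF1, which must be flipped around each access;
 * the reads into 'junk' in the writers below are flushing reads to
 * force the writes out before proceeding.
 */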
static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;
	volatile int junk;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	junk = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	}
}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;
	volatile int junk;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	junk = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(pcs, IspVirt2Off(pcs, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}


	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_RQINP:
	case BIU2400_PRI_RSPINP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_REQINP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int block = regoff & _BLK_REG_MASK;
	volatile int junk;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(pcs, IspVirt2Off(pcs, regoff), val);
		junk = BXR2(pcs, IspVirt2Off(pcs, regoff));
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
		    regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_RQINP:
	case BIU2400_PRI_RSPINP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_REQINP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(pcs, IspVirt2Off(pcs, regoff), val);
		junk = BXR4(pcs, IspVirt2Off(pcs, regoff));
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
		break;
	}
}


struct imush {
	ispsoftc_t *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		ispsoftc_t *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define	ISP_NSEGS	((MAXPHYS / PAGE_SIZE) + 1)

#if __FreeBSD_version < 500000
#define	BUS_DMA_ROOTARG	NULL
#define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)
#elif __FreeBSD_version < 700020
#define	BUS_DMA_ROOTARG	NULL
#define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \
	    busdma_lock_mutex, &Giant, z)
#else
#define	BUS_DMA_ROOTARG	bus_get_dma_tag(pcs->pci_dev)
#define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \
	    busdma_lock_mutex, &Giant, z)
#endif
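
/*
 * imc() above carves a single DMA-loaded control area into the
 * request queue, the result queue and, for FC cards, the scratch
 * area, in that order; isp_pci_mbxdma() below sizes and maps the
 * area the same way.
 */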
static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	uint32_t len;
	int i, error, ns;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	if (isp->isp_maxcmds == 0) {
		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
		return (1);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = (bus_size_t) (1ULL << 32);
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1 << 24);
	}

	/*
	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
	 */
#ifdef ISP_TARGET_MODE
	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
		return (1);
	}
#endif

	ISP_UNLOCK(isp);
	if (isp_dma_tag_create(BUS_DMA_ROOTARG, 1, slim, llim,
	    hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
	    &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
#ifdef ISP_TARGET_MODE
	len = sizeof (void **) * isp->isp_maxcmds;
	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		ISP_LOCK(isp);
		return (1);
	}
#endif
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	/*
	 * Create a tag for the control spaces - force it to within 32 bits.
	 */
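	/*
	 * (A note on the 32 bit constraint, as an assumption rather than
	 * documented fact: the queue base addresses are handed to the
	 * firmware as 32 bit quantities on the older chips, so the
	 * control space has to live below 4GB even when the data path
	 * can use DAC.)
	 */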
	if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;
	uint32_t *nxtip;
	uint32_t optr;
	int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
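
/*
 * For example, with ISP_RQDSEG data segments per CTIO, a transfer
 * that maps to 10 segments needs howmany(10, ISP_RQDSEG) CTIOs, plus
 * one more if status has to go in its own CTIO (the !STATUS_WITH_DATA
 * case in tdma_mk() below).
 */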
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	uint8_t scsi_status;
	uint32_t curi, nxti, handle;
	uint32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along, and we only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free DMA resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on; the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure the syshandle fields are clean.
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO.
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * it for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code and
 * improves performance.
 */
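/*
 * Back-of-the-envelope sketch (assuming the segment limits used below):
 * a MODE0 data CTIO2 carries up to ISP_RQDSEG_T2 segments inline (or
 * ISP_RQDSEG_T3 in the 64-bit DAC case), and each continuation entry
 * carries up to ISP_CDSEG (or ISP_CDSEG64) more, so a transfer that
 * maps to nseg segments consumes roughly
 *
 *	1 + howmany(nseg - ISP_RQDSEG_T2, ISP_CDSEG)
 *
 * request queue entries, rather than nseg / ISP_RQDSEG separate CTIOs.
 */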

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	ct2_entry_t *cto, *qe;
	uint32_t curi, nxti;
	ispds_t *ds;
	ispds64_t *ds64;
	int segcnt, seglim;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		if (FCPARAM(isp)->isp_2klogin) {
			isp_put_ctio2e(isp,
			    (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
		} else {
			isp_put_ctio2(isp, cto, qe);
		}
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nxti = *mp->nxtip;

	/*
	 * Check to see whether we need to use DAC addressing or not.
	 *
	 * Any address over the 4GB boundary causes this to happen.
	 */
	segcnt = nseg;
	if (sizeof (bus_addr_t) > 4) {
		for (segcnt = 0; segcnt < nseg; segcnt++) {
			uint64_t addr = dm_segs[segcnt].ds_addr;
			if (addr >= 0x100000000LL) {
				break;
			}
		}
	}
	if (segcnt != nseg) {
		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
		seglim = ISP_RQDSEG_T3;
		ds64 = &cto->rsp.m0.u.ct_dataseg64[0];
		ds = NULL;
	} else {
		seglim = ISP_RQDSEG_T2;
		ds64 = NULL;
		ds = &cto->rsp.m0.u.ct_dataseg[0];
	}
	cto->ct_seg_count = 0;

	/*
	 * Set up the CTIO2 data segments.
	 */
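	/*
	 * In the 64-bit case each segment address is simply split across
	 * the basehi/base pair, e.g. (illustrative only):
	 *
	 *	ds64->ds_basehi = (uint32_t) (addr >> 32);
	 *	ds64->ds_base = (uint32_t) addr;
	 *
	 * which is what the loop below does with dm_segs[segcnt].ds_addr.
	 */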
	for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		if (ds64) {
			ds64->ds_basehi =
			    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
			ds64->ds_base = dm_segs[segcnt].ds_addr;
			ds64->ds_count = dm_segs[segcnt].ds_len;
			ds64++;
		} else {
			ds->ds_base = dm_segs[segcnt].ds_addr;
			ds->ds_count = dm_segs[segcnt].ds_len;
			ds++;
		}
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
#if __FreeBSD_version < 500000
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%llx:%llu",
		    cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
		    (uint64_t)dm_segs[segcnt].ds_len);
#else
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
		    (uintmax_t)dm_segs[segcnt].ds_len);
#endif
	}

	while (segcnt < nseg) {
		uint32_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
			seglim = ISP_CDSEG64;
			ds = NULL;
			ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
		} else {
			seglim = ISP_CDSEG;
			ds = &crq->req_dataseg[0];
			ds64 = NULL;
			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		}
		for (seg = 0; segcnt < nseg && seg < seglim;
		    segcnt++, seg++) {
			if (ds64) {
				ds64->ds_basehi =
				    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
				ds64->ds_base = dm_segs[segcnt].ds_addr;
				ds64->ds_count = dm_segs[segcnt].ds_len;
				ds64++;
			} else {
				ds->ds_base = dm_segs[segcnt].ds_addr;
				ds->ds_count = dm_segs[segcnt].ds_len;
				ds++;
			}
#if __FreeBSD_version < 500000
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%llx:%llu",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (uint64_t)dm_segs[segcnt].ds_addr,
			    (uint64_t)dm_segs[segcnt].ds_len);
#else
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%jx:%ju",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (uintmax_t)dm_segs[segcnt].ds_addr,
			    (uintmax_t)dm_segs[segcnt].ds_len);
#endif
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do the final twiddling for the CTIO itself.
	 */
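	/*
	 * As in tdma_mk above, rqs_seqno = 1 marks this as the final entry
	 * of the sequence; the caller, not this routine, hands the finished
	 * chain to the firmware.
	 */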
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	if (FCPARAM(isp)->isp_2klogin) {
		isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
	} else {
		isp_put_ctio2(isp, cto, qe);
	}
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma_2400(void *, bus_dma_segment_t *, int, int);
static void dma2_a64(void *, bus_dma_segment_t *, int, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreqt7_t *rq;
	int seglim, datalen;
	uint32_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}

	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
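	/*
	 * Rough sketch of the 24XX layout (illustrative, matching the code
	 * below): the type 7 request itself holds exactly one data segment,
	 * and each 64-bit continuation holds up to ISP_CDSEG64 more, so an
	 * nseg-segment transfer occupies about
	 *
	 *	1 + howmany(nseg - 1, ISP_CDSEG64)
	 *
	 * request queue entries.
	 */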
	rq->req_header.rqs_entry_type = RQSTYPE_T7RQS;
	rq->req_dl = datalen;
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		rq->req_alen_datadir = 0x2;
	} else {
		rq->req_alen_datadir = 0x1;
	}

	eseg = dm_segs + nseg;

	rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr);
	rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr);
	rq->req_dataseg.ds_count = dm_segs->ds_len;

	datalen -= dm_segs->ds_len;

	dm_segs++;
	rq->req_seg_count++;

	while (datalen > 0 && dm_segs != eseg) {
		uint32_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			datalen -= dm_segs->ds_len;
			rq->req_seg_count++;
			dm_segs++;
			seglim++;
		}
		if (isp->isp_dblev & ISP_LOGDEBUG1) {
			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static void
dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	uint32_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
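	/*
	 * Note (an observation, not firmware gospel): when the CDB is
	 * longer than 12 bytes, the non-FC path below sets seglim to 0,
	 * so every data segment lands in continuation entries; the
	 * extended command entry leaves no room for inline data segments.
	 */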
	if (IS_FC(isp)) {
		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
		seglim = ISP_RQDSEG_T3;
		((ispreqt3_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		rq->req_header.rqs_entry_type = RQSTYPE_A64;
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG_A64;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
			rq3->req_dataseg[rq3->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		uint32_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			datalen -= dm_segs->ds_len;
			rq->req_seg_count++;
			dm_segs++;
			seglim++;
		}
		if (isp->isp_dblev & ISP_LOGDEBUG1) {
			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	uint32_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		uint32_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			datalen -= dm_segs->ds_len;
			rq->req_seg_count++;
			dm_segs++;
			seglim++;
		}
		if (isp->isp_dblev & ISP_LOGDEBUG1) {
			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

/*
 * We enter with ISP_LOCK held.
 */
static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    uint32_t *nxtip, uint32_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			ISPLOCK_2_CAMLOCK(isp);
			(*eptr)(mp, NULL, 0, 0);
			CAMLOCK_2_ISPLOCK(isp);
			goto mbxsync;
		}
	} else
#endif
	if (IS_24XX(isp)) {
		eptr = dma_2400;
	} else if (sizeof (bus_addr_t) > 4) {
		eptr = dma2_a64;
	} else {
		eptr = dma2;
	}

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	ISPLOCK_2_CAMLOCK(isp);
	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	CAMLOCK_2_ISPLOCK(isp);
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	if (isp->isp_dblev & ISP_LOGDEBUG1) {
		isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq);
	}
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	case RQSTYPE_A64:
	case RQSTYPE_T3RQS:
		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
		break;
	case RQSTYPE_T7RQS:
		isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

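/*
 * Typical use, roughly (the real caller lives elsewhere in the isp_freebsd
 * code; this is only an illustration): with ISP_LOCK held, reserve a queue
 * slot, call isp_pci_dmasetup(), and act on the return value:
 *
 *	switch (isp_pci_dmasetup(isp, csio, rq, &nxti, optr)) {
 *	case CMD_QUEUED:	// advance isp_reqidx to nxti
 *	case CMD_EAGAIN:	// requeue; no room right now
 *	case CMD_COMPLETE:	// fail the CCB; XS error already set
 *	}
 *
 * isp_pci_dmateardown() below is the matching unmap path, run at
 * command completion.
 */
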
static void
isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}

static void
isp_pci_reset1(ispsoftc_t *isp)
{
	if (!IS_24XX(isp)) {
		/* Make sure the BIOS is disabled */
		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	}
	/* and enable interrupts */
	ISP_ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));
}