1 /*- 2 * 3 * Copyright (c) 1997-2006 by Matthew Jacob 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice immediately at the beginning of the file, without modification, 11 * this list of conditions, and the following disclaimer. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 */ 28 /* 29 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 30 * FreeBSD Version. 31 */ 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/module.h> 39 #if __FreeBSD_version >= 700000 40 #include <sys/linker.h> 41 #include <sys/firmware.h> 42 #endif 43 #include <sys/bus.h> 44 #if __FreeBSD_version < 500000 45 #include <pci/pcireg.h> 46 #include <pci/pcivar.h> 47 #include <machine/bus_memio.h> 48 #include <machine/bus_pio.h> 49 #else 50 #include <sys/stdint.h> 51 #include <dev/pci/pcireg.h> 52 #include <dev/pci/pcivar.h> 53 #endif 54 #include <machine/bus.h> 55 #include <machine/resource.h> 56 #include <sys/rman.h> 57 #include <sys/malloc.h> 58 59 #include <dev/isp/isp_freebsd.h> 60 61 #if __FreeBSD_version < 500000 62 #define BUS_PROBE_DEFAULT 0 63 #endif 64 65 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); 66 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); 67 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); 68 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); 69 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); 70 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); 71 static int 72 isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 73 static int 74 isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 75 static int 76 isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 77 static int isp_pci_mbxdma(ispsoftc_t *); 78 static int 79 isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t); 80 static void 81 isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t); 82 83 84 static void isp_pci_reset0(ispsoftc_t *); 85 static void isp_pci_reset1(ispsoftc_t *); 86 static void isp_pci_dumpregs(ispsoftc_t *, const char *); 87 88 static struct ispmdvec mdvec = { 89 isp_pci_rd_isr, 90 isp_pci_rd_reg, 91 isp_pci_wr_reg, 92 isp_pci_mbxdma, 93 isp_pci_dmasetup, 94 isp_pci_dmateardown, 95 isp_pci_reset0, 96 isp_pci_reset1, 97 isp_pci_dumpregs, 98 NULL, 99 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 100 }; 101 102 static struct ispmdvec mdvec_1080 = { 103 isp_pci_rd_isr, 104 isp_pci_rd_reg_1080, 105 isp_pci_wr_reg_1080, 106 isp_pci_mbxdma, 107 isp_pci_dmasetup, 108 isp_pci_dmateardown, 109 isp_pci_reset0, 110 isp_pci_reset1, 111 isp_pci_dumpregs, 112 NULL, 113 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 114 }; 115 116 static struct ispmdvec mdvec_12160 = { 117 isp_pci_rd_isr, 118 isp_pci_rd_reg_1080, 119 isp_pci_wr_reg_1080, 120 isp_pci_mbxdma, 121 isp_pci_dmasetup, 122 isp_pci_dmateardown, 123 isp_pci_reset0, 124 isp_pci_reset1, 125 isp_pci_dumpregs, 126 NULL, 127 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 128 }; 129 130 static struct ispmdvec mdvec_2100 = { 131 isp_pci_rd_isr, 132 isp_pci_rd_reg, 133 isp_pci_wr_reg, 134 isp_pci_mbxdma, 135 isp_pci_dmasetup, 136 isp_pci_dmateardown, 137 isp_pci_reset0, 138 isp_pci_reset1, 139 isp_pci_dumpregs 140 }; 141 142 static struct ispmdvec mdvec_2200 = { 143 isp_pci_rd_isr, 144 isp_pci_rd_reg, 145 isp_pci_wr_reg, 146 isp_pci_mbxdma, 147 isp_pci_dmasetup, 148 isp_pci_dmateardown, 149 isp_pci_reset0, 150 isp_pci_reset1, 151 isp_pci_dumpregs 152 }; 153 154 static struct ispmdvec mdvec_2300 = { 155 isp_pci_rd_isr_2300, 156 isp_pci_rd_reg, 157 isp_pci_wr_reg, 158 isp_pci_mbxdma, 159 isp_pci_dmasetup, 160 isp_pci_dmateardown, 161 isp_pci_reset0, 162 isp_pci_reset1, 163 isp_pci_dumpregs 164 }; 165 166 static struct ispmdvec mdvec_2400 = { 167 isp_pci_rd_isr_2400, 168 isp_pci_rd_reg_2400, 169 isp_pci_wr_reg_2400, 170 isp_pci_mbxdma, 171 isp_pci_dmasetup, 172 isp_pci_dmateardown, 173 isp_pci_reset0, 174 isp_pci_reset1, 175 NULL 176 }; 177 178 #ifndef PCIM_CMD_INVEN 179 #define PCIM_CMD_INVEN 0x10 180 #endif 181 #ifndef PCIM_CMD_BUSMASTEREN 182 #define PCIM_CMD_BUSMASTEREN 0x0004 183 #endif 184 #ifndef PCIM_CMD_PERRESPEN 185 #define PCIM_CMD_PERRESPEN 0x0040 186 #endif 187 #ifndef PCIM_CMD_SEREN 188 #define PCIM_CMD_SEREN 0x0100 189 #endif 190 #ifndef PCIM_CMD_INTX_DISABLE 191 #define PCIM_CMD_INTX_DISABLE 0x0400 192 #endif 193 194 #ifndef PCIR_COMMAND 195 #define PCIR_COMMAND 0x04 196 #endif 197 198 #ifndef PCIR_CACHELNSZ 199 #define PCIR_CACHELNSZ 0x0c 200 #endif 201 202 #ifndef PCIR_LATTIMER 203 #define PCIR_LATTIMER 0x0d 204 #endif 205 206 #ifndef PCIR_ROMADDR 207 #define PCIR_ROMADDR 0x30 208 #endif 209 210 #ifndef PCI_VENDOR_QLOGIC 211 #define PCI_VENDOR_QLOGIC 0x1077 212 #endif 213 214 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 215 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 216 #endif 217 218 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 219 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 220 #endif 221 222 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 223 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 224 #endif 225 226 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 227 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 228 #endif 229 230 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 231 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 232 #endif 233 234 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 235 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 236 #endif 237 238 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 239 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 240 #endif 241 242 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 243 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 244 #endif 245 246 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 247 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 248 #endif 249 250 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 251 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 252 #endif 253 254 #ifndef PCI_PRODUCT_QLOGIC_ISP2322 255 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 256 #endif 257 258 #ifndef PCI_PRODUCT_QLOGIC_ISP2422 259 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 260 #endif 261 262 #ifndef PCI_PRODUCT_QLOGIC_ISP2432 263 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432 264 #endif 265 266 #ifndef PCI_PRODUCT_QLOGIC_ISP6312 267 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 268 #endif 269 270 #ifndef PCI_PRODUCT_QLOGIC_ISP6322 271 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 272 #endif 273 274 275 #define PCI_QLOGIC_ISP1020 \ 276 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 277 278 #define PCI_QLOGIC_ISP1080 \ 279 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 280 281 #define PCI_QLOGIC_ISP10160 \ 282 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 283 284 #define PCI_QLOGIC_ISP12160 \ 285 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 286 287 #define PCI_QLOGIC_ISP1240 \ 288 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 289 290 #define PCI_QLOGIC_ISP1280 \ 291 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 292 293 #define PCI_QLOGIC_ISP2100 \ 294 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 295 296 #define PCI_QLOGIC_ISP2200 \ 297 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 298 299 #define PCI_QLOGIC_ISP2300 \ 300 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 301 302 #define PCI_QLOGIC_ISP2312 \ 303 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 304 305 #define PCI_QLOGIC_ISP2322 \ 306 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 307 308 #define PCI_QLOGIC_ISP2422 \ 309 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 310 311 #define PCI_QLOGIC_ISP2432 \ 312 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC) 313 314 #define PCI_QLOGIC_ISP6312 \ 315 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 316 317 #define PCI_QLOGIC_ISP6322 \ 318 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) 319 320 /* 321 * Odd case for some AMI raid cards... We need to *not* attach to this. 322 */ 323 #define AMI_RAID_SUBVENDOR_ID 0x101e 324 325 #define IO_MAP_REG 0x10 326 #define MEM_MAP_REG 0x14 327 328 #define PCI_DFLT_LTNCY 0x40 329 #define PCI_DFLT_LNSZ 0x10 330 331 static int isp_pci_probe (device_t); 332 static int isp_pci_attach (device_t); 333 static int isp_pci_detach (device_t); 334 335 336 struct isp_pcisoftc { 337 ispsoftc_t pci_isp; 338 device_t pci_dev; 339 struct resource * pci_reg; 340 bus_space_tag_t pci_st; 341 bus_space_handle_t pci_sh; 342 void * ih; 343 int16_t pci_poff[_NREG_BLKS]; 344 bus_dma_tag_t dmat; 345 bus_dmamap_t *dmaps; 346 }; 347 348 349 static device_method_t isp_pci_methods[] = { 350 /* Device interface */ 351 DEVMETHOD(device_probe, isp_pci_probe), 352 DEVMETHOD(device_attach, isp_pci_attach), 353 DEVMETHOD(device_detach, isp_pci_detach), 354 { 0, 0 } 355 }; 356 static void isp_pci_intr(void *); 357 358 static driver_t isp_pci_driver = { 359 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 360 }; 361 static devclass_t isp_devclass; 362 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 363 #if __FreeBSD_version < 700000 364 extern ispfwfunc *isp_get_firmware_p; 365 #endif 366 367 static int 368 isp_pci_probe(device_t dev) 369 { 370 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 371 case PCI_QLOGIC_ISP1020: 372 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 373 break; 374 case PCI_QLOGIC_ISP1080: 375 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 376 break; 377 case PCI_QLOGIC_ISP1240: 378 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 379 break; 380 case PCI_QLOGIC_ISP1280: 381 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 382 break; 383 case PCI_QLOGIC_ISP10160: 384 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 385 break; 386 case PCI_QLOGIC_ISP12160: 387 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 388 return (ENXIO); 389 } 390 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 391 break; 392 case PCI_QLOGIC_ISP2100: 393 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 394 break; 395 case PCI_QLOGIC_ISP2200: 396 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 397 break; 398 case PCI_QLOGIC_ISP2300: 399 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 400 break; 401 case PCI_QLOGIC_ISP2312: 402 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 403 break; 404 case PCI_QLOGIC_ISP2322: 405 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 406 break; 407 case PCI_QLOGIC_ISP2422: 408 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 409 break; 410 case PCI_QLOGIC_ISP2432: 411 device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter"); 412 break; 413 case PCI_QLOGIC_ISP6312: 414 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 415 break; 416 case PCI_QLOGIC_ISP6322: 417 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); 418 break; 419 default: 420 return (ENXIO); 421 } 422 if (isp_announced == 0 && bootverbose) { 423 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 424 "Core Version %d.%d\n", 425 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 426 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 427 isp_announced++; 428 } 429 /* 430 * XXXX: Here is where we might load the f/w module 431 * XXXX: (or increase a reference count to it). 432 */ 433 return (BUS_PROBE_DEFAULT); 434 } 435 436 #if __FreeBSD_version < 500000 437 static void 438 isp_get_options(device_t dev, ispsoftc_t *isp) 439 { 440 uint64_t wwn; 441 int bitmap, unit; 442 443 callout_handle_init(&isp->isp_osinfo.ldt); 444 callout_handle_init(&isp->isp_osinfo.gdt); 445 446 unit = device_get_unit(dev); 447 if (getenv_int("isp_disable", &bitmap)) { 448 if (bitmap & (1 << unit)) { 449 isp->isp_osinfo.disabled = 1; 450 return; 451 } 452 } 453 454 if (getenv_int("isp_no_fwload", &bitmap)) { 455 if (bitmap & (1 << unit)) 456 isp->isp_confopts |= ISP_CFG_NORELOAD; 457 } 458 if (getenv_int("isp_fwload", &bitmap)) { 459 if (bitmap & (1 << unit)) 460 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 461 } 462 if (getenv_int("isp_no_nvram", &bitmap)) { 463 if (bitmap & (1 << unit)) 464 isp->isp_confopts |= ISP_CFG_NONVRAM; 465 } 466 if (getenv_int("isp_nvram", &bitmap)) { 467 if (bitmap & (1 << unit)) 468 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 469 } 470 if (getenv_int("isp_fcduplex", &bitmap)) { 471 if (bitmap & (1 << unit)) 472 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 473 } 474 if (getenv_int("isp_no_fcduplex", &bitmap)) { 475 if (bitmap & (1 << unit)) 476 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 477 } 478 if (getenv_int("isp_nport", &bitmap)) { 479 if (bitmap & (1 << unit)) 480 isp->isp_confopts |= ISP_CFG_NPORT; 481 } 482 483 /* 484 * Because the resource_*_value functions can neither return 485 * 64 bit integer values, nor can they be directly coerced 486 * to interpret the right hand side of the assignment as 487 * you want them to interpret it, we have to force WWN 488 * hint replacement to specify WWN strings with a leading 489 * 'w' (e..g w50000000aaaa0001). Sigh. 490 */ 491 if (getenv_quad("isp_portwwn", &wwn)) { 492 isp->isp_osinfo.default_port_wwn = wwn; 493 isp->isp_confopts |= ISP_CFG_OWNWWPN; 494 } 495 if (isp->isp_osinfo.default_port_wwn == 0) { 496 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 497 } 498 499 if (getenv_quad("isp_nodewwn", &wwn)) { 500 isp->isp_osinfo.default_node_wwn = wwn; 501 isp->isp_confopts |= ISP_CFG_OWNWWNN; 502 } 503 if (isp->isp_osinfo.default_node_wwn == 0) { 504 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 505 } 506 507 bitmap = 0; 508 (void) getenv_int("isp_debug", &bitmap); 509 if (bitmap) { 510 isp->isp_dblev = bitmap; 511 } else { 512 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 513 } 514 if (bootverbose) { 515 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 516 } 517 518 bitmap = 0; 519 (void) getenv_int("isp_fabric_hysteresis", &bitmap); 520 if (bitmap >= 0 && bitmap < 256) { 521 isp->isp_osinfo.hysteresis = bitmap; 522 } else { 523 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 524 } 525 526 bitmap = 0; 527 (void) getenv_int("isp_loop_down_limit", &bitmap); 528 if (bitmap >= 0 && bitmap < 0xffff) { 529 isp->isp_osinfo.loop_down_limit = bitmap; 530 } else { 531 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 532 } 533 534 bitmap = 0; 535 (void) getenv_int("isp_gone_device_time", &bitmap); 536 if (bitmap >= 0 && bitmap < 0xffff) { 537 isp->isp_osinfo.gone_device_time = bitmap; 538 } else { 539 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 540 } 541 542 543 #ifdef ISP_FW_CRASH_DUMP 544 bitmap = 0; 545 if (getenv_int("isp_fw_dump_enable", &bitmap)) { 546 if (bitmap & (1 << unit) { 547 size_t amt = 0; 548 if (IS_2200(isp)) { 549 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 550 } else if (IS_23XX(isp)) { 551 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 552 } 553 if (amt) { 554 FCPARAM(isp)->isp_dump_data = 555 malloc(amt, M_DEVBUF, M_WAITOK); 556 memset(FCPARAM(isp)->isp_dump_data, 0, amt); 557 } else { 558 device_printf(dev, 559 "f/w crash dumps not supported for card\n"); 560 } 561 } 562 } 563 #endif 564 bitmap = 0; 565 if (getenv_int("role", &bitmap)) { 566 isp->isp_role = bitmap; 567 } else { 568 isp->isp_role = ISP_DEFAULT_ROLES; 569 } 570 } 571 572 static void 573 isp_get_pci_options(device_t dev, int *m1, int *m2) 574 { 575 int bitmap; 576 int unit = device_get_unit(dev); 577 578 *m1 = PCIM_CMD_MEMEN; 579 *m2 = PCIM_CMD_PORTEN; 580 if (getenv_int("isp_mem_map", &bitmap)) { 581 if (bitmap & (1 << unit)) { 582 *m1 = PCIM_CMD_MEMEN; 583 *m2 = PCIM_CMD_PORTEN; 584 } 585 } 586 bitmap = 0; 587 if (getenv_int("isp_io_map", &bitmap)) { 588 if (bitmap & (1 << unit)) { 589 *m1 = PCIM_CMD_PORTEN; 590 *m2 = PCIM_CMD_MEMEN; 591 } 592 } 593 } 594 #else 595 static void 596 isp_get_options(device_t dev, ispsoftc_t *isp) 597 { 598 int tval; 599 const char *sptr; 600 601 callout_handle_init(&isp->isp_osinfo.ldt); 602 callout_handle_init(&isp->isp_osinfo.gdt); 603 604 /* 605 * Figure out if we're supposed to skip this one. 606 */ 607 608 tval = 0; 609 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 610 "disable", &tval) == 0 && tval) { 611 device_printf(dev, "disabled at user request\n"); 612 isp->isp_osinfo.disabled = 1; 613 return; 614 } 615 616 tval = -1; 617 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 618 "role", &tval) == 0 && tval != -1) { 619 tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 620 isp->isp_role = tval; 621 device_printf(dev, "setting role to 0x%x\n", isp->isp_role); 622 } else { 623 #ifdef ISP_TARGET_MODE 624 isp->isp_role = ISP_ROLE_TARGET; 625 #else 626 isp->isp_role = ISP_DEFAULT_ROLES; 627 #endif 628 } 629 630 tval = 0; 631 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 632 "fwload_disable", &tval) == 0 && tval != 0) { 633 isp->isp_confopts |= ISP_CFG_NORELOAD; 634 } 635 tval = 0; 636 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 637 "ignore_nvram", &tval) == 0 && tval != 0) { 638 isp->isp_confopts |= ISP_CFG_NONVRAM; 639 } 640 tval = 0; 641 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 642 "fullduplex", &tval) == 0 && tval != 0) { 643 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 644 } 645 #ifdef ISP_FW_CRASH_DUMP 646 tval = 0; 647 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 648 "fw_dump_enable", &tval) == 0 && tval != 0) { 649 size_t amt = 0; 650 if (IS_2200(isp)) { 651 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 652 } else if (IS_23XX(isp)) { 653 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 654 } 655 if (amt) { 656 FCPARAM(isp)->isp_dump_data = 657 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 658 } else { 659 device_printf(dev, 660 "f/w crash dumps not supported for this model\n"); 661 } 662 } 663 #endif 664 665 sptr = 0; 666 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 667 "topology", (const char **) &sptr) == 0 && sptr != 0) { 668 if (strcmp(sptr, "lport") == 0) { 669 isp->isp_confopts |= ISP_CFG_LPORT; 670 } else if (strcmp(sptr, "nport") == 0) { 671 isp->isp_confopts |= ISP_CFG_NPORT; 672 } else if (strcmp(sptr, "lport-only") == 0) { 673 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 674 } else if (strcmp(sptr, "nport-only") == 0) { 675 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 676 } 677 } 678 679 /* 680 * Because the resource_*_value functions can neither return 681 * 64 bit integer values, nor can they be directly coerced 682 * to interpret the right hand side of the assignment as 683 * you want them to interpret it, we have to force WWN 684 * hint replacement to specify WWN strings with a leading 685 * 'w' (e..g w50000000aaaa0001). Sigh. 686 */ 687 sptr = 0; 688 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 689 "portwwn", (const char **) &sptr); 690 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 691 char *eptr = 0; 692 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 693 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 694 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 695 isp->isp_osinfo.default_port_wwn = 0; 696 } else { 697 isp->isp_confopts |= ISP_CFG_OWNWWPN; 698 } 699 } 700 if (isp->isp_osinfo.default_port_wwn == 0) { 701 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 702 } 703 704 sptr = 0; 705 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 706 "nodewwn", (const char **) &sptr); 707 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 708 char *eptr = 0; 709 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 710 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 711 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 712 isp->isp_osinfo.default_node_wwn = 0; 713 } else { 714 isp->isp_confopts |= ISP_CFG_OWNWWNN; 715 } 716 } 717 if (isp->isp_osinfo.default_node_wwn == 0) { 718 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 719 } 720 721 isp->isp_osinfo.default_id = -1; 722 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 723 "iid", &tval) == 0) { 724 isp->isp_osinfo.default_id = tval; 725 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 726 } 727 if (isp->isp_osinfo.default_id == -1) { 728 if (IS_FC(isp)) { 729 isp->isp_osinfo.default_id = 109; 730 } else { 731 isp->isp_osinfo.default_id = 7; 732 } 733 } 734 735 /* 736 * Set up logging levels. 737 */ 738 tval = 0; 739 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 740 "debug", &tval); 741 if (tval) { 742 isp->isp_dblev = tval; 743 } else { 744 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 745 } 746 if (bootverbose) { 747 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 748 } 749 750 tval = 0; 751 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 752 "hysteresis", &tval); 753 if (tval >= 0 && tval < 256) { 754 isp->isp_osinfo.hysteresis = tval; 755 } else { 756 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 757 } 758 759 tval = -1; 760 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 761 "loop_down_limit", &tval); 762 if (tval >= 0 && tval < 0xffff) { 763 isp->isp_osinfo.loop_down_limit = tval; 764 } else { 765 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 766 } 767 768 tval = -1; 769 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 770 "gone_device_time", &tval); 771 if (tval >= 0 && tval < 0xffff) { 772 isp->isp_osinfo.gone_device_time = tval; 773 } else { 774 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 775 } 776 } 777 778 static void 779 isp_get_pci_options(device_t dev, int *m1, int *m2) 780 { 781 int tval; 782 /* 783 * Which we should try first - memory mapping or i/o mapping? 784 * 785 * We used to try memory first followed by i/o on alpha, otherwise 786 * the reverse, but we should just try memory first all the time now. 787 */ 788 *m1 = PCIM_CMD_MEMEN; 789 *m2 = PCIM_CMD_PORTEN; 790 791 tval = 0; 792 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 793 "prefer_iomap", &tval) == 0 && tval != 0) { 794 *m1 = PCIM_CMD_PORTEN; 795 *m2 = PCIM_CMD_MEMEN; 796 } 797 tval = 0; 798 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 799 "prefer_memmap", &tval) == 0 && tval != 0) { 800 *m1 = PCIM_CMD_MEMEN; 801 *m2 = PCIM_CMD_PORTEN; 802 } 803 } 804 #endif 805 806 static int 807 isp_pci_attach(device_t dev) 808 { 809 struct resource *regs, *irq; 810 int rtp, rgd, iqd, m1, m2; 811 uint32_t data, cmd, linesz, psize, basetype; 812 struct isp_pcisoftc *pcs; 813 ispsoftc_t *isp = NULL; 814 struct ispmdvec *mdvp; 815 #if __FreeBSD_version >= 500000 816 int locksetup = 0; 817 #endif 818 819 pcs = device_get_softc(dev); 820 if (pcs == NULL) { 821 device_printf(dev, "cannot get softc\n"); 822 return (ENOMEM); 823 } 824 memset(pcs, 0, sizeof (*pcs)); 825 pcs->pci_dev = dev; 826 isp = &pcs->pci_isp; 827 828 /* 829 * Set and Get Generic Options 830 */ 831 isp_get_options(dev, isp); 832 833 /* 834 * Check to see if options have us disabled 835 */ 836 if (isp->isp_osinfo.disabled) { 837 /* 838 * But return zero to preserve unit numbering 839 */ 840 return (0); 841 } 842 843 /* 844 * Get PCI options- which in this case are just mapping preferences. 845 */ 846 isp_get_pci_options(dev, &m1, &m2); 847 848 linesz = PCI_DFLT_LNSZ; 849 irq = regs = NULL; 850 rgd = rtp = iqd = 0; 851 852 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 853 if (cmd & m1) { 854 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 855 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 856 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 857 } 858 if (regs == NULL && (cmd & m2)) { 859 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 860 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 861 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 862 } 863 if (regs == NULL) { 864 device_printf(dev, "unable to map any ports\n"); 865 goto bad; 866 } 867 if (bootverbose) { 868 device_printf(dev, "using %s space register mapping\n", 869 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 870 } 871 pcs->pci_dev = dev; 872 pcs->pci_reg = regs; 873 pcs->pci_st = rman_get_bustag(regs); 874 pcs->pci_sh = rman_get_bushandle(regs); 875 876 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 877 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 878 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 879 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 880 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 881 mdvp = &mdvec; 882 basetype = ISP_HA_SCSI_UNKNOWN; 883 psize = sizeof (sdparam); 884 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 885 mdvp = &mdvec; 886 basetype = ISP_HA_SCSI_UNKNOWN; 887 psize = sizeof (sdparam); 888 } 889 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 890 mdvp = &mdvec_1080; 891 basetype = ISP_HA_SCSI_1080; 892 psize = sizeof (sdparam); 893 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 894 ISP1080_DMA_REGS_OFF; 895 } 896 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 897 mdvp = &mdvec_1080; 898 basetype = ISP_HA_SCSI_1240; 899 psize = 2 * sizeof (sdparam); 900 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 901 ISP1080_DMA_REGS_OFF; 902 } 903 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 904 mdvp = &mdvec_1080; 905 basetype = ISP_HA_SCSI_1280; 906 psize = 2 * sizeof (sdparam); 907 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 908 ISP1080_DMA_REGS_OFF; 909 } 910 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 911 mdvp = &mdvec_12160; 912 basetype = ISP_HA_SCSI_10160; 913 psize = sizeof (sdparam); 914 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 915 ISP1080_DMA_REGS_OFF; 916 } 917 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 918 mdvp = &mdvec_12160; 919 basetype = ISP_HA_SCSI_12160; 920 psize = 2 * sizeof (sdparam); 921 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 922 ISP1080_DMA_REGS_OFF; 923 } 924 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 925 mdvp = &mdvec_2100; 926 basetype = ISP_HA_FC_2100; 927 psize = sizeof (fcparam); 928 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 929 PCI_MBOX_REGS2100_OFF; 930 if (pci_get_revid(dev) < 3) { 931 /* 932 * XXX: Need to get the actual revision 933 * XXX: number of the 2100 FB. At any rate, 934 * XXX: lower cache line size for early revision 935 * XXX; boards. 936 */ 937 linesz = 1; 938 } 939 } 940 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 941 mdvp = &mdvec_2200; 942 basetype = ISP_HA_FC_2200; 943 psize = sizeof (fcparam); 944 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 945 PCI_MBOX_REGS2100_OFF; 946 } 947 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 948 mdvp = &mdvec_2300; 949 basetype = ISP_HA_FC_2300; 950 psize = sizeof (fcparam); 951 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 952 PCI_MBOX_REGS2300_OFF; 953 } 954 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 955 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 956 mdvp = &mdvec_2300; 957 basetype = ISP_HA_FC_2312; 958 psize = sizeof (fcparam); 959 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 960 PCI_MBOX_REGS2300_OFF; 961 } 962 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 || 963 pci_get_devid(dev) == PCI_QLOGIC_ISP6322) { 964 mdvp = &mdvec_2300; 965 basetype = ISP_HA_FC_2322; 966 psize = sizeof (fcparam); 967 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 968 PCI_MBOX_REGS2300_OFF; 969 } 970 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422 || 971 pci_get_devid(dev) == PCI_QLOGIC_ISP2432) { 972 mdvp = &mdvec_2400; 973 basetype = ISP_HA_FC_2400; 974 psize = sizeof (fcparam); 975 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 976 PCI_MBOX_REGS2400_OFF; 977 } 978 isp = &pcs->pci_isp; 979 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 980 if (isp->isp_param == NULL) { 981 device_printf(dev, "cannot allocate parameter data\n"); 982 goto bad; 983 } 984 isp->isp_mdvec = mdvp; 985 isp->isp_type = basetype; 986 isp->isp_revision = pci_get_revid(dev); 987 isp->isp_dev = dev; 988 989 #if __FreeBSD_version >= 700000 990 /* 991 * Try and find firmware for this device. 992 */ 993 { 994 char fwname[32]; 995 unsigned int did = pci_get_device(dev); 996 997 /* 998 * Map a few pci ids to fw names 999 */ 1000 switch (did) { 1001 case PCI_PRODUCT_QLOGIC_ISP1020: 1002 did = 0x1040; 1003 break; 1004 case PCI_PRODUCT_QLOGIC_ISP1240: 1005 did = 0x1080; 1006 break; 1007 case PCI_PRODUCT_QLOGIC_ISP10160: 1008 case PCI_PRODUCT_QLOGIC_ISP12160: 1009 did = 0x12160; 1010 break; 1011 case PCI_PRODUCT_QLOGIC_ISP6312: 1012 case PCI_PRODUCT_QLOGIC_ISP2312: 1013 did = 0x2300; 1014 break; 1015 case PCI_PRODUCT_QLOGIC_ISP6322: 1016 did = 0x2322; 1017 break; 1018 case PCI_PRODUCT_QLOGIC_ISP2422: 1019 case PCI_PRODUCT_QLOGIC_ISP2432: 1020 did = 0x2400; 1021 break; 1022 default: 1023 break; 1024 } 1025 1026 isp->isp_osinfo.fw = NULL; 1027 if (isp->isp_role & ISP_ROLE_TARGET) { 1028 snprintf(fwname, sizeof (fwname), "isp_%04x_it", did); 1029 isp->isp_osinfo.fw = firmware_get(fwname); 1030 } 1031 if (isp->isp_osinfo.fw == NULL) { 1032 snprintf(fwname, sizeof (fwname), "isp_%04x", did); 1033 isp->isp_osinfo.fw = firmware_get(fwname); 1034 } 1035 if (isp->isp_osinfo.fw != NULL) { 1036 union { 1037 const void *fred; 1038 uint16_t *bob; 1039 } u; 1040 u.fred = isp->isp_osinfo.fw->data; 1041 isp->isp_mdvec->dv_ispfw = u.bob; 1042 } 1043 } 1044 #else 1045 if (isp_get_firmware_p) { 1046 int device = (int) pci_get_device(dev); 1047 #ifdef ISP_TARGET_MODE 1048 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 1049 #else 1050 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 1051 #endif 1052 } 1053 #endif 1054 1055 /* 1056 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 1057 * are set. 1058 */ 1059 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 1060 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 1061 1062 if (IS_2300(isp)) { /* per QLogic errata */ 1063 cmd &= ~PCIM_CMD_INVEN; 1064 } 1065 1066 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 1067 cmd &= ~PCIM_CMD_INTX_DISABLE; 1068 } 1069 1070 #ifdef WE_KNEW_WHAT_WE_WERE_DOING 1071 if (IS_24XX(isp)) { 1072 int reg; 1073 1074 cmd &= ~PCIM_CMD_INTX_DISABLE; 1075 1076 /* 1077 * Is this a PCI-X card? If so, set max read byte count. 1078 */ 1079 if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { 1080 uint16_t pxcmd; 1081 reg += 2; 1082 1083 pxcmd = pci_read_config(dev, reg, 2); 1084 pxcmd &= ~0xc; 1085 pxcmd |= 0x8; 1086 pci_write_config(dev, reg, 2, pxcmd); 1087 } 1088 1089 /* 1090 * Is this a PCI Express card? If so, set max read byte count. 1091 */ 1092 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 1093 uint16_t pectl; 1094 1095 reg += 0x8; 1096 pectl = pci_read_config(dev, reg, 2); 1097 pectl &= ~0x7000; 1098 pectl |= 0x4000; 1099 pci_write_config(dev, reg, 2, pectl); 1100 } 1101 } 1102 #else 1103 if (IS_24XX(isp)) { 1104 cmd &= ~PCIM_CMD_INTX_DISABLE; 1105 } 1106 #endif 1107 1108 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 1109 1110 /* 1111 * Make sure the Cache Line Size register is set sensibly. 1112 */ 1113 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 1114 if (data != linesz) { 1115 data = PCI_DFLT_LNSZ; 1116 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 1117 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 1118 } 1119 1120 /* 1121 * Make sure the Latency Timer is sane. 1122 */ 1123 data = pci_read_config(dev, PCIR_LATTIMER, 1); 1124 if (data < PCI_DFLT_LTNCY) { 1125 data = PCI_DFLT_LTNCY; 1126 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 1127 pci_write_config(dev, PCIR_LATTIMER, data, 1); 1128 } 1129 1130 /* 1131 * Make sure we've disabled the ROM. 1132 */ 1133 data = pci_read_config(dev, PCIR_ROMADDR, 4); 1134 data &= ~1; 1135 pci_write_config(dev, PCIR_ROMADDR, data, 4); 1136 1137 iqd = 0; 1138 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 1139 RF_ACTIVE | RF_SHAREABLE); 1140 if (irq == NULL) { 1141 device_printf(dev, "could not allocate interrupt\n"); 1142 goto bad; 1143 } 1144 1145 #if __FreeBSD_version >= 500000 1146 /* Make sure the lock is set up. */ 1147 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 1148 locksetup++; 1149 #endif 1150 1151 if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) { 1152 device_printf(dev, "could not setup interrupt\n"); 1153 goto bad; 1154 } 1155 1156 /* 1157 * Last minute checks... 1158 */ 1159 if (IS_23XX(isp) || IS_24XX(isp)) { 1160 isp->isp_port = pci_get_function(dev); 1161 } 1162 1163 if (IS_23XX(isp)) { 1164 /* 1165 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 1166 */ 1167 isp->isp_touched = 1; 1168 } 1169 1170 /* 1171 * Make sure we're in reset state. 1172 */ 1173 ISP_LOCK(isp); 1174 isp_reset(isp); 1175 if (isp->isp_state != ISP_RESETSTATE) { 1176 ISP_UNLOCK(isp); 1177 goto bad; 1178 } 1179 isp_init(isp); 1180 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 1181 isp_uninit(isp); 1182 ISP_UNLOCK(isp); 1183 goto bad; 1184 } 1185 isp_attach(isp); 1186 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 1187 isp_uninit(isp); 1188 ISP_UNLOCK(isp); 1189 goto bad; 1190 } 1191 /* 1192 * XXXX: Here is where we might unload the f/w module 1193 * XXXX: (or decrease the reference count to it). 1194 */ 1195 ISP_UNLOCK(isp); 1196 1197 return (0); 1198 1199 bad: 1200 1201 if (pcs && pcs->ih) { 1202 (void) bus_teardown_intr(dev, irq, pcs->ih); 1203 } 1204 1205 #if __FreeBSD_version >= 500000 1206 if (locksetup && isp) { 1207 mtx_destroy(&isp->isp_osinfo.lock); 1208 } 1209 #endif 1210 1211 if (irq) { 1212 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 1213 } 1214 1215 1216 if (regs) { 1217 (void) bus_release_resource(dev, rtp, rgd, regs); 1218 } 1219 1220 if (pcs) { 1221 if (pcs->pci_isp.isp_param) { 1222 #ifdef ISP_FW_CRASH_DUMP 1223 if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) { 1224 free(FCPARAM(isp)->isp_dump_data, M_DEVBUF); 1225 } 1226 #endif 1227 free(pcs->pci_isp.isp_param, M_DEVBUF); 1228 } 1229 } 1230 1231 /* 1232 * XXXX: Here is where we might unload the f/w module 1233 * XXXX: (or decrease the reference count to it). 1234 */ 1235 return (ENXIO); 1236 } 1237 1238 static int 1239 isp_pci_detach(device_t dev) 1240 { 1241 struct isp_pcisoftc *pcs; 1242 ispsoftc_t *isp; 1243 1244 pcs = device_get_softc(dev); 1245 if (pcs == NULL) { 1246 return (ENXIO); 1247 } 1248 isp = (ispsoftc_t *) pcs; 1249 ISP_DISABLE_INTS(isp); 1250 return (0); 1251 } 1252 1253 static void 1254 isp_pci_intr(void *arg) 1255 { 1256 ispsoftc_t *isp = arg; 1257 uint32_t isr; 1258 uint16_t sema, mbox; 1259 1260 ISP_LOCK(isp); 1261 isp->isp_intcnt++; 1262 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 1263 isp->isp_intbogus++; 1264 } else { 1265 isp_intr(isp, isr, sema, mbox); 1266 } 1267 ISP_UNLOCK(isp); 1268 } 1269 1270 1271 #define IspVirt2Off(a, x) \ 1272 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 1273 _BLK_REG_SHFT] + ((x) & 0xfff)) 1274 1275 #define BXR2(pcs, off) \ 1276 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 1277 #define BXW2(pcs, off, v) \ 1278 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 1279 #define BXR4(pcs, off) \ 1280 bus_space_read_4(pcs->pci_st, pcs->pci_sh, off) 1281 #define BXW4(pcs, off, v) \ 1282 bus_space_write_4(pcs->pci_st, pcs->pci_sh, off, v) 1283 1284 1285 static __inline int 1286 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp) 1287 { 1288 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1289 uint32_t val0, val1; 1290 int i = 0; 1291 1292 do { 1293 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 1294 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 1295 } while (val0 != val1 && ++i < 1000); 1296 if (val0 != val1) { 1297 return (1); 1298 } 1299 *rp = val0; 1300 return (0); 1301 } 1302 1303 static int 1304 isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, 1305 uint16_t *semap, uint16_t *mbp) 1306 { 1307 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1308 uint16_t isr, sema; 1309 1310 if (IS_2100(isp)) { 1311 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 1312 return (0); 1313 } 1314 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 1315 return (0); 1316 } 1317 } else { 1318 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 1319 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 1320 } 1321 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 1322 isr &= INT_PENDING_MASK(isp); 1323 sema &= BIU_SEMA_LOCK; 1324 if (isr == 0 && sema == 0) { 1325 return (0); 1326 } 1327 *isrp = isr; 1328 if ((*semap = sema) != 0) { 1329 if (IS_2100(isp)) { 1330 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 1331 return (0); 1332 } 1333 } else { 1334 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 1335 } 1336 } 1337 return (1); 1338 } 1339 1340 static int 1341 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, 1342 uint16_t *semap, uint16_t *mbox0p) 1343 { 1344 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1345 uint32_t hccr; 1346 uint32_t r2hisr; 1347 1348 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 1349 *isrp = 0; 1350 return (0); 1351 } 1352 r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU_R2HSTSLO)); 1353 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1354 if ((r2hisr & BIU_R2HST_INTR) == 0) { 1355 *isrp = 0; 1356 return (0); 1357 } 1358 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 1359 case ISPR2HST_ROM_MBX_OK: 1360 case ISPR2HST_ROM_MBX_FAIL: 1361 case ISPR2HST_MBX_OK: 1362 case ISPR2HST_MBX_FAIL: 1363 case ISPR2HST_ASYNC_EVENT: 1364 *isrp = r2hisr & 0xffff; 1365 *mbox0p = (r2hisr >> 16); 1366 *semap = 1; 1367 return (1); 1368 case ISPR2HST_RIO_16: 1369 *isrp = r2hisr & 0xffff; 1370 *mbox0p = ASYNC_RIO1; 1371 *semap = 1; 1372 return (1); 1373 case ISPR2HST_FPOST: 1374 *isrp = r2hisr & 0xffff; 1375 *mbox0p = ASYNC_CMD_CMPLT; 1376 *semap = 1; 1377 return (1); 1378 case ISPR2HST_FPOST_CTIO: 1379 *isrp = r2hisr & 0xffff; 1380 *mbox0p = ASYNC_CTIO_DONE; 1381 *semap = 1; 1382 return (1); 1383 case ISPR2HST_RSPQ_UPDATE: 1384 *isrp = r2hisr & 0xffff; 1385 *mbox0p = 0; 1386 *semap = 0; 1387 return (1); 1388 default: 1389 hccr = ISP_READ(isp, HCCR); 1390 if (hccr & HCCR_PAUSE) { 1391 ISP_WRITE(isp, HCCR, HCCR_RESET); 1392 isp_prt(isp, ISP_LOGERR, 1393 "RISC paused at interrupt (%x->%x)", hccr, 1394 ISP_READ(isp, HCCR)); 1395 ISP_WRITE(isp, BIU_ICR, 0); 1396 } else { 1397 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", 1398 r2hisr); 1399 } 1400 return (0); 1401 } 1402 } 1403 1404 static int 1405 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, 1406 uint16_t *semap, uint16_t *mbox0p) 1407 { 1408 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1409 uint32_t r2hisr; 1410 1411 r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO)); 1412 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1413 if ((r2hisr & BIU2400_R2HST_INTR) == 0) { 1414 *isrp = 0; 1415 return (0); 1416 } 1417 switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) { 1418 case ISP2400R2HST_ROM_MBX_OK: 1419 case ISP2400R2HST_ROM_MBX_FAIL: 1420 case ISP2400R2HST_MBX_OK: 1421 case ISP2400R2HST_MBX_FAIL: 1422 case ISP2400R2HST_ASYNC_EVENT: 1423 *isrp = r2hisr & 0xffff; 1424 *mbox0p = (r2hisr >> 16); 1425 *semap = 1; 1426 return (1); 1427 case ISP2400R2HST_RSPQ_UPDATE: 1428 case ISP2400R2HST_ATIO_RSPQ_UPDATE: 1429 case ISP2400R2HST_ATIO_RQST_UPDATE: 1430 *isrp = r2hisr & 0xffff; 1431 *mbox0p = 0; 1432 *semap = 0; 1433 return (1); 1434 default: 1435 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 1436 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); 1437 return (0); 1438 } 1439 } 1440 1441 static uint32_t 1442 isp_pci_rd_reg(ispsoftc_t *isp, int regoff) 1443 { 1444 uint32_t rv; 1445 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1446 int oldconf = 0; 1447 1448 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1449 /* 1450 * We will assume that someone has paused the RISC processor. 1451 */ 1452 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1453 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1454 oldconf | BIU_PCI_CONF1_SXP); 1455 } 1456 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1457 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1458 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1459 } 1460 return (rv); 1461 } 1462 1463 static void 1464 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) 1465 { 1466 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1467 int oldconf = 0; 1468 volatile int junk; 1469 1470 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1471 /* 1472 * We will assume that someone has paused the RISC processor. 1473 */ 1474 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1475 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1476 oldconf | BIU_PCI_CONF1_SXP); 1477 if (IS_2100(isp)) { 1478 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1479 } 1480 } 1481 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1482 if (IS_2100(isp)) { 1483 junk = BXR2(pcs, IspVirt2Off(isp, regoff)); 1484 } 1485 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1486 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1487 if (IS_2100(isp)) { 1488 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1489 } 1490 } 1491 } 1492 1493 static uint32_t 1494 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) 1495 { 1496 uint32_t rv, oc = 0; 1497 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1498 1499 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1500 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1501 uint32_t tc; 1502 /* 1503 * We will assume that someone has paused the RISC processor. 1504 */ 1505 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1506 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1507 if (regoff & SXP_BANK1_SELECT) 1508 tc |= BIU_PCI1080_CONF1_SXP1; 1509 else 1510 tc |= BIU_PCI1080_CONF1_SXP0; 1511 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1512 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1513 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1514 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1515 oc | BIU_PCI1080_CONF1_DMA); 1516 } 1517 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1518 if (oc) { 1519 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1520 } 1521 return (rv); 1522 } 1523 1524 static void 1525 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) 1526 { 1527 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1528 int oc = 0; 1529 volatile int junk; 1530 1531 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1532 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1533 uint32_t tc; 1534 /* 1535 * We will assume that someone has paused the RISC processor. 1536 */ 1537 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1538 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1539 if (regoff & SXP_BANK1_SELECT) 1540 tc |= BIU_PCI1080_CONF1_SXP1; 1541 else 1542 tc |= BIU_PCI1080_CONF1_SXP0; 1543 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1544 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1545 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1546 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1547 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1548 oc | BIU_PCI1080_CONF1_DMA); 1549 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1550 } 1551 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1552 junk = BXR2(pcs, IspVirt2Off(isp, regoff)); 1553 if (oc) { 1554 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1555 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1556 } 1557 } 1558 1559 static uint32_t 1560 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) 1561 { 1562 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1563 uint32_t rv; 1564 int block = regoff & _BLK_REG_MASK; 1565 1566 switch (block) { 1567 case BIU_BLOCK: 1568 break; 1569 case MBOX_BLOCK: 1570 return (BXR2(pcs, IspVirt2Off(pcs, regoff))); 1571 case SXP_BLOCK: 1572 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff); 1573 return (0xffffffff); 1574 case RISC_BLOCK: 1575 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff); 1576 return (0xffffffff); 1577 case DMA_BLOCK: 1578 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff); 1579 return (0xffffffff); 1580 default: 1581 isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff); 1582 return (0xffffffff); 1583 } 1584 1585 1586 switch (regoff) { 1587 case BIU2400_FLASH_ADDR: 1588 case BIU2400_FLASH_DATA: 1589 case BIU2400_ICR: 1590 case BIU2400_ISR: 1591 case BIU2400_CSR: 1592 case BIU2400_REQINP: 1593 case BIU2400_REQOUTP: 1594 case BIU2400_RSPINP: 1595 case BIU2400_RSPOUTP: 1596 case BIU2400_PRI_RQINP: 1597 case BIU2400_PRI_RSPINP: 1598 case BIU2400_ATIO_RSPINP: 1599 case BIU2400_ATIO_REQINP: 1600 case BIU2400_HCCR: 1601 case BIU2400_GPIOD: 1602 case BIU2400_GPIOE: 1603 case BIU2400_HSEMA: 1604 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)); 1605 break; 1606 case BIU2400_R2HSTSLO: 1607 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)); 1608 break; 1609 case BIU2400_R2HSTSHI: 1610 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16; 1611 break; 1612 default: 1613 isp_prt(isp, ISP_LOGERR, 1614 "isp_pci_rd_reg_2400: unknown offset %x", regoff); 1615 rv = 0xffffffff; 1616 break; 1617 } 1618 return (rv); 1619 } 1620 1621 static void 1622 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) 1623 { 1624 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1625 int block = regoff & _BLK_REG_MASK; 1626 volatile int junk; 1627 1628 switch (block) { 1629 case BIU_BLOCK: 1630 break; 1631 case MBOX_BLOCK: 1632 BXW2(pcs, IspVirt2Off(pcs, regoff), val); 1633 junk = BXR2(pcs, IspVirt2Off(pcs, regoff)); 1634 return; 1635 case SXP_BLOCK: 1636 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff); 1637 return; 1638 case RISC_BLOCK: 1639 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff); 1640 return; 1641 case DMA_BLOCK: 1642 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff); 1643 return; 1644 default: 1645 isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x", 1646 regoff); 1647 break; 1648 } 1649 1650 switch (regoff) { 1651 case BIU2400_FLASH_ADDR: 1652 case BIU2400_FLASH_DATA: 1653 case BIU2400_ICR: 1654 case BIU2400_ISR: 1655 case BIU2400_CSR: 1656 case BIU2400_REQINP: 1657 case BIU2400_REQOUTP: 1658 case BIU2400_RSPINP: 1659 case BIU2400_RSPOUTP: 1660 case BIU2400_PRI_RQINP: 1661 case BIU2400_PRI_RSPINP: 1662 case BIU2400_ATIO_RSPINP: 1663 case BIU2400_ATIO_REQINP: 1664 case BIU2400_HCCR: 1665 case BIU2400_GPIOD: 1666 case BIU2400_GPIOE: 1667 case BIU2400_HSEMA: 1668 BXW4(pcs, IspVirt2Off(pcs, regoff), val); 1669 junk = BXR4(pcs, IspVirt2Off(pcs, regoff)); 1670 break; 1671 default: 1672 isp_prt(isp, ISP_LOGERR, 1673 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff); 1674 break; 1675 } 1676 } 1677 1678 1679 struct imush { 1680 ispsoftc_t *isp; 1681 int error; 1682 }; 1683 1684 static void imc(void *, bus_dma_segment_t *, int, int); 1685 1686 static void 1687 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1688 { 1689 struct imush *imushp = (struct imush *) arg; 1690 if (error) { 1691 imushp->error = error; 1692 } else { 1693 ispsoftc_t *isp =imushp->isp; 1694 bus_addr_t addr = segs->ds_addr; 1695 1696 isp->isp_rquest_dma = addr; 1697 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1698 isp->isp_result_dma = addr; 1699 if (IS_FC(isp)) { 1700 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1701 FCPARAM(isp)->isp_scdma = addr; 1702 } 1703 } 1704 } 1705 1706 static int 1707 isp_pci_mbxdma(ispsoftc_t *isp) 1708 { 1709 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1710 caddr_t base; 1711 uint32_t len; 1712 int i, error, ns; 1713 bus_size_t slim; /* segment size */ 1714 bus_addr_t llim; /* low limit of unavailable dma */ 1715 bus_addr_t hlim; /* high limit of unavailable dma */ 1716 struct imush im; 1717 1718 /* 1719 * Already been here? If so, leave... 1720 */ 1721 if (isp->isp_rquest) { 1722 return (0); 1723 } 1724 1725 if (isp->isp_maxcmds == 0) { 1726 isp_prt(isp, ISP_LOGERR, "maxcmds not set"); 1727 return (1); 1728 } 1729 1730 hlim = BUS_SPACE_MAXADDR; 1731 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1732 slim = (bus_size_t) (1ULL << 32); 1733 llim = BUS_SPACE_MAXADDR; 1734 } else { 1735 llim = BUS_SPACE_MAXADDR_32BIT; 1736 slim = (1 << 24); 1737 } 1738 1739 /* 1740 * XXX: We don't really support 64 bit target mode for parallel scsi yet 1741 */ 1742 #ifdef ISP_TARGET_MODE 1743 if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) { 1744 isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet"); 1745 return (1); 1746 } 1747 #endif 1748 1749 ISP_UNLOCK(isp); 1750 if (isp_dma_tag_create(BUS_DMA_ROOTARG(pcs->pci_dev), 1, slim, llim, 1751 hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1752 &pcs->dmat)) { 1753 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1754 ISP_LOCK(isp); 1755 return (1); 1756 } 1757 1758 1759 len = sizeof (XS_T **) * isp->isp_maxcmds; 1760 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1761 if (isp->isp_xflist == NULL) { 1762 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1763 ISP_LOCK(isp); 1764 return (1); 1765 } 1766 #ifdef ISP_TARGET_MODE 1767 len = sizeof (void **) * isp->isp_maxcmds; 1768 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1769 if (isp->isp_tgtlist == NULL) { 1770 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1771 ISP_LOCK(isp); 1772 return (1); 1773 } 1774 #endif 1775 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1776 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1777 if (pcs->dmaps == NULL) { 1778 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1779 free(isp->isp_xflist, M_DEVBUF); 1780 #ifdef ISP_TARGET_MODE 1781 free(isp->isp_tgtlist, M_DEVBUF); 1782 #endif 1783 ISP_LOCK(isp); 1784 return (1); 1785 } 1786 1787 /* 1788 * Allocate and map the request, result queues, plus FC scratch area. 1789 */ 1790 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1791 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1792 if (IS_FC(isp)) { 1793 len += ISP2100_SCRLEN; 1794 } 1795 1796 ns = (len / PAGE_SIZE) + 1; 1797 /* 1798 * Create a tag for the control spaces- force it to within 32 bits. 1799 */ 1800 if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim, 1801 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1802 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) { 1803 isp_prt(isp, ISP_LOGERR, 1804 "cannot create a dma tag for control spaces"); 1805 free(pcs->dmaps, M_DEVBUF); 1806 free(isp->isp_xflist, M_DEVBUF); 1807 #ifdef ISP_TARGET_MODE 1808 free(isp->isp_tgtlist, M_DEVBUF); 1809 #endif 1810 ISP_LOCK(isp); 1811 return (1); 1812 } 1813 1814 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1815 &isp->isp_cdmap) != 0) { 1816 isp_prt(isp, ISP_LOGERR, 1817 "cannot allocate %d bytes of CCB memory", len); 1818 bus_dma_tag_destroy(isp->isp_cdmat); 1819 free(isp->isp_xflist, M_DEVBUF); 1820 #ifdef ISP_TARGET_MODE 1821 free(isp->isp_tgtlist, M_DEVBUF); 1822 #endif 1823 free(pcs->dmaps, M_DEVBUF); 1824 ISP_LOCK(isp); 1825 return (1); 1826 } 1827 1828 for (i = 0; i < isp->isp_maxcmds; i++) { 1829 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1830 if (error) { 1831 isp_prt(isp, ISP_LOGERR, 1832 "error %d creating per-cmd DMA maps", error); 1833 while (--i >= 0) { 1834 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1835 } 1836 goto bad; 1837 } 1838 } 1839 1840 im.isp = isp; 1841 im.error = 0; 1842 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1843 if (im.error) { 1844 isp_prt(isp, ISP_LOGERR, 1845 "error %d loading dma map for control areas", im.error); 1846 goto bad; 1847 } 1848 1849 isp->isp_rquest = base; 1850 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1851 isp->isp_result = base; 1852 if (IS_FC(isp)) { 1853 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1854 FCPARAM(isp)->isp_scratch = base; 1855 } 1856 ISP_LOCK(isp); 1857 return (0); 1858 1859 bad: 1860 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1861 bus_dma_tag_destroy(isp->isp_cdmat); 1862 free(isp->isp_xflist, M_DEVBUF); 1863 #ifdef ISP_TARGET_MODE 1864 free(isp->isp_tgtlist, M_DEVBUF); 1865 #endif 1866 free(pcs->dmaps, M_DEVBUF); 1867 ISP_LOCK(isp); 1868 isp->isp_rquest = NULL; 1869 return (1); 1870 } 1871 1872 typedef struct { 1873 ispsoftc_t *isp; 1874 void *cmd_token; 1875 void *rq; 1876 uint32_t *nxtip; 1877 uint32_t optr; 1878 int error; 1879 } mush_t; 1880 1881 #define MUSHERR_NOQENTRIES -2 1882 1883 #ifdef ISP_TARGET_MODE 1884 /* 1885 * We need to handle DMA for target mode differently from initiator mode. 1886 * 1887 * DMA mapping and construction and submission of CTIO Request Entries 1888 * and rendevous for completion are very tightly coupled because we start 1889 * out by knowing (per platform) how much data we have to move, but we 1890 * don't know, up front, how many DMA mapping segments will have to be used 1891 * cover that data, so we don't know how many CTIO Request Entries we 1892 * will end up using. Further, for performance reasons we may want to 1893 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1894 * 1895 * The standard vector still goes through isp_pci_dmasetup, but the callback 1896 * for the DMA mapping routines comes here instead with the whole transfer 1897 * mapped and a pointer to a partially filled in already allocated request 1898 * queue entry. We finish the job. 1899 */ 1900 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1901 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1902 1903 #define STATUS_WITH_DATA 1 1904 1905 static void 1906 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1907 { 1908 mush_t *mp; 1909 struct ccb_scsiio *csio; 1910 ispsoftc_t *isp; 1911 struct isp_pcisoftc *pcs; 1912 bus_dmamap_t *dp; 1913 ct_entry_t *cto, *qe; 1914 uint8_t scsi_status; 1915 uint32_t curi, nxti, handle; 1916 uint32_t sflags; 1917 int32_t resid; 1918 int nth_ctio, nctios, send_status; 1919 1920 mp = (mush_t *) arg; 1921 if (error) { 1922 mp->error = error; 1923 return; 1924 } 1925 1926 isp = mp->isp; 1927 csio = mp->cmd_token; 1928 cto = mp->rq; 1929 curi = isp->isp_reqidx; 1930 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1931 1932 cto->ct_xfrlen = 0; 1933 cto->ct_seg_count = 0; 1934 cto->ct_header.rqs_entry_count = 1; 1935 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1936 1937 if (nseg == 0) { 1938 cto->ct_header.rqs_seqno = 1; 1939 isp_prt(isp, ISP_LOGTDEBUG1, 1940 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1941 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1942 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1943 cto->ct_scsi_status, cto->ct_resid); 1944 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1945 isp_put_ctio(isp, cto, qe); 1946 return; 1947 } 1948 1949 nctios = nseg / ISP_RQDSEG; 1950 if (nseg % ISP_RQDSEG) { 1951 nctios++; 1952 } 1953 1954 /* 1955 * Save syshandle, and potentially any SCSI status, which we'll 1956 * reinsert on the last CTIO we're going to send. 1957 */ 1958 1959 handle = cto->ct_syshandle; 1960 cto->ct_syshandle = 0; 1961 cto->ct_header.rqs_seqno = 0; 1962 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1963 1964 if (send_status) { 1965 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1966 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1967 /* 1968 * Preserve residual. 1969 */ 1970 resid = cto->ct_resid; 1971 1972 /* 1973 * Save actual SCSI status. 1974 */ 1975 scsi_status = cto->ct_scsi_status; 1976 1977 #ifndef STATUS_WITH_DATA 1978 sflags |= CT_NO_DATA; 1979 /* 1980 * We can't do a status at the same time as a data CTIO, so 1981 * we need to synthesize an extra CTIO at this level. 1982 */ 1983 nctios++; 1984 #endif 1985 } else { 1986 sflags = scsi_status = resid = 0; 1987 } 1988 1989 cto->ct_resid = 0; 1990 cto->ct_scsi_status = 0; 1991 1992 pcs = (struct isp_pcisoftc *)isp; 1993 dp = &pcs->dmaps[isp_handle_index(handle)]; 1994 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1995 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1996 } else { 1997 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1998 } 1999 2000 nxti = *mp->nxtip; 2001 2002 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 2003 int seglim; 2004 2005 seglim = nseg; 2006 if (seglim) { 2007 int seg; 2008 2009 if (seglim > ISP_RQDSEG) 2010 seglim = ISP_RQDSEG; 2011 2012 for (seg = 0; seg < seglim; seg++, nseg--) { 2013 /* 2014 * Unlike normal initiator commands, we don't 2015 * do any swizzling here. 2016 */ 2017 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 2018 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 2019 cto->ct_xfrlen += dm_segs->ds_len; 2020 dm_segs++; 2021 } 2022 cto->ct_seg_count = seg; 2023 } else { 2024 /* 2025 * This case should only happen when we're sending an 2026 * extra CTIO with final status. 2027 */ 2028 if (send_status == 0) { 2029 isp_prt(isp, ISP_LOGWARN, 2030 "tdma_mk ran out of segments"); 2031 mp->error = EINVAL; 2032 return; 2033 } 2034 } 2035 2036 /* 2037 * At this point, the fields ct_lun, ct_iid, ct_tagval, 2038 * ct_tagtype, and ct_timeout have been carried over 2039 * unchanged from what our caller had set. 2040 * 2041 * The dataseg fields and the seg_count fields we just got 2042 * through setting. The data direction we've preserved all 2043 * along and only clear it if we're now sending status. 2044 */ 2045 2046 if (nth_ctio == nctios - 1) { 2047 /* 2048 * We're the last in a sequence of CTIOs, so mark 2049 * this CTIO and save the handle to the CCB such that 2050 * when this CTIO completes we can free dma resources 2051 * and do whatever else we need to do to finish the 2052 * rest of the command. We *don't* give this to the 2053 * firmware to work on- the caller will do that. 2054 */ 2055 2056 cto->ct_syshandle = handle; 2057 cto->ct_header.rqs_seqno = 1; 2058 2059 if (send_status) { 2060 cto->ct_scsi_status = scsi_status; 2061 cto->ct_flags |= sflags; 2062 cto->ct_resid = resid; 2063 } 2064 if (send_status) { 2065 isp_prt(isp, ISP_LOGTDEBUG1, 2066 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 2067 "scsi status %x resid %d", 2068 cto->ct_fwhandle, csio->ccb_h.target_lun, 2069 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 2070 cto->ct_scsi_status, cto->ct_resid); 2071 } else { 2072 isp_prt(isp, ISP_LOGTDEBUG1, 2073 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 2074 cto->ct_fwhandle, csio->ccb_h.target_lun, 2075 cto->ct_iid, cto->ct_tag_val, 2076 cto->ct_flags); 2077 } 2078 isp_put_ctio(isp, cto, qe); 2079 ISP_TDQE(isp, "last tdma_mk", curi, cto); 2080 if (nctios > 1) { 2081 MEMORYBARRIER(isp, SYNC_REQUEST, 2082 curi, QENTRY_LEN); 2083 } 2084 } else { 2085 ct_entry_t *oqe = qe; 2086 2087 /* 2088 * Make sure syshandle fields are clean 2089 */ 2090 cto->ct_syshandle = 0; 2091 cto->ct_header.rqs_seqno = 0; 2092 2093 isp_prt(isp, ISP_LOGTDEBUG1, 2094 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 2095 cto->ct_fwhandle, csio->ccb_h.target_lun, 2096 cto->ct_iid, cto->ct_flags); 2097 2098 /* 2099 * Get a new CTIO 2100 */ 2101 qe = (ct_entry_t *) 2102 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2103 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 2104 if (nxti == mp->optr) { 2105 isp_prt(isp, ISP_LOGTDEBUG0, 2106 "Queue Overflow in tdma_mk"); 2107 mp->error = MUSHERR_NOQENTRIES; 2108 return; 2109 } 2110 2111 /* 2112 * Now that we're done with the old CTIO, 2113 * flush it out to the request queue. 2114 */ 2115 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 2116 isp_put_ctio(isp, cto, oqe); 2117 if (nth_ctio != 0) { 2118 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 2119 QENTRY_LEN); 2120 } 2121 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 2122 2123 /* 2124 * Reset some fields in the CTIO so we can reuse 2125 * for the next one we'll flush to the request 2126 * queue. 2127 */ 2128 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 2129 cto->ct_header.rqs_entry_count = 1; 2130 cto->ct_header.rqs_flags = 0; 2131 cto->ct_status = 0; 2132 cto->ct_scsi_status = 0; 2133 cto->ct_xfrlen = 0; 2134 cto->ct_resid = 0; 2135 cto->ct_seg_count = 0; 2136 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 2137 } 2138 } 2139 *mp->nxtip = nxti; 2140 } 2141 2142 /* 2143 * We don't have to do multiple CTIOs here. Instead, we can just do 2144 * continuation segments as needed. This greatly simplifies the code 2145 * improves performance. 2146 */ 2147 2148 static void 2149 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2150 { 2151 mush_t *mp; 2152 struct ccb_scsiio *csio; 2153 ispsoftc_t *isp; 2154 ct2_entry_t *cto, *qe; 2155 uint32_t curi, nxti; 2156 ispds_t *ds; 2157 ispds64_t *ds64; 2158 int segcnt, seglim; 2159 2160 mp = (mush_t *) arg; 2161 if (error) { 2162 mp->error = error; 2163 return; 2164 } 2165 2166 isp = mp->isp; 2167 csio = mp->cmd_token; 2168 cto = mp->rq; 2169 2170 curi = isp->isp_reqidx; 2171 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 2172 2173 if (nseg == 0) { 2174 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 2175 isp_prt(isp, ISP_LOGWARN, 2176 "dma2_tgt_fc, a status CTIO2 without MODE1 " 2177 "set (0x%x)", cto->ct_flags); 2178 mp->error = EINVAL; 2179 return; 2180 } 2181 /* 2182 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 2183 * flags to NO DATA and clear relative offset flags. 2184 * We preserve the ct_resid and the response area. 2185 */ 2186 cto->ct_header.rqs_seqno = 1; 2187 cto->ct_seg_count = 0; 2188 cto->ct_reloff = 0; 2189 isp_prt(isp, ISP_LOGTDEBUG1, 2190 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 2191 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 2192 cto->ct_iid, cto->ct_flags, cto->ct_status, 2193 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 2194 if (FCPARAM(isp)->isp_2klogin) { 2195 isp_put_ctio2e(isp, 2196 (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2197 } else { 2198 isp_put_ctio2(isp, cto, qe); 2199 } 2200 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 2201 return; 2202 } 2203 2204 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 2205 isp_prt(isp, ISP_LOGERR, 2206 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 2207 "(0x%x)", cto->ct_flags); 2208 mp->error = EINVAL; 2209 return; 2210 } 2211 2212 2213 nxti = *mp->nxtip; 2214 2215 /* 2216 * Check to see if we need to DAC addressing or not. 2217 * 2218 * Any address that's over the 4GB boundary causes this 2219 * to happen. 2220 */ 2221 segcnt = nseg; 2222 if (sizeof (bus_addr_t) > 4) { 2223 for (segcnt = 0; segcnt < nseg; segcnt++) { 2224 uint64_t addr = dm_segs[segcnt].ds_addr; 2225 if (addr >= 0x100000000LL) { 2226 break; 2227 } 2228 } 2229 } 2230 if (segcnt != nseg) { 2231 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3; 2232 seglim = ISP_RQDSEG_T3; 2233 ds64 = &cto->rsp.m0.u.ct_dataseg64[0]; 2234 ds = NULL; 2235 } else { 2236 seglim = ISP_RQDSEG_T2; 2237 ds64 = NULL; 2238 ds = &cto->rsp.m0.u.ct_dataseg[0]; 2239 } 2240 cto->ct_seg_count = 0; 2241 2242 /* 2243 * Set up the CTIO2 data segments. 2244 */ 2245 for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg; 2246 cto->ct_seg_count++, segcnt++) { 2247 if (ds64) { 2248 ds64->ds_basehi = 2249 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2250 ds64->ds_base = dm_segs[segcnt].ds_addr; 2251 ds64->ds_count = dm_segs[segcnt].ds_len; 2252 ds64++; 2253 } else { 2254 ds->ds_base = dm_segs[segcnt].ds_addr; 2255 ds->ds_count = dm_segs[segcnt].ds_len; 2256 ds++; 2257 } 2258 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2259 #if __FreeBSD_version < 500000 2260 isp_prt(isp, ISP_LOGTDEBUG1, 2261 "isp_send_ctio2: ent0[%d]0x%llx:%llu", 2262 cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr, 2263 (uint64_t)dm_segs[segcnt].ds_len); 2264 #else 2265 isp_prt(isp, ISP_LOGTDEBUG1, 2266 "isp_send_ctio2: ent0[%d]0x%jx:%ju", 2267 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr, 2268 (uintmax_t)dm_segs[segcnt].ds_len); 2269 #endif 2270 } 2271 2272 while (segcnt < nseg) { 2273 uint32_t curip; 2274 int seg; 2275 ispcontreq_t local, *crq = &local, *qep; 2276 2277 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2278 curip = nxti; 2279 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 2280 if (nxti == mp->optr) { 2281 ISP_UNLOCK(isp); 2282 isp_prt(isp, ISP_LOGTDEBUG0, 2283 "tdma_mkfc: request queue overflow"); 2284 mp->error = MUSHERR_NOQENTRIES; 2285 return; 2286 } 2287 cto->ct_header.rqs_entry_count++; 2288 MEMZERO((void *)crq, sizeof (*crq)); 2289 crq->req_header.rqs_entry_count = 1; 2290 if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { 2291 seglim = ISP_CDSEG64; 2292 ds = NULL; 2293 ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0]; 2294 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2295 } else { 2296 seglim = ISP_CDSEG; 2297 ds = &crq->req_dataseg[0]; 2298 ds64 = NULL; 2299 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2300 } 2301 for (seg = 0; segcnt < nseg && seg < seglim; 2302 segcnt++, seg++) { 2303 if (ds64) { 2304 ds64->ds_basehi = 2305 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2306 ds64->ds_base = dm_segs[segcnt].ds_addr; 2307 ds64->ds_count = dm_segs[segcnt].ds_len; 2308 ds64++; 2309 } else { 2310 ds->ds_base = dm_segs[segcnt].ds_addr; 2311 ds->ds_count = dm_segs[segcnt].ds_len; 2312 ds++; 2313 } 2314 #if __FreeBSD_version < 500000 2315 isp_prt(isp, ISP_LOGTDEBUG1, 2316 "isp_send_ctio2: ent%d[%d]%llx:%llu", 2317 cto->ct_header.rqs_entry_count-1, seg, 2318 (uint64_t)dm_segs[segcnt].ds_addr, 2319 (uint64_t)dm_segs[segcnt].ds_len); 2320 #else 2321 isp_prt(isp, ISP_LOGTDEBUG1, 2322 "isp_send_ctio2: ent%d[%d]%jx:%ju", 2323 cto->ct_header.rqs_entry_count-1, seg, 2324 (uintmax_t)dm_segs[segcnt].ds_addr, 2325 (uintmax_t)dm_segs[segcnt].ds_len); 2326 #endif 2327 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2328 cto->ct_seg_count++; 2329 } 2330 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 2331 isp_put_cont_req(isp, crq, qep); 2332 ISP_TDQE(isp, "cont entry", curi, qep); 2333 } 2334 2335 /* 2336 * No do final twiddling for the CTIO itself. 2337 */ 2338 cto->ct_header.rqs_seqno = 1; 2339 isp_prt(isp, ISP_LOGTDEBUG1, 2340 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 2341 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 2342 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 2343 cto->ct_resid); 2344 if (FCPARAM(isp)->isp_2klogin) { 2345 isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2346 } else { 2347 isp_put_ctio2(isp, cto, qe); 2348 } 2349 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 2350 *mp->nxtip = nxti; 2351 } 2352 #endif 2353 2354 static void dma_2400(void *, bus_dma_segment_t *, int, int); 2355 static void dma2_a64(void *, bus_dma_segment_t *, int, int); 2356 static void dma2(void *, bus_dma_segment_t *, int, int); 2357 2358 static void 2359 dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2360 { 2361 mush_t *mp; 2362 ispsoftc_t *isp; 2363 struct ccb_scsiio *csio; 2364 struct isp_pcisoftc *pcs; 2365 bus_dmamap_t *dp; 2366 bus_dma_segment_t *eseg; 2367 ispreqt7_t *rq; 2368 int seglim, datalen; 2369 uint32_t nxti; 2370 2371 mp = (mush_t *) arg; 2372 if (error) { 2373 mp->error = error; 2374 return; 2375 } 2376 2377 if (nseg < 1) { 2378 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2379 mp->error = EFAULT; 2380 return; 2381 } 2382 2383 csio = mp->cmd_token; 2384 isp = mp->isp; 2385 rq = mp->rq; 2386 pcs = (struct isp_pcisoftc *)mp->isp; 2387 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2388 nxti = *mp->nxtip; 2389 2390 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2391 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2392 } else { 2393 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2394 } 2395 datalen = XS_XFRLEN(csio); 2396 2397 /* 2398 * We're passed an initial partially filled in entry that 2399 * has most fields filled in except for data transfer 2400 * related values. 2401 * 2402 * Our job is to fill in the initial request queue entry and 2403 * then to start allocating and filling in continuation entries 2404 * until we've covered the entire transfer. 2405 */ 2406 2407 rq->req_header.rqs_entry_type = RQSTYPE_T7RQS; 2408 rq->req_dl = datalen; 2409 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2410 rq->req_alen_datadir = 0x2; 2411 } else { 2412 rq->req_alen_datadir = 0x1; 2413 } 2414 2415 eseg = dm_segs + nseg; 2416 2417 rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr); 2418 rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr); 2419 rq->req_dataseg.ds_count = dm_segs->ds_len; 2420 2421 datalen -= dm_segs->ds_len; 2422 2423 dm_segs++; 2424 rq->req_seg_count++; 2425 2426 while (datalen > 0 && dm_segs != eseg) { 2427 uint32_t onxti; 2428 ispcontreq64_t local, *crq = &local, *cqe; 2429 2430 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2431 onxti = nxti; 2432 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2433 if (nxti == mp->optr) { 2434 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2435 mp->error = MUSHERR_NOQENTRIES; 2436 return; 2437 } 2438 rq->req_header.rqs_entry_count++; 2439 MEMZERO((void *)crq, sizeof (*crq)); 2440 crq->req_header.rqs_entry_count = 1; 2441 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2442 2443 seglim = 0; 2444 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2445 crq->req_dataseg[seglim].ds_base = 2446 DMA_LO32(dm_segs->ds_addr); 2447 crq->req_dataseg[seglim].ds_basehi = 2448 DMA_HI32(dm_segs->ds_addr); 2449 crq->req_dataseg[seglim].ds_count = 2450 dm_segs->ds_len; 2451 rq->req_seg_count++; 2452 dm_segs++; 2453 seglim++; 2454 datalen -= dm_segs->ds_len; 2455 } 2456 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2457 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2458 } 2459 isp_put_cont64_req(isp, crq, cqe); 2460 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2461 } 2462 *mp->nxtip = nxti; 2463 } 2464 2465 static void 2466 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2467 { 2468 mush_t *mp; 2469 ispsoftc_t *isp; 2470 struct ccb_scsiio *csio; 2471 struct isp_pcisoftc *pcs; 2472 bus_dmamap_t *dp; 2473 bus_dma_segment_t *eseg; 2474 ispreq64_t *rq; 2475 int seglim, datalen; 2476 uint32_t nxti; 2477 2478 mp = (mush_t *) arg; 2479 if (error) { 2480 mp->error = error; 2481 return; 2482 } 2483 2484 if (nseg < 1) { 2485 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2486 mp->error = EFAULT; 2487 return; 2488 } 2489 csio = mp->cmd_token; 2490 isp = mp->isp; 2491 rq = mp->rq; 2492 pcs = (struct isp_pcisoftc *)mp->isp; 2493 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2494 nxti = *mp->nxtip; 2495 2496 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2497 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2498 } else { 2499 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2500 } 2501 datalen = XS_XFRLEN(csio); 2502 2503 /* 2504 * We're passed an initial partially filled in entry that 2505 * has most fields filled in except for data transfer 2506 * related values. 2507 * 2508 * Our job is to fill in the initial request queue entry and 2509 * then to start allocating and filling in continuation entries 2510 * until we've covered the entire transfer. 2511 */ 2512 2513 if (IS_FC(isp)) { 2514 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 2515 seglim = ISP_RQDSEG_T3; 2516 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 2517 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2518 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2519 } else { 2520 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2521 } 2522 } else { 2523 rq->req_header.rqs_entry_type = RQSTYPE_A64; 2524 if (csio->cdb_len > 12) { 2525 seglim = 0; 2526 } else { 2527 seglim = ISP_RQDSEG_A64; 2528 } 2529 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2530 rq->req_flags |= REQFLAG_DATA_IN; 2531 } else { 2532 rq->req_flags |= REQFLAG_DATA_OUT; 2533 } 2534 } 2535 2536 eseg = dm_segs + nseg; 2537 2538 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2539 if (IS_FC(isp)) { 2540 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 2541 rq3->req_dataseg[rq3->req_seg_count].ds_base = 2542 DMA_LO32(dm_segs->ds_addr); 2543 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 2544 DMA_HI32(dm_segs->ds_addr); 2545 rq3->req_dataseg[rq3->req_seg_count].ds_count = 2546 dm_segs->ds_len; 2547 } else { 2548 rq->req_dataseg[rq->req_seg_count].ds_base = 2549 DMA_LO32(dm_segs->ds_addr); 2550 rq->req_dataseg[rq->req_seg_count].ds_basehi = 2551 DMA_HI32(dm_segs->ds_addr); 2552 rq->req_dataseg[rq->req_seg_count].ds_count = 2553 dm_segs->ds_len; 2554 } 2555 datalen -= dm_segs->ds_len; 2556 rq->req_seg_count++; 2557 dm_segs++; 2558 } 2559 2560 while (datalen > 0 && dm_segs != eseg) { 2561 uint32_t onxti; 2562 ispcontreq64_t local, *crq = &local, *cqe; 2563 2564 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2565 onxti = nxti; 2566 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2567 if (nxti == mp->optr) { 2568 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2569 mp->error = MUSHERR_NOQENTRIES; 2570 return; 2571 } 2572 rq->req_header.rqs_entry_count++; 2573 MEMZERO((void *)crq, sizeof (*crq)); 2574 crq->req_header.rqs_entry_count = 1; 2575 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2576 2577 seglim = 0; 2578 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2579 crq->req_dataseg[seglim].ds_base = 2580 DMA_LO32(dm_segs->ds_addr); 2581 crq->req_dataseg[seglim].ds_basehi = 2582 DMA_HI32(dm_segs->ds_addr); 2583 crq->req_dataseg[seglim].ds_count = 2584 dm_segs->ds_len; 2585 rq->req_seg_count++; 2586 dm_segs++; 2587 seglim++; 2588 datalen -= dm_segs->ds_len; 2589 } 2590 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2591 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2592 } 2593 isp_put_cont64_req(isp, crq, cqe); 2594 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2595 } 2596 *mp->nxtip = nxti; 2597 } 2598 2599 static void 2600 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2601 { 2602 mush_t *mp; 2603 ispsoftc_t *isp; 2604 struct ccb_scsiio *csio; 2605 struct isp_pcisoftc *pcs; 2606 bus_dmamap_t *dp; 2607 bus_dma_segment_t *eseg; 2608 ispreq_t *rq; 2609 int seglim, datalen; 2610 uint32_t nxti; 2611 2612 mp = (mush_t *) arg; 2613 if (error) { 2614 mp->error = error; 2615 return; 2616 } 2617 2618 if (nseg < 1) { 2619 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2620 mp->error = EFAULT; 2621 return; 2622 } 2623 csio = mp->cmd_token; 2624 isp = mp->isp; 2625 rq = mp->rq; 2626 pcs = (struct isp_pcisoftc *)mp->isp; 2627 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2628 nxti = *mp->nxtip; 2629 2630 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2631 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2632 } else { 2633 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2634 } 2635 2636 datalen = XS_XFRLEN(csio); 2637 2638 /* 2639 * We're passed an initial partially filled in entry that 2640 * has most fields filled in except for data transfer 2641 * related values. 2642 * 2643 * Our job is to fill in the initial request queue entry and 2644 * then to start allocating and filling in continuation entries 2645 * until we've covered the entire transfer. 2646 */ 2647 2648 if (IS_FC(isp)) { 2649 seglim = ISP_RQDSEG_T2; 2650 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 2651 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2652 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2653 } else { 2654 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2655 } 2656 } else { 2657 if (csio->cdb_len > 12) { 2658 seglim = 0; 2659 } else { 2660 seglim = ISP_RQDSEG; 2661 } 2662 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2663 rq->req_flags |= REQFLAG_DATA_IN; 2664 } else { 2665 rq->req_flags |= REQFLAG_DATA_OUT; 2666 } 2667 } 2668 2669 eseg = dm_segs + nseg; 2670 2671 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2672 if (IS_FC(isp)) { 2673 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 2674 rq2->req_dataseg[rq2->req_seg_count].ds_base = 2675 DMA_LO32(dm_segs->ds_addr); 2676 rq2->req_dataseg[rq2->req_seg_count].ds_count = 2677 dm_segs->ds_len; 2678 } else { 2679 rq->req_dataseg[rq->req_seg_count].ds_base = 2680 DMA_LO32(dm_segs->ds_addr); 2681 rq->req_dataseg[rq->req_seg_count].ds_count = 2682 dm_segs->ds_len; 2683 } 2684 datalen -= dm_segs->ds_len; 2685 rq->req_seg_count++; 2686 dm_segs++; 2687 } 2688 2689 while (datalen > 0 && dm_segs != eseg) { 2690 uint32_t onxti; 2691 ispcontreq_t local, *crq = &local, *cqe; 2692 2693 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2694 onxti = nxti; 2695 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2696 if (nxti == mp->optr) { 2697 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2698 mp->error = MUSHERR_NOQENTRIES; 2699 return; 2700 } 2701 rq->req_header.rqs_entry_count++; 2702 MEMZERO((void *)crq, sizeof (*crq)); 2703 crq->req_header.rqs_entry_count = 1; 2704 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2705 2706 seglim = 0; 2707 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 2708 crq->req_dataseg[seglim].ds_base = 2709 DMA_LO32(dm_segs->ds_addr); 2710 crq->req_dataseg[seglim].ds_count = 2711 dm_segs->ds_len; 2712 rq->req_seg_count++; 2713 dm_segs++; 2714 seglim++; 2715 datalen -= dm_segs->ds_len; 2716 } 2717 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2718 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2719 } 2720 isp_put_cont_req(isp, crq, cqe); 2721 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2722 } 2723 *mp->nxtip = nxti; 2724 } 2725 2726 /* 2727 * We enter with ISP_LOCK held 2728 */ 2729 static int 2730 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq, 2731 uint32_t *nxtip, uint32_t optr) 2732 { 2733 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2734 ispreq_t *qep; 2735 bus_dmamap_t *dp = NULL; 2736 mush_t mush, *mp; 2737 void (*eptr)(void *, bus_dma_segment_t *, int, int); 2738 2739 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 2740 #ifdef ISP_TARGET_MODE 2741 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 2742 if (IS_FC(isp)) { 2743 eptr = tdma_mkfc; 2744 } else { 2745 eptr = tdma_mk; 2746 } 2747 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2748 (csio->dxfer_len == 0)) { 2749 mp = &mush; 2750 mp->isp = isp; 2751 mp->cmd_token = csio; 2752 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 2753 mp->nxtip = nxtip; 2754 mp->optr = optr; 2755 mp->error = 0; 2756 ISPLOCK_2_CAMLOCK(isp); 2757 (*eptr)(mp, NULL, 0, 0); 2758 CAMLOCK_2_ISPLOCK(isp); 2759 goto mbxsync; 2760 } 2761 } else 2762 #endif 2763 if (IS_24XX(isp)) { 2764 eptr = dma_2400; 2765 } else if (sizeof (bus_addr_t) > 4) { 2766 eptr = dma2_a64; 2767 } else { 2768 eptr = dma2; 2769 } 2770 2771 2772 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2773 (csio->dxfer_len == 0)) { 2774 rq->req_seg_count = 1; 2775 goto mbxsync; 2776 } 2777 2778 /* 2779 * Do a virtual grapevine step to collect info for 2780 * the callback dma allocation that we have to use... 2781 */ 2782 mp = &mush; 2783 mp->isp = isp; 2784 mp->cmd_token = csio; 2785 mp->rq = rq; 2786 mp->nxtip = nxtip; 2787 mp->optr = optr; 2788 mp->error = 0; 2789 2790 ISPLOCK_2_CAMLOCK(isp); 2791 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 2792 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 2793 int error, s; 2794 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2795 s = splsoftvm(); 2796 error = bus_dmamap_load(pcs->dmat, *dp, 2797 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 2798 if (error == EINPROGRESS) { 2799 bus_dmamap_unload(pcs->dmat, *dp); 2800 mp->error = EINVAL; 2801 isp_prt(isp, ISP_LOGERR, 2802 "deferred dma allocation not supported"); 2803 } else if (error && mp->error == 0) { 2804 #ifdef DIAGNOSTIC 2805 isp_prt(isp, ISP_LOGERR, 2806 "error %d in dma mapping code", error); 2807 #endif 2808 mp->error = error; 2809 } 2810 splx(s); 2811 } else { 2812 /* Pointer to physical buffer */ 2813 struct bus_dma_segment seg; 2814 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 2815 seg.ds_len = csio->dxfer_len; 2816 (*eptr)(mp, &seg, 1, 0); 2817 } 2818 } else { 2819 struct bus_dma_segment *segs; 2820 2821 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2822 isp_prt(isp, ISP_LOGERR, 2823 "Physical segment pointers unsupported"); 2824 mp->error = EINVAL; 2825 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2826 isp_prt(isp, ISP_LOGERR, 2827 "Virtual segment addresses unsupported"); 2828 mp->error = EINVAL; 2829 } else { 2830 /* Just use the segments provided */ 2831 segs = (struct bus_dma_segment *) csio->data_ptr; 2832 (*eptr)(mp, segs, csio->sglist_cnt, 0); 2833 } 2834 } 2835 CAMLOCK_2_ISPLOCK(isp); 2836 if (mp->error) { 2837 int retval = CMD_COMPLETE; 2838 if (mp->error == MUSHERR_NOQENTRIES) { 2839 retval = CMD_EAGAIN; 2840 } else if (mp->error == EFBIG) { 2841 XS_SETERR(csio, CAM_REQ_TOO_BIG); 2842 } else if (mp->error == EINVAL) { 2843 XS_SETERR(csio, CAM_REQ_INVALID); 2844 } else { 2845 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 2846 } 2847 return (retval); 2848 } 2849 mbxsync: 2850 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2851 isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq); 2852 } 2853 switch (rq->req_header.rqs_entry_type) { 2854 case RQSTYPE_REQUEST: 2855 isp_put_request(isp, rq, qep); 2856 break; 2857 case RQSTYPE_CMDONLY: 2858 isp_put_extended_request(isp, (ispextreq_t *)rq, 2859 (ispextreq_t *)qep); 2860 break; 2861 case RQSTYPE_T2RQS: 2862 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2863 break; 2864 case RQSTYPE_A64: 2865 case RQSTYPE_T3RQS: 2866 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2867 break; 2868 case RQSTYPE_T7RQS: 2869 isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep); 2870 break; 2871 } 2872 return (CMD_QUEUED); 2873 } 2874 2875 static void 2876 isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle) 2877 { 2878 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2879 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 2880 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2881 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 2882 } else { 2883 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 2884 } 2885 bus_dmamap_unload(pcs->dmat, *dp); 2886 } 2887 2888 2889 static void 2890 isp_pci_reset0(ispsoftc_t *isp) 2891 { 2892 ISP_DISABLE_INTS(isp); 2893 } 2894 2895 static void 2896 isp_pci_reset1(ispsoftc_t *isp) 2897 { 2898 if (!IS_24XX(isp)) { 2899 /* Make sure the BIOS is disabled */ 2900 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2901 } 2902 /* and enable interrupts */ 2903 ISP_ENABLE_INTS(isp); 2904 } 2905 2906 static void 2907 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) 2908 { 2909 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2910 if (msg) 2911 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2912 else 2913 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2914 if (IS_SCSI(isp)) 2915 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2916 else 2917 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2918 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2919 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2920 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2921 2922 2923 if (IS_SCSI(isp)) { 2924 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2925 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2926 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2927 ISP_READ(isp, CDMA_FIFO_STS)); 2928 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2929 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2930 ISP_READ(isp, DDMA_FIFO_STS)); 2931 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2932 ISP_READ(isp, SXP_INTERRUPT), 2933 ISP_READ(isp, SXP_GROSS_ERR), 2934 ISP_READ(isp, SXP_PINS_CTRL)); 2935 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2936 } 2937 printf(" mbox regs: %x %x %x %x %x\n", 2938 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2939 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2940 ISP_READ(isp, OUTMAILBOX4)); 2941 printf(" PCI Status Command/Status=%x\n", 2942 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2943 } 2944