1 /*- 2 * 3 * Copyright (c) 1997-2006 by Matthew Jacob 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice immediately at the beginning of the file, without modification, 11 * this list of conditions, and the following disclaimer. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 */ 28 /* 29 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 30 * FreeBSD Version. 
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#if __FreeBSD_version >= 700000
#include <sys/linker.h>
#include <sys/firmware.h>
#endif
#include <sys/bus.h>
#if __FreeBSD_version < 500000
#include <pci/pcireg.h>
#include <pci/pcivar.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#else
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

#if __FreeBSD_version < 500000
#define	BUS_PROBE_DEFAULT	0
#endif

/*
 * Bus-specific register accessors, interrupt-status readers and DMA
 * helpers.  These are exported to the common ISP core through the
 * per-chip ispmdvec dispatch tables below; the core never touches
 * PCI registers directly.
 */
static uint16_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint16_t);
static uint16_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint16_t);
static int
isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int
isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int
isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint16_t *, uint16_t);
static void
isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint16_t);


static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

/* Dispatch table used for ISP1020/1040 parallel SCSI adapters. */
static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

/* Dispatch table for ISP1080/1240/1280 (uses 1080-style DMA regs). */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

/* Dispatch table for ISP10160/12160 Ultra3 SCSI adapters. */
static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

/*
 * Fibre Channel tables: trailing members (firmware image pointer and
 * config flags) are deliberately left zero-initialized.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

/* 23XX parts use the RISC-to-Host status register interrupt reader. */
static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

/*
 * PCI config-space bits and offsets, provided locally in case the
 * system headers predate them.
 */
#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef	PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE		0x0400
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

/* Qlogic PCI vendor/device IDs. */
#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6322
#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif


/*
 * Combined (device << 16) | vendor keys, matching the switch value
 * computed in isp_pci_probe().
 */
#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

/* BAR offsets for the I/O-space and memory-space register windows. */
#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

/* Default PCI latency timer and cache line size written in attach. */
#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


/*
 * Per-instance softc.  pci_isp MUST be first so that an ispsoftc_t
 * pointer can be cast back to the PCI softc (see IspVirt2Off et al.).
 */
struct isp_pcisoftc {
	ispsoftc_t			pci_isp;	/* common core state */
	device_t			pci_dev;
	struct resource *		pci_reg;	/* register BAR */
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;		/* interrupt cookie */
	int16_t				pci_poff[_NREG_BLKS]; /* reg block offsets */
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
#if __FreeBSD_version >= 700000
MODULE_DEPEND(isp, ispfw, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
#else
/* Pre-firmware(9) hook: filled in by the ispfw module when loaded. */
typedef void ispfwfunc(int, int, int, uint16_t **);
extern ispfwfunc *isp_get_firmware_p;
#endif

static int
isp_pci_probe(device_t dev)
{
347 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 348 case PCI_QLOGIC_ISP1020: 349 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 350 break; 351 case PCI_QLOGIC_ISP1080: 352 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 353 break; 354 case PCI_QLOGIC_ISP1240: 355 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 356 break; 357 case PCI_QLOGIC_ISP1280: 358 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 359 break; 360 case PCI_QLOGIC_ISP10160: 361 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 362 break; 363 case PCI_QLOGIC_ISP12160: 364 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 365 return (ENXIO); 366 } 367 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 368 break; 369 case PCI_QLOGIC_ISP2100: 370 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 371 break; 372 case PCI_QLOGIC_ISP2200: 373 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 374 break; 375 case PCI_QLOGIC_ISP2300: 376 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 377 break; 378 case PCI_QLOGIC_ISP2312: 379 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 380 break; 381 case PCI_QLOGIC_ISP2322: 382 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 383 break; 384 #if 0 385 case PCI_QLOGIC_ISP2422: 386 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 387 break; 388 #endif 389 case PCI_QLOGIC_ISP6312: 390 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 391 break; 392 case PCI_QLOGIC_ISP6322: 393 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); 394 break; 395 default: 396 return (ENXIO); 397 } 398 if (isp_announced == 0 && bootverbose) { 399 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 400 "Core Version %d.%d\n", 401 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 402 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 403 isp_announced++; 404 } 405 /* 406 * XXXX: Here is where we might load the f/w module 407 * XXXX: 
(or increase a reference count to it). 408 */ 409 return (BUS_PROBE_DEFAULT); 410 } 411 412 #if __FreeBSD_version < 500000 413 static void 414 isp_get_options(device_t dev, ispsoftc_t *isp) 415 { 416 uint64_t wwn; 417 int bitmap, unit; 418 419 unit = device_get_unit(dev); 420 if (getenv_int("isp_disable", &bitmap)) { 421 if (bitmap & (1 << unit)) { 422 isp->isp_osinfo.disabled = 1; 423 return; 424 } 425 } 426 427 if (getenv_int("isp_no_fwload", &bitmap)) { 428 if (bitmap & (1 << unit)) 429 isp->isp_confopts |= ISP_CFG_NORELOAD; 430 } 431 if (getenv_int("isp_fwload", &bitmap)) { 432 if (bitmap & (1 << unit)) 433 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 434 } 435 if (getenv_int("isp_no_nvram", &bitmap)) { 436 if (bitmap & (1 << unit)) 437 isp->isp_confopts |= ISP_CFG_NONVRAM; 438 } 439 if (getenv_int("isp_nvram", &bitmap)) { 440 if (bitmap & (1 << unit)) 441 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 442 } 443 if (getenv_int("isp_fcduplex", &bitmap)) { 444 if (bitmap & (1 << unit)) 445 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 446 } 447 if (getenv_int("isp_no_fcduplex", &bitmap)) { 448 if (bitmap & (1 << unit)) 449 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 450 } 451 if (getenv_int("isp_nport", &bitmap)) { 452 if (bitmap & (1 << unit)) 453 isp->isp_confopts |= ISP_CFG_NPORT; 454 } 455 456 /* 457 * Because the resource_*_value functions can neither return 458 * 64 bit integer values, nor can they be directly coerced 459 * to interpret the right hand side of the assignment as 460 * you want them to interpret it, we have to force WWN 461 * hint replacement to specify WWN strings with a leading 462 * 'w' (e..g w50000000aaaa0001). Sigh. 
463 */ 464 if (getenv_quad("isp_portwwn", &wwn)) { 465 isp->isp_osinfo.default_port_wwn = wwn; 466 isp->isp_confopts |= ISP_CFG_OWNWWPN; 467 } 468 if (isp->isp_osinfo.default_port_wwn == 0) { 469 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 470 } 471 472 if (getenv_quad("isp_nodewwn", &wwn)) { 473 isp->isp_osinfo.default_node_wwn = wwn; 474 isp->isp_confopts |= ISP_CFG_OWNWWNN; 475 } 476 if (isp->isp_osinfo.default_node_wwn == 0) { 477 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 478 } 479 480 bitmap = 0; 481 (void) getenv_int("isp_debug", &bitmap); 482 if (bitmap) { 483 isp->isp_dblev = bitmap; 484 } else { 485 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 486 } 487 if (bootverbose) { 488 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 489 } 490 491 #ifdef ISP_FW_CRASH_DUMP 492 bitmap = 0; 493 if (getenv_int("isp_fw_dump_enable", &bitmap)) { 494 if (bitmap & (1 << unit) { 495 size_t amt = 0; 496 if (IS_2200(isp)) { 497 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 498 } else if (IS_23XX(isp)) { 499 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 500 } 501 if (amt) { 502 FCPARAM(isp)->isp_dump_data = 503 malloc(amt, M_DEVBUF, M_WAITOK); 504 memset(FCPARAM(isp)->isp_dump_data, 0, amt); 505 } else { 506 device_printf(dev, 507 "f/w crash dumps not supported for card\n"); 508 } 509 } 510 } 511 #endif 512 bitmap = 0; 513 if (getenv_int("role", &bitmap)) { 514 isp->isp_role = bitmap; 515 } else { 516 isp->isp_role = ISP_DEFAULT_ROLES; 517 } 518 } 519 520 static void 521 isp_get_pci_options(device_t dev, int *m1, int *m2) 522 { 523 int bitmap; 524 int unit = device_get_unit(dev); 525 526 *m1 = PCIM_CMD_MEMEN; 527 *m2 = PCIM_CMD_PORTEN; 528 if (getenv_int("isp_mem_map", &bitmap)) { 529 if (bitmap & (1 << unit)) { 530 *m1 = PCIM_CMD_MEMEN; 531 *m2 = PCIM_CMD_PORTEN; 532 } 533 } 534 bitmap = 0; 535 if (getenv_int("isp_io_map", &bitmap)) { 536 if (bitmap & (1 << unit)) { 537 *m1 = PCIM_CMD_PORTEN; 538 *m2 = PCIM_CMD_MEMEN; 539 } 540 } 541 } 542 #else 543 static void 544 
isp_get_options(device_t dev, ispsoftc_t *isp) 545 { 546 int tval; 547 const char *sptr; 548 /* 549 * Figure out if we're supposed to skip this one. 550 */ 551 552 tval = 0; 553 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 554 "disable", &tval) == 0 && tval) { 555 device_printf(dev, "disabled at user request\n"); 556 isp->isp_osinfo.disabled = 1; 557 return; 558 } 559 560 tval = -1; 561 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 562 "role", &tval) == 0 && tval != -1) { 563 tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 564 isp->isp_role = tval; 565 device_printf(dev, "setting role to 0x%x\n", isp->isp_role); 566 } else { 567 #ifdef ISP_TARGET_MODE 568 isp->isp_role = ISP_ROLE_TARGET; 569 #else 570 isp->isp_role = ISP_DEFAULT_ROLES; 571 #endif 572 } 573 574 tval = 0; 575 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 576 "fwload_disable", &tval) == 0 && tval != 0) { 577 isp->isp_confopts |= ISP_CFG_NORELOAD; 578 } 579 tval = 0; 580 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 581 "ignore_nvram", &tval) == 0 && tval != 0) { 582 isp->isp_confopts |= ISP_CFG_NONVRAM; 583 } 584 tval = 0; 585 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 586 "fullduplex", &tval) == 0 && tval != 0) { 587 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 588 } 589 #ifdef ISP_FW_CRASH_DUMP 590 tval = 0; 591 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 592 "fw_dump_enable", &tval) == 0 && tval != 0) { 593 size_t amt = 0; 594 if (IS_2200(isp)) { 595 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 596 } else if (IS_23XX(isp)) { 597 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 598 } 599 if (amt) { 600 FCPARAM(isp)->isp_dump_data = 601 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 602 } else { 603 device_printf(dev, 604 "f/w crash dumps not supported for this model\n"); 605 } 606 } 607 #endif 608 609 sptr = 0; 610 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 611 "topology", 
(const char **) &sptr) == 0 && sptr != 0) { 612 if (strcmp(sptr, "lport") == 0) { 613 isp->isp_confopts |= ISP_CFG_LPORT; 614 } else if (strcmp(sptr, "nport") == 0) { 615 isp->isp_confopts |= ISP_CFG_NPORT; 616 } else if (strcmp(sptr, "lport-only") == 0) { 617 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 618 } else if (strcmp(sptr, "nport-only") == 0) { 619 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 620 } 621 } 622 623 /* 624 * Because the resource_*_value functions can neither return 625 * 64 bit integer values, nor can they be directly coerced 626 * to interpret the right hand side of the assignment as 627 * you want them to interpret it, we have to force WWN 628 * hint replacement to specify WWN strings with a leading 629 * 'w' (e..g w50000000aaaa0001). Sigh. 630 */ 631 sptr = 0; 632 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 633 "portwwn", (const char **) &sptr); 634 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 635 char *eptr = 0; 636 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 637 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 638 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 639 isp->isp_osinfo.default_port_wwn = 0; 640 } else { 641 isp->isp_confopts |= ISP_CFG_OWNWWPN; 642 } 643 } 644 if (isp->isp_osinfo.default_port_wwn == 0) { 645 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 646 } 647 648 sptr = 0; 649 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 650 "nodewwn", (const char **) &sptr); 651 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 652 char *eptr = 0; 653 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 654 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 655 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 656 isp->isp_osinfo.default_node_wwn = 0; 657 } else { 658 isp->isp_confopts |= ISP_CFG_OWNWWNN; 659 } 660 } 661 if (isp->isp_osinfo.default_node_wwn == 0) { 662 isp->isp_osinfo.default_node_wwn = 
0x400000007F000009ull; 663 } 664 665 isp->isp_osinfo.default_id = -1; 666 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 667 "iid", &tval) == 0) { 668 isp->isp_osinfo.default_id = tval; 669 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 670 } 671 if (isp->isp_osinfo.default_id == -1) { 672 if (IS_FC(isp)) { 673 isp->isp_osinfo.default_id = 109; 674 } else { 675 isp->isp_osinfo.default_id = 7; 676 } 677 } 678 679 /* 680 * Set up logging levels. 681 */ 682 tval = 0; 683 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 684 "debug", &tval); 685 if (tval) { 686 isp->isp_dblev = tval; 687 } else { 688 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 689 } 690 if (bootverbose) { 691 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 692 } 693 694 } 695 696 static void 697 isp_get_pci_options(device_t dev, int *m1, int *m2) 698 { 699 int tval; 700 /* 701 * Which we should try first - memory mapping or i/o mapping? 702 * 703 * We used to try memory first followed by i/o on alpha, otherwise 704 * the reverse, but we should just try memory first all the time now. 
 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}
#endif

/*
 * Attach: map registers, pick the per-chip dispatch table, locate
 * firmware, fix up PCI config registers, wire the interrupt, then
 * hand off to the common isp_reset/isp_init/isp_attach sequence.
 * The order of operations here is deliberate; do not reorder.
 */
static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int rtp, rgd, iqd, m1, m2;
	uint32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp = NULL;
	struct ispmdvec *mdvp;
#if __FreeBSD_version >= 500000
	int locksetup = 0;	/* nonzero once the mutex exists (for bad:) */
#endif

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));
	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;

	/*
	 * Get Generic Options
	 */
	isp_get_options(dev, isp);

	/*
	 * Check to see if options have us disabled
	 */
	if (isp->isp_osinfo.disabled) {
		/*
		 * But return zero to preserve unit numbering
		 */
		return (0);
	}

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);


	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	/* Try the preferred mapping first, then fall back to the other. */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	pcs->pci_dev = dev;	/* (redundant; already set above) */
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	/* Default register-block offsets; chip cases below override some. */
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		/* dual-channel: two sets of SCSI parameters */
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX; boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2322;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2422;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;	/* (redundant; already set above) */
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_dev = dev;

#if __FreeBSD_version >= 700000
	/*
	 * Try and find firmware for this device.
	 */
	{
		char fwname[32];
		unsigned int did = pci_get_device(dev);

		/*
		 * Map a few pci ids to fw names
		 */
		switch (did) {
		case PCI_PRODUCT_QLOGIC_ISP1020:
			did = 0x1040;
			break;
		case PCI_PRODUCT_QLOGIC_ISP1240:
			did = 0x1080;
			break;
		case PCI_PRODUCT_QLOGIC_ISP10160:
		case PCI_PRODUCT_QLOGIC_ISP12160:
			did = 0x12160;
			break;
		case PCI_PRODUCT_QLOGIC_ISP6312:
		case PCI_PRODUCT_QLOGIC_ISP2312:
			did = 0x2300;
			break;
		case PCI_PRODUCT_QLOGIC_ISP6322:
			did = 0x2322;
			break;
		default:
			break;
		}

		/* Prefer target-mode firmware when acting as a target. */
		isp->isp_osinfo.fw = NULL;
		if (isp->isp_role & ISP_ROLE_TARGET) {
			snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
			isp->isp_osinfo.fw = firmware_get(fwname);
		}
		if (isp->isp_osinfo.fw == NULL) {
			snprintf(fwname, sizeof (fwname), "isp_%04x", did);
			isp->isp_osinfo.fw = firmware_get(fwname);
		}
		if (isp->isp_osinfo.fw != NULL) {
			/* union shuffles off const without a cast warning */
			union {
				const void *fred;
				uint16_t *bob;
			} u;
			u.fred = isp->isp_osinfo.fw->data;
			isp->isp_mdvec->dv_ispfw = u.bob;
		}
	}
#else
	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}
#endif

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;

	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}

	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;

	}

	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

#if __FreeBSD_version >= 500000
	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;
#endif

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:
	/* Unwind in reverse order of acquisition; each step is guarded. */

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

#if __FreeBSD_version >= 500000
	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
#endif

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}


	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param) {
#ifdef ISP_FW_CRASH_DUMP
			if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
				free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
			}
#endif
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		}
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it.
 */
	return (ENXIO);
}

/*
 * Interrupt handler: read the interrupt status via the chip-specific
 * ISR reader and, if real, dispatch into the core with interrupts
 * marked "not ok" so the core won't sleep inside interrupt context.
 */
static void
isp_pci_intr(void *arg)
{
	ispsoftc_t *isp = arg;
	uint16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;	/* not our interrupt */
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


/*
 * Translate a virtual register offset into an offset within the
 * mapped BAR, using the per-block offsets stored in pci_poff[].
 * 'a' is an ispsoftc_t *; the PCI softc embeds it as its first member.
 */
#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)


/*
 * Read a register twice until two successive reads agree (up to 1000
 * tries).  Returns 0 with the stable value in *rp, or 1 on failure.
 * Needed on the 2100, whose registers can read unstably.
 */
static __inline int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

/*
 * Generic interrupt-status reader.  Returns nonzero if an interrupt
 * is pending, filling in *isrp, *semap and (if the semaphore is held)
 * the outgoing mailbox 0 value in *mbp.
 */
static int
isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp,
    uint16_t *semap, uint16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		/* 2100 registers need debounced reads (see above). */
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema)
!= 0) { 1188 if (IS_2100(isp)) { 1189 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 1190 return (0); 1191 } 1192 } else { 1193 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 1194 } 1195 } 1196 return (1); 1197 } 1198 1199 static int 1200 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp, 1201 uint16_t *semap, uint16_t *mbox0p) 1202 { 1203 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1204 uint16_t hccr; 1205 uint32_t r2hisr; 1206 1207 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 1208 *isrp = 0; 1209 return (0); 1210 } 1211 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 1212 IspVirt2Off(pcs, BIU_R2HSTSLO)); 1213 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1214 if ((r2hisr & BIU_R2HST_INTR) == 0) { 1215 *isrp = 0; 1216 return (0); 1217 } 1218 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 1219 case ISPR2HST_ROM_MBX_OK: 1220 case ISPR2HST_ROM_MBX_FAIL: 1221 case ISPR2HST_MBX_OK: 1222 case ISPR2HST_MBX_FAIL: 1223 case ISPR2HST_ASYNC_EVENT: 1224 *isrp = r2hisr & 0xffff; 1225 *mbox0p = (r2hisr >> 16); 1226 *semap = 1; 1227 return (1); 1228 case ISPR2HST_RIO_16: 1229 *isrp = r2hisr & 0xffff; 1230 *mbox0p = ASYNC_RIO1; 1231 *semap = 1; 1232 return (1); 1233 case ISPR2HST_FPOST: 1234 *isrp = r2hisr & 0xffff; 1235 *mbox0p = ASYNC_CMD_CMPLT; 1236 *semap = 1; 1237 return (1); 1238 case ISPR2HST_FPOST_CTIO: 1239 *isrp = r2hisr & 0xffff; 1240 *mbox0p = ASYNC_CTIO_DONE; 1241 *semap = 1; 1242 return (1); 1243 case ISPR2HST_RSPQ_UPDATE: 1244 *isrp = r2hisr & 0xffff; 1245 *mbox0p = 0; 1246 *semap = 0; 1247 return (1); 1248 default: 1249 hccr = ISP_READ(isp, HCCR); 1250 if (hccr & HCCR_PAUSE) { 1251 ISP_WRITE(isp, HCCR, HCCR_RESET); 1252 isp_prt(isp, ISP_LOGERR, 1253 "RISC paused at interrupt (%x->%x\n", hccr, 1254 ISP_READ(isp, HCCR)); 1255 } else { 1256 isp_prt(isp, ISP_LOGERR, "unknown interrerupt 0x%x\n", 1257 r2hisr); 1258 } 1259 return (0); 1260 } 1261 } 1262 1263 static uint16_t 1264 isp_pci_rd_reg(ispsoftc_t *isp, int 
 regoff)
{
	uint16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/* Restore the original configuration. */
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

/*
 * Register write for 1020/1040 class chips; same SXP configuration
 * dance as the read side.
 */
static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

/*
 * Register read for 1080/1280 class chips, which have two SXP banks
 * and a DMA register block selected through BIU_CONF1.
 */
static uint16_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		/* Restore the saved configuration. */
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

/*
 * Register write for 1080/1280 class chips; see isp_pci_rd_reg_1080
 * for the bank selection logic.
 */
static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


/* Argument bundle for the control-space DMA load callback below. */
struct imush {
	ispsoftc_t *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

/*
 * bus_dmamap_load() callback for the control space: carve the single
 * loaded segment into the request queue, result queue and (FC only)
 * scratch DMA addresses.
 */
static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		ispsoftc_t *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)

#if __FreeBSD_version < 500000
#define	isp_dma_tag_create	bus_dma_tag_create
#else
#define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k,	\
	busdma_lock_mutex, &Giant, z)
#endif

/*
 * Allocate and map the request/result queues (plus the FC scratch area)
 * and the per-command DMA maps. Idempotent: returns immediately if the
 * request queue already exists. Returns 0 on success, 1 on failure.
 * Entered with ISP_LOCK held; the lock is dropped around the
 * allocations and retaken before returning.
 */
static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	uint32_t len;
	int i, error, ns;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = (bus_size_t) (1ULL << 32);
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1 << 24);
	}

	/*
	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
	 */
#ifdef	ISP_TARGET_MODE
	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
		return (1);
	}
#endif

	ISP_UNLOCK(isp);
	if (isp_dma_tag_create(NULL, 1, slim, llim, hlim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
#ifdef	ISP_TARGET_MODE
	len = sizeof (void **) * isp->isp_maxcmds;
	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		ISP_LOCK(isp);
		return (1);
	}
#endif
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	/*
	 * Create a tag for the control spaces- force it to within 32 bits.
	 */
	if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			/* Destroy the maps created so far. */
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	/* imc() records the DMA addresses of the queues in the softc. */
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	/* Kernel virtual addresses, laid out the same way as in imc(). */
	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

/* State carried into the per-command DMA map callbacks. */
typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;	/* the CCB (struct ccb_scsiio) */
	void *rq;		/* partially built request queue entry */
	uint16_t *nxtip;	/* in/out: next request queue index */
	uint16_t optr;		/* queue out pointer, for overflow checks */
	int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

/*
 * DMA callback for parallel SCSI target mode: turn one mapped transfer
 * into as many CTIO request entries as needed (ISP_RQDSEG data segments
 * per CTIO), deferring the syshandle and any SCSI status to the last
 * CTIO in the sequence. The last CTIO is left for the caller to hand
 * to the firmware.
 */
static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	uint8_t scsi_status;
	uint16_t curi, nxti, handle;
	uint32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		/* No data to move- queue the CTIO as-is. */
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	ct2_entry_t *cto, *qe;
	uint16_t curi, nxti;
	ispds_t *ds;
	ispds64_t *ds64;
	int segcnt, seglim;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		if (IS_2KLOGIN(isp)) {
			isp_put_ctio2e(isp,
			    (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
		} else {
			isp_put_ctio2(isp, cto, qe);
		}
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}


	nxti = *mp->nxtip;

	/*
	 * Check to see if we need to use DAC addressing or not.
	 *
	 * Any address that's over the 4GB boundary causes this
	 * to happen.
	 */
	segcnt = nseg;
	if (sizeof (bus_addr_t) > 4) {
		for (segcnt = 0; segcnt < nseg; segcnt++) {
			uint64_t addr = dm_segs[segcnt].ds_addr;
			if (addr >= 0x100000000LL) {
				break;
			}
		}
	}
	if (segcnt != nseg) {
		/* A 64 bit address was seen- use a CTIO3 (64 bit segments). */
		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
		seglim = ISP_RQDSEG_T3;
		ds64 = &cto->rsp.m0.ct_dataseg64[0];
		ds = NULL;
	} else {
		seglim = ISP_RQDSEG_T2;
		ds64 = NULL;
		ds = &cto->rsp.m0.ct_dataseg[0];
	}
	cto->ct_seg_count = 0;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		if (ds64) {
			ds64->ds_basehi =
			    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
			ds64->ds_base = dm_segs[segcnt].ds_addr;
			ds64->ds_count = dm_segs[segcnt].ds_len;
			ds64++;
		} else {
			ds->ds_base = dm_segs[segcnt].ds_addr;
			ds->ds_count = dm_segs[segcnt].ds_len;
			ds++;
		}
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
#if __FreeBSD_version < 500000
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%llx:%llu",
		    cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
		    (uint64_t)dm_segs[segcnt].ds_len);
#else
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
		    (uintmax_t)dm_segs[segcnt].ds_len);
#endif
	}

	/* Spill remaining segments into continuation entries. */
	while (segcnt < nseg) {
		uint16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			/*
			 * NOTE(review): this ISP_UNLOCK looks unbalanced-
			 * no other queue-overflow path in this file drops
			 * the lock before returning. Verify against the
			 * locking state expected by the dmasetup caller.
			 */
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
			seglim = ISP_CDSEG64;
			ds = NULL;
			ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
		} else {
			seglim = ISP_CDSEG;
			ds = &crq->req_dataseg[0];
			ds64 = NULL;
			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		}
		for (seg = 0; segcnt < nseg && seg < seglim;
		    segcnt++, seg++) {
			if (ds64) {
				ds64->ds_basehi =
				    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
				ds64->ds_base = dm_segs[segcnt].ds_addr;
				ds64->ds_count = dm_segs[segcnt].ds_len;
				ds64++;
			} else {
				ds->ds_base = dm_segs[segcnt].ds_addr;
				ds->ds_count = dm_segs[segcnt].ds_len;
				ds++;
			}
#if __FreeBSD_version < 500000
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%llx:%llu",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (uint64_t)dm_segs[segcnt].ds_addr,
			    (uint64_t)dm_segs[segcnt].ds_len);
#else
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%jx:%ju",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (uintmax_t)dm_segs[segcnt].ds_addr,
			    (uintmax_t)dm_segs[segcnt].ds_len);
#endif
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	if (IS_2KLOGIN(isp))
		isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
	else
		isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2_a64(void *, bus_dma_segment_t *, int, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

/*
 * Initiator mode DMA callback, 64 bit addressing variant: fill in the
 * partially built request queue entry and any A64 continuation entries
 * needed to cover the mapped transfer.
 */
static void
dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	uint16_t nxti;

	mp = (mush_t *) arg;
2065 if (error) { 2066 mp->error = error; 2067 return; 2068 } 2069 2070 if (nseg < 1) { 2071 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2072 mp->error = EFAULT; 2073 return; 2074 } 2075 csio = mp->cmd_token; 2076 isp = mp->isp; 2077 rq = mp->rq; 2078 pcs = (struct isp_pcisoftc *)mp->isp; 2079 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2080 nxti = *mp->nxtip; 2081 2082 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2083 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2084 } else { 2085 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2086 } 2087 datalen = XS_XFRLEN(csio); 2088 2089 /* 2090 * We're passed an initial partially filled in entry that 2091 * has most fields filled in except for data transfer 2092 * related values. 2093 * 2094 * Our job is to fill in the initial request queue entry and 2095 * then to start allocating and filling in continuation entries 2096 * until we've covered the entire transfer. 2097 */ 2098 2099 if (IS_FC(isp)) { 2100 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 2101 seglim = ISP_RQDSEG_T3; 2102 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 2103 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2104 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2105 } else { 2106 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2107 } 2108 } else { 2109 rq->req_header.rqs_entry_type = RQSTYPE_A64; 2110 if (csio->cdb_len > 12) { 2111 seglim = 0; 2112 } else { 2113 seglim = ISP_RQDSEG_A64; 2114 } 2115 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2116 rq->req_flags |= REQFLAG_DATA_IN; 2117 } else { 2118 rq->req_flags |= REQFLAG_DATA_OUT; 2119 } 2120 } 2121 2122 eseg = dm_segs + nseg; 2123 2124 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2125 if (IS_FC(isp)) { 2126 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 2127 rq3->req_dataseg[rq3->req_seg_count].ds_base = 2128 DMA_LO32(dm_segs->ds_addr); 2129 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 2130 
DMA_HI32(dm_segs->ds_addr); 2131 rq3->req_dataseg[rq3->req_seg_count].ds_count = 2132 dm_segs->ds_len; 2133 } else { 2134 rq->req_dataseg[rq->req_seg_count].ds_base = 2135 DMA_LO32(dm_segs->ds_addr); 2136 rq->req_dataseg[rq->req_seg_count].ds_basehi = 2137 DMA_HI32(dm_segs->ds_addr); 2138 rq->req_dataseg[rq->req_seg_count].ds_count = 2139 dm_segs->ds_len; 2140 } 2141 datalen -= dm_segs->ds_len; 2142 rq->req_seg_count++; 2143 dm_segs++; 2144 } 2145 2146 while (datalen > 0 && dm_segs != eseg) { 2147 uint16_t onxti; 2148 ispcontreq64_t local, *crq = &local, *cqe; 2149 2150 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2151 onxti = nxti; 2152 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2153 if (nxti == mp->optr) { 2154 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2155 mp->error = MUSHERR_NOQENTRIES; 2156 return; 2157 } 2158 rq->req_header.rqs_entry_count++; 2159 MEMZERO((void *)crq, sizeof (*crq)); 2160 crq->req_header.rqs_entry_count = 1; 2161 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2162 2163 seglim = 0; 2164 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2165 crq->req_dataseg[seglim].ds_base = 2166 DMA_LO32(dm_segs->ds_addr); 2167 crq->req_dataseg[seglim].ds_basehi = 2168 DMA_HI32(dm_segs->ds_addr); 2169 crq->req_dataseg[seglim].ds_count = 2170 dm_segs->ds_len; 2171 rq->req_seg_count++; 2172 dm_segs++; 2173 seglim++; 2174 datalen -= dm_segs->ds_len; 2175 } 2176 isp_put_cont64_req(isp, crq, cqe); 2177 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2178 } 2179 *mp->nxtip = nxti; 2180 } 2181 2182 static void 2183 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2184 { 2185 mush_t *mp; 2186 ispsoftc_t *isp; 2187 struct ccb_scsiio *csio; 2188 struct isp_pcisoftc *pcs; 2189 bus_dmamap_t *dp; 2190 bus_dma_segment_t *eseg; 2191 ispreq_t *rq; 2192 int seglim, datalen; 2193 uint16_t nxti; 2194 2195 mp = (mush_t *) arg; 2196 if (error) { 2197 mp->error = error; 2198 return; 2199 } 2200 
2201 if (nseg < 1) { 2202 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2203 mp->error = EFAULT; 2204 return; 2205 } 2206 csio = mp->cmd_token; 2207 isp = mp->isp; 2208 rq = mp->rq; 2209 pcs = (struct isp_pcisoftc *)mp->isp; 2210 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2211 nxti = *mp->nxtip; 2212 2213 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2214 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2215 } else { 2216 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2217 } 2218 2219 datalen = XS_XFRLEN(csio); 2220 2221 /* 2222 * We're passed an initial partially filled in entry that 2223 * has most fields filled in except for data transfer 2224 * related values. 2225 * 2226 * Our job is to fill in the initial request queue entry and 2227 * then to start allocating and filling in continuation entries 2228 * until we've covered the entire transfer. 2229 */ 2230 2231 if (IS_FC(isp)) { 2232 seglim = ISP_RQDSEG_T2; 2233 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 2234 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2235 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2236 } else { 2237 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2238 } 2239 } else { 2240 if (csio->cdb_len > 12) { 2241 seglim = 0; 2242 } else { 2243 seglim = ISP_RQDSEG; 2244 } 2245 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2246 rq->req_flags |= REQFLAG_DATA_IN; 2247 } else { 2248 rq->req_flags |= REQFLAG_DATA_OUT; 2249 } 2250 } 2251 2252 eseg = dm_segs + nseg; 2253 2254 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2255 if (IS_FC(isp)) { 2256 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 2257 rq2->req_dataseg[rq2->req_seg_count].ds_base = 2258 DMA_LO32(dm_segs->ds_addr); 2259 rq2->req_dataseg[rq2->req_seg_count].ds_count = 2260 dm_segs->ds_len; 2261 } else { 2262 rq->req_dataseg[rq->req_seg_count].ds_base = 2263 DMA_LO32(dm_segs->ds_addr); 2264 rq->req_dataseg[rq->req_seg_count].ds_count = 2265 
dm_segs->ds_len; 2266 } 2267 datalen -= dm_segs->ds_len; 2268 rq->req_seg_count++; 2269 dm_segs++; 2270 } 2271 2272 while (datalen > 0 && dm_segs != eseg) { 2273 uint16_t onxti; 2274 ispcontreq_t local, *crq = &local, *cqe; 2275 2276 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2277 onxti = nxti; 2278 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2279 if (nxti == mp->optr) { 2280 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2281 mp->error = MUSHERR_NOQENTRIES; 2282 return; 2283 } 2284 rq->req_header.rqs_entry_count++; 2285 MEMZERO((void *)crq, sizeof (*crq)); 2286 crq->req_header.rqs_entry_count = 1; 2287 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2288 2289 seglim = 0; 2290 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 2291 crq->req_dataseg[seglim].ds_base = 2292 DMA_LO32(dm_segs->ds_addr); 2293 crq->req_dataseg[seglim].ds_count = 2294 dm_segs->ds_len; 2295 rq->req_seg_count++; 2296 dm_segs++; 2297 seglim++; 2298 datalen -= dm_segs->ds_len; 2299 } 2300 isp_put_cont_req(isp, crq, cqe); 2301 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2302 } 2303 *mp->nxtip = nxti; 2304 } 2305 2306 /* 2307 * We enter with ISP_LOCK held 2308 */ 2309 static int 2310 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq, 2311 uint16_t *nxtip, uint16_t optr) 2312 { 2313 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2314 ispreq_t *qep; 2315 bus_dmamap_t *dp = NULL; 2316 mush_t mush, *mp; 2317 void (*eptr)(void *, bus_dma_segment_t *, int, int); 2318 2319 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 2320 #ifdef ISP_TARGET_MODE 2321 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 2322 if (IS_FC(isp)) { 2323 eptr = tdma_mkfc; 2324 } else { 2325 eptr = tdma_mk; 2326 } 2327 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2328 (csio->dxfer_len == 0)) { 2329 mp = &mush; 2330 mp->isp = isp; 2331 mp->cmd_token = csio; 2332 mp->rq = rq; /* really a ct_entry_t or 
ct2_entry_t */ 2333 mp->nxtip = nxtip; 2334 mp->optr = optr; 2335 mp->error = 0; 2336 ISPLOCK_2_CAMLOCK(isp); 2337 (*eptr)(mp, NULL, 0, 0); 2338 CAMLOCK_2_ISPLOCK(isp); 2339 goto mbxsync; 2340 } 2341 } else 2342 #endif 2343 if (sizeof (bus_addr_t) > 4) { 2344 eptr = dma2_a64; 2345 } else { 2346 eptr = dma2; 2347 } 2348 2349 2350 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2351 (csio->dxfer_len == 0)) { 2352 rq->req_seg_count = 1; 2353 goto mbxsync; 2354 } 2355 2356 /* 2357 * Do a virtual grapevine step to collect info for 2358 * the callback dma allocation that we have to use... 2359 */ 2360 mp = &mush; 2361 mp->isp = isp; 2362 mp->cmd_token = csio; 2363 mp->rq = rq; 2364 mp->nxtip = nxtip; 2365 mp->optr = optr; 2366 mp->error = 0; 2367 2368 ISPLOCK_2_CAMLOCK(isp); 2369 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 2370 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 2371 int error, s; 2372 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2373 s = splsoftvm(); 2374 error = bus_dmamap_load(pcs->dmat, *dp, 2375 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 2376 if (error == EINPROGRESS) { 2377 bus_dmamap_unload(pcs->dmat, *dp); 2378 mp->error = EINVAL; 2379 isp_prt(isp, ISP_LOGERR, 2380 "deferred dma allocation not supported"); 2381 } else if (error && mp->error == 0) { 2382 #ifdef DIAGNOSTIC 2383 isp_prt(isp, ISP_LOGERR, 2384 "error %d in dma mapping code", error); 2385 #endif 2386 mp->error = error; 2387 } 2388 splx(s); 2389 } else { 2390 /* Pointer to physical buffer */ 2391 struct bus_dma_segment seg; 2392 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 2393 seg.ds_len = csio->dxfer_len; 2394 (*eptr)(mp, &seg, 1, 0); 2395 } 2396 } else { 2397 struct bus_dma_segment *segs; 2398 2399 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2400 isp_prt(isp, ISP_LOGERR, 2401 "Physical segment pointers unsupported"); 2402 mp->error = EINVAL; 2403 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2404 isp_prt(isp, ISP_LOGERR, 2405 
"Virtual segment addresses unsupported"); 2406 mp->error = EINVAL; 2407 } else { 2408 /* Just use the segments provided */ 2409 segs = (struct bus_dma_segment *) csio->data_ptr; 2410 (*eptr)(mp, segs, csio->sglist_cnt, 0); 2411 } 2412 } 2413 CAMLOCK_2_ISPLOCK(isp); 2414 if (mp->error) { 2415 int retval = CMD_COMPLETE; 2416 if (mp->error == MUSHERR_NOQENTRIES) { 2417 retval = CMD_EAGAIN; 2418 } else if (mp->error == EFBIG) { 2419 XS_SETERR(csio, CAM_REQ_TOO_BIG); 2420 } else if (mp->error == EINVAL) { 2421 XS_SETERR(csio, CAM_REQ_INVALID); 2422 } else { 2423 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 2424 } 2425 return (retval); 2426 } 2427 mbxsync: 2428 switch (rq->req_header.rqs_entry_type) { 2429 case RQSTYPE_REQUEST: 2430 isp_put_request(isp, rq, qep); 2431 break; 2432 case RQSTYPE_CMDONLY: 2433 isp_put_extended_request(isp, (ispextreq_t *)rq, 2434 (ispextreq_t *)qep); 2435 break; 2436 case RQSTYPE_T2RQS: 2437 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2438 break; 2439 case RQSTYPE_A64: 2440 case RQSTYPE_T3RQS: 2441 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2442 break; 2443 } 2444 return (CMD_QUEUED); 2445 } 2446 2447 static void 2448 isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint16_t handle) 2449 { 2450 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2451 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 2452 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2453 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 2454 } else { 2455 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 2456 } 2457 bus_dmamap_unload(pcs->dmat, *dp); 2458 } 2459 2460 2461 static void 2462 isp_pci_reset1(ispsoftc_t *isp) 2463 { 2464 /* Make sure the BIOS is disabled */ 2465 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2466 /* and enable interrupts */ 2467 ENABLE_INTS(isp); 2468 } 2469 2470 static void 2471 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) 2472 { 2473 struct isp_pcisoftc *pcs = (struct 
isp_pcisoftc *)isp; 2474 if (msg) 2475 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2476 else 2477 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2478 if (IS_SCSI(isp)) 2479 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2480 else 2481 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2482 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2483 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2484 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2485 2486 2487 if (IS_SCSI(isp)) { 2488 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2489 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2490 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2491 ISP_READ(isp, CDMA_FIFO_STS)); 2492 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2493 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2494 ISP_READ(isp, DDMA_FIFO_STS)); 2495 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2496 ISP_READ(isp, SXP_INTERRUPT), 2497 ISP_READ(isp, SXP_GROSS_ERR), 2498 ISP_READ(isp, SXP_PINS_CTRL)); 2499 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2500 } 2501 printf(" mbox regs: %x %x %x %x %x\n", 2502 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2503 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2504 ISP_READ(isp, OUTMAILBOX4)); 2505 printf(" PCI Status Command/Status=%x\n", 2506 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2507 } 2508