/*-
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/isp/isp_freebsd.h>

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int);
static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t);
static void isp_pci_run_isr(ispsoftc_t *);
static void isp_pci_run_isr_2300(ispsoftc_t *);
static void isp_pci_run_isr_2400(ispsoftc_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static void isp_pci_mbxdmafree(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
static int isp_pci_irqsetup(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_run_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_run_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
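
/*
 * Each ispmdvec table in this file supplies the chip-specific entry points
 * used by the core driver: the interrupt service runner, register read/write
 * accessors, mailbox/queue DMA setup, command DMA mapping and teardown,
 * interrupt setup and an optional register dump routine.  The trailing
 * members, where present, are the firmware image pointer (dv_ispfw, filled
 * in at attach time if a firmware module is found) and BIU CONF1 burst/FIFO
 * flags.
 */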

static struct ispmdvec mdvec_12160 = {
	isp_pci_run_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_run_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_run_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_run_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

static struct ispmdvec mdvec_2500 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

static struct ispmdvec mdvec_2600 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2600,
	isp_pci_wr_reg_2600,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef	PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE		0x0400
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2432
#define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2532
#define	PCI_PRODUCT_QLOGIC_ISP2532	0x2532
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6322
#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP5432
#define	PCI_PRODUCT_QLOGIC_ISP5432	0x5432
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2031
#define	PCI_PRODUCT_QLOGIC_ISP2031	0x2031
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP8031
#define	PCI_PRODUCT_QLOGIC_ISP8031	0x8031
#endif

#define	PCI_QLOGIC_ISP5432	\
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2432	\
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2532	\
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2031	\
	((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP8031	\
	((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


#define	ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
struct isp_pcisoftc {
	ispsoftc_t		pci_isp;
	device_t		pci_dev;
	struct resource *	regs;
	struct resource *	regs1;
	struct resource *	regs2;
	struct {
		int iqd;
		struct resource *irq;
		void *ih;
	} irq[ISP_MAX_IRQS];
	int			rtp;
	int			rgd;
	int			rtp1;
	int			rgd1;
	int			rtp2;
	int			rgd2;
	int16_t			pci_poff[_NREG_BLKS];
	bus_dma_tag_t		dmat;
	int			msicount;
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	DEVMETHOD(device_detach,	isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_DEPEND(isp, cam, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
static int isp_nvports = 0;

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2031:
		device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP8031:
		device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp)
{
	int tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval <= 254) {
		isp_nvports = tval;
	}
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;
}

static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval = 0;
	char prefix[12], name[16];

	if (chan == 0)
		prefix[0] = 0;
	else
		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
	snprintf(name, sizeof(name), "%siid", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval)) {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	if (IS_SCSI(isp))
		return;

	tval = -1;
	snprintf(name, sizeof(name), "%srole", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_BOTH:
			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	snprintf(name, sizeof(name), "%sfullduplex", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = NULL;
	snprintf(name, sizeof(name), "%stopology", prefix);
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr) == 0 && sptr != NULL) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		}
		else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

#ifdef ISP_FCTAPE_OFF
	isp->isp_confopts |= ISP_CFG_NOFCTAPE;
#else
	isp->isp_confopts |= ISP_CFG_FCTAPE;
#endif

	tval = 0;
	snprintf(name, sizeof(name), "%snofctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_FCTAPE;
		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
	}

	tval = 0;
	snprintf(name, sizeof(name), "%sfctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
		isp->isp_confopts |= ISP_CFG_FCTAPE;
	}


	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = NULL;
	snprintf(name, sizeof(name), "%sportwwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
		char *eptr = NULL;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = NULL;
	snprintf(name, sizeof(name), "%snodewwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
		char *eptr = NULL;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}

static int
isp_pci_attach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int i;
	uint32_t data, cmd, linesz, did;
	size_t psize, xsize;
	char fwname[32];

	pcs->pci_dev = dev;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;
	if (sizeof (bus_addr_t) > 4)
		isp->isp_osinfo.sixtyfourbit = 1;
	mtx_init(&isp->isp_lock, "isp", NULL, MTX_DEF);

	/*
	 * Get Generic Options
	 */
	isp_nvports = 0;
	isp_get_generic_options(dev, isp);

	linesz = PCI_DFLT_LNSZ;
	pcs->regs = pcs->regs2 = NULL;
	pcs->rgd = pcs->rtp = 0;

	pcs->pci_dev = dev;
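
	/*
	 * Default register-block offsets within the mapped BAR; the
	 * chip-specific cases below override the mailbox and DMA block
	 * entries as needed, and IspVirt2Off() later uses this table to
	 * turn virtual register offsets into bus-space offsets.
	 */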
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2031:
	case PCI_QLOGIC_ISP8031:
		did = 0x2600;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2600;
		isp->isp_type = ISP_HA_FC_2600;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_26XX(isp)) {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(0);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		pcs->rtp1 = SYS_RES_MEMORY;
		pcs->rgd1 = PCIR_BAR(2);
		pcs->regs1 = bus_alloc_resource_any(dev, pcs->rtp1, &pcs->rgd1,
		    RF_ACTIVE);
		pcs->rtp2 = SYS_RES_MEMORY;
		pcs->rgd2 = PCIR_BAR(4);
		pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2,
		    RF_ACTIVE);
	} else {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(1);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		if (pcs->regs == NULL) {
			pcs->rtp = SYS_RES_IOPORT;
			pcs->rgd = PCIR_BAR(0);
			pcs->regs = bus_alloc_resource_any(dev, pcs->rtp,
			    &pcs->rgd, RF_ACTIVE);
		}
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "Unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "Using %s space register mapping\n",
		    (pcs->rtp == SYS_RES_IOPORT)? "I/O" : "Memory");
	}
	isp->isp_regs = pcs->regs;
	isp->isp_regs2 = pcs->regs2;

	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	isp->isp_osinfo.fw = NULL;
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	if (isp_reinit(isp, 1) != 0) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_shutdown(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	for (i = 0; i < isp->isp_nirq; i++) {
		(void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
		    pcs->irq[i].irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs)
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_lock);
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int i, status;

	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_shutdown(isp);
	ISP_UNLOCK(isp);
	for (i = 0; i < isp->isp_nirq; i++) {
		(void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
		    pcs->irq[i].irq);
	}
	if (pcs->msicount)
		pci_release_msi(dev);
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	isp_pci_mbxdmafree(isp);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_lock);
	return (0);
}

#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define	BXR2(isp, off)		bus_read_2((isp)->isp_regs, (off))
#define	BXW2(isp, off, v)	bus_write_2((isp)->isp_regs, (off), (v))
#define	BXR4(isp, off)		bus_read_4((isp)->isp_regs, (off))
#define	BXW4(isp, off, v)	bus_write_4((isp)->isp_regs, (off), (v))
#define	B2R4(isp, off)		bus_read_4((isp)->isp_regs2, (off))
#define	B2W4(isp, off, v)	bus_write_4((isp)->isp_regs2, (off), (v))

static ISP_INLINE uint16_t
isp_pci_rd_debounced(ispsoftc_t *isp, int off)
{
	uint16_t val, prev;

	val = BXR2(isp, IspVirt2Off(isp, off));
	do {
		prev = val;
		val = BXR2(isp, IspVirt2Off(isp, off));
	} while (val != prev);
	return (val);
}

static void
isp_pci_run_isr(ispsoftc_t *isp)
{
	uint16_t isr, sema, info;

	if (IS_2100(isp)) {
		isr = isp_pci_rd_debounced(isp, BIU_ISR);
		sema = isp_pci_rd_debounced(isp, BIU_SEMA);
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x",
	    isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0)
		return;
	if (sema != 0) {
		if (IS_2100(isp))
			info = isp_pci_rd_debounced(isp, OUTMAILBOX0);
		else
			info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		if (info & MBOX_COMMAND_COMPLETE)
			isp_intr_mbox(isp, info);
		else
			isp_intr_async(isp, info);
		if (!IS_FC(isp) && isp->isp_state == ISP_RUNSTATE)
			isp_intr_respq(isp);
	} else
		isp_intr_respq(isp);
	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
	if (sema)
		ISP_WRITE(isp, BIU_SEMA, 0);
}

static void
isp_pci_run_isr_2300(ispsoftc_t *isp)
{
	uint32_t hccr, r2hisr;
	uint16_t isr, info;

	if ((BXR2(isp, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0)
		return;
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0)
		return;
	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
	info = r2hisr >> 16;
	switch (isr) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
		isp_intr_mbox(isp, info);
		break;
	case ISPR2HST_ASYNC_EVENT:
		isp_intr_async(isp, info);
		break;
	case ISPR2HST_RIO_16:
		isp_intr_async(isp, ASYNC_RIO16_1);
		break;
	case ISPR2HST_FPOST:
		isp_intr_async(isp, ASYNC_CMD_CMPLT);
		break;
	case ISPR2HST_FPOST_CTIO:
		isp_intr_async(isp, ASYNC_CTIO_DONE);
		break;
	case ISPR2HST_RSPQ_UPDATE:
		isp_intr_respq(isp);
		break;
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
	}
	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
	ISP_WRITE(isp, BIU_SEMA, 0);
}

static void
isp_pci_run_isr_2400(ispsoftc_t *isp)
{
	uint32_t r2hisr;
	uint16_t isr, info;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0)
		return;
	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
	info = (r2hisr >> 16);
	switch (isr) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
		isp_intr_mbox(isp, info);
		break;
	case ISPR2HST_ASYNC_EVENT:
		isp_intr_async(isp, info);
		break;
	case ISPR2HST_RSPQ_UPDATE:
		isp_intr_respq(isp);
		break;
	case ISPR2HST_RSPQ_UPDATE2:
#ifdef	ISP_TARGET_MODE
	case ISPR2HST_ATIO_RSPQ_UPDATE:
#endif
		isp_intr_respq(isp);
		/* FALLTHROUGH */
#ifdef	ISP_TARGET_MODE
	case ISPR2HST_ATIO_UPDATE:
	case ISPR2HST_ATIO_UPDATE2:
		isp_intr_atioq(isp);
#endif
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
	}
	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
}

static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}

}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(isp, IspVirt2Off(isp, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
		    regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown block write at 0x%x", regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(isp, IspVirt2Off(isp, regoff), val);
#ifdef MEMORYBARRIERW
		if (regoff == BIU2400_REQINP ||
		    regoff == BIU2400_RSPOUTP ||
		    regoff == BIU2400_PRI_REQINP ||
		    regoff == BIU2400_ATIO_RSPOUTP)
			MEMORYBARRIERW(isp, SYNC_REG,
			    IspVirt2Off(isp, regoff), 4, -1)
		else
#endif
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
		    regoff);
		break;
	}
}

static uint32_t
isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;

	switch (regoff) {
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
		    regoff);
		rv = 0xffffffff;
		break;
	case BIU2400_REQINP:
		rv = B2R4(isp, 0x00);
		break;
	case BIU2400_REQOUTP:
		rv = B2R4(isp, 0x04);
		break;
	case BIU2400_RSPINP:
		rv = B2R4(isp, 0x08);
		break;
	case BIU2400_RSPOUTP:
		rv = B2R4(isp, 0x0c);
		break;
	case BIU2400_ATIO_RSPINP:
		rv = B2R4(isp, 0x10);
		break;
	case BIU2400_ATIO_RSPOUTP:
		rv = B2R4(isp, 0x14);
		break;
	default:
		rv = isp_pci_rd_reg_2400(isp, regoff);
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int off;

	switch (regoff) {
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
		    regoff);
		return;
	case BIU2400_REQINP:
		off = 0x00;
		break;
	case BIU2400_REQOUTP:
		off = 0x04;
		break;
	case BIU2400_RSPINP:
		off = 0x08;
		break;
	case BIU2400_RSPOUTP:
		off = 0x0c;
		break;
	case BIU2400_ATIO_RSPINP:
		off = 0x10;
		break;
	case BIU2400_ATIO_RSPOUTP:
		off = 0x14;
		break;
	default:
		isp_pci_wr_reg_2400(isp, regoff, val);
		return;
	}
	B2W4(isp, off, val);
}


struct imush {
	bus_addr_t maddr;
	int error;
};

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;

	if (!(imushp->error = error))
		imushp->maddr = segs[0].ds_addr;
}

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len, nsegs;
	int i, error, cmap = 0;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;
	isp_ecmd_t *ecmd;

	/* Already been here? If so, leave... */
	if (isp->isp_xflist != NULL)
		return (0);
	if (isp->isp_rquest != NULL && isp->isp_maxcmds == 0)
		return (0);
	ISP_UNLOCK(isp);
	if (isp->isp_rquest != NULL)
		goto gotmaxcmds;

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		if (sizeof (bus_size_t) > 4)
			slim = (bus_size_t) (1ULL << 32);
		else
			slim = (bus_size_t) (1UL << 31);
		llim = BUS_SPACE_MAXADDR;
	} else {
		slim = (1UL << 24);
		llim = BUS_SPACE_MAXADDR_32BIT;
	}
	if (isp->isp_osinfo.sixtyfourbit)
		nsegs = ISP_NSEG64_MAX;
	else
		nsegs = ISP_NSEG_MAX;

	if (bus_dma_tag_create(bus_get_dma_tag(ISP_PCD(isp)), 1,
	    slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0,
	    busdma_lock_mutex, &isp->isp_lock, &isp->isp_osinfo.dmat)) {
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	/*
	 * Allocate and map the request queue and a region for external
	 * DMA addressable command/status structures (22XX and later).
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (isp->isp_type >= ISP_HA_FC_2200)
		len += (N_XCMDS * XCMD_SIZE);
	if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
	    &isp->isp_osinfo.reqdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		goto bad;
	}
	isp->isp_rquest = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap,
	    base, len, imc, &im, 0) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_rquest_dma = im.maddr;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	im.maddr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (isp->isp_type >= ISP_HA_FC_2200) {
		isp->isp_osinfo.ecmd_dma = im.maddr;
		isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base;
		isp->isp_osinfo.ecmd_base = isp->isp_osinfo.ecmd_free;
		for (ecmd = isp->isp_osinfo.ecmd_free;
		    ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
			if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1])
				ecmd->next = NULL;
			else
				ecmd->next = ecmd + 1;
		}
	}

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
	    &isp->isp_osinfo.respdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
		goto bad;
	}
	isp->isp_result = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap,
	    base, len, imc, &im, 0) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_result_dma = im.maddr;

#ifdef	ISP_TARGET_MODE
	/*
	 * Allocate and map ATIO queue on 24xx with target mode.
	 */
	if (IS_24XX(isp)) {
		len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		    len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
		    &isp->isp_osinfo.atiodmat)) {
			isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag");
			goto bad;
		}
		if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base,
		    BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) {
			isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory");
			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
			goto bad;
		}
		isp->isp_atioq = base;
		im.error = 0;
		if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap,
		    base, len, imc, &im, 0) || im.error) {
			isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error);
			goto bad;
		}
		isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx",
		    (uintmax_t)im.maddr, (uintmax_t)len);
		isp->isp_atioq_dma = im.maddr;
	}
#endif

	if (IS_FC(isp)) {
		if (bus_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, busdma_lock_mutex,
		    &isp->isp_lock, &isp->isp_osinfo.iocbdmat)) {
			goto bad;
		}
		if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat,
		    (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0)
			goto bad;
		isp->isp_iocb = base;
		im.error = 0;
		if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap,
		    base, 2*QENTRY_LEN, imc, &im, 0) || im.error)
			goto bad;
		isp->isp_iocb_dma = im.maddr;

		if (bus_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, busdma_lock_mutex,
		    &isp->isp_lock, &isp->isp_osinfo.scdmat))
			goto bad;
		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
			if (bus_dmamem_alloc(isp->isp_osinfo.scdmat,
			    (void **)&base, BUS_DMA_COHERENT, &fc->scmap) != 0)
				goto bad;
			FCPARAM(isp, cmap)->isp_scratch = base;
			im.error = 0;
			if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap,
			    base, ISP_FC_SCRLEN, imc, &im, 0) || im.error) {
				bus_dmamem_free(isp->isp_osinfo.scdmat,
				    base,
				    fc->scmap);
				FCPARAM(isp, cmap)->isp_scratch = NULL;
				goto bad;
			}
			FCPARAM(isp, cmap)->isp_scdma = im.maddr;
			if (!IS_2100(isp)) {
				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
					if (n == NULL) {
						while (fc->nexus_free_list) {
							n = fc->nexus_free_list;
							fc->nexus_free_list = n->next;
							free(n, M_DEVBUF);
						}
						goto bad;
					}
					n->next = fc->nexus_free_list;
					fc->nexus_free_list = n;
				}
			}
		}
	}

	if (isp->isp_maxcmds == 0) {
		ISP_LOCK(isp);
		return (0);
	}

gotmaxcmds:
	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *)
	    malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat,
				    isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_lock, 0);
		if (i == isp->isp_maxcmds-1)
			pcmd->next = NULL;
		else
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	for (len = 0; len < isp->isp_maxcmds - 1; len++)
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	isp->isp_xffree = isp->isp_xflist;

	ISP_LOCK(isp);
	return (0);

bad:
	isp_pci_mbxdmafree(isp);
	ISP_LOCK(isp);
	return (1);
}

static void
isp_pci_mbxdmafree(ispsoftc_t *isp)
{
	int i;

	if (isp->isp_xflist != NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
	}
	if (isp->isp_osinfo.pcmd_pool != NULL) {
		for (i = 0; i < isp->isp_maxcmds; i++) {
			bus_dmamap_destroy(isp->isp_osinfo.dmat,
			    isp->isp_osinfo.pcmd_pool[i].dmap);
		}
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		isp->isp_osinfo.pcmd_pool = NULL;
	}
	if (IS_FC(isp)) {
		for (i = 0; i < isp->isp_nchan; i++) {
			struct isp_fc *fc = ISP_FC_PC(isp, i);
			if (FCPARAM(isp, i)->isp_scdma != 0) {
				bus_dmamap_unload(isp->isp_osinfo.scdmat,
				    fc->scmap);
				FCPARAM(isp, i)->isp_scdma = 0;
			}
			if (FCPARAM(isp, i)->isp_scratch != NULL) {
				bus_dmamem_free(isp->isp_osinfo.scdmat,
				    FCPARAM(isp, i)->isp_scratch, fc->scmap);
				FCPARAM(isp, i)->isp_scratch = NULL;
			}
			while (fc->nexus_free_list) {
				struct isp_nexus *n = fc->nexus_free_list;
				fc->nexus_free_list = n->next;
				free(n, M_DEVBUF);
			}
		}
		if (isp->isp_iocb_dma != 0) {
			bus_dma_tag_destroy(isp->isp_osinfo.scdmat);
			bus_dmamap_unload(isp->isp_osinfo.iocbdmat,
			    isp->isp_osinfo.iocbmap);
			isp->isp_iocb_dma = 0;
		}
		if (isp->isp_iocb != NULL) {
			bus_dmamem_free(isp->isp_osinfo.iocbdmat,
			    isp->isp_iocb, isp->isp_osinfo.iocbmap);
			bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat);
		}
	}
#ifdef	ISP_TARGET_MODE
	if (IS_24XX(isp)) {
		if (isp->isp_atioq_dma != 0) {
			bus_dmamap_unload(isp->isp_osinfo.atiodmat,
			    isp->isp_osinfo.atiomap);
			isp->isp_atioq_dma = 0;
		}
		if (isp->isp_atioq != NULL) {
			bus_dmamem_free(isp->isp_osinfo.atiodmat, isp->isp_atioq,
			    isp->isp_osinfo.atiomap);
			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
			isp->isp_atioq = NULL;
		}
	}
#endif
	if (isp->isp_result_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.respdmat,
		    isp->isp_osinfo.respmap);
		isp->isp_result_dma = 0;
	}
	if (isp->isp_result != NULL) {
		bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result,
		    isp->isp_osinfo.respmap);
		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
		isp->isp_result = NULL;
	}
	if (isp->isp_rquest_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.reqdmat,
		    isp->isp_osinfo.reqmap);
		isp->isp_rquest_dma = 0;
	}
	if (isp->isp_rquest != NULL) {
		bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest,
		    isp->isp_osinfo.reqmap);
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		isp->isp_rquest = NULL;
	}
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
static void
tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else {
			dm_segs = NULL;
			nseg = 0;
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
	}
}
#endif

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg,
				    ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
				rq->req_header.rqs_entry_type = RQSTYPE_A64;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else {
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		break;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
		break;
	}
}

static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);
	int error;

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;

#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO)
		eptr = tdma2;
	else
#endif
		eptr = dma2;

	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
	    (union ccb *)csio, eptr, mp, 0);
	if (error == EINPROGRESS) {
		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
		mp->error = EINVAL;
		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
	} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
		mp->error = error;
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			csio->ccb_h.status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			csio->ccb_h.status = CAM_REQ_INVALID;
		} else {
			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	}
	return (CMD_QUEUED);
}

static int
isp_pci_irqsetup(ispsoftc_t *isp)
{
	device_t dev = isp->isp_osinfo.dev;
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	driver_intr_t *f;
	int i, max_irq;

	/* Allocate IRQs only once. */
	if (isp->isp_nirq > 0)
		return (0);

	ISP_UNLOCK(isp);
	if (ISP_CAP_MSIX(isp)) {
		max_irq = min(ISP_MAX_IRQS, IS_26XX(isp) ?
		    3 : 2);
		pcs->msicount = imin(pci_msix_count(dev), max_irq);
		if (pcs->msicount > 0 &&
		    pci_alloc_msix(dev, &pcs->msicount) != 0)
			pcs->msicount = 0;
	}
	if (pcs->msicount == 0) {
		pcs->msicount = imin(pci_msi_count(dev), 1);
		if (pcs->msicount > 0 &&
		    pci_alloc_msi(dev, &pcs->msicount) != 0)
			pcs->msicount = 0;
	}
	for (i = 0; i < MAX(1, pcs->msicount); i++) {
		pcs->irq[i].iqd = i + (pcs->msicount > 0);
		pcs->irq[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &pcs->irq[i].iqd, RF_ACTIVE | RF_SHAREABLE);
		if (pcs->irq[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			break;
		}
		if (i == 0)
			f = isp_platform_intr;
		else if (i == 1)
			f = isp_platform_intr_resp;
		else
			f = isp_platform_intr_atio;
		if (bus_setup_intr(dev, pcs->irq[i].irq, ISP_IFLAGS, NULL,
		    f, isp, &pcs->irq[i].ih)) {
			device_printf(dev, "could not setup interrupt\n");
			(void) bus_release_resource(dev, SYS_RES_IRQ,
			    pcs->irq[i].iqd, pcs->irq[i].irq);
			break;
		}
		if (pcs->msicount > 1) {
			bus_describe_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih,
			    "%d", i);
		}
		isp->isp_nirq = i + 1;
	}
	ISP_LOCK(isp);

	return (isp->isp_nirq == 0);
}

static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf("  biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf("  biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf("  cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf("  ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf("  sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf("  mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf("  PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}