/*-
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/isp/isp_freebsd.h>

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static int isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2400(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);


static void isp_pci_reset0(ispsoftc_t *);
static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

static struct ispmdvec mdvec_2500 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

static struct ispmdvec mdvec_2600 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

#ifndef PCIM_CMD_INVEN
#define PCIM_CMD_INVEN 0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define PCIM_CMD_BUSMASTEREN 0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define PCIM_CMD_PERRESPEN 0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define PCIM_CMD_SEREN 0x0100
#endif
#ifndef PCIM_CMD_INTX_DISABLE
#define PCIM_CMD_INTX_DISABLE 0x0400
#endif

#ifndef PCIR_COMMAND
#define PCIR_COMMAND 0x04
#endif

#ifndef PCIR_CACHELNSZ
#define PCIR_CACHELNSZ 0x0c
#endif

#ifndef PCIR_LATTIMER
#define PCIR_LATTIMER 0x0d
#endif

#ifndef PCIR_ROMADDR
#define PCIR_ROMADDR 0x30
#endif

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP10160
#define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2322
#define PCI_PRODUCT_QLOGIC_ISP2322 0x2322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2422
#define PCI_PRODUCT_QLOGIC_ISP2422 0x2422
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2432
#define PCI_PRODUCT_QLOGIC_ISP2432 0x2432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2532
#define PCI_PRODUCT_QLOGIC_ISP2532 0x2532
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6312
#define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6322
#define PCI_PRODUCT_QLOGIC_ISP6322 0x6322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP5432
#define PCI_PRODUCT_QLOGIC_ISP5432 0x5432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2031
#define PCI_PRODUCT_QLOGIC_ISP2031 0x2031
#endif

#define PCI_QLOGIC_ISP5432 \
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1020 \
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080 \
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP10160 \
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP12160 \
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240 \
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1280 \
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100 \
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200 \
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2300 \
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2312 \
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2322 \
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2422 \
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2432 \
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2532 \
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6312 \
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6322 \
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2031 \
	((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define AMI_RAID_SUBVENDOR_ID 0x101e

#define PCI_DFLT_LTNCY 0x40
#define PCI_DFLT_LNSZ 0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


#define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev
struct isp_pcisoftc {
	ispsoftc_t pci_isp;
	device_t pci_dev;
	struct resource * regs;
	void * irq;
	int iqd;
	int rtp;
	int rgd;
	void * ih;
	int16_t pci_poff[_NREG_BLKS];
	bus_dma_tag_t dmat;
	int msicount;
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	DEVMETHOD(device_detach, isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_DEPEND(isp, cam, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
static int isp_nvports = 0;

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2031:
		device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp)
{
	int tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval <= 254) {
		isp_nvports = tval;
	}
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;
}

static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval = 0;
	char prefix[12], name[16];

	if (chan == 0)
		prefix[0] = 0;
	else
		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
	snprintf(name, sizeof(name), "%siid", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval)) {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	if (IS_SCSI(isp))
		return;

	tval = -1;
	snprintf(name, sizeof(name), "%srole", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_BOTH:
			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	snprintf(name, sizeof(name), "%sfullduplex", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = 0;
	snprintf(name, sizeof(name), "%stopology", prefix);
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	tval = 0;
	snprintf(name, sizeof(name), "%snofctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
	}

	tval = 0;
	snprintf(name, sizeof(name), "%sfctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
		isp->isp_confopts |= ISP_CFG_FCTAPE;
	}


	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	snprintf(name, sizeof(name), "%sportwwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = 0;
	snprintf(name, sizeof(name), "%snodewwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}

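/*
 * Illustrative only: the options read above arrive through the standard
 * FreeBSD resource/hint mechanism, so for unit 0 the corresponding entries
 * in /boot/device.hints would look roughly like this (the WWNs are made-up
 * example values and, as the comment above notes, must carry the leading 'w'):
 *
 *	hint.isp.0.fwload_disable="1"
 *	hint.isp.0.debug="1"
 *	hint.isp.0.vports="2"
 *	hint.isp.0.portwwn="w50000000aaaa0001"
 *	hint.isp.0.chan1.nodewwn="w50000000aaaa0002"
 */
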
static int
isp_pci_attach(device_t dev)
{
	int i, locksetup = 0;
	uint32_t data, cmd, linesz, did;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	size_t psize, xsize;
	char fwname[32];

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));

	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;
	if (sizeof (bus_addr_t) > 4)
		isp->isp_osinfo.sixtyfourbit = 1;

	/*
	 * Get Generic Options
	 */
	isp_nvports = 0;
	isp_get_generic_options(dev, isp);

	linesz = PCI_DFLT_LNSZ;
	pcs->irq = pcs->regs = NULL;
	pcs->rgd = pcs->rtp = pcs->iqd = 0;

	pcs->pci_dev = dev;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2031:
		did = 0x2600;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2600;
		isp->isp_type = ISP_HA_FC_2600;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_26XX(isp)) {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(0);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
	} else {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(1);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		if (pcs->regs == NULL) {
			pcs->rtp = SYS_RES_IOPORT;
			pcs->rgd = PCIR_BAR(0);
			pcs->regs = bus_alloc_resource_any(dev, pcs->rtp,
			    &pcs->rgd, RF_ACTIVE);
		}
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "Unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "Using %s space register mapping\n",
		    (pcs->rtp == SYS_RES_IOPORT)? "I/O" : "Memory");
	}
	isp->isp_bus_tag = rman_get_bustag(pcs->regs);
	isp->isp_bus_handle = rman_get_bushandle(pcs->regs);

	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	isp->isp_osinfo.fw = NULL;
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	/*
	 * Do MSI
	 *
	 * NB: MSI-X needs to be disabled for the 2432 (PCI-Express)
	 */
	if (IS_24XX(isp) || IS_2322(isp)) {
		pcs->msicount = pci_msi_count(dev);
		if (pcs->msicount > 1) {
			pcs->msicount = 1;
		}
		if (pci_alloc_msi(dev, &pcs->msicount) == 0) {
			pcs->iqd = 1;
		} else {
			pcs->iqd = 0;
		}
	}
	pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
	if (pcs->irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	if (isp_reinit(isp, 1) != 0) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	if (locksetup) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
	if (pcs->irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs) {
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	}
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	int status;

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		return (ENXIO);
	}
	isp = (ispsoftc_t *) pcs;
	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_uninit(isp);
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	ISP_UNLOCK(isp);
	mtx_destroy(&isp->isp_osinfo.lock);
	(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	/*
	 * XXX: THERE IS A LOT OF LEAKAGE HERE
	 */
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (0);
}

#define IspVirt2Off(a, x) \
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define BXR2(isp, off) \
	bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define BXW2(isp, off, v) \
	bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
#define BXR4(isp, off) \
	bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define BXW4(isp, off, v) \
	bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v)


static ISP_INLINE int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	uint32_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(isp, IspVirt2Off(isp, off));
		val1 = BXR2(isp, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, info)) {
				return (0);
			}
		} else {
			*info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint32_t hccr, r2hisr;

	if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) {
		*isrp = 0;
		return (0);
	}
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*semap = 1;
		break;
	case ISPR2HST_RIO_16:
		*info = ASYNC_RIO16_1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*info = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*info = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*semap = 0;
		break;
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
		return (0);
	}
	*info = (r2hisr >> 16);
	return (1);
}

static int
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint32_t r2hisr;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*semap = 1;
		break;
	case ISPR2HST_RSPQ_UPDATE:
	case ISPR2HST_RSPQ_UPDATE2:
	case ISPR2HST_ATIO_UPDATE:
	case ISPR2HST_ATIO_RSPQ_UPDATE:
	case ISPR2HST_ATIO_UPDATE2:
		*semap = 0;
		break;
	default:
		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		return (0);
	}
	*info = (r2hisr >> 16);
	return (1);
}

static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}

}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(isp, IspVirt2Off(isp, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}


	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
		    regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(isp, IspVirt2Off(isp, regoff), val);
#ifdef MEMORYBARRIERW
		if (regoff == BIU2400_REQINP ||
		    regoff == BIU2400_RSPOUTP ||
		    regoff == BIU2400_PRI_REQINP ||
		    regoff == BIU2400_ATIO_RSPOUTP)
			MEMORYBARRIERW(isp, SYNC_REG,
			    IspVirt2Off(isp, regoff), 4, -1)
		else
#endif
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
		break;
	}
}


struct imush {
	ispsoftc_t *isp;
	caddr_t vbase;
	int chan;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);
static void imc1(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	isp_ecmd_t *ecmd;

	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);

	imushp->isp->isp_rquest = imushp->vbase;
	imushp->isp->isp_rquest_dma = segs->ds_addr;
	segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
	imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));

	imushp->isp->isp_result_dma = segs->ds_addr;
	imushp->isp->isp_result = imushp->vbase;
	segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
	imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));

	if (imushp->isp->isp_type >= ISP_HA_FC_2200) {
		imushp->isp->isp_osinfo.ecmd_dma = segs->ds_addr;
		imushp->isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)imushp->vbase;
		imushp->isp->isp_osinfo.ecmd_base = imushp->isp->isp_osinfo.ecmd_free;
		for (ecmd = imushp->isp->isp_osinfo.ecmd_free; ecmd < &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
			if (ecmd == &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS - 1]) {
				ecmd->next = NULL;
			} else {
				ecmd->next = ecmd + 1;
			}
		}
	}
#ifdef ISP_TARGET_MODE
	segs->ds_addr += (N_XCMDS * XCMD_SIZE);
	imushp->vbase += (N_XCMDS * XCMD_SIZE);
	if (IS_24XX(imushp->isp)) {
		imushp->isp->isp_atioq_dma = segs->ds_addr;
		imushp->isp->isp_atioq = imushp->vbase;
	}
#endif
}

static void
imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;

	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
	FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr;
	FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase;
}

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len, nsegs;
	int i, error, cmap = 0;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}
	ISP_UNLOCK(isp);

	if (isp->isp_maxcmds == 0) {
		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
		ISP_LOCK(isp);
		return (1);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		if (sizeof (bus_size_t) > 4) {
			slim = (bus_size_t) (1ULL << 32);
		} else {
			slim = (bus_size_t) (1UL << 31);
		}
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1UL << 24);
	}

	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_osinfo.pcmd_pool == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds");
		ISP_LOCK(isp);
		return (1);
	}

	if (isp->isp_osinfo.sixtyfourbit) {
		nsegs = ISP_NSEG64_MAX;
	} else {
		nsegs = ISP_NSEG_MAX;
	}

	if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0, &isp->isp_osinfo.dmat)) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	}
	isp->isp_xffree = isp->isp_xflist;
#ifdef ISP_TARGET_MODE
	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_tgtlist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1];
	}
	isp->isp_tgtfree = isp->isp_tgtlist;
#endif

	/*
	 * Allocate and map the request and result queues (and ATIO queue
	 * if we're a 2400 supporting target mode), and a region for
	 * external dma addressable command/status structures (23XX and
	 * later).
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
#ifdef ISP_TARGET_MODE
	if (IS_24XX(isp)) {
		len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	}
#endif
	if (isp->isp_type >= ISP_HA_FC_2200) {
		len += (N_XCMDS * XCMD_SIZE);
	}

	/*
	 * Create a tag for the control spaces. We don't always need this
	 * to be 32 bits, but we do this for simplicity and speed's sake.
	 */
	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, slim, 0, &isp->isp_osinfo.cdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces");
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	im.isp = isp;
	im.chan = 0;
	im.vbase = base;
	im.error = 0;

	bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	if (IS_FC(isp)) {
		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
			if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) {
				goto bad;
			}
			if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &fc->tdmap) != 0) {
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
			im.isp = isp;
			im.chan = cmap;
			im.vbase = base;
			im.error = 0;
			bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0);
			if (im.error) {
				bus_dmamem_free(fc->tdmat, base, fc->tdmap);
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
			if (!IS_2100(isp)) {
				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
					if (n == NULL) {
						while (fc->nexus_free_list) {
							n = fc->nexus_free_list;
							fc->nexus_free_list = n->next;
							free(n, M_DEVBUF);
						}
						goto bad;
					}
					n->next = fc->nexus_free_list;
					fc->nexus_free_list = n;
				}
			}
		}
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0);
		if (i == isp->isp_maxcmds-1) {
			pcmd->next = NULL;
		} else {
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
		}
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
	ISP_LOCK(isp);
	return (0);

bad:
	while (--cmap >= 0) {
		struct isp_fc *fc = ISP_FC_PC(isp, cmap);
		bus_dmamap_unload(fc->tdmat, fc->tdmap);
		bus_dmamem_free(fc->tdmat, base, fc->tdmap);
		bus_dma_tag_destroy(fc->tdmat);
		while (fc->nexus_free_list) {
			struct isp_nexus *n = fc->nexus_free_list;
			fc->nexus_free_list = n->next;
			free(n, M_DEVBUF);
		}
	}
	if (isp->isp_rquest_dma != 0)
		bus_dmamap_unload(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap);
	bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap);
	bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
	isp->isp_rquest = NULL;
	ISP_LOCK(isp);
	return (1);
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
	bus_size_t mapsize;
} mush_t;

#define MUSHERR_NOQENTRIES -2

#ifdef ISP_TARGET_MODE
static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void tdma2(void *, bus_dma_segment_t *, int, int);

static void
tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	tdma2(arg, dm_segs, nseg, error);
}

static void
tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else {
			dm_segs = NULL;
			nseg = 0;
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
	}
}
#endif

static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	dma2(arg, dm_segs, nseg, error);
}

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
				rq->req_header.rqs_entry_type = RQSTYPE_A64;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else {
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		break;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
		break;
	}
}

static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);
	void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);
	int error;

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;
	mp->mapsize = 0;

#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		eptr = tdma2;
		eptr2 = tdma2_2;
	} else
#endif
	{
		eptr = dma2;
		eptr2 = dma2_2;
	}


	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
	    (union ccb *)csio, eptr, mp, 0);
	if (error == EINPROGRESS) {
		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
		mp->error = EINVAL;
		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
	} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
		mp->error = error;
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			csio->ccb_h.status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			csio->ccb_h.status = CAM_REQ_INVALID;
		} else {
			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	}
	return (CMD_QUEUED);
}

static void
isp_pci_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_pci_reset1(ispsoftc_t *isp)
{
	if (!IS_24XX(isp)) {
		/* Make sure the BIOS is disabled */
		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	}
	/* and enable interrupts */
	ISP_ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}