/*-
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/isp/isp_freebsd.h>

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static int isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);


static void isp_pci_reset0(ispsoftc_t *);
static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

static struct ispmdvec mdvec_2500 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

#ifndef PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE		0x0400
#endif

#ifndef PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2432
#define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2532
#define	PCI_PRODUCT_QLOGIC_ISP2532	0x2532
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6322
#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP5432
#define	PCI_PRODUCT_QLOGIC_ISP5432	0x5432
#endif

#define	PCI_QLOGIC_ISP5432	\
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2432	\
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2532	\
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

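/*
 * For reference: the PCI_QLOGIC_* values above combine the product and
 * vendor IDs as ((product << 16) | vendor), the same layout returned by
 * pci_get_devid() (used in isp_pci_attach()) and constructed by hand in
 * isp_pci_probe().  For example, an ISP1020 yields
 * (0x1020 << 16) | 0x1077 == 0x10201077.
 */
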
/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


#define	ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
struct isp_pcisoftc {
	ispsoftc_t		pci_isp;
	device_t		pci_dev;
	struct resource *	regs;
	void *			irq;
	int			iqd;
	int			rtp;
	int			rgd;
	void *			ih;
	int16_t			pci_poff[_NREG_BLKS];
	bus_dma_tag_t		dmat;
	int			msicount;
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	DEVMETHOD(device_detach,	isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_DEPEND(isp, cam, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
static int isp_nvports = 0;

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

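/*
 * For reference, the generic options read below come from the standard
 * device hint namespace; illustrative loader.conf entries (unit 0 chosen
 * arbitrarily) would look like:
 *
 *	hint.isp.0.disable="1"
 *	hint.isp.0.fwload_disable="1"
 *	hint.isp.0.ignore_nvram="1"
 *	hint.isp.0.debug="<ISP_LOG* bitmask>"
 *	hint.isp.0.vports="2"
 */
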
static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp)
{
	int tval;

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "disable", &tval) == 0 && tval) {
		device_printf(dev, "disabled at user request\n");
		isp->isp_osinfo.disabled = 1;
		return;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval < 127) {
		isp_nvports = tval;
	}
	tval = 1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "autoconfig", &tval);
	isp_autoconfig = tval;
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;
}

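/*
 * The PCI mapping preference chosen below can likewise be steered by hints,
 * e.g. (unit 0 illustrative) hint.isp.0.prefer_iomap="1" to try I/O space
 * first, or hint.isp.0.prefer_memmap="1" to force memory space first.
 */
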
static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int tval;
	/*
	 * Which should we try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
	 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}

static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval = 0;
	char prefix[12], name[16];

	if (chan == 0)
		prefix[0] = 0;
	else
		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
	snprintf(name, sizeof(name), "%siid", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval)) {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%srole", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_BOTH:
			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}

	if (IS_SCSI(isp)) {
		ISP_SPI_PC(isp, chan)->def_role = tval;
		return;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	snprintf(name, sizeof(name), "%sfullduplex", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = 0;
	snprintf(name, sizeof(name), "%stopology", prefix);
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	tval = 0;
	snprintf(name, sizeof(name), "%snofctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
	}

	tval = 0;
	snprintf(name, sizeof(name), "%sfctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
		isp->isp_confopts |= ISP_CFG_FCTAPE;
	}


	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
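	/*
	 * Example hints (unit and channel purely illustrative):
	 *	hint.isp.0.portwwn="w50000000aaaa0001"
	 *	hint.isp.0.chan1.nodewwn="w<16 hex digits>"
	 */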
	sptr = 0;
	snprintf(name, sizeof(name), "%sportwwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = 0;
	snprintf(name, sizeof(name), "%snodewwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = 0;
	snprintf(name, sizeof(name), "%shysteresis", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 256) {
		ISP_FC_PC(isp, chan)->hysteresis = tval;
	} else {
		ISP_FC_PC(isp, chan)->hysteresis = isp_fabric_hysteresis;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}

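/*
 * Note on the hint names used above: channel 0 options carry no prefix
 * (e.g. "iid", "role", "portwwn"), while channel N > 0 options are prefixed
 * with "chanN." (e.g. "chan1.role"), per the snprintf() of 'prefix' above.
 */
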
static int
isp_pci_attach(device_t dev)
{
	int i, m1, m2, locksetup = 0;
	uint32_t data, cmd, linesz, did;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	size_t psize, xsize;
	char fwname[32];

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));

	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;
	if (sizeof (bus_addr_t) > 4)
		isp->isp_osinfo.sixtyfourbit = 1;

	/*
	 * Get Generic Options
	 */
	isp_nvports = 0;
	isp_get_generic_options(dev, isp);

	/*
	 * Check to see if options have us disabled
	 */
	if (isp->isp_osinfo.disabled) {
		/*
		 * But return zero to preserve unit numbering
		 */
		return (0);
	}

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);

	linesz = PCI_DFLT_LNSZ;
	pcs->irq = pcs->regs = NULL;
	pcs->rgd = pcs->rtp = pcs->iqd = 0;

	pcs->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
	pcs->rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
	pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
	if (pcs->regs == NULL) {
		pcs->rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		pcs->rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n", (pcs->rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	isp->isp_bus_tag = rman_get_bustag(pcs->regs);
	isp->isp_bus_handle = rman_get_bushandle(pcs->regs);

	pcs->pci_dev = dev;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	/*
	 * The 'it' suffix really only matters for SCSI cards in target mode.
	 */
	isp->isp_osinfo.fw = NULL;
	if (IS_SCSI(isp) && (ISP_SPI_PC(isp, 0)->def_role & ISP_ROLE_TARGET)) {
		snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	} else if (IS_24XX(isp)) {
		snprintf(fwname, sizeof (fwname), "isp_%04x_multi", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

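	/*
	 * For reference, the names tried above follow "isp_%04x" with the
	 * 'did' chosen in the switch above, e.g. "isp_1040", "isp_2400_multi"
	 * or "isp_1080_it" (SCSI target mode), which is how the firmware
	 * images provided by ispfw(4) are expected to be named.
	 */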
	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	/*
	 * Do MSI
	 *
	 * NB: MSI-X needs to be disabled for the 2432 (PCI-Express)
	 */
	if (IS_24XX(isp) || IS_2322(isp)) {
		pcs->msicount = pci_msi_count(dev);
		if (pcs->msicount > 1) {
			pcs->msicount = 1;
		}
		if (pci_alloc_msi(dev, &pcs->msicount) == 0) {
			pcs->iqd = 1;
		} else {
			pcs->iqd = 0;
		}
	}
	pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
	if (pcs->irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp, 1);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state == ISP_INITSTATE) {
		isp->isp_state = ISP_RUNSTATE;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	if (locksetup) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
	if (pcs->irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs) {
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	}
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	int status;

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		return (ENXIO);
	}
	isp = (ispsoftc_t *) pcs;
	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_uninit(isp);
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	ISP_UNLOCK(isp);
	mtx_destroy(&isp->isp_osinfo.lock);
	(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	/*
	 * XXX: THERE IS A LOT OF LEAKAGE HERE
	 */
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (0);
}

#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define	BXR2(isp, off)		\
	bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define	BXW2(isp, off, v)	\
	bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
#define	BXR4(isp, off)		\
	bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define	BXW4(isp, off, v)	\
	bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v)

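/*
 * For reference: IspVirt2Off() translates a virtual register offset into a
 * bus-space offset by indexing pci_poff[] with the block bits and adding the
 * low 12 bits.  For example, a MBOX_BLOCK register resolves to the
 * PCI_MBOX_REGS*_OFF value selected for this chip in isp_pci_attach() plus
 * the register's offset within the block.
 */
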
static ISP_INLINE int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	uint32_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(isp, IspVirt2Off(isp, off));
		val1 = BXR2(isp, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbp)
{
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p)
{
	uint32_t hccr;
	uint32_t r2hisr;

	if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
		*isrp = 0;
		return (0);
	}
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO16_1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
		return (0);
	}
}

static int
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p)
{
	uint32_t r2hisr;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
	case ISP2400R2HST_ROM_MBX_OK:
	case ISP2400R2HST_ROM_MBX_FAIL:
	case ISP2400R2HST_MBX_OK:
	case ISP2400R2HST_MBX_FAIL:
	case ISP2400R2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISP2400R2HST_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RQST_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		return (0);
	}
}

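/*
 * Each of the rd_isr variants above returns non-zero only when an interrupt
 * is actually pending, filling in *isrp, *semap and the mailbox 0 value so
 * the platform interrupt handler can dispatch without re-reading registers.
 */
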
static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}

}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

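/*
 * As in the read/write pair around this note: on 1080/1280/10160/12160
 * class chips the SXP and DMA register banks are reached by temporarily
 * setting the appropriate BIU_PCI1080_CONF1_* select bits in BIU_CONF1 and
 * restoring the previous value afterwards.
 */
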
static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(isp, IspVirt2Off(isp, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}


	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
		    regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(isp, IspVirt2Off(isp, regoff), val);
#ifdef MEMORYBARRIERW
		if (regoff == BIU2400_REQINP ||
		    regoff == BIU2400_RSPOUTP ||
		    regoff == BIU2400_PRI_REQINP ||
		    regoff == BIU2400_ATIO_RSPOUTP)
			MEMORYBARRIERW(isp, SYNC_REG,
			    IspVirt2Off(isp, regoff), 4, -1)
		else
#endif
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
		break;
	}
}


struct imush {
	ispsoftc_t *isp;
	caddr_t vbase;
	int chan;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);
static void imc1(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	isp_ecmd_t *ecmd;

	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);

	imushp->isp->isp_rquest = imushp->vbase;
	imushp->isp->isp_rquest_dma = segs->ds_addr;
	segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
	imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));

	imushp->isp->isp_result_dma = segs->ds_addr;
	imushp->isp->isp_result = imushp->vbase;
	segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
	imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));

	if (imushp->isp->isp_type >= ISP_HA_FC_2300) {
		imushp->isp->isp_osinfo.ecmd_dma = segs->ds_addr;
		imushp->isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)imushp->vbase;
		imushp->isp->isp_osinfo.ecmd_base = imushp->isp->isp_osinfo.ecmd_free;
		for (ecmd = imushp->isp->isp_osinfo.ecmd_free; ecmd < &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
			if (ecmd == &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS - 1]) {
				ecmd->next = NULL;
			} else {
				ecmd->next = ecmd + 1;
			}
		}
	}
#ifdef ISP_TARGET_MODE
	segs->ds_addr += (N_XCMDS * XCMD_SIZE);
	imushp->vbase += (N_XCMDS * XCMD_SIZE);
	if (IS_24XX(imushp->isp)) {
		imushp->isp->isp_atioq_dma = segs->ds_addr;
		imushp->isp->isp_atioq = imushp->vbase;
	}
#endif
}

static void
imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
	FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr;
	FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase;
}

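/*
 * For reference: isp_pci_mbxdma() below allocates one contiguous control
 * area whose single DMA segment imc() above carves up in order: the request
 * queue, the result queue, then (on 2300 and newer) the N_XCMDS external
 * command slots and, for 24XX target mode, the ATIO queue.
 */
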
static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len, nsegs;
	int i, error, cmap = 0;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}
	ISP_UNLOCK(isp);

	if (isp->isp_maxcmds == 0) {
		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
		ISP_LOCK(isp);
		return (1);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		if (sizeof (bus_size_t) > 4) {
			slim = (bus_size_t) (1ULL << 32);
		} else {
			slim = (bus_size_t) (1UL << 31);
		}
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1UL << 24);
	}

	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_osinfo.pcmd_pool == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds");
		ISP_LOCK(isp);
		return (1);
	}

	if (isp->isp_osinfo.sixtyfourbit) {
		nsegs = ISP_NSEG64_MAX;
	} else {
		nsegs = ISP_NSEG_MAX;
	}
#ifdef ISP_TARGET_MODE
	/*
	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
	 */
	if (IS_SCSI(isp) && isp->isp_osinfo.sixtyfourbit) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
		ISP_LOCK(isp);
		return (1);
	}
#endif

	if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0, &isp->isp_osinfo.dmat)) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	}
	isp->isp_xffree = isp->isp_xflist;
#ifdef ISP_TARGET_MODE
	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_tgtlist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1];
	}
	isp->isp_tgtfree = isp->isp_tgtlist;
#endif

	/*
	 * Allocate and map the request and result queues (and ATIO queue
	 * if we're a 2400 supporting target mode), and a region for
	 * external dma addressable command/status structures (23XX and
	 * later).
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
#ifdef ISP_TARGET_MODE
	if (IS_24XX(isp)) {
		len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	}
#endif
	if (isp->isp_type >= ISP_HA_FC_2300) {
		len += (N_XCMDS * XCMD_SIZE);
	}

	/*
	 * Create a tag for the control spaces. We don't always need this
	 * to be 32 bits, but we do this for simplicity and speed's sake.
	 */
	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, slim, 0, &isp->isp_osinfo.cdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces");
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	im.isp = isp;
	im.chan = 0;
	im.vbase = base;
	im.error = 0;

	bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	if (IS_FC(isp)) {
		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
			if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) {
				goto bad;
			}
			if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &fc->tdmap) != 0) {
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
			im.isp = isp;
			im.chan = cmap;
			im.vbase = base;
			im.error = 0;
			bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0);
			if (im.error) {
				bus_dmamem_free(fc->tdmat, base, fc->tdmap);
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
			if (isp->isp_type >= ISP_HA_FC_2300) {
				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
					if (n == NULL) {
						while (fc->nexus_free_list) {
							n = fc->nexus_free_list;
							fc->nexus_free_list = n->next;
							free(n, M_DEVBUF);
						}
						goto bad;
					}
					n->next = fc->nexus_free_list;
					fc->nexus_free_list = n;
				}
			}
		}
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0);
		if (i == isp->isp_maxcmds-1) {
			pcmd->next = NULL;
		} else {
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
		}
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
	ISP_LOCK(isp);
	return (0);

bad:
	while (--cmap >= 0) {
		struct isp_fc *fc = ISP_FC_PC(isp, cmap);
		bus_dmamap_unload(fc->tdmat, fc->tdmap);
		bus_dmamem_free(fc->tdmat, base, fc->tdmap);
		bus_dma_tag_destroy(fc->tdmat);
		while (fc->nexus_free_list) {
			struct isp_nexus *n = fc->nexus_free_list;
			fc->nexus_free_list = n->next;
			free(n, M_DEVBUF);
		}
	}
	if (isp->isp_rquest_dma != 0)
		bus_dmamap_unload(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap);
	bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap);
	bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
	isp->isp_rquest = NULL;
	ISP_LOCK(isp);
	return (1);
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
	bus_size_t mapsize;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef ISP_TARGET_MODE
static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void tdma2(void *, bus_dma_segment_t *, int, int);

static void
tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	tdma2(arg, dm_segs, nseg, error);
}

static void
tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else {
			dm_segs = NULL;
			nseg = 0;
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
	}
}
#endif

static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	dma2(arg, dm_segs, nseg, error);
}

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
				rq->req_header.rqs_entry_type = RQSTYPE_A64;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else {
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		break;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
		break;
	}
}

static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);
	void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);
	int error;

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;
	mp->mapsize = 0;

#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		eptr = tdma2;
		eptr2 = tdma2_2;
	} else
#endif
	{
		eptr = dma2;
		eptr2 = dma2_2;
	}


	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
	    (union ccb *)csio, eptr, mp, 0);
	if (error == EINPROGRESS) {
		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
		mp->error = EINVAL;
		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
	} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
		mp->error = error;
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			csio->ccb_h.status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			csio->ccb_h.status = CAM_REQ_INVALID;
		} else {
			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	}
	return (CMD_QUEUED);
}

static void
isp_pci_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_pci_reset1(ispsoftc_t *isp)
{
	if (!IS_24XX(isp)) {
		/* Make sure the BIOS is disabled */
		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	}
	/* and enable interrupts */
	ISP_ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}