1 /*- 2 * Copyright (c) 1997-2008 by Matthew Jacob 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 /* 27 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 28 * FreeBSD Version. 
29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/kernel.h> 36 #include <sys/module.h> 37 #include <sys/linker.h> 38 #include <sys/firmware.h> 39 #include <sys/bus.h> 40 #include <sys/stdint.h> 41 #include <dev/pci/pcireg.h> 42 #include <dev/pci/pcivar.h> 43 #include <machine/bus.h> 44 #include <machine/resource.h> 45 #include <sys/rman.h> 46 #include <sys/malloc.h> 47 #include <sys/uio.h> 48 49 #ifdef __sparc64__ 50 #include <dev/ofw/openfirm.h> 51 #include <machine/ofw_machdep.h> 52 #endif 53 54 #include <dev/isp/isp_freebsd.h> 55 56 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); 57 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); 58 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); 59 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); 60 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); 61 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); 62 static int isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 63 static int isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 64 static int isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 65 static int isp_pci_mbxdma(ispsoftc_t *); 66 static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *); 67 68 69 static void isp_pci_reset0(ispsoftc_t *); 70 static void isp_pci_reset1(ispsoftc_t *); 71 static void isp_pci_dumpregs(ispsoftc_t *, const char *); 72 73 static struct ispmdvec mdvec = { 74 isp_pci_rd_isr, 75 isp_pci_rd_reg, 76 isp_pci_wr_reg, 77 isp_pci_mbxdma, 78 isp_pci_dmasetup, 79 isp_common_dmateardown, 80 isp_pci_reset0, 81 isp_pci_reset1, 82 isp_pci_dumpregs, 83 NULL, 84 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 85 }; 86 87 static struct ispmdvec mdvec_1080 = { 88 isp_pci_rd_isr, 89 isp_pci_rd_reg_1080, 90 isp_pci_wr_reg_1080, 91 isp_pci_mbxdma, 92 isp_pci_dmasetup, 93 isp_common_dmateardown, 94 isp_pci_reset0, 95 isp_pci_reset1, 96 
isp_pci_dumpregs, 97 NULL, 98 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 99 }; 100 101 static struct ispmdvec mdvec_12160 = { 102 isp_pci_rd_isr, 103 isp_pci_rd_reg_1080, 104 isp_pci_wr_reg_1080, 105 isp_pci_mbxdma, 106 isp_pci_dmasetup, 107 isp_common_dmateardown, 108 isp_pci_reset0, 109 isp_pci_reset1, 110 isp_pci_dumpregs, 111 NULL, 112 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 113 }; 114 115 static struct ispmdvec mdvec_2100 = { 116 isp_pci_rd_isr, 117 isp_pci_rd_reg, 118 isp_pci_wr_reg, 119 isp_pci_mbxdma, 120 isp_pci_dmasetup, 121 isp_common_dmateardown, 122 isp_pci_reset0, 123 isp_pci_reset1, 124 isp_pci_dumpregs 125 }; 126 127 static struct ispmdvec mdvec_2200 = { 128 isp_pci_rd_isr, 129 isp_pci_rd_reg, 130 isp_pci_wr_reg, 131 isp_pci_mbxdma, 132 isp_pci_dmasetup, 133 isp_common_dmateardown, 134 isp_pci_reset0, 135 isp_pci_reset1, 136 isp_pci_dumpregs 137 }; 138 139 static struct ispmdvec mdvec_2300 = { 140 isp_pci_rd_isr_2300, 141 isp_pci_rd_reg, 142 isp_pci_wr_reg, 143 isp_pci_mbxdma, 144 isp_pci_dmasetup, 145 isp_common_dmateardown, 146 isp_pci_reset0, 147 isp_pci_reset1, 148 isp_pci_dumpregs 149 }; 150 151 static struct ispmdvec mdvec_2400 = { 152 isp_pci_rd_isr_2400, 153 isp_pci_rd_reg_2400, 154 isp_pci_wr_reg_2400, 155 isp_pci_mbxdma, 156 isp_pci_dmasetup, 157 isp_common_dmateardown, 158 isp_pci_reset0, 159 isp_pci_reset1, 160 NULL 161 }; 162 163 static struct ispmdvec mdvec_2500 = { 164 isp_pci_rd_isr_2400, 165 isp_pci_rd_reg_2400, 166 isp_pci_wr_reg_2400, 167 isp_pci_mbxdma, 168 isp_pci_dmasetup, 169 isp_common_dmateardown, 170 isp_pci_reset0, 171 isp_pci_reset1, 172 NULL 173 }; 174 175 #ifndef PCIM_CMD_INVEN 176 #define PCIM_CMD_INVEN 0x10 177 #endif 178 #ifndef PCIM_CMD_BUSMASTEREN 179 #define PCIM_CMD_BUSMASTEREN 0x0004 180 #endif 181 #ifndef PCIM_CMD_PERRESPEN 182 #define PCIM_CMD_PERRESPEN 0x0040 183 #endif 184 #ifndef PCIM_CMD_SEREN 185 #define PCIM_CMD_SEREN 0x0100 186 #endif 187 #ifndef PCIM_CMD_INTX_DISABLE 188 #define PCIM_CMD_INTX_DISABLE 0x0400 
189 #endif 190 191 #ifndef PCIR_COMMAND 192 #define PCIR_COMMAND 0x04 193 #endif 194 195 #ifndef PCIR_CACHELNSZ 196 #define PCIR_CACHELNSZ 0x0c 197 #endif 198 199 #ifndef PCIR_LATTIMER 200 #define PCIR_LATTIMER 0x0d 201 #endif 202 203 #ifndef PCIR_ROMADDR 204 #define PCIR_ROMADDR 0x30 205 #endif 206 207 #ifndef PCI_VENDOR_QLOGIC 208 #define PCI_VENDOR_QLOGIC 0x1077 209 #endif 210 211 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 212 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 213 #endif 214 215 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 216 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 217 #endif 218 219 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 220 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 221 #endif 222 223 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 224 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 225 #endif 226 227 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 228 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 229 #endif 230 231 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 232 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 233 #endif 234 235 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 236 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 237 #endif 238 239 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 240 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 241 #endif 242 243 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 244 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 245 #endif 246 247 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 248 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 249 #endif 250 251 #ifndef PCI_PRODUCT_QLOGIC_ISP2322 252 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 253 #endif 254 255 #ifndef PCI_PRODUCT_QLOGIC_ISP2422 256 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 257 #endif 258 259 #ifndef PCI_PRODUCT_QLOGIC_ISP2432 260 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432 261 #endif 262 263 #ifndef PCI_PRODUCT_QLOGIC_ISP2532 264 #define PCI_PRODUCT_QLOGIC_ISP2532 0x2532 265 #endif 266 267 #ifndef PCI_PRODUCT_QLOGIC_ISP6312 268 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 269 #endif 270 271 #ifndef PCI_PRODUCT_QLOGIC_ISP6322 272 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 273 #endif 274 275 #ifndef 
PCI_PRODUCT_QLOGIC_ISP5432 276 #define PCI_PRODUCT_QLOGIC_ISP5432 0x5432 277 #endif 278 279 #define PCI_QLOGIC_ISP5432 \ 280 ((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC) 281 282 #define PCI_QLOGIC_ISP1020 \ 283 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 284 285 #define PCI_QLOGIC_ISP1080 \ 286 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 287 288 #define PCI_QLOGIC_ISP10160 \ 289 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 290 291 #define PCI_QLOGIC_ISP12160 \ 292 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 293 294 #define PCI_QLOGIC_ISP1240 \ 295 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 296 297 #define PCI_QLOGIC_ISP1280 \ 298 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 299 300 #define PCI_QLOGIC_ISP2100 \ 301 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 302 303 #define PCI_QLOGIC_ISP2200 \ 304 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 305 306 #define PCI_QLOGIC_ISP2300 \ 307 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 308 309 #define PCI_QLOGIC_ISP2312 \ 310 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 311 312 #define PCI_QLOGIC_ISP2322 \ 313 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 314 315 #define PCI_QLOGIC_ISP2422 \ 316 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 317 318 #define PCI_QLOGIC_ISP2432 \ 319 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC) 320 321 #define PCI_QLOGIC_ISP2532 \ 322 ((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC) 323 324 #define PCI_QLOGIC_ISP6312 \ 325 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 326 327 #define PCI_QLOGIC_ISP6322 \ 328 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) 329 330 /* 331 * Odd case for some AMI raid cards... We need to *not* attach to this. 
 */
#define AMI_RAID_SUBVENDOR_ID 0x101e

/* BAR offsets in PCI config space for the register window. */
#define IO_MAP_REG 0x10
#define MEM_MAP_REG 0x14

/* Default PCI latency timer and cache line size we program if unset. */
#define PCI_DFLT_LTNCY 0x40
#define PCI_DFLT_LNSZ 0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


#define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev
/*
 * Per-device softc: the common ispsoftc_t must be first so that the
 * softc pointer can be cast directly to ispsoftc_t * (see ISP_PCD and
 * isp_pci_detach).
 */
struct isp_pcisoftc {
	ispsoftc_t pci_isp;		/* common core state; MUST be first */
	device_t pci_dev;
	struct resource * regs;		/* register window (memory or I/O) */
	void * irq;			/* interrupt resource */
	int iqd;			/* IRQ rid (1 when MSI is in use) */
	int rtp;			/* resource type of 'regs' */
	int rgd;			/* resource rid of 'regs' */
	void * ih;			/* interrupt handler cookie */
	int16_t pci_poff[_NREG_BLKS];	/* per-block register offsets */
	bus_dma_tag_t dmat;
	int msicount;			/* nonzero if MSI was allocated */
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	DEVMETHOD(device_detach, isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_DEPEND(isp, cam, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);

/*
 * Probe: match on the (device << 16) | vendor id, which is how the
 * PCI_QLOGIC_* composite constants above are built.  Returns ENXIO for
 * unknown ids and for the AMI RAID variant of the 12160, which we must
 * not claim.
 */
static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		/* Some AMI RAID cards reuse this id; do not attach. */
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	/* Announce driver version once, on the first verbose probe. */
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

/*
 * Read device-independent tunables from kernel environment hints:
 * "disable", "fwload_disable", "ignore_nvram", "debug", "vports",
 * "autoconfig", "quickboot_time" and "forcemulti".  On return, *nvp
 * holds the requested number of virtual ports (0 if none/invalid).
 */
static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp, int *nvp)
{
	int tval;

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "disable", &tval) == 0 && tval) {
		device_printf(dev, "disabled at user request\n");
		isp->isp_osinfo.disabled = 1;
		return;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	/*
	 * NOTE(review): tval is not reset to 0 before this read; if no
	 * "vports" hint exists, tval still holds the "debug" value above,
	 * and a debug level in (0,127) would be misread as a vport count.
	 * Confirm against upstream before relying on this path.
	 */
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval < 127) {
		*nvp = tval;
	} else {
		*nvp = 0;
	}
	tval = 1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "autoconfig", &tval);
	isp_autoconfig = tval;
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "forcemulti", &tval) == 0 && tval != 0) {
		isp->isp_osinfo.forcemulti = 1;
	}
}

/*
 * Decide register-mapping preference order.  *m1 is tried first, *m2
 * second; the values are PCIM_CMD_MEMEN/PCIM_CMD_PORTEN command bits
 * tested against the device's PCI command register in attach.
 */
static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int tval;
	/*
	 * Which we should try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	/* "prefer_memmap" overrides "prefer_iomap" if both hints are set. */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}

/*
 * Read per-channel tunables ("iid", "role", "fullduplex", "topology",
 * "portwwn", "nodewwn", "hysteresis", "loop_down_limit",
 * "gone_device_time") into the channel's FC or SPI parameter block.
 * For SCSI channels only the initiator id and role apply; the function
 * returns early after setting def_role in that case.
 */
static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval;

	/* No "iid" hint: pick defaults (FC loop id 109-chan, SPI id 7). */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "iid", &tval)) {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	tval = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "role", &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_INITIATOR|ISP_ROLE_TARGET:
			device_printf(dev, "setting role to 0x%x\n", tval);
			break;
		default:
			/* Invalid role hint: fall back to the default below. */
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}

	if (IS_SCSI(isp)) {
		ISP_SPI_PC(isp, chan)->def_role = tval;
		return;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev), "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e..g w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "portwwn", (const char **) &sptr);
	/* Note: *sptr++ both checks for the leading 'w' and skips it. */
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		/*
		 * NOTE(review): this rejects def_wwpn == -1 while the
		 * nodewwn block below rejects def_wwnn == 0 — the two
		 * validations are asymmetric; looks like a copy/paste
		 * inconsistency, confirm against upstream.
		 */
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "hysteresis", &tval);
	if (tval >= 0 && tval < 256) {
		ISP_FC_PC(isp, chan)->hysteresis = tval;
	} else {
		ISP_FC_PC(isp, chan)->hysteresis = isp_fabric_hysteresis;
	}

	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "loop_down_limit", &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "gone_device_time", &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}

/*
 * Attach: map registers, identify the chip, allocate parameter and
 * per-channel state, fetch firmware, program PCI config, set up the
 * interrupt, and reset/init the chip.  Returns 0 on success (also when
 * disabled by hint, to preserve unit numbering), ENOMEM/ENXIO on error.
 */
static int
isp_pci_attach(device_t dev)
{
	int i, m1, m2, locksetup = 0;
	int isp_nvports = 0;
	uint32_t data, cmd, linesz, did;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	size_t psize, xsize;
	char fwname[32];

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));

	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;

	/*
	 * Get Generic Options
	 */
	isp_get_generic_options(dev, isp, &isp_nvports);

	/*
	 * Check to see if options have us disabled
	 */
	if (isp->isp_osinfo.disabled) {
		/*
		 * But return zero to preserve unit numbering
		 */
		return (0);
	}

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);

	linesz = PCI_DFLT_LNSZ;
	pcs->irq = pcs->regs = NULL;
	pcs->rgd = pcs->rtp = pcs->iqd = 0;

	/* Try the preferred mapping first, then fall back to the other. */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		pcs->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		pcs->rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
	}
	if (pcs->regs == NULL && (cmd & m2)) {
		pcs->rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		pcs->rgd = (m2 == PCIM_CMD_MEMEN)?
MEM_MAP_REG : IO_MAP_REG;
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n", (pcs->rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	isp->isp_bus_tag = rman_get_bustag(pcs->regs);
	isp->isp_bus_handle = rman_get_bushandle(pcs->regs);

	/* Default register-block offsets; chip-specific cases adjust below. */
	pcs->pci_dev = dev;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/*
	 * Identify the chip: select the dispatch vector, chip type,
	 * channel count, register offsets, and the 'did' used to build
	 * the firmware image name.
	 */
	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;	/* dual-channel part */
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;	/* dual-channel part */
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX; boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;	/* add requested vports */
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;	/* add requested vports */
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	/* Allocate parameter and platform per-channel arrays. */
	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	/*
	 * The 'it' suffix really only matters for SCSI cards in target mode.
	 */
	isp->isp_osinfo.fw = NULL;
	if (IS_SCSI(isp) && (ISP_SPI_PC(isp, 0)->def_role & ISP_ROLE_TARGET)) {
		snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	} else if (IS_24XX(isp) && (isp->isp_nchan > 1 || isp->isp_osinfo.forcemulti)) {
		snprintf(fwname, sizeof (fwname), "isp_%04x_multi", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	/* Fall back to the plain firmware image if no variant was found. */
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;

	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}

	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;	/* clear the expansion-ROM enable bit */
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	/*
	 * Do MSI
	 *
	 * NB: MSI-X needs to be disabled for the 2432 (PCI-Express)
	 */
	if (IS_24XX(isp) || IS_2322(isp)) {
		pcs->msicount = pci_msi_count(dev);
		if (pcs->msicount > 1) {
			pcs->msicount = 1;	/* we only use one vector */
		}
		if (pci_alloc_msi(dev, &pcs->msicount) == 0) {
			pcs->iqd = 1;	/* MSI uses rid 1 */
		} else {
			pcs->iqd = 0;	/* fall back to INTx, rid 0 */
		}
	}
	pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
	if (pcs->irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	/* Make sure the lock is set up.
 */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp, 1);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state == ISP_INITSTATE) {
		isp->isp_state = ISP_RUNSTATE;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	/* Unwind in reverse order of acquisition; each step is guarded. */
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	if (locksetup) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
	if (pcs->irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs) {
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	}
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (ENXIO);
}

/*
 * Detach: tear down the core driver state, then release the interrupt,
 * MSI, register window, and parameter allocations.
 *
 * NOTE(review): unlike the attach error path, pcs->irq and pcs->regs
 * are released here without NULL guards — this assumes a fully
 * successful attach (detach is not called for a hint-disabled unit
 * that returned 0 early?).  Confirm against the bus framework.
 */
static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	int status;

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		return (ENXIO);
	}
	/* Valid cast: pci_isp is the first member of struct isp_pcisoftc. */
	isp = (ispsoftc_t *) pcs;
	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_uninit(isp);
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	ISP_UNLOCK(isp);
	mtx_destroy(&isp->isp_osinfo.lock);
	(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (0);
}

/*
 * Translate a virtual register id to a bus-space offset via the
 * per-block offset table set up in attach.
 */
#define IspVirt2Off(a, x) \
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

/* Shorthand 16/32-bit bus-space accessors for the mapped register window. */
#define BXR2(isp, off) \
	bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define BXW2(isp, off, v) \
	bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
#define BXR4(isp, off) \
	bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define BXW4(isp, off, v) \
	bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v)


/*
 * Read a 16-bit register repeatedly until two consecutive reads agree
 * (up to 1000 tries).  Returns 0 and stores the stable value in *rp,
 * or 1 if the value never settled.  Used for the 2100, whose registers
 * need this debouncing.
 */
static ISP_INLINE int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	uint32_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(isp, IspVirt2Off(isp, off));
		val1 = BXR2(isp, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

/*
 * Interrupt-status reader for pre-2300 chips.  Returns 1 with *isrp,
 * *semap (and *mbp when the semaphore is set) filled in if an
 * interrupt is pending, 0 otherwise.
 */
static int
isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbp)
{
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		/* 2100 registers must be read debounced. */
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

/*
 * Interrupt-status reader for 2300-series chips, using the RISC-to-host
 * status register.  Returns 1 with *isrp/*semap/*mbox0p filled in if an
 * interrupt is pending, 0 otherwise.
 */
static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p)
{
	uint32_t hccr;
	uint32_t r2hisr;

	/*
	 * NOTE(review): the parenthesization here ANDs the mask
	 * BIU2100_ISR_RISC_INT with the register *offset* (inside the
	 * BXR2 argument) rather than with the value read.  It looks
	 * like this was intended to be
	 *   (BXR2(isp, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)
	 * — confirm against upstream before changing, since the
	 * surrounding code still works when the mangled offset happens
	 * to read a nonzero value.
	 */
	if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) {
		*isrp = 0;
		return (0);
	}
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	/* Low 16 bits are the status; high 16 bits carry mailbox 0. */
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO16_1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		/* Unknown status: if the RISC is paused, unwedge it. */
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
		return (0);
	}
} 1176 1177 static int 1178 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p) 1179 { 1180 uint32_t r2hisr; 1181 1182 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO)); 1183 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1184 if ((r2hisr & BIU2400_R2HST_INTR) == 0) { 1185 *isrp = 0; 1186 return (0); 1187 } 1188 switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) { 1189 case ISP2400R2HST_ROM_MBX_OK: 1190 case ISP2400R2HST_ROM_MBX_FAIL: 1191 case ISP2400R2HST_MBX_OK: 1192 case ISP2400R2HST_MBX_FAIL: 1193 case ISP2400R2HST_ASYNC_EVENT: 1194 *isrp = r2hisr & 0xffff; 1195 *mbox0p = (r2hisr >> 16); 1196 *semap = 1; 1197 return (1); 1198 case ISP2400R2HST_RSPQ_UPDATE: 1199 case ISP2400R2HST_ATIO_RSPQ_UPDATE: 1200 case ISP2400R2HST_ATIO_RQST_UPDATE: 1201 *isrp = r2hisr & 0xffff; 1202 *mbox0p = 0; 1203 *semap = 0; 1204 return (1); 1205 default: 1206 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 1207 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); 1208 return (0); 1209 } 1210 } 1211 1212 static uint32_t 1213 isp_pci_rd_reg(ispsoftc_t *isp, int regoff) 1214 { 1215 uint16_t rv; 1216 int oldconf = 0; 1217 1218 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1219 /* 1220 * We will assume that someone has paused the RISC processor. 
1221 */ 1222 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1223 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP); 1224 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1225 } 1226 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1227 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1228 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1229 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1230 } 1231 return (rv); 1232 } 1233 1234 static void 1235 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) 1236 { 1237 int oldconf = 0; 1238 1239 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1240 /* 1241 * We will assume that someone has paused the RISC processor. 1242 */ 1243 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1244 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1245 oldconf | BIU_PCI_CONF1_SXP); 1246 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1247 } 1248 BXW2(isp, IspVirt2Off(isp, regoff), val); 1249 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); 1250 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1251 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1252 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1253 } 1254 1255 } 1256 1257 static uint32_t 1258 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) 1259 { 1260 uint32_t rv, oc = 0; 1261 1262 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1263 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1264 uint32_t tc; 1265 /* 1266 * We will assume that someone has paused the RISC processor. 
1267 */ 1268 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1269 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1270 if (regoff & SXP_BANK1_SELECT) 1271 tc |= BIU_PCI1080_CONF1_SXP1; 1272 else 1273 tc |= BIU_PCI1080_CONF1_SXP0; 1274 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1275 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1276 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1277 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1278 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1279 oc | BIU_PCI1080_CONF1_DMA); 1280 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1281 } 1282 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1283 if (oc) { 1284 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1285 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1286 } 1287 return (rv); 1288 } 1289 1290 static void 1291 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) 1292 { 1293 int oc = 0; 1294 1295 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1296 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1297 uint32_t tc; 1298 /* 1299 * We will assume that someone has paused the RISC processor. 
1300 */ 1301 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1302 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1303 if (regoff & SXP_BANK1_SELECT) 1304 tc |= BIU_PCI1080_CONF1_SXP1; 1305 else 1306 tc |= BIU_PCI1080_CONF1_SXP0; 1307 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1308 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1309 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1310 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1311 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1312 oc | BIU_PCI1080_CONF1_DMA); 1313 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1314 } 1315 BXW2(isp, IspVirt2Off(isp, regoff), val); 1316 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); 1317 if (oc) { 1318 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1319 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1320 } 1321 } 1322 1323 static uint32_t 1324 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) 1325 { 1326 uint32_t rv; 1327 int block = regoff & _BLK_REG_MASK; 1328 1329 switch (block) { 1330 case BIU_BLOCK: 1331 break; 1332 case MBOX_BLOCK: 1333 return (BXR2(isp, IspVirt2Off(isp, regoff))); 1334 case SXP_BLOCK: 1335 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff); 1336 return (0xffffffff); 1337 case RISC_BLOCK: 1338 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff); 1339 return (0xffffffff); 1340 case DMA_BLOCK: 1341 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff); 1342 return (0xffffffff); 1343 default: 1344 isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff); 1345 return (0xffffffff); 1346 } 1347 1348 1349 switch (regoff) { 1350 case BIU2400_FLASH_ADDR: 1351 case BIU2400_FLASH_DATA: 1352 case BIU2400_ICR: 1353 case BIU2400_ISR: 1354 case BIU2400_CSR: 1355 case BIU2400_REQINP: 1356 case BIU2400_REQOUTP: 1357 case BIU2400_RSPINP: 1358 case BIU2400_RSPOUTP: 1359 case BIU2400_PRI_REQINP: 1360 case BIU2400_PRI_REQOUTP: 1361 case BIU2400_ATIO_RSPINP: 1362 case BIU2400_ATIO_RSPOUTP: 1363 case 
BIU2400_HCCR: 1364 case BIU2400_GPIOD: 1365 case BIU2400_GPIOE: 1366 case BIU2400_HSEMA: 1367 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1368 break; 1369 case BIU2400_R2HSTSLO: 1370 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1371 break; 1372 case BIU2400_R2HSTSHI: 1373 rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16; 1374 break; 1375 default: 1376 isp_prt(isp, ISP_LOGERR, 1377 "isp_pci_rd_reg_2400: unknown offset %x", regoff); 1378 rv = 0xffffffff; 1379 break; 1380 } 1381 return (rv); 1382 } 1383 1384 static void 1385 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) 1386 { 1387 int block = regoff & _BLK_REG_MASK; 1388 1389 switch (block) { 1390 case BIU_BLOCK: 1391 break; 1392 case MBOX_BLOCK: 1393 BXW2(isp, IspVirt2Off(isp, regoff), val); 1394 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); 1395 return; 1396 case SXP_BLOCK: 1397 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff); 1398 return; 1399 case RISC_BLOCK: 1400 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff); 1401 return; 1402 case DMA_BLOCK: 1403 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff); 1404 return; 1405 default: 1406 isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x", 1407 regoff); 1408 break; 1409 } 1410 1411 switch (regoff) { 1412 case BIU2400_FLASH_ADDR: 1413 case BIU2400_FLASH_DATA: 1414 case BIU2400_ICR: 1415 case BIU2400_ISR: 1416 case BIU2400_CSR: 1417 case BIU2400_REQINP: 1418 case BIU2400_REQOUTP: 1419 case BIU2400_RSPINP: 1420 case BIU2400_RSPOUTP: 1421 case BIU2400_PRI_REQINP: 1422 case BIU2400_PRI_REQOUTP: 1423 case BIU2400_ATIO_RSPINP: 1424 case BIU2400_ATIO_RSPOUTP: 1425 case BIU2400_HCCR: 1426 case BIU2400_GPIOD: 1427 case BIU2400_GPIOE: 1428 case BIU2400_HSEMA: 1429 BXW4(isp, IspVirt2Off(isp, regoff), val); 1430 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1); 1431 break; 1432 default: 1433 isp_prt(isp, ISP_LOGERR, 1434 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff); 1435 break; 1436 } 1437 } 1438 
1439 1440 struct imush { 1441 ispsoftc_t *isp; 1442 caddr_t vbase; 1443 int chan; 1444 int error; 1445 }; 1446 1447 static void imc(void *, bus_dma_segment_t *, int, int); 1448 static void imc1(void *, bus_dma_segment_t *, int, int); 1449 1450 static void 1451 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1452 { 1453 struct imush *imushp = (struct imush *) arg; 1454 1455 if (error) { 1456 imushp->error = error; 1457 return; 1458 } 1459 if (nseg != 1) { 1460 imushp->error = EINVAL; 1461 return; 1462 } 1463 isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len); 1464 imushp->isp->isp_rquest = imushp->vbase; 1465 imushp->isp->isp_rquest_dma = segs->ds_addr; 1466 segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp)); 1467 imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp)); 1468 imushp->isp->isp_result_dma = segs->ds_addr; 1469 imushp->isp->isp_result = imushp->vbase; 1470 1471 #ifdef ISP_TARGET_MODE 1472 if (IS_24XX(imushp->isp)) { 1473 segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp)); 1474 imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp)); 1475 imushp->isp->isp_atioq_dma = segs->ds_addr; 1476 imushp->isp->isp_atioq = imushp->vbase; 1477 } 1478 #endif 1479 } 1480 1481 static void 1482 imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1483 { 1484 struct imush *imushp = (struct imush *) arg; 1485 if (error) { 1486 imushp->error = error; 1487 return; 1488 } 1489 if (nseg != 1) { 1490 imushp->error = EINVAL; 1491 return; 1492 } 1493 isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len); 1494 FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr; 1495 FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase; 1496 } 1497 1498 static int 1499 isp_pci_mbxdma(ispsoftc_t *isp) 1500 { 1501 caddr_t base; 1502 uint32_t len; 1503 int i, error, ns, cmap = 0; 1504 
bus_size_t slim; /* segment size */ 1505 bus_addr_t llim; /* low limit of unavailable dma */ 1506 bus_addr_t hlim; /* high limit of unavailable dma */ 1507 struct imush im; 1508 1509 /* 1510 * Already been here? If so, leave... 1511 */ 1512 if (isp->isp_rquest) { 1513 return (0); 1514 } 1515 ISP_UNLOCK(isp); 1516 1517 if (isp->isp_maxcmds == 0) { 1518 isp_prt(isp, ISP_LOGERR, "maxcmds not set"); 1519 ISP_LOCK(isp); 1520 return (1); 1521 } 1522 1523 hlim = BUS_SPACE_MAXADDR; 1524 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1525 if (sizeof (bus_size_t) > 4) { 1526 slim = (bus_size_t) (1ULL << 32); 1527 } else { 1528 slim = (bus_size_t) (1UL << 31); 1529 } 1530 llim = BUS_SPACE_MAXADDR; 1531 } else { 1532 llim = BUS_SPACE_MAXADDR_32BIT; 1533 slim = (1UL << 24); 1534 } 1535 1536 len = isp->isp_maxcmds * sizeof (struct isp_pcmd); 1537 isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1538 if (isp->isp_osinfo.pcmd_pool == NULL) { 1539 isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds"); 1540 ISP_LOCK(isp); 1541 return (1); 1542 } 1543 1544 /* 1545 * XXX: We don't really support 64 bit target mode for parallel scsi yet 1546 */ 1547 #ifdef ISP_TARGET_MODE 1548 if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) { 1549 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1550 isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet"); 1551 ISP_LOCK(isp); 1552 return (1); 1553 } 1554 #endif 1555 1556 if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &isp->isp_osinfo.dmat)) { 1557 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1558 ISP_LOCK(isp); 1559 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1560 return (1); 1561 } 1562 1563 len = sizeof (isp_hdl_t) * isp->isp_maxcmds; 1564 isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1565 if (isp->isp_xflist == NULL) { 1566 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1567 ISP_LOCK(isp); 1568 
isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1569 return (1); 1570 } 1571 for (len = 0; len < isp->isp_maxcmds - 1; len++) { 1572 isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1]; 1573 } 1574 isp->isp_xffree = isp->isp_xflist; 1575 #ifdef ISP_TARGET_MODE 1576 len = sizeof (isp_hdl_t) * isp->isp_maxcmds; 1577 isp->isp_tgtlist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1578 if (isp->isp_tgtlist == NULL) { 1579 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1580 free(isp->isp_xflist, M_DEVBUF); 1581 ISP_LOCK(isp); 1582 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1583 return (1); 1584 } 1585 for (len = 0; len < isp->isp_maxcmds - 1; len++) { 1586 isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1]; 1587 } 1588 isp->isp_tgtfree = isp->isp_tgtlist; 1589 #endif 1590 1591 /* 1592 * Allocate and map the request and result queues (and ATIO queue 1593 * if we're a 2400 supporting target mode). 1594 */ 1595 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1596 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1597 #ifdef ISP_TARGET_MODE 1598 if (IS_24XX(isp)) { 1599 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1600 } 1601 #endif 1602 1603 ns = (len / PAGE_SIZE) + 1; 1604 1605 /* 1606 * Create a tag for the control spaces. We don't always need this 1607 * to be 32 bits, but we do this for simplicity and speed's sake. 
1608 */ 1609 if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, ns, slim, 0, &isp->isp_osinfo.cdmat)) { 1610 isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces"); 1611 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1612 free(isp->isp_xflist, M_DEVBUF); 1613 #ifdef ISP_TARGET_MODE 1614 free(isp->isp_tgtlist, M_DEVBUF); 1615 #endif 1616 ISP_LOCK(isp); 1617 return (1); 1618 } 1619 1620 if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) { 1621 isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len); 1622 bus_dma_tag_destroy(isp->isp_osinfo.cdmat); 1623 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1624 free(isp->isp_xflist, M_DEVBUF); 1625 #ifdef ISP_TARGET_MODE 1626 free(isp->isp_tgtlist, M_DEVBUF); 1627 #endif 1628 ISP_LOCK(isp); 1629 return (1); 1630 } 1631 1632 im.isp = isp; 1633 im.chan = 0; 1634 im.vbase = base; 1635 im.error = 0; 1636 1637 bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0); 1638 if (im.error) { 1639 isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error); 1640 goto bad; 1641 } 1642 1643 if (IS_FC(isp)) { 1644 for (cmap = 0; cmap < isp->isp_nchan; cmap++) { 1645 struct isp_fc *fc = ISP_FC_PC(isp, cmap); 1646 if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) { 1647 goto bad; 1648 } 1649 if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &fc->tdmap) != 0) { 1650 bus_dma_tag_destroy(fc->tdmat); 1651 goto bad; 1652 } 1653 im.isp = isp; 1654 im.chan = cmap; 1655 im.vbase = base; 1656 im.error = 0; 1657 bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0); 1658 if (im.error) { 1659 bus_dmamem_free(fc->tdmat, base, fc->tdmap); 1660 bus_dma_tag_destroy(fc->tdmat); 1661 goto 
bad; 1662 } 1663 } 1664 } 1665 1666 for (i = 0; i < isp->isp_maxcmds; i++) { 1667 struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i]; 1668 error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap); 1669 if (error) { 1670 isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error); 1671 while (--i >= 0) { 1672 bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap); 1673 } 1674 goto bad; 1675 } 1676 callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0); 1677 if (i == isp->isp_maxcmds-1) { 1678 pcmd->next = NULL; 1679 } else { 1680 pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1]; 1681 } 1682 } 1683 isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0]; 1684 ISP_LOCK(isp); 1685 return (0); 1686 1687 bad: 1688 while (--cmap >= 0) { 1689 struct isp_fc *fc = ISP_FC_PC(isp, cmap); 1690 bus_dmamem_free(fc->tdmat, base, fc->tdmap); 1691 bus_dma_tag_destroy(fc->tdmat); 1692 } 1693 bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap); 1694 bus_dma_tag_destroy(isp->isp_osinfo.cdmat); 1695 free(isp->isp_xflist, M_DEVBUF); 1696 #ifdef ISP_TARGET_MODE 1697 free(isp->isp_tgtlist, M_DEVBUF); 1698 #endif 1699 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1700 isp->isp_rquest = NULL; 1701 ISP_LOCK(isp); 1702 return (1); 1703 } 1704 1705 typedef struct { 1706 ispsoftc_t *isp; 1707 void *cmd_token; 1708 void *rq; /* original request */ 1709 int error; 1710 bus_size_t mapsize; 1711 } mush_t; 1712 1713 #define MUSHERR_NOQENTRIES -2 1714 1715 #ifdef ISP_TARGET_MODE 1716 static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int); 1717 static void tdma2(void *, bus_dma_segment_t *, int, int); 1718 1719 static void 1720 tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error) 1721 { 1722 mush_t *mp; 1723 mp = (mush_t *)arg; 1724 mp->mapsize = mapsize; 1725 tdma2(arg, dm_segs, nseg, error); 1726 } 1727 1728 static void 1729 tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1730 { 
1731 mush_t *mp; 1732 ispsoftc_t *isp; 1733 struct ccb_scsiio *csio; 1734 isp_ddir_t ddir; 1735 ispreq_t *rq; 1736 1737 mp = (mush_t *) arg; 1738 if (error) { 1739 mp->error = error; 1740 return; 1741 } 1742 csio = mp->cmd_token; 1743 isp = mp->isp; 1744 rq = mp->rq; 1745 if (nseg) { 1746 if (sizeof (bus_addr_t) > 4) { 1747 if (nseg >= ISP_NSEG64_MAX) { 1748 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX); 1749 mp->error = EFAULT; 1750 return; 1751 } 1752 if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) { 1753 rq->req_header.rqs_entry_type = RQSTYPE_CTIO3; 1754 } 1755 } else { 1756 if (nseg >= ISP_NSEG_MAX) { 1757 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX); 1758 mp->error = EFAULT; 1759 return; 1760 } 1761 } 1762 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1763 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); 1764 ddir = ISP_TO_DEVICE; 1765 } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1766 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); 1767 ddir = ISP_FROM_DEVICE; 1768 } else { 1769 dm_segs = NULL; 1770 nseg = 0; 1771 ddir = ISP_NOXFR; 1772 } 1773 } else { 1774 dm_segs = NULL; 1775 nseg = 0; 1776 ddir = ISP_NOXFR; 1777 } 1778 1779 if (isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len) != CMD_QUEUED) { 1780 mp->error = MUSHERR_NOQENTRIES; 1781 } 1782 } 1783 #endif 1784 1785 static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int); 1786 static void dma2(void *, bus_dma_segment_t *, int, int); 1787 1788 static void 1789 dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error) 1790 { 1791 mush_t *mp; 1792 mp = (mush_t *)arg; 1793 mp->mapsize = mapsize; 1794 dma2(arg, dm_segs, nseg, error); 1795 } 1796 1797 static void 1798 dma2(void *arg, 
bus_dma_segment_t *dm_segs, int nseg, int error) 1799 { 1800 mush_t *mp; 1801 ispsoftc_t *isp; 1802 struct ccb_scsiio *csio; 1803 isp_ddir_t ddir; 1804 ispreq_t *rq; 1805 1806 mp = (mush_t *) arg; 1807 if (error) { 1808 mp->error = error; 1809 return; 1810 } 1811 csio = mp->cmd_token; 1812 isp = mp->isp; 1813 rq = mp->rq; 1814 if (nseg) { 1815 if (sizeof (bus_addr_t) > 4) { 1816 if (nseg >= ISP_NSEG64_MAX) { 1817 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX); 1818 mp->error = EFAULT; 1819 return; 1820 } 1821 if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) { 1822 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 1823 } else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) { 1824 rq->req_header.rqs_entry_type = RQSTYPE_A64; 1825 } 1826 } else { 1827 if (nseg >= ISP_NSEG_MAX) { 1828 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX); 1829 mp->error = EFAULT; 1830 return; 1831 } 1832 } 1833 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1834 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); 1835 ddir = ISP_FROM_DEVICE; 1836 } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1837 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); 1838 ddir = ISP_TO_DEVICE; 1839 } else { 1840 ddir = ISP_NOXFR; 1841 } 1842 } else { 1843 dm_segs = NULL; 1844 nseg = 0; 1845 ddir = ISP_NOXFR; 1846 } 1847 1848 if (isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir) != CMD_QUEUED) { 1849 mp->error = MUSHERR_NOQENTRIES; 1850 } 1851 } 1852 1853 static int 1854 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff) 1855 { 1856 mush_t mush, *mp; 1857 void (*eptr)(void *, bus_dma_segment_t *, int, int); 1858 void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int); 1859 1860 mp = &mush; 1861 mp->isp = isp; 1862 mp->cmd_token = csio; 1863 mp->rq = ff; 1864 
mp->error = 0; 1865 mp->mapsize = 0; 1866 1867 #ifdef ISP_TARGET_MODE 1868 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1869 eptr = tdma2; 1870 eptr2 = tdma2_2; 1871 } else 1872 #endif 1873 { 1874 eptr = dma2; 1875 eptr2 = dma2_2; 1876 } 1877 1878 1879 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) { 1880 (*eptr)(mp, NULL, 0, 0); 1881 } else if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1882 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1883 int error; 1884 error = bus_dmamap_load(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 1885 #if 0 1886 xpt_print(csio->ccb_h.path, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__, csio->data_ptr, csio->dxfer_len, error); 1887 #endif 1888 1889 if (error == EINPROGRESS) { 1890 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); 1891 mp->error = EINVAL; 1892 isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported"); 1893 } else if (error && mp->error == 0) { 1894 #ifdef DIAGNOSTIC 1895 isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); 1896 #endif 1897 mp->error = error; 1898 } 1899 } else { 1900 /* Pointer to physical buffer */ 1901 struct bus_dma_segment seg; 1902 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 1903 seg.ds_len = csio->dxfer_len; 1904 (*eptr)(mp, &seg, 1, 0); 1905 } 1906 } else { 1907 struct bus_dma_segment *segs; 1908 1909 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1910 isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported"); 1911 mp->error = EINVAL; 1912 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1913 struct uio sguio; 1914 int error; 1915 1916 /* 1917 * We're taking advantage of the fact that 1918 * the pointer/length sizes and layout of the iovec 1919 * structure are the same as the bus_dma_segment 1920 * structure. This might be a little dangerous, 1921 * but only if they change the structures, which 1922 * seems unlikely. 
1923 */ 1924 KASSERT((sizeof (sguio.uio_iov) == sizeof (csio->data_ptr) && 1925 sizeof (sguio.uio_iovcnt) >= sizeof (csio->sglist_cnt) && 1926 sizeof (sguio.uio_resid) >= sizeof (csio->dxfer_len)), ("Ken's assumption failed")); 1927 sguio.uio_iov = (struct iovec *)csio->data_ptr; 1928 sguio.uio_iovcnt = csio->sglist_cnt; 1929 sguio.uio_resid = csio->dxfer_len; 1930 sguio.uio_segflg = UIO_SYSSPACE; 1931 1932 error = bus_dmamap_load_uio(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, &sguio, eptr2, mp, 0); 1933 1934 if (error != 0 && mp->error == 0) { 1935 isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); 1936 mp->error = error; 1937 } 1938 } else { 1939 /* Just use the segments provided */ 1940 segs = (struct bus_dma_segment *) csio->data_ptr; 1941 (*eptr)(mp, segs, csio->sglist_cnt, 0); 1942 } 1943 } 1944 if (mp->error) { 1945 int retval = CMD_COMPLETE; 1946 if (mp->error == MUSHERR_NOQENTRIES) { 1947 retval = CMD_EAGAIN; 1948 } else if (mp->error == EFBIG) { 1949 XS_SETERR(csio, CAM_REQ_TOO_BIG); 1950 } else if (mp->error == EINVAL) { 1951 XS_SETERR(csio, CAM_REQ_INVALID); 1952 } else { 1953 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 1954 } 1955 return (retval); 1956 } 1957 return (CMD_QUEUED); 1958 } 1959 1960 static void 1961 isp_pci_reset0(ispsoftc_t *isp) 1962 { 1963 ISP_DISABLE_INTS(isp); 1964 } 1965 1966 static void 1967 isp_pci_reset1(ispsoftc_t *isp) 1968 { 1969 if (!IS_24XX(isp)) { 1970 /* Make sure the BIOS is disabled */ 1971 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 1972 } 1973 /* and enable interrupts */ 1974 ISP_ENABLE_INTS(isp); 1975 } 1976 1977 static void 1978 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) 1979 { 1980 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1981 if (msg) 1982 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 1983 else 1984 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 1985 if (IS_SCSI(isp)) 1986 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 1987 else 1988 printf(" biu_csr=%x", 
ISP_READ(isp, BIU2100_CSR)); 1989 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 1990 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 1991 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 1992 1993 1994 if (IS_SCSI(isp)) { 1995 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 1996 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 1997 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 1998 ISP_READ(isp, CDMA_FIFO_STS)); 1999 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2000 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2001 ISP_READ(isp, DDMA_FIFO_STS)); 2002 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2003 ISP_READ(isp, SXP_INTERRUPT), 2004 ISP_READ(isp, SXP_GROSS_ERR), 2005 ISP_READ(isp, SXP_PINS_CTRL)); 2006 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2007 } 2008 printf(" mbox regs: %x %x %x %x %x\n", 2009 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2010 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2011 ISP_READ(isp, OUTMAILBOX4)); 2012 printf(" PCI Status Command/Status=%x\n", 2013 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2014 } 2015