/*-
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/isp/isp_freebsd.h>

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static int isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);

static void isp_pci_reset0(ispsoftc_t *);
static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

static struct ispmdvec mdvec_2500 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

#ifndef PCIM_CMD_INVEN
#define PCIM_CMD_INVEN		0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define PCIM_CMD_BUSMASTEREN	0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define PCIM_CMD_PERRESPEN	0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define PCIM_CMD_SEREN		0x0100
#endif
#ifndef PCIM_CMD_INTX_DISABLE
#define PCIM_CMD_INTX_DISABLE	0x0400
#endif

#ifndef PCIR_COMMAND
#define PCIR_COMMAND	0x04
#endif

#ifndef PCIR_CACHELNSZ
#define PCIR_CACHELNSZ	0x0c
#endif

#ifndef PCIR_LATTIMER
#define PCIR_LATTIMER	0x0d
#endif

#ifndef PCIR_ROMADDR
#define PCIR_ROMADDR	0x30
#endif

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP10160
#define PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2322
#define PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2422
#define PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2432
#define PCI_PRODUCT_QLOGIC_ISP2432	0x2432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2532
#define PCI_PRODUCT_QLOGIC_ISP2532	0x2532
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6312
#define PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6322
#define PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP5432
#define PCI_PRODUCT_QLOGIC_ISP5432	0x5432
#endif

#define PCI_QLOGIC_ISP5432	\
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2432	\
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2532	\
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define AMI_RAID_SUBVENDOR_ID	0x101e

#define IO_MAP_REG	0x10
#define MEM_MAP_REG	0x14

#define PCI_DFLT_LTNCY	0x40
#define PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


#define ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
struct isp_pcisoftc {
	ispsoftc_t		pci_isp;
	device_t		pci_dev;
	struct resource *	regs;
	void *			irq;
	int			iqd;
	int			rtp;
	int			rgd;
	void *			ih;
	int16_t			pci_poff[_NREG_BLKS];
	bus_dma_tag_t		dmat;
	int			msicount;
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	DEVMETHOD(device_detach,	isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp, int *nvp)
{
	int tval;

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "disable", &tval) == 0 && tval) {
		device_printf(dev, "disabled at user request\n");
		isp->isp_osinfo.disabled = 1;
		return;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval < 127) {
		*nvp = tval;
	} else {
		*nvp = 0;
	}
	tval = 1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "autoconfig", &tval);
	isp_autoconfig = tval;
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "forcemulti", &tval) == 0 && tval != 0) {
		isp->isp_osinfo.forcemulti = 1;
	}
}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int tval;
	/*
	 * Which we should try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
	 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}

static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval;

	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "iid", &tval)) {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	tval = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "role", &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_INITIATOR|ISP_ROLE_TARGET:
			device_printf(dev, "setting role to 0x%x\n", tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}

	if (IS_SCSI(isp)) {
		ISP_SPI_PC(isp, chan)->def_role = tval;
		return;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev), "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "hysteresis", &tval);
	if (tval >= 0 && tval < 256) {
		ISP_FC_PC(isp, chan)->hysteresis = tval;
	} else {
		ISP_FC_PC(isp, chan)->hysteresis = isp_fabric_hysteresis;
	}

	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "loop_down_limit", &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "gone_device_time", &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}

static int
isp_pci_attach(device_t dev)
{
	int i, m1, m2, locksetup = 0;
	int isp_nvports = 0;
	uint32_t data, cmd, linesz, did;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	size_t psize, xsize;
	char fwname[32];

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));

	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;

	/*
	 * Get Generic Options
	 */
	isp_get_generic_options(dev, isp, &isp_nvports);

	/*
	 * Check to see if options have us disabled
	 */
	if (isp->isp_osinfo.disabled) {
		/*
		 * But return zero to preserve unit numbering
		 */
		return (0);
	}

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);

	linesz = PCI_DFLT_LNSZ;
	pcs->irq = pcs->regs = NULL;
	pcs->rgd = pcs->rtp = pcs->iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		pcs->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		pcs->rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
	}
	if (pcs->regs == NULL && (cmd & m2)) {
		pcs->rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		pcs->rgd = (m2 == PCIM_CMD_MEMEN)?
		    MEM_MAP_REG : IO_MAP_REG;
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n", (pcs->rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	isp->isp_bus_tag = rman_get_bustag(pcs->regs);
	isp->isp_bus_handle = rman_get_bushandle(pcs->regs);

	pcs->pci_dev = dev;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	/*
	 * The 'it' suffix really only matters for SCSI cards in target mode.
	 */
	isp->isp_osinfo.fw = NULL;
	if (IS_SCSI(isp) && (ISP_SPI_PC(isp, 0)->def_role & ISP_ROLE_TARGET)) {
		snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	} else if (IS_24XX(isp) && (isp->isp_nchan > 1 || isp->isp_osinfo.forcemulti)) {
		snprintf(fwname, sizeof (fwname), "isp_%04x_multi", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;

	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}

	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	/*
	 * Do MSI
	 *
	 * NB: MSI-X needs to be disabled for the 2432 (PCI-Express)
	 */
	if (IS_24XX(isp) || IS_2322(isp)) {
		pcs->msicount = pci_msi_count(dev);
		if (pcs->msicount > 1) {
			pcs->msicount = 1;
		}
		if (pci_alloc_msi(dev, &pcs->msicount) == 0) {
			pcs->iqd = 1;
		} else {
			pcs->iqd = 0;
		}
	}
	pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
	if (pcs->irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp, 1);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state == ISP_INITSTATE) {
		isp->isp_state = ISP_RUNSTATE;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	if (locksetup) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
	if (pcs->irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs) {
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	}
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	int status;

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		return (ENXIO);
	}
	isp = (ispsoftc_t *) pcs;
	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_uninit(isp);
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	ISP_UNLOCK(isp);
	mtx_destroy(&isp->isp_osinfo.lock);
	(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (0);
}

#define IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define BXR2(isp, off)		\
	bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define BXW2(isp, off, v)	\
	bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
#define BXR4(isp, off)		\
	bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define BXW4(isp, off, v)	\
	bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v)


static ISP_INLINE int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	uint32_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(isp, IspVirt2Off(isp, off));
		val1 = BXR2(isp, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbp)
{
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
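	/*
	 * Mask the raw status down to the interrupt bits this chip can
	 * post and the mailbox semaphore lock bit before deciding whether
	 * anything actually needs service.
	 */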
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p)
{
	uint32_t hccr;
	uint32_t r2hisr;

	if ((BXR2(isp, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO16_1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
		return (0);
	}
}

static int
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p)
{
	uint32_t r2hisr;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
	case ISP2400R2HST_ROM_MBX_OK:
	case ISP2400R2HST_ROM_MBX_FAIL:
	case ISP2400R2HST_MBX_OK:
	case ISP2400R2HST_MBX_FAIL:
	case ISP2400R2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISP2400R2HST_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RQST_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		return (0);
	}
}

static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}

}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(isp, IspVirt2Off(isp, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}


	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
		    regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
		break;
	}
}


struct imush {
	ispsoftc_t *isp;
	caddr_t vbase;
	int chan;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);
static void imc1(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;

	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
	imushp->isp->isp_rquest = imushp->vbase;
	imushp->isp->isp_rquest_dma = segs->ds_addr;
	segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
	imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
	imushp->isp->isp_result_dma = segs->ds_addr;
	imushp->isp->isp_result = imushp->vbase;

#ifdef ISP_TARGET_MODE
	if (IS_24XX(imushp->isp)) {
		segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
		imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
		imushp->isp->isp_atioq_dma = segs->ds_addr;
		imushp->isp->isp_atioq = imushp->vbase;
	}
#endif
}

static void
imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
	FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr;
	FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase;
}

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len;
	int i, error, ns, cmap = 0;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}
	ISP_UNLOCK(isp);

	if (isp->isp_maxcmds == 0) {
		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
		ISP_LOCK(isp);
		return (1);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		if (sizeof (bus_size_t) > 4) {
			slim = (bus_size_t) (1ULL << 32);
		} else {
			slim = (bus_size_t) (1UL << 31);
		}
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1UL << 24);
	}

	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_osinfo.pcmd_pool == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds");
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
	 */
#ifdef ISP_TARGET_MODE
	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
		ISP_LOCK(isp);
		return (1);
	}
#endif

	if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &isp->isp_osinfo.dmat)) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	}
	isp->isp_xffree = isp->isp_xflist;
#ifdef ISP_TARGET_MODE
	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_tgtlist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1];
	}
	isp->isp_tgtfree = isp->isp_tgtlist;
#endif

	/*
	 * Allocate and map the request and result queues (and ATIO queue
	 * if we're a 2400 supporting target mode).
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
#ifdef ISP_TARGET_MODE
	if (IS_24XX(isp)) {
		len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	}
#endif

	ns = (len / PAGE_SIZE) + 1;

	/*
	 * Create a tag for the control spaces. We don't always need this
	 * to be 32 bits, but we do this for simplicity and speed's sake.
	 */
	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, ns, slim, 0, &isp->isp_osinfo.cdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces");
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	im.isp = isp;
	im.chan = 0;
	im.vbase = base;
	im.error = 0;

	bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	if (IS_FC(isp)) {
		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
			if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) {
				goto bad;
			}
			if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &fc->tdmap) != 0) {
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
			im.isp = isp;
			im.chan = cmap;
			im.vbase = base;
			im.error = 0;
			bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0);
			if (im.error) {
				bus_dmamem_free(fc->tdmat, base, fc->tdmap);
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
		}
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0);
		if (i == isp->isp_maxcmds-1) {
			pcmd->next = NULL;
		} else {
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
		}
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
	ISP_LOCK(isp);
	return (0);

bad:
	while (--cmap >= 0) {
		struct isp_fc *fc = ISP_FC_PC(isp, cmap);
		bus_dmamem_free(fc->tdmat, base, fc->tdmap);
		bus_dma_tag_destroy(fc->tdmat);
	}
	bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap);
	bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
	isp->isp_rquest = NULL;
	ISP_LOCK(isp);
	return (1);
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
	bus_size_t mapsize;
} mush_t;

#define MUSHERR_NOQENTRIES	-2

#ifdef ISP_TARGET_MODE
static void tdma2_2(void *,
    bus_dma_segment_t *, int, bus_size_t, int);
static void tdma2(void *, bus_dma_segment_t *, int, int);

static void
tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	tdma2(arg, dm_segs, nseg, error);
}

static void
tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (sizeof (bus_addr_t) > 4) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else {
			dm_segs = NULL;
			nseg = 0;
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	if (isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len) != CMD_QUEUED) {
		mp->error = MUSHERR_NOQENTRIES;
	}
}
#endif

static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	dma2(arg, dm_segs, nseg, error);
}

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (sizeof (bus_addr_t) > 4) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
				rq->req_header.rqs_entry_type = RQSTYPE_A64;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat,
			    PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else {
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	if (isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir) != CMD_QUEUED) {
		mp->error = MUSHERR_NOQENTRIES;
	}
}

static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);
	void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;
	mp->mapsize = 0;

#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		eptr = tdma2;
		eptr2 = tdma2_2;
	} else
#endif
	{
		eptr = dma2;
		eptr2 = dma2_2;
	}


	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) {
		(*eptr)(mp, NULL, 0, 0);
	} else if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error;
			error = bus_dmamap_load(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
#if 0
			xpt_print(csio->ccb_h.path, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__, csio->data_ptr, csio->dxfer_len, error);
#endif

			if (error == EINPROGRESS) {
				bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			struct uio sguio;
			int error;

			/*
			 * We're taking advantage of the fact that
			 * the pointer/length sizes and layout of the iovec
			 * structure are the same as the bus_dma_segment
			 * structure. This might be a little dangerous,
			 * but only if they change the structures, which
			 * seems unlikely.
			 */
			KASSERT((sizeof (sguio.uio_iov) == sizeof (csio->data_ptr) &&
			    sizeof (sguio.uio_iovcnt) >= sizeof (csio->sglist_cnt) &&
			    sizeof (sguio.uio_resid) >= sizeof (csio->dxfer_len)), ("Ken's assumption failed"));
			sguio.uio_iov = (struct iovec *)csio->data_ptr;
			sguio.uio_iovcnt = csio->sglist_cnt;
			sguio.uio_resid = csio->dxfer_len;
			sguio.uio_segflg = UIO_SYSSPACE;

			error = bus_dmamap_load_uio(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, &sguio, eptr2, mp, 0);

			if (error != 0 && mp->error == 0) {
				isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
				mp->error = error;
			}
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
	return (CMD_QUEUED);
}

static void
isp_pci_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_pci_reset1(ispsoftc_t *isp)
{
	if (!IS_24XX(isp)) {
		/* Make sure the BIOS is disabled */
		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	}
	/* and enable interrupts */
	ISP_ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}