1 /* $FreeBSD$ */ 2 /* 3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 4 * FreeBSD Version. 5 * 6 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice immediately at the beginning of the file, without modification, 13 * this list of conditions, and the following disclaimer. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/module.h> 34 #include <sys/bus.h> 35 36 #include <pci/pcireg.h> 37 #include <pci/pcivar.h> 38 39 #include <machine/bus_memio.h> 40 #include <machine/bus_pio.h> 41 #include <machine/bus.h> 42 #include <machine/resource.h> 43 #include <sys/rman.h> 44 #include <sys/malloc.h> 45 46 #include <dev/isp/isp_freebsd.h> 47 48 static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int); 49 static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t); 50 static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int); 51 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t); 52 static int 53 isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 54 static int 55 isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 56 static int isp_pci_mbxdma(struct ispsoftc *); 57 static int 58 isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t); 59 static void 60 isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t); 61 62 static void isp_pci_reset1(struct ispsoftc *); 63 static void isp_pci_dumpregs(struct ispsoftc *, const char *); 64 65 static struct ispmdvec mdvec = { 66 isp_pci_rd_isr, 67 isp_pci_rd_reg, 68 isp_pci_wr_reg, 69 isp_pci_mbxdma, 70 isp_pci_dmasetup, 71 isp_pci_dmateardown, 72 NULL, 73 isp_pci_reset1, 74 isp_pci_dumpregs, 75 NULL, 76 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 77 }; 78 79 static struct ispmdvec mdvec_1080 = { 80 isp_pci_rd_isr, 81 isp_pci_rd_reg_1080, 82 isp_pci_wr_reg_1080, 83 isp_pci_mbxdma, 84 isp_pci_dmasetup, 85 isp_pci_dmateardown, 86 NULL, 87 isp_pci_reset1, 88 isp_pci_dumpregs, 89 NULL, 90 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 91 }; 92 93 static struct ispmdvec mdvec_12160 = { 94 isp_pci_rd_isr, 95 isp_pci_rd_reg_1080, 96 isp_pci_wr_reg_1080, 97 isp_pci_mbxdma, 98 isp_pci_dmasetup, 99 isp_pci_dmateardown, 100 NULL, 101 isp_pci_reset1, 102 isp_pci_dumpregs, 103 NULL, 104 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 105 }; 106 107 static struct ispmdvec mdvec_2100 = { 108 isp_pci_rd_isr, 109 isp_pci_rd_reg, 110 isp_pci_wr_reg, 111 isp_pci_mbxdma, 112 isp_pci_dmasetup, 113 isp_pci_dmateardown, 114 NULL, 115 isp_pci_reset1, 116 isp_pci_dumpregs 117 }; 118 119 static struct ispmdvec mdvec_2200 = { 120 isp_pci_rd_isr, 121 isp_pci_rd_reg, 122 isp_pci_wr_reg, 123 isp_pci_mbxdma, 124 isp_pci_dmasetup, 125 isp_pci_dmateardown, 126 NULL, 127 isp_pci_reset1, 128 isp_pci_dumpregs 129 }; 130 131 static struct ispmdvec mdvec_2300 = { 132 isp_pci_rd_isr_2300, 133 isp_pci_rd_reg, 134 isp_pci_wr_reg, 135 isp_pci_mbxdma, 136 isp_pci_dmasetup, 137 isp_pci_dmateardown, 138 NULL, 139 isp_pci_reset1, 140 isp_pci_dumpregs 141 }; 142 143 #ifndef PCIM_CMD_INVEN 144 #define PCIM_CMD_INVEN 0x10 145 #endif 146 #ifndef PCIM_CMD_BUSMASTEREN 147 #define PCIM_CMD_BUSMASTEREN 0x0004 148 #endif 149 #ifndef PCIM_CMD_PERRESPEN 150 #define PCIM_CMD_PERRESPEN 0x0040 151 #endif 152 #ifndef PCIM_CMD_SEREN 153 #define PCIM_CMD_SEREN 0x0100 154 #endif 155 156 #ifndef PCIR_COMMAND 157 #define PCIR_COMMAND 0x04 158 #endif 159 160 #ifndef PCIR_CACHELNSZ 161 #define PCIR_CACHELNSZ 0x0c 162 #endif 163 164 #ifndef PCIR_LATTIMER 165 #define PCIR_LATTIMER 0x0d 166 #endif 167 168 #ifndef PCIR_ROMADDR 169 #define PCIR_ROMADDR 0x30 170 #endif 171 172 #ifndef PCI_VENDOR_QLOGIC 173 #define PCI_VENDOR_QLOGIC 0x1077 174 #endif 175 176 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 177 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 178 #endif 179 180 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 181 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 182 #endif 183 184 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 185 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 186 #endif 187 188 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 189 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 190 #endif 191 192 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 193 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 194 #endif 195 196 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 197 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 198 #endif 199 200 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 201 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 202 #endif 203 204 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 205 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 206 #endif 207 208 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 209 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 210 #endif 211 212 #define PCI_QLOGIC_ISP1020 \ 213 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 214 215 #define PCI_QLOGIC_ISP1080 \ 216 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 217 218 #define PCI_QLOGIC_ISP12160 \ 219 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 220 221 #define PCI_QLOGIC_ISP1240 \ 222 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 223 224 #define PCI_QLOGIC_ISP1280 \ 225 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 226 227 #define PCI_QLOGIC_ISP2100 \ 228 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 229 230 #define PCI_QLOGIC_ISP2200 \ 231 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 232 233 #define PCI_QLOGIC_ISP2300 \ 234 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 235 236 #define PCI_QLOGIC_ISP2312 \ 237 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 238 239 /* 240 * Odd case for some AMI raid cards... We need to *not* attach to this. 241 */ 242 #define AMI_RAID_SUBVENDOR_ID 0x101e 243 244 #define IO_MAP_REG 0x10 245 #define MEM_MAP_REG 0x14 246 247 #define PCI_DFLT_LTNCY 0x40 248 #define PCI_DFLT_LNSZ 0x10 249 250 static int isp_pci_probe (device_t); 251 static int isp_pci_attach (device_t); 252 253 254 struct isp_pcisoftc { 255 struct ispsoftc pci_isp; 256 device_t pci_dev; 257 struct resource * pci_reg; 258 bus_space_tag_t pci_st; 259 bus_space_handle_t pci_sh; 260 void * ih; 261 int16_t pci_poff[_NREG_BLKS]; 262 bus_dma_tag_t dmat; 263 bus_dmamap_t *dmaps; 264 }; 265 extern ispfwfunc *isp_get_firmware_p; 266 267 static device_method_t isp_pci_methods[] = { 268 /* Device interface */ 269 DEVMETHOD(device_probe, isp_pci_probe), 270 DEVMETHOD(device_attach, isp_pci_attach), 271 { 0, 0 } 272 }; 273 static void isp_pci_intr(void *); 274 275 static driver_t isp_pci_driver = { 276 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 277 }; 278 static devclass_t isp_devclass; 279 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 280 281 static int 282 isp_pci_probe(device_t dev) 283 { 284 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 285 case PCI_QLOGIC_ISP1020: 286 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 287 break; 288 case PCI_QLOGIC_ISP1080: 289 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 290 break; 291 case PCI_QLOGIC_ISP1240: 292 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 293 break; 294 case PCI_QLOGIC_ISP1280: 295 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 296 break; 297 case PCI_QLOGIC_ISP12160: 298 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 299 return (ENXIO); 300 } 301 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 302 break; 303 case PCI_QLOGIC_ISP2100: 304 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 305 break; 306 case PCI_QLOGIC_ISP2200: 307 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 308 break; 309 case PCI_QLOGIC_ISP2300: 310 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 311 break; 312 case PCI_QLOGIC_ISP2312: 313 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 314 break; 315 default: 316 return (ENXIO); 317 } 318 if (isp_announced == 0 && bootverbose) { 319 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 320 "Core Version %d.%d\n", 321 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 322 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 323 isp_announced++; 324 } 325 /* 326 * XXXX: Here is where we might load the f/w module 327 * XXXX: (or increase a reference count to it). 328 */ 329 return (0); 330 } 331 332 static int 333 isp_pci_attach(device_t dev) 334 { 335 struct resource *regs, *irq; 336 int tval, rtp, rgd, iqd, m1, m2, isp_debug, role; 337 u_int32_t data, cmd, linesz, psize, basetype; 338 struct isp_pcisoftc *pcs; 339 struct ispsoftc *isp = NULL; 340 struct ispmdvec *mdvp; 341 const char *sptr; 342 int locksetup = 0; 343 344 /* 345 * Figure out if we're supposed to skip this one. 346 * If we are, we actually go to ISP_ROLE_NONE. 347 */ 348 349 tval = 0; 350 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 351 "disable", &tval) == 0 && tval) { 352 device_printf(dev, "device is disabled\n"); 353 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 354 return (0); 355 } 356 357 role = 0; 358 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 359 "role", &role) == 0 && 360 ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) { 361 device_printf(dev, "setting role to 0x%x\n", role); 362 } else { 363 #ifdef ISP_TARGET_MODE 364 role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET; 365 #else 366 role = ISP_DEFAULT_ROLES; 367 #endif 368 } 369 370 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); 371 if (pcs == NULL) { 372 device_printf(dev, "cannot allocate softc\n"); 373 return (ENOMEM); 374 } 375 376 /* 377 * Figure out which we should try first - memory mapping or i/o mapping? 378 */ 379 #ifdef __alpha__ 380 m1 = PCIM_CMD_MEMEN; 381 m2 = PCIM_CMD_PORTEN; 382 #else 383 m1 = PCIM_CMD_PORTEN; 384 m2 = PCIM_CMD_MEMEN; 385 #endif 386 387 tval = 0; 388 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 389 "prefer_iomap", &tval) == 0 && tval != 0) { 390 m1 = PCIM_CMD_PORTEN; 391 m2 = PCIM_CMD_MEMEN; 392 } 393 tval = 0; 394 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 395 "prefer_memmap", &tval) == 0 && tval != 0) { 396 m1 = PCIM_CMD_MEMEN; 397 m2 = PCIM_CMD_PORTEN; 398 } 399 400 linesz = PCI_DFLT_LNSZ; 401 irq = regs = NULL; 402 rgd = rtp = iqd = 0; 403 404 cmd = pci_read_config(dev, PCIR_COMMAND, 1); 405 if (cmd & m1) { 406 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 407 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 408 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); 409 } 410 if (regs == NULL && (cmd & m2)) { 411 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 412 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 413 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); 414 } 415 if (regs == NULL) { 416 device_printf(dev, "unable to map any ports\n"); 417 goto bad; 418 } 419 if (bootverbose) 420 device_printf(dev, "using %s space register mapping\n", 421 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 422 pcs->pci_dev = dev; 423 pcs->pci_reg = regs; 424 pcs->pci_st = rman_get_bustag(regs); 425 pcs->pci_sh = rman_get_bushandle(regs); 426 427 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 428 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 429 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 430 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 431 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 432 mdvp = &mdvec; 433 basetype = ISP_HA_SCSI_UNKNOWN; 434 psize = sizeof (sdparam); 435 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 436 mdvp = &mdvec; 437 basetype = ISP_HA_SCSI_UNKNOWN; 438 psize = sizeof (sdparam); 439 } 440 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 441 mdvp = &mdvec_1080; 442 basetype = ISP_HA_SCSI_1080; 443 psize = sizeof (sdparam); 444 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 445 ISP1080_DMA_REGS_OFF; 446 } 447 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 448 mdvp = &mdvec_1080; 449 basetype = ISP_HA_SCSI_1240; 450 psize = 2 * sizeof (sdparam); 451 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 452 ISP1080_DMA_REGS_OFF; 453 } 454 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 455 mdvp = &mdvec_1080; 456 basetype = ISP_HA_SCSI_1280; 457 psize = 2 * sizeof (sdparam); 458 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 459 ISP1080_DMA_REGS_OFF; 460 } 461 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 462 mdvp = &mdvec_12160; 463 basetype = ISP_HA_SCSI_12160; 464 psize = 2 * sizeof (sdparam); 465 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 466 ISP1080_DMA_REGS_OFF; 467 } 468 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 469 mdvp = &mdvec_2100; 470 basetype = ISP_HA_FC_2100; 471 psize = sizeof (fcparam); 472 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 473 PCI_MBOX_REGS2100_OFF; 474 if (pci_get_revid(dev) < 3) { 475 /* 476 * XXX: Need to get the actual revision 477 * XXX: number of the 2100 FB. At any rate, 478 * XXX: lower cache line size for early revision 479 * XXX; boards. 480 */ 481 linesz = 1; 482 } 483 } 484 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 485 mdvp = &mdvec_2200; 486 basetype = ISP_HA_FC_2200; 487 psize = sizeof (fcparam); 488 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 489 PCI_MBOX_REGS2100_OFF; 490 } 491 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 492 mdvp = &mdvec_2300; 493 basetype = ISP_HA_FC_2300; 494 psize = sizeof (fcparam); 495 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 496 PCI_MBOX_REGS2300_OFF; 497 } 498 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) { 499 mdvp = &mdvec_2300; 500 basetype = ISP_HA_FC_2312; 501 psize = sizeof (fcparam); 502 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 503 PCI_MBOX_REGS2300_OFF; 504 } 505 isp = &pcs->pci_isp; 506 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 507 if (isp->isp_param == NULL) { 508 device_printf(dev, "cannot allocate parameter data\n"); 509 goto bad; 510 } 511 isp->isp_mdvec = mdvp; 512 isp->isp_type = basetype; 513 isp->isp_revision = pci_get_revid(dev); 514 isp->isp_role = role; 515 isp->isp_dev = dev; 516 517 /* 518 * Try and find firmware for this device. 519 */ 520 521 if (isp_get_firmware_p) { 522 int device = (int) pci_get_device(dev); 523 #ifdef ISP_TARGET_MODE 524 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 525 #else 526 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 527 #endif 528 } 529 530 /* 531 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 532 * are set. 533 */ 534 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 535 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 536 if (IS_2300(isp)) { /* per QLogic errata */ 537 cmd &= ~PCIM_CMD_INVEN; 538 } 539 if (IS_23XX(isp)) { 540 /* 541 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 542 */ 543 isp->isp_touched = 1; 544 545 } 546 pci_write_config(dev, PCIR_COMMAND, cmd, 1); 547 548 /* 549 * Make sure the Cache Line Size register is set sensibly. 550 */ 551 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 552 if (data != linesz) { 553 data = PCI_DFLT_LNSZ; 554 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 555 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 556 } 557 558 /* 559 * Make sure the Latency Timer is sane. 560 */ 561 data = pci_read_config(dev, PCIR_LATTIMER, 1); 562 if (data < PCI_DFLT_LTNCY) { 563 data = PCI_DFLT_LTNCY; 564 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 565 pci_write_config(dev, PCIR_LATTIMER, data, 1); 566 } 567 568 /* 569 * Make sure we've disabled the ROM. 570 */ 571 data = pci_read_config(dev, PCIR_ROMADDR, 4); 572 data &= ~1; 573 pci_write_config(dev, PCIR_ROMADDR, data, 4); 574 575 iqd = 0; 576 irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0, 577 1, RF_ACTIVE | RF_SHAREABLE); 578 if (irq == NULL) { 579 device_printf(dev, "could not allocate interrupt\n"); 580 goto bad; 581 } 582 583 tval = 0; 584 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 585 "fwload_disable", &tval) == 0 && tval != 0) { 586 isp->isp_confopts |= ISP_CFG_NORELOAD; 587 } 588 tval = 0; 589 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 590 "ignore_nvram", &tval) == 0 && tval != 0) { 591 isp->isp_confopts |= ISP_CFG_NONVRAM; 592 } 593 tval = 0; 594 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 595 "fullduplex", &tval) == 0 && tval != 0) { 596 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 597 } 598 #ifdef ISP_FW_CRASH_DUMP 599 tval = 0; 600 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 601 "fw_dump_enable", &tval) == 0 && tval != 0) { 602 size_t amt = 0; 603 if (IS_2200(isp)) { 604 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 605 } else if (IS_23XX(isp)) { 606 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 607 } 608 if (amt) { 609 FCPARAM(isp)->isp_dump_data = 610 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 611 } else { 612 device_printf(dev, 613 "f/w crash dumps not supported for this model\n"); 614 } 615 } 616 #endif 617 618 sptr = 0; 619 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 620 "topology", (const char **) &sptr) == 0 && sptr != 0) { 621 if (strcmp(sptr, "lport") == 0) { 622 isp->isp_confopts |= ISP_CFG_LPORT; 623 } else if (strcmp(sptr, "nport") == 0) { 624 isp->isp_confopts |= ISP_CFG_NPORT; 625 } else if (strcmp(sptr, "lport-only") == 0) { 626 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 627 } else if (strcmp(sptr, "nport-only") == 0) { 628 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 629 } 630 } 631 632 /* 633 * Because the resource_*_value functions can neither return 634 * 64 bit integer values, nor can they be directly coerced 635 * to interpret the right hand side of the assignment as 636 * you want them to interpret it, we have to force WWN 637 * hint replacement to specify WWN strings with a leading 638 * 'w' (e..g w50000000aaaa0001). Sigh. 639 */ 640 sptr = 0; 641 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 642 "portwwn", (const char **) &sptr); 643 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 644 char *eptr = 0; 645 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 646 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 647 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 648 isp->isp_osinfo.default_port_wwn = 0; 649 } else { 650 isp->isp_confopts |= ISP_CFG_OWNWWPN; 651 } 652 } 653 if (isp->isp_osinfo.default_port_wwn == 0) { 654 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 655 } 656 657 sptr = 0; 658 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 659 "nodewwn", (const char **) &sptr); 660 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 661 char *eptr = 0; 662 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 663 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 664 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 665 isp->isp_osinfo.default_node_wwn = 0; 666 } else { 667 isp->isp_confopts |= ISP_CFG_OWNWWNN; 668 } 669 } 670 if (isp->isp_osinfo.default_node_wwn == 0) { 671 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 672 } 673 674 isp->isp_osinfo.default_id = -1; 675 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 676 "iid", &tval) == 0) { 677 isp->isp_osinfo.default_id = tval; 678 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 679 } 680 if (isp->isp_osinfo.default_id == -1) { 681 if (IS_FC(isp)) { 682 isp->isp_osinfo.default_id = 109; 683 } else { 684 isp->isp_osinfo.default_id = 7; 685 } 686 } 687 688 isp_debug = 0; 689 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 690 "debug", &isp_debug); 691 692 /* Make sure the lock is set up. */ 693 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 694 locksetup++; 695 696 if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) { 697 device_printf(dev, "could not setup interrupt\n"); 698 goto bad; 699 } 700 701 /* 702 * Set up logging levels. 703 */ 704 if (isp_debug) { 705 isp->isp_dblev = isp_debug; 706 } else { 707 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 708 } 709 if (bootverbose) 710 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 711 712 /* 713 * Last minute checks... 714 */ 715 if (IS_2312(isp)) { 716 isp->isp_port = pci_get_function(dev); 717 } 718 719 /* 720 * Make sure we're in reset state. 721 */ 722 ISP_LOCK(isp); 723 isp_reset(isp); 724 if (isp->isp_state != ISP_RESETSTATE) { 725 ISP_UNLOCK(isp); 726 goto bad; 727 } 728 isp_init(isp); 729 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 730 isp_uninit(isp); 731 ISP_UNLOCK(isp); 732 goto bad; 733 } 734 isp_attach(isp); 735 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 736 isp_uninit(isp); 737 ISP_UNLOCK(isp); 738 goto bad; 739 } 740 /* 741 * XXXX: Here is where we might unload the f/w module 742 * XXXX: (or decrease the reference count to it). 743 */ 744 ISP_UNLOCK(isp); 745 return (0); 746 747 bad: 748 749 if (pcs && pcs->ih) { 750 (void) bus_teardown_intr(dev, irq, pcs->ih); 751 } 752 753 if (locksetup && isp) { 754 mtx_destroy(&isp->isp_osinfo.lock); 755 } 756 757 if (irq) { 758 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 759 } 760 761 762 if (regs) { 763 (void) bus_release_resource(dev, rtp, rgd, regs); 764 } 765 766 if (pcs) { 767 if (pcs->pci_isp.isp_param) 768 free(pcs->pci_isp.isp_param, M_DEVBUF); 769 free(pcs, M_DEVBUF); 770 } 771 772 /* 773 * XXXX: Here is where we might unload the f/w module 774 * XXXX: (or decrease the reference count to it). 775 */ 776 return (ENXIO); 777 } 778 779 static void 780 isp_pci_intr(void *arg) 781 { 782 struct ispsoftc *isp = arg; 783 u_int16_t isr, sema, mbox; 784 785 ISP_LOCK(isp); 786 isp->isp_intcnt++; 787 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 788 isp->isp_intbogus++; 789 } else { 790 int iok = isp->isp_osinfo.intsok; 791 isp->isp_osinfo.intsok = 0; 792 isp_intr(isp, isr, sema, mbox); 793 isp->isp_osinfo.intsok = iok; 794 } 795 ISP_UNLOCK(isp); 796 } 797 798 799 #define IspVirt2Off(a, x) \ 800 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 801 _BLK_REG_SHFT] + ((x) & 0xff)) 802 803 #define BXR2(pcs, off) \ 804 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 805 #define BXW2(pcs, off, v) \ 806 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 807 808 809 static INLINE int 810 isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp) 811 { 812 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 813 u_int16_t val0, val1; 814 int i = 0; 815 816 do { 817 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 818 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 819 } while (val0 != val1 && ++i < 1000); 820 if (val0 != val1) { 821 return (1); 822 } 823 *rp = val0; 824 return (0); 825 } 826 827 static int 828 isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp, 829 u_int16_t *semap, u_int16_t *mbp) 830 { 831 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 832 u_int16_t isr, sema; 833 834 if (IS_2100(isp)) { 835 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 836 return (0); 837 } 838 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 839 return (0); 840 } 841 } else { 842 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 843 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 844 } 845 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 846 isr &= INT_PENDING_MASK(isp); 847 sema &= BIU_SEMA_LOCK; 848 if (isr == 0 && sema == 0) { 849 return (0); 850 } 851 *isrp = isr; 852 if ((*semap = sema) != 0) { 853 if (IS_2100(isp)) { 854 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 855 return (0); 856 } 857 } else { 858 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 859 } 860 } 861 return (1); 862 } 863 864 static int 865 isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp, 866 u_int16_t *semap, u_int16_t *mbox0p) 867 { 868 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 869 u_int32_t r2hisr; 870 871 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 872 *isrp = 0; 873 return (0); 874 } 875 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 876 IspVirt2Off(pcs, BIU_R2HSTSLO)); 877 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 878 if ((r2hisr & BIU_R2HST_INTR) == 0) { 879 *isrp = 0; 880 return (0); 881 } 882 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 883 case ISPR2HST_ROM_MBX_OK: 884 case ISPR2HST_ROM_MBX_FAIL: 885 case ISPR2HST_MBX_OK: 886 case ISPR2HST_MBX_FAIL: 887 case ISPR2HST_ASYNC_EVENT: 888 *isrp = r2hisr & 0xffff; 889 *mbox0p = (r2hisr >> 16); 890 *semap = 1; 891 return (1); 892 case ISPR2HST_RIO_16: 893 *isrp = r2hisr & 0xffff; 894 *mbox0p = ASYNC_RIO1; 895 *semap = 1; 896 return (1); 897 case ISPR2HST_FPOST: 898 *isrp = r2hisr & 0xffff; 899 *mbox0p = ASYNC_CMD_CMPLT; 900 *semap = 1; 901 return (1); 902 case ISPR2HST_FPOST_CTIO: 903 *isrp = r2hisr & 0xffff; 904 *mbox0p = ASYNC_CTIO_DONE; 905 *semap = 1; 906 return (1); 907 case ISPR2HST_RSPQ_UPDATE: 908 *isrp = r2hisr & 0xffff; 909 *mbox0p = 0; 910 *semap = 0; 911 return (1); 912 default: 913 return (0); 914 } 915 } 916 917 static u_int16_t 918 isp_pci_rd_reg(struct ispsoftc *isp, int regoff) 919 { 920 u_int16_t rv; 921 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 922 int oldconf = 0; 923 924 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 925 /* 926 * We will assume that someone has paused the RISC processor. 927 */ 928 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 929 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 930 oldconf | BIU_PCI_CONF1_SXP); 931 } 932 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 933 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 934 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 935 } 936 return (rv); 937 } 938 939 static void 940 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val) 941 { 942 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 943 int oldconf = 0; 944 945 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 946 /* 947 * We will assume that someone has paused the RISC processor. 948 */ 949 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 950 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 951 oldconf | BIU_PCI_CONF1_SXP); 952 } 953 BXW2(pcs, IspVirt2Off(isp, regoff), val); 954 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 955 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 956 } 957 } 958 959 static u_int16_t 960 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff) 961 { 962 u_int16_t rv, oc = 0; 963 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 964 965 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 966 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 967 u_int16_t tc; 968 /* 969 * We will assume that someone has paused the RISC processor. 970 */ 971 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 972 tc = oc & ~BIU_PCI1080_CONF1_DMA; 973 if (regoff & SXP_BANK1_SELECT) 974 tc |= BIU_PCI1080_CONF1_SXP1; 975 else 976 tc |= BIU_PCI1080_CONF1_SXP0; 977 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 978 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 979 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 980 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 981 oc | BIU_PCI1080_CONF1_DMA); 982 } 983 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 984 if (oc) { 985 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 986 } 987 return (rv); 988 } 989 990 static void 991 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val) 992 { 993 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 994 int oc = 0; 995 996 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 997 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 998 u_int16_t tc; 999 /* 1000 * We will assume that someone has paused the RISC processor. 1001 */ 1002 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1003 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1004 if (regoff & SXP_BANK1_SELECT) 1005 tc |= BIU_PCI1080_CONF1_SXP1; 1006 else 1007 tc |= BIU_PCI1080_CONF1_SXP0; 1008 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1009 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1010 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1011 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1012 oc | BIU_PCI1080_CONF1_DMA); 1013 } 1014 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1015 if (oc) { 1016 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1017 } 1018 } 1019 1020 1021 struct imush { 1022 struct ispsoftc *isp; 1023 int error; 1024 }; 1025 1026 static void imc(void *, bus_dma_segment_t *, int, int); 1027 1028 static void 1029 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1030 { 1031 struct imush *imushp = (struct imush *) arg; 1032 if (error) { 1033 imushp->error = error; 1034 } else { 1035 struct ispsoftc *isp =imushp->isp; 1036 bus_addr_t addr = segs->ds_addr; 1037 1038 isp->isp_rquest_dma = addr; 1039 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1040 isp->isp_result_dma = addr; 1041 if (IS_FC(isp)) { 1042 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1043 FCPARAM(isp)->isp_scdma = addr; 1044 } 1045 } 1046 } 1047 1048 /* 1049 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1050 */ 1051 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1052 1053 static int 1054 isp_pci_mbxdma(struct ispsoftc *isp) 1055 { 1056 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1057 caddr_t base; 1058 u_int32_t len; 1059 int i, error, ns; 1060 bus_size_t bl; 1061 struct imush im; 1062 1063 /* 1064 * Already been here? If so, leave... 1065 */ 1066 if (isp->isp_rquest) { 1067 return (0); 1068 } 1069 1070 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1071 bl = BUS_SPACE_UNRESTRICTED; 1072 } else { 1073 bl = BUS_SPACE_MAXADDR_24BIT; 1074 } 1075 1076 ISP_UNLOCK(isp); 1077 if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR, 1078 BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 1079 ISP_NSEGS, bl, 0, &pcs->dmat)) { 1080 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1081 ISP_LOCK(isp); 1082 return(1); 1083 } 1084 1085 1086 len = sizeof (XS_T **) * isp->isp_maxcmds; 1087 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1088 if (isp->isp_xflist == NULL) { 1089 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1090 ISP_LOCK(isp); 1091 return (1); 1092 } 1093 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1094 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1095 if (pcs->dmaps == NULL) { 1096 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1097 free(isp->isp_xflist, M_DEVBUF); 1098 ISP_LOCK(isp); 1099 return (1); 1100 } 1101 1102 /* 1103 * Allocate and map the request, result queues, plus FC scratch area. 1104 */ 1105 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1106 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1107 if (IS_FC(isp)) { 1108 len += ISP2100_SCRLEN; 1109 } 1110 1111 ns = (len / PAGE_SIZE) + 1; 1112 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, 0, BUS_SPACE_MAXADDR, 1113 BUS_SPACE_MAXADDR, NULL, NULL, len, ns, bl, 0, &isp->isp_cdmat)) { 1114 isp_prt(isp, ISP_LOGERR, 1115 "cannot create a dma tag for control spaces"); 1116 free(pcs->dmaps, M_DEVBUF); 1117 free(isp->isp_xflist, M_DEVBUF); 1118 ISP_LOCK(isp); 1119 return (1); 1120 } 1121 1122 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1123 &isp->isp_cdmap) != 0) { 1124 isp_prt(isp, ISP_LOGERR, 1125 "cannot allocate %d bytes of CCB memory", len); 1126 bus_dma_tag_destroy(isp->isp_cdmat); 1127 free(isp->isp_xflist, M_DEVBUF); 1128 free(pcs->dmaps, M_DEVBUF); 1129 ISP_LOCK(isp); 1130 return (1); 1131 } 1132 1133 for (i = 0; i < isp->isp_maxcmds; i++) { 1134 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1135 if (error) { 1136 isp_prt(isp, ISP_LOGERR, 1137 "error %d creating per-cmd DMA maps", error); 1138 while (--i >= 0) { 1139 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1140 } 1141 goto bad; 1142 } 1143 } 1144 1145 im.isp = isp; 1146 im.error = 0; 1147 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1148 if (im.error) { 1149 isp_prt(isp, ISP_LOGERR, 1150 "error %d loading dma map for control areas", im.error); 1151 goto bad; 1152 } 1153 1154 isp->isp_rquest = base; 1155 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1156 isp->isp_result = base; 1157 if (IS_FC(isp)) { 1158 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1159 FCPARAM(isp)->isp_scratch = base; 1160 } 1161 ISP_LOCK(isp); 1162 return (0); 1163 1164 bad: 1165 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1166 bus_dma_tag_destroy(isp->isp_cdmat); 1167 free(isp->isp_xflist, M_DEVBUF); 1168 free(pcs->dmaps, M_DEVBUF); 1169 ISP_LOCK(isp); 1170 isp->isp_rquest = NULL; 1171 return (1); 1172 } 1173 1174 typedef struct { 1175 struct ispsoftc *isp; 1176 void *cmd_token; 1177 void *rq; 1178 u_int16_t *nxtip; 1179 u_int16_t optr; 1180 u_int error; 1181 } mush_t; 1182 1183 #define MUSHERR_NOQENTRIES -2 1184 1185 #ifdef ISP_TARGET_MODE 1186 /* 1187 * We need to handle DMA for target mode differently from initiator mode. 1188 * 1189 * DMA mapping and construction and submission of CTIO Request Entries 1190 * and rendevous for completion are very tightly coupled because we start 1191 * out by knowing (per platform) how much data we have to move, but we 1192 * don't know, up front, how many DMA mapping segments will have to be used 1193 * cover that data, so we don't know how many CTIO Request Entries we 1194 * will end up using. Further, for performance reasons we may want to 1195 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1196 * 1197 * The standard vector still goes through isp_pci_dmasetup, but the callback 1198 * for the DMA mapping routines comes here instead with the whole transfer 1199 * mapped and a pointer to a partially filled in already allocated request 1200 * queue entry. We finish the job. 1201 */ 1202 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1203 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1204 1205 #define STATUS_WITH_DATA 1 1206 1207 static void 1208 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1209 { 1210 mush_t *mp; 1211 struct ccb_scsiio *csio; 1212 struct ispsoftc *isp; 1213 struct isp_pcisoftc *pcs; 1214 bus_dmamap_t *dp; 1215 ct_entry_t *cto, *qe; 1216 u_int8_t scsi_status; 1217 u_int16_t curi, nxti, handle; 1218 u_int32_t sflags; 1219 int32_t resid; 1220 int nth_ctio, nctios, send_status; 1221 1222 mp = (mush_t *) arg; 1223 if (error) { 1224 mp->error = error; 1225 return; 1226 } 1227 1228 isp = mp->isp; 1229 csio = mp->cmd_token; 1230 cto = mp->rq; 1231 curi = isp->isp_reqidx; 1232 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1233 1234 cto->ct_xfrlen = 0; 1235 cto->ct_seg_count = 0; 1236 cto->ct_header.rqs_entry_count = 1; 1237 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1238 1239 if (nseg == 0) { 1240 cto->ct_header.rqs_seqno = 1; 1241 isp_prt(isp, ISP_LOGTDEBUG1, 1242 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1243 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1244 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1245 cto->ct_scsi_status, cto->ct_resid); 1246 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1247 isp_put_ctio(isp, cto, qe); 1248 return; 1249 } 1250 1251 nctios = nseg / ISP_RQDSEG; 1252 if (nseg % ISP_RQDSEG) { 1253 nctios++; 1254 } 1255 1256 /* 1257 * Save syshandle, and potentially any SCSI status, which we'll 1258 * reinsert on the last CTIO we're going to send. 1259 */ 1260 1261 handle = cto->ct_syshandle; 1262 cto->ct_syshandle = 0; 1263 cto->ct_header.rqs_seqno = 0; 1264 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1265 1266 if (send_status) { 1267 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1268 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1269 /* 1270 * Preserve residual. 1271 */ 1272 resid = cto->ct_resid; 1273 1274 /* 1275 * Save actual SCSI status. 1276 */ 1277 scsi_status = cto->ct_scsi_status; 1278 1279 #ifndef STATUS_WITH_DATA 1280 sflags |= CT_NO_DATA; 1281 /* 1282 * We can't do a status at the same time as a data CTIO, so 1283 * we need to synthesize an extra CTIO at this level. 1284 */ 1285 nctios++; 1286 #endif 1287 } else { 1288 sflags = scsi_status = resid = 0; 1289 } 1290 1291 cto->ct_resid = 0; 1292 cto->ct_scsi_status = 0; 1293 1294 pcs = (struct isp_pcisoftc *)isp; 1295 dp = &pcs->dmaps[isp_handle_index(handle)]; 1296 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1297 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1298 } else { 1299 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1300 } 1301 1302 nxti = *mp->nxtip; 1303 1304 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1305 int seglim; 1306 1307 seglim = nseg; 1308 if (seglim) { 1309 int seg; 1310 1311 if (seglim > ISP_RQDSEG) 1312 seglim = ISP_RQDSEG; 1313 1314 for (seg = 0; seg < seglim; seg++, nseg--) { 1315 /* 1316 * Unlike normal initiator commands, we don't 1317 * do any swizzling here. 1318 */ 1319 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1320 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1321 cto->ct_xfrlen += dm_segs->ds_len; 1322 dm_segs++; 1323 } 1324 cto->ct_seg_count = seg; 1325 } else { 1326 /* 1327 * This case should only happen when we're sending an 1328 * extra CTIO with final status. 1329 */ 1330 if (send_status == 0) { 1331 isp_prt(isp, ISP_LOGWARN, 1332 "tdma_mk ran out of segments"); 1333 mp->error = EINVAL; 1334 return; 1335 } 1336 } 1337 1338 /* 1339 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1340 * ct_tagtype, and ct_timeout have been carried over 1341 * unchanged from what our caller had set. 1342 * 1343 * The dataseg fields and the seg_count fields we just got 1344 * through setting. The data direction we've preserved all 1345 * along and only clear it if we're now sending status. 1346 */ 1347 1348 if (nth_ctio == nctios - 1) { 1349 /* 1350 * We're the last in a sequence of CTIOs, so mark 1351 * this CTIO and save the handle to the CCB such that 1352 * when this CTIO completes we can free dma resources 1353 * and do whatever else we need to do to finish the 1354 * rest of the command. We *don't* give this to the 1355 * firmware to work on- the caller will do that. 1356 */ 1357 1358 cto->ct_syshandle = handle; 1359 cto->ct_header.rqs_seqno = 1; 1360 1361 if (send_status) { 1362 cto->ct_scsi_status = scsi_status; 1363 cto->ct_flags |= sflags; 1364 cto->ct_resid = resid; 1365 } 1366 if (send_status) { 1367 isp_prt(isp, ISP_LOGTDEBUG1, 1368 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 1369 "scsi status %x resid %d", 1370 cto->ct_fwhandle, csio->ccb_h.target_lun, 1371 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 1372 cto->ct_scsi_status, cto->ct_resid); 1373 } else { 1374 isp_prt(isp, ISP_LOGTDEBUG1, 1375 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 1376 cto->ct_fwhandle, csio->ccb_h.target_lun, 1377 cto->ct_iid, cto->ct_tag_val, 1378 cto->ct_flags); 1379 } 1380 isp_put_ctio(isp, cto, qe); 1381 ISP_TDQE(isp, "last tdma_mk", curi, cto); 1382 if (nctios > 1) { 1383 MEMORYBARRIER(isp, SYNC_REQUEST, 1384 curi, QENTRY_LEN); 1385 } 1386 } else { 1387 ct_entry_t *oqe = qe; 1388 1389 /* 1390 * Make sure syshandle fields are clean 1391 */ 1392 cto->ct_syshandle = 0; 1393 cto->ct_header.rqs_seqno = 0; 1394 1395 isp_prt(isp, ISP_LOGTDEBUG1, 1396 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 1397 cto->ct_fwhandle, csio->ccb_h.target_lun, 1398 cto->ct_iid, cto->ct_flags); 1399 1400 /* 1401 * Get a new CTIO 1402 */ 1403 qe = (ct_entry_t *) 1404 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1405 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 1406 if (nxti == mp->optr) { 1407 isp_prt(isp, ISP_LOGTDEBUG0, 1408 "Queue Overflow in tdma_mk"); 1409 mp->error = MUSHERR_NOQENTRIES; 1410 return; 1411 } 1412 1413 /* 1414 * Now that we're done with the old CTIO, 1415 * flush it out to the request queue. 1416 */ 1417 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 1418 isp_put_ctio(isp, cto, oqe); 1419 if (nth_ctio != 0) { 1420 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 1421 QENTRY_LEN); 1422 } 1423 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 1424 1425 /* 1426 * Reset some fields in the CTIO so we can reuse 1427 * for the next one we'll flush to the request 1428 * queue. 1429 */ 1430 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1431 cto->ct_header.rqs_entry_count = 1; 1432 cto->ct_header.rqs_flags = 0; 1433 cto->ct_status = 0; 1434 cto->ct_scsi_status = 0; 1435 cto->ct_xfrlen = 0; 1436 cto->ct_resid = 0; 1437 cto->ct_seg_count = 0; 1438 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1439 } 1440 } 1441 *mp->nxtip = nxti; 1442 } 1443 1444 /* 1445 * We don't have to do multiple CTIOs here. Instead, we can just do 1446 * continuation segments as needed. This greatly simplifies the code 1447 * improves performance. 1448 */ 1449 1450 static void 1451 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1452 { 1453 mush_t *mp; 1454 struct ccb_scsiio *csio; 1455 struct ispsoftc *isp; 1456 ct2_entry_t *cto, *qe; 1457 u_int16_t curi, nxti; 1458 int segcnt; 1459 1460 mp = (mush_t *) arg; 1461 if (error) { 1462 mp->error = error; 1463 return; 1464 } 1465 1466 isp = mp->isp; 1467 csio = mp->cmd_token; 1468 cto = mp->rq; 1469 1470 curi = isp->isp_reqidx; 1471 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1472 1473 if (nseg == 0) { 1474 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1475 isp_prt(isp, ISP_LOGWARN, 1476 "dma2_tgt_fc, a status CTIO2 without MODE1 " 1477 "set (0x%x)", cto->ct_flags); 1478 mp->error = EINVAL; 1479 return; 1480 } 1481 /* 1482 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1483 * flags to NO DATA and clear relative offset flags. 1484 * We preserve the ct_resid and the response area. 1485 */ 1486 cto->ct_header.rqs_seqno = 1; 1487 cto->ct_seg_count = 0; 1488 cto->ct_reloff = 0; 1489 isp_prt(isp, ISP_LOGTDEBUG1, 1490 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1491 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 1492 cto->ct_iid, cto->ct_flags, cto->ct_status, 1493 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1494 isp_put_ctio2(isp, cto, qe); 1495 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 1496 return; 1497 } 1498 1499 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1500 isp_prt(isp, ISP_LOGERR, 1501 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 1502 "(0x%x)", cto->ct_flags); 1503 mp->error = EINVAL; 1504 return; 1505 } 1506 1507 1508 nxti = *mp->nxtip; 1509 1510 /* 1511 * Set up the CTIO2 data segments. 1512 */ 1513 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg; 1514 cto->ct_seg_count++, segcnt++) { 1515 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base = 1516 dm_segs[segcnt].ds_addr; 1517 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count = 1518 dm_segs[segcnt].ds_len; 1519 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1520 isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d", 1521 cto->ct_seg_count, dm_segs[segcnt].ds_addr, 1522 dm_segs[segcnt].ds_len); 1523 } 1524 1525 while (segcnt < nseg) { 1526 u_int16_t curip; 1527 int seg; 1528 ispcontreq_t local, *crq = &local, *qep; 1529 1530 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1531 curip = nxti; 1532 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 1533 if (nxti == mp->optr) { 1534 ISP_UNLOCK(isp); 1535 isp_prt(isp, ISP_LOGTDEBUG0, 1536 "tdma_mkfc: request queue overflow"); 1537 mp->error = MUSHERR_NOQENTRIES; 1538 return; 1539 } 1540 cto->ct_header.rqs_entry_count++; 1541 MEMZERO((void *)crq, sizeof (*crq)); 1542 crq->req_header.rqs_entry_count = 1; 1543 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1544 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG; 1545 segcnt++, seg++) { 1546 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr; 1547 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len; 1548 isp_prt(isp, ISP_LOGTDEBUG1, 1549 "isp_send_ctio2: ent%d[%d]%x:%u", 1550 cto->ct_header.rqs_entry_count-1, seg, 1551 dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len); 1552 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1553 cto->ct_seg_count++; 1554 } 1555 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 1556 isp_put_cont_req(isp, crq, qep); 1557 ISP_TDQE(isp, "cont entry", curi, qep); 1558 } 1559 1560 /* 1561 * No do final twiddling for the CTIO itself. 1562 */ 1563 cto->ct_header.rqs_seqno = 1; 1564 isp_prt(isp, ISP_LOGTDEBUG1, 1565 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 1566 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 1567 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 1568 cto->ct_resid); 1569 isp_put_ctio2(isp, cto, qe); 1570 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 1571 *mp->nxtip = nxti; 1572 } 1573 #endif 1574 1575 static void dma2(void *, bus_dma_segment_t *, int, int); 1576 1577 static void 1578 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1579 { 1580 mush_t *mp; 1581 struct ispsoftc *isp; 1582 struct ccb_scsiio *csio; 1583 struct isp_pcisoftc *pcs; 1584 bus_dmamap_t *dp; 1585 bus_dma_segment_t *eseg; 1586 ispreq_t *rq; 1587 int seglim, datalen; 1588 u_int16_t nxti; 1589 1590 mp = (mush_t *) arg; 1591 if (error) { 1592 mp->error = error; 1593 return; 1594 } 1595 1596 if (nseg < 1) { 1597 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1598 mp->error = EFAULT; 1599 return; 1600 } 1601 csio = mp->cmd_token; 1602 isp = mp->isp; 1603 rq = mp->rq; 1604 pcs = (struct isp_pcisoftc *)mp->isp; 1605 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1606 nxti = *mp->nxtip; 1607 1608 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1609 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1610 } else { 1611 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1612 } 1613 1614 datalen = XS_XFRLEN(csio); 1615 1616 /* 1617 * We're passed an initial partially filled in entry that 1618 * has most fields filled in except for data transfer 1619 * related values. 1620 * 1621 * Our job is to fill in the initial request queue entry and 1622 * then to start allocating and filling in continuation entries 1623 * until we've covered the entire transfer. 1624 */ 1625 1626 if (IS_FC(isp)) { 1627 seglim = ISP_RQDSEG_T2; 1628 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 1629 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1630 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1631 } else { 1632 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1633 } 1634 } else { 1635 if (csio->cdb_len > 12) { 1636 seglim = 0; 1637 } else { 1638 seglim = ISP_RQDSEG; 1639 } 1640 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1641 rq->req_flags |= REQFLAG_DATA_IN; 1642 } else { 1643 rq->req_flags |= REQFLAG_DATA_OUT; 1644 } 1645 } 1646 1647 eseg = dm_segs + nseg; 1648 1649 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1650 if (IS_FC(isp)) { 1651 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1652 rq2->req_dataseg[rq2->req_seg_count].ds_base = 1653 dm_segs->ds_addr; 1654 rq2->req_dataseg[rq2->req_seg_count].ds_count = 1655 dm_segs->ds_len; 1656 } else { 1657 rq->req_dataseg[rq->req_seg_count].ds_base = 1658 dm_segs->ds_addr; 1659 rq->req_dataseg[rq->req_seg_count].ds_count = 1660 dm_segs->ds_len; 1661 } 1662 datalen -= dm_segs->ds_len; 1663 rq->req_seg_count++; 1664 dm_segs++; 1665 } 1666 1667 while (datalen > 0 && dm_segs != eseg) { 1668 u_int16_t onxti; 1669 ispcontreq_t local, *crq = &local, *cqe; 1670 1671 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1672 onxti = nxti; 1673 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 1674 if (nxti == mp->optr) { 1675 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 1676 mp->error = MUSHERR_NOQENTRIES; 1677 return; 1678 } 1679 rq->req_header.rqs_entry_count++; 1680 MEMZERO((void *)crq, sizeof (*crq)); 1681 crq->req_header.rqs_entry_count = 1; 1682 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1683 1684 seglim = 0; 1685 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 1686 crq->req_dataseg[seglim].ds_base = 1687 dm_segs->ds_addr; 1688 crq->req_dataseg[seglim].ds_count = 1689 dm_segs->ds_len; 1690 rq->req_seg_count++; 1691 dm_segs++; 1692 seglim++; 1693 datalen -= dm_segs->ds_len; 1694 } 1695 isp_put_cont_req(isp, crq, cqe); 1696 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1697 } 1698 *mp->nxtip = nxti; 1699 } 1700 1701 static int 1702 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 1703 u_int16_t *nxtip, u_int16_t optr) 1704 { 1705 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1706 ispreq_t *qep; 1707 bus_dmamap_t *dp = NULL; 1708 mush_t mush, *mp; 1709 void (*eptr)(void *, bus_dma_segment_t *, int, int); 1710 1711 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 1712 #ifdef ISP_TARGET_MODE 1713 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1714 if (IS_FC(isp)) { 1715 eptr = tdma_mkfc; 1716 } else { 1717 eptr = tdma_mk; 1718 } 1719 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1720 (csio->dxfer_len == 0)) { 1721 mp = &mush; 1722 mp->isp = isp; 1723 mp->cmd_token = csio; 1724 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 1725 mp->nxtip = nxtip; 1726 mp->optr = optr; 1727 mp->error = 0; 1728 (*eptr)(mp, NULL, 0, 0); 1729 goto mbxsync; 1730 } 1731 } else 1732 #endif 1733 eptr = dma2; 1734 1735 1736 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1737 (csio->dxfer_len == 0)) { 1738 rq->req_seg_count = 1; 1739 goto mbxsync; 1740 } 1741 1742 /* 1743 * Do a virtual grapevine step to collect info for 1744 * the callback dma allocation that we have to use... 1745 */ 1746 mp = &mush; 1747 mp->isp = isp; 1748 mp->cmd_token = csio; 1749 mp->rq = rq; 1750 mp->nxtip = nxtip; 1751 mp->optr = optr; 1752 mp->error = 0; 1753 1754 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1755 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1756 int error, s; 1757 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1758 s = splsoftvm(); 1759 error = bus_dmamap_load(pcs->dmat, *dp, 1760 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 1761 if (error == EINPROGRESS) { 1762 bus_dmamap_unload(pcs->dmat, *dp); 1763 mp->error = EINVAL; 1764 isp_prt(isp, ISP_LOGERR, 1765 "deferred dma allocation not supported"); 1766 } else if (error && mp->error == 0) { 1767 #ifdef DIAGNOSTIC 1768 isp_prt(isp, ISP_LOGERR, 1769 "error %d in dma mapping code", error); 1770 #endif 1771 mp->error = error; 1772 } 1773 splx(s); 1774 } else { 1775 /* Pointer to physical buffer */ 1776 struct bus_dma_segment seg; 1777 seg.ds_addr = (bus_addr_t)csio->data_ptr; 1778 seg.ds_len = csio->dxfer_len; 1779 (*eptr)(mp, &seg, 1, 0); 1780 } 1781 } else { 1782 struct bus_dma_segment *segs; 1783 1784 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1785 isp_prt(isp, ISP_LOGERR, 1786 "Physical segment pointers unsupported"); 1787 mp->error = EINVAL; 1788 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1789 isp_prt(isp, ISP_LOGERR, 1790 "Virtual segment addresses unsupported"); 1791 mp->error = EINVAL; 1792 } else { 1793 /* Just use the segments provided */ 1794 segs = (struct bus_dma_segment *) csio->data_ptr; 1795 (*eptr)(mp, segs, csio->sglist_cnt, 0); 1796 } 1797 } 1798 if (mp->error) { 1799 int retval = CMD_COMPLETE; 1800 if (mp->error == MUSHERR_NOQENTRIES) { 1801 retval = CMD_EAGAIN; 1802 } else if (mp->error == EFBIG) { 1803 XS_SETERR(csio, CAM_REQ_TOO_BIG); 1804 } else if (mp->error == EINVAL) { 1805 XS_SETERR(csio, CAM_REQ_INVALID); 1806 } else { 1807 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 1808 } 1809 return (retval); 1810 } 1811 mbxsync: 1812 switch (rq->req_header.rqs_entry_type) { 1813 case RQSTYPE_REQUEST: 1814 isp_put_request(isp, rq, qep); 1815 break; 1816 case RQSTYPE_CMDONLY: 1817 isp_put_extended_request(isp, (ispextreq_t *)rq, 1818 (ispextreq_t *)qep); 1819 break; 1820 case RQSTYPE_T2RQS: 1821 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 1822 break; 1823 } 1824 return (CMD_QUEUED); 1825 } 1826 1827 static void 1828 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle) 1829 { 1830 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1831 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 1832 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1833 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 1834 } else { 1835 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 1836 } 1837 bus_dmamap_unload(pcs->dmat, *dp); 1838 } 1839 1840 1841 static void 1842 isp_pci_reset1(struct ispsoftc *isp) 1843 { 1844 /* Make sure the BIOS is disabled */ 1845 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 1846 /* and enable interrupts */ 1847 ENABLE_INTS(isp); 1848 } 1849 1850 static void 1851 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg) 1852 { 1853 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1854 if (msg) 1855 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 1856 else 1857 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 1858 if (IS_SCSI(isp)) 1859 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 1860 else 1861 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 1862 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 1863 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 1864 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 1865 1866 1867 if (IS_SCSI(isp)) { 1868 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 1869 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 1870 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 1871 ISP_READ(isp, CDMA_FIFO_STS)); 1872 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 1873 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 1874 ISP_READ(isp, DDMA_FIFO_STS)); 1875 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 1876 ISP_READ(isp, SXP_INTERRUPT), 1877 ISP_READ(isp, SXP_GROSS_ERR), 1878 ISP_READ(isp, SXP_PINS_CTRL)); 1879 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 1880 } 1881 printf(" mbox regs: %x %x %x %x %x\n", 1882 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 1883 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 1884 ISP_READ(isp, OUTMAILBOX4)); 1885 printf(" PCI Status Command/Status=%x\n", 1886 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 1887 } 1888