1 /*- 2 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 1997-2006 by Matthew Jacob 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice immediately at the beginning of the file, without modification, 13 * this list of conditions, and the following disclaimer. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/module.h> 38 #include <sys/bus.h> 39 #include <sys/stdint.h> 40 41 #include <dev/pci/pcireg.h> 42 #include <dev/pci/pcivar.h> 43 44 #include <machine/bus.h> 45 #include <machine/resource.h> 46 #include <sys/rman.h> 47 #include <sys/malloc.h> 48 49 #ifdef ISP_TARGET_MODE 50 #ifdef PAE 51 #error "PAE and ISP_TARGET_MODE not supported yet" 52 #endif 53 #endif 54 55 #include <dev/isp/isp_freebsd.h> 56 57 static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int); 58 static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t); 59 static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int); 60 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t); 61 static int 62 isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 63 static int 64 isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 65 static int isp_pci_mbxdma(struct ispsoftc *); 66 static int 67 isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t); 68 static void 69 isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t); 70 71 static void isp_pci_reset1(struct ispsoftc *); 72 static void isp_pci_dumpregs(struct ispsoftc *, const char *); 73 74 static struct ispmdvec mdvec = { 75 isp_pci_rd_isr, 76 isp_pci_rd_reg, 77 isp_pci_wr_reg, 78 isp_pci_mbxdma, 79 isp_pci_dmasetup, 80 isp_pci_dmateardown, 81 NULL, 82 isp_pci_reset1, 83 isp_pci_dumpregs, 84 NULL, 85 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 86 }; 87 88 static struct ispmdvec mdvec_1080 = { 89 isp_pci_rd_isr, 90 isp_pci_rd_reg_1080, 91 isp_pci_wr_reg_1080, 92 isp_pci_mbxdma, 93 isp_pci_dmasetup, 94 isp_pci_dmateardown, 95 NULL, 96 isp_pci_reset1, 97 isp_pci_dumpregs, 98 NULL, 99 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 100 }; 101 102 static struct ispmdvec mdvec_12160 = { 103 isp_pci_rd_isr, 104 isp_pci_rd_reg_1080, 105 isp_pci_wr_reg_1080, 106 isp_pci_mbxdma, 107 isp_pci_dmasetup, 108 isp_pci_dmateardown, 109 NULL, 110 isp_pci_reset1, 111 isp_pci_dumpregs, 112 NULL, 113 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 114 }; 115 116 static struct ispmdvec mdvec_2100 = { 117 isp_pci_rd_isr, 118 isp_pci_rd_reg, 119 isp_pci_wr_reg, 120 isp_pci_mbxdma, 121 isp_pci_dmasetup, 122 isp_pci_dmateardown, 123 NULL, 124 isp_pci_reset1, 125 isp_pci_dumpregs 126 }; 127 128 static struct ispmdvec mdvec_2200 = { 129 isp_pci_rd_isr, 130 isp_pci_rd_reg, 131 isp_pci_wr_reg, 132 isp_pci_mbxdma, 133 isp_pci_dmasetup, 134 isp_pci_dmateardown, 135 NULL, 136 isp_pci_reset1, 137 isp_pci_dumpregs 138 }; 139 140 static struct ispmdvec mdvec_2300 = { 141 isp_pci_rd_isr_2300, 142 isp_pci_rd_reg, 143 isp_pci_wr_reg, 144 isp_pci_mbxdma, 145 isp_pci_dmasetup, 146 isp_pci_dmateardown, 147 NULL, 148 isp_pci_reset1, 149 isp_pci_dumpregs 150 }; 151 152 #ifndef PCIM_CMD_INVEN 153 #define PCIM_CMD_INVEN 0x10 154 #endif 155 #ifndef PCIM_CMD_BUSMASTEREN 156 #define PCIM_CMD_BUSMASTEREN 0x0004 157 #endif 158 #ifndef PCIM_CMD_PERRESPEN 159 #define PCIM_CMD_PERRESPEN 0x0040 160 #endif 161 #ifndef PCIM_CMD_SEREN 162 #define PCIM_CMD_SEREN 0x0100 163 #endif 164 165 #ifndef PCIR_COMMAND 166 #define PCIR_COMMAND 0x04 167 #endif 168 169 #ifndef PCIR_CACHELNSZ 170 #define PCIR_CACHELNSZ 0x0c 171 #endif 172 173 #ifndef PCIR_LATTIMER 174 #define PCIR_LATTIMER 0x0d 175 #endif 176 177 #ifndef PCIR_ROMADDR 178 #define PCIR_ROMADDR 0x30 179 #endif 180 181 #ifndef PCI_VENDOR_QLOGIC 182 #define PCI_VENDOR_QLOGIC 0x1077 183 #endif 184 185 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 186 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 187 #endif 188 189 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 190 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 191 #endif 192 193 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 194 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 195 #endif 196 197 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 198 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 199 #endif 200 201 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 202 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 203 #endif 204 205 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 206 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 207 #endif 208 209 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 210 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 211 #endif 212 213 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 214 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 215 #endif 216 217 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 218 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 219 #endif 220 221 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 222 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 223 #endif 224 225 #ifndef PCI_PRODUCT_QLOGIC_ISP2322 226 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 227 #endif 228 229 #ifndef PCI_PRODUCT_QLOGIC_ISP2422 230 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 231 #endif 232 233 #ifndef PCI_PRODUCT_QLOGIC_ISP6312 234 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 235 #endif 236 237 #define PCI_QLOGIC_ISP1020 \ 238 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 239 240 #define PCI_QLOGIC_ISP1080 \ 241 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 242 243 #define PCI_QLOGIC_ISP10160 \ 244 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 245 246 #define PCI_QLOGIC_ISP12160 \ 247 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 248 249 #define PCI_QLOGIC_ISP1240 \ 250 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 251 252 #define PCI_QLOGIC_ISP1280 \ 253 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 254 255 #define PCI_QLOGIC_ISP2100 \ 256 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 257 258 #define PCI_QLOGIC_ISP2200 \ 259 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 260 261 #define PCI_QLOGIC_ISP2300 \ 262 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 263 264 #define PCI_QLOGIC_ISP2312 \ 265 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 266 267 #define PCI_QLOGIC_ISP2322 \ 268 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 269 270 #define PCI_QLOGIC_ISP2422 \ 271 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 272 273 #define PCI_QLOGIC_ISP6312 \ 274 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 275 276 /* 277 * Odd case for some AMI raid cards... We need to *not* attach to this. 278 */ 279 #define AMI_RAID_SUBVENDOR_ID 0x101e 280 281 #define IO_MAP_REG 0x10 282 #define MEM_MAP_REG 0x14 283 284 #define PCI_DFLT_LTNCY 0x40 285 #define PCI_DFLT_LNSZ 0x10 286 287 static int isp_pci_probe (device_t); 288 static int isp_pci_attach (device_t); 289 290 291 struct isp_pcisoftc { 292 struct ispsoftc pci_isp; 293 device_t pci_dev; 294 struct resource * pci_reg; 295 bus_space_tag_t pci_st; 296 bus_space_handle_t pci_sh; 297 void * ih; 298 int16_t pci_poff[_NREG_BLKS]; 299 bus_dma_tag_t dmat; 300 bus_dmamap_t *dmaps; 301 }; 302 extern ispfwfunc *isp_get_firmware_p; 303 304 static device_method_t isp_pci_methods[] = { 305 /* Device interface */ 306 DEVMETHOD(device_probe, isp_pci_probe), 307 DEVMETHOD(device_attach, isp_pci_attach), 308 { 0, 0 } 309 }; 310 static void isp_pci_intr(void *); 311 312 static driver_t isp_pci_driver = { 313 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 314 }; 315 static devclass_t isp_devclass; 316 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 317 318 static int 319 isp_pci_probe(device_t dev) 320 { 321 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 322 case PCI_QLOGIC_ISP1020: 323 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 324 break; 325 case PCI_QLOGIC_ISP1080: 326 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 327 break; 328 case PCI_QLOGIC_ISP1240: 329 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 330 break; 331 case PCI_QLOGIC_ISP1280: 332 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 333 break; 334 case PCI_QLOGIC_ISP10160: 335 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 336 break; 337 case PCI_QLOGIC_ISP12160: 338 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 339 return (ENXIO); 340 } 341 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 342 break; 343 case PCI_QLOGIC_ISP2100: 344 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 345 break; 346 case PCI_QLOGIC_ISP2200: 347 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 348 break; 349 case PCI_QLOGIC_ISP2300: 350 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 351 break; 352 case PCI_QLOGIC_ISP2312: 353 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 354 break; 355 case PCI_QLOGIC_ISP2322: 356 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 357 break; 358 case PCI_QLOGIC_ISP2422: 359 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 360 break; 361 case PCI_QLOGIC_ISP6312: 362 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 363 break; 364 default: 365 return (ENXIO); 366 } 367 if (isp_announced == 0 && bootverbose) { 368 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 369 "Core Version %d.%d\n", 370 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 371 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 372 isp_announced++; 373 } 374 /* 375 * XXXX: Here is where we might load the f/w module 376 * XXXX: (or increase a reference count to it). 377 */ 378 return (BUS_PROBE_DEFAULT); 379 } 380 381 static int 382 isp_pci_attach(device_t dev) 383 { 384 struct resource *regs, *irq; 385 int tval, rtp, rgd, iqd, m1, m2, isp_debug, role; 386 u_int32_t data, cmd, linesz, psize, basetype; 387 struct isp_pcisoftc *pcs; 388 struct ispsoftc *isp = NULL; 389 struct ispmdvec *mdvp; 390 const char *sptr; 391 int locksetup = 0; 392 393 /* 394 * Figure out if we're supposed to skip this one. 395 */ 396 397 tval = 0; 398 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 399 "disable", &tval) == 0 && tval) { 400 device_printf(dev, "device is disabled\n"); 401 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 402 return (0); 403 } 404 405 role = -1; 406 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 407 "role", &role) == 0 && role != -1) { 408 role &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 409 device_printf(dev, "setting role to 0x%x\n", role); 410 } else { 411 #ifdef ISP_TARGET_MODE 412 role = ISP_ROLE_TARGET; 413 #else 414 role = ISP_DEFAULT_ROLES; 415 #endif 416 } 417 418 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); 419 if (pcs == NULL) { 420 device_printf(dev, "cannot allocate softc\n"); 421 return (ENOMEM); 422 } 423 424 /* 425 * Which we should try first - memory mapping or i/o mapping? 426 * 427 * We used to try memory first followed by i/o on alpha, otherwise 428 * the reverse, but we should just try memory first all the time now. 429 */ 430 m1 = PCIM_CMD_MEMEN; 431 m2 = PCIM_CMD_PORTEN; 432 433 tval = 0; 434 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 435 "prefer_iomap", &tval) == 0 && tval != 0) { 436 m1 = PCIM_CMD_PORTEN; 437 m2 = PCIM_CMD_MEMEN; 438 } 439 tval = 0; 440 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 441 "prefer_memmap", &tval) == 0 && tval != 0) { 442 m1 = PCIM_CMD_MEMEN; 443 m2 = PCIM_CMD_PORTEN; 444 } 445 446 linesz = PCI_DFLT_LNSZ; 447 irq = regs = NULL; 448 rgd = rtp = iqd = 0; 449 450 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 451 if (cmd & m1) { 452 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 453 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 454 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 455 } 456 if (regs == NULL && (cmd & m2)) { 457 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 458 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 459 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 460 } 461 if (regs == NULL) { 462 device_printf(dev, "unable to map any ports\n"); 463 goto bad; 464 } 465 if (bootverbose) 466 device_printf(dev, "using %s space register mapping\n", 467 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 468 pcs->pci_dev = dev; 469 pcs->pci_reg = regs; 470 pcs->pci_st = rman_get_bustag(regs); 471 pcs->pci_sh = rman_get_bushandle(regs); 472 473 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 474 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 475 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 476 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 477 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 478 mdvp = &mdvec; 479 basetype = ISP_HA_SCSI_UNKNOWN; 480 psize = sizeof (sdparam); 481 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 482 mdvp = &mdvec; 483 basetype = ISP_HA_SCSI_UNKNOWN; 484 psize = sizeof (sdparam); 485 } 486 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 487 mdvp = &mdvec_1080; 488 basetype = ISP_HA_SCSI_1080; 489 psize = sizeof (sdparam); 490 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 491 ISP1080_DMA_REGS_OFF; 492 } 493 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 494 mdvp = &mdvec_1080; 495 basetype = ISP_HA_SCSI_1240; 496 psize = 2 * sizeof (sdparam); 497 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 498 ISP1080_DMA_REGS_OFF; 499 } 500 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 501 mdvp = &mdvec_1080; 502 basetype = ISP_HA_SCSI_1280; 503 psize = 2 * sizeof (sdparam); 504 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 505 ISP1080_DMA_REGS_OFF; 506 } 507 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 508 mdvp = &mdvec_12160; 509 basetype = ISP_HA_SCSI_10160; 510 psize = sizeof (sdparam); 511 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 512 ISP1080_DMA_REGS_OFF; 513 } 514 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 515 mdvp = &mdvec_12160; 516 basetype = ISP_HA_SCSI_12160; 517 psize = 2 * sizeof (sdparam); 518 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 519 ISP1080_DMA_REGS_OFF; 520 } 521 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 522 mdvp = &mdvec_2100; 523 basetype = ISP_HA_FC_2100; 524 psize = sizeof (fcparam); 525 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 526 PCI_MBOX_REGS2100_OFF; 527 if (pci_get_revid(dev) < 3) { 528 /* 529 * XXX: Need to get the actual revision 530 * XXX: number of the 2100 FB. At any rate, 531 * XXX: lower cache line size for early revision 532 * XXX; boards. 533 */ 534 linesz = 1; 535 } 536 } 537 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 538 mdvp = &mdvec_2200; 539 basetype = ISP_HA_FC_2200; 540 psize = sizeof (fcparam); 541 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 542 PCI_MBOX_REGS2100_OFF; 543 } 544 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 545 mdvp = &mdvec_2300; 546 basetype = ISP_HA_FC_2300; 547 psize = sizeof (fcparam); 548 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 549 PCI_MBOX_REGS2300_OFF; 550 } 551 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 552 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 553 mdvp = &mdvec_2300; 554 basetype = ISP_HA_FC_2312; 555 psize = sizeof (fcparam); 556 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 557 PCI_MBOX_REGS2300_OFF; 558 } 559 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322) { 560 mdvp = &mdvec_2300; 561 basetype = ISP_HA_FC_2322; 562 psize = sizeof (fcparam); 563 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 564 PCI_MBOX_REGS2300_OFF; 565 } 566 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) { 567 mdvp = &mdvec_2300; 568 basetype = ISP_HA_FC_2422; 569 psize = sizeof (fcparam); 570 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 571 PCI_MBOX_REGS2300_OFF; 572 } 573 isp = &pcs->pci_isp; 574 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 575 if (isp->isp_param == NULL) { 576 device_printf(dev, "cannot allocate parameter data\n"); 577 goto bad; 578 } 579 isp->isp_mdvec = mdvp; 580 isp->isp_type = basetype; 581 isp->isp_revision = pci_get_revid(dev); 582 isp->isp_role = role; 583 isp->isp_dev = dev; 584 585 /* 586 * Try and find firmware for this device. 587 */ 588 589 /* 590 * Don't even attempt to get firmware for the 2322/2422 (yet) 591 */ 592 if (IS_2322(isp) == 0 && IS_24XX(isp) == 0 && isp_get_firmware_p) { 593 int device = (int) pci_get_device(dev); 594 #ifdef ISP_TARGET_MODE 595 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 596 #else 597 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 598 #endif 599 } 600 601 /* 602 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 603 * are set. 604 */ 605 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 606 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 607 if (IS_2300(isp)) { /* per QLogic errata */ 608 cmd &= ~PCIM_CMD_INVEN; 609 } 610 if (IS_23XX(isp)) { 611 /* 612 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 613 */ 614 isp->isp_touched = 1; 615 616 } 617 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 618 619 /* 620 * Make sure the Cache Line Size register is set sensibly. 621 */ 622 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 623 if (data != linesz) { 624 data = PCI_DFLT_LNSZ; 625 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 626 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 627 } 628 629 /* 630 * Make sure the Latency Timer is sane. 631 */ 632 data = pci_read_config(dev, PCIR_LATTIMER, 1); 633 if (data < PCI_DFLT_LTNCY) { 634 data = PCI_DFLT_LTNCY; 635 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 636 pci_write_config(dev, PCIR_LATTIMER, data, 1); 637 } 638 639 /* 640 * Make sure we've disabled the ROM. 641 */ 642 data = pci_read_config(dev, PCIR_ROMADDR, 4); 643 data &= ~1; 644 pci_write_config(dev, PCIR_ROMADDR, data, 4); 645 646 iqd = 0; 647 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 648 RF_ACTIVE | RF_SHAREABLE); 649 if (irq == NULL) { 650 device_printf(dev, "could not allocate interrupt\n"); 651 goto bad; 652 } 653 654 tval = 0; 655 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 656 "fwload_disable", &tval) == 0 && tval != 0) { 657 isp->isp_confopts |= ISP_CFG_NORELOAD; 658 } 659 tval = 0; 660 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 661 "ignore_nvram", &tval) == 0 && tval != 0) { 662 isp->isp_confopts |= ISP_CFG_NONVRAM; 663 } 664 tval = 0; 665 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 666 "fullduplex", &tval) == 0 && tval != 0) { 667 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 668 } 669 #ifdef ISP_FW_CRASH_DUMP 670 tval = 0; 671 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 672 "fw_dump_enable", &tval) == 0 && tval != 0) { 673 size_t amt = 0; 674 if (IS_2200(isp)) { 675 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 676 } else if (IS_23XX(isp)) { 677 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 678 } 679 if (amt) { 680 FCPARAM(isp)->isp_dump_data = 681 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 682 } else { 683 device_printf(dev, 684 "f/w crash dumps not supported for this model\n"); 685 } 686 } 687 #endif 688 689 sptr = 0; 690 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 691 "topology", (const char **) &sptr) == 0 && sptr != 0) { 692 if (strcmp(sptr, "lport") == 0) { 693 isp->isp_confopts |= ISP_CFG_LPORT; 694 } else if (strcmp(sptr, "nport") == 0) { 695 isp->isp_confopts |= ISP_CFG_NPORT; 696 } else if (strcmp(sptr, "lport-only") == 0) { 697 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 698 } else if (strcmp(sptr, "nport-only") == 0) { 699 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 700 } 701 } 702 703 /* 704 * Because the resource_*_value functions can neither return 705 * 64 bit integer values, nor can they be directly coerced 706 * to interpret the right hand side of the assignment as 707 * you want them to interpret it, we have to force WWN 708 * hint replacement to specify WWN strings with a leading 709 * 'w' (e..g w50000000aaaa0001). Sigh. 710 */ 711 sptr = 0; 712 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 713 "portwwn", (const char **) &sptr); 714 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 715 char *eptr = 0; 716 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 717 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 718 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 719 isp->isp_osinfo.default_port_wwn = 0; 720 } else { 721 isp->isp_confopts |= ISP_CFG_OWNWWPN; 722 } 723 } 724 if (isp->isp_osinfo.default_port_wwn == 0) { 725 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 726 } 727 728 sptr = 0; 729 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 730 "nodewwn", (const char **) &sptr); 731 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 732 char *eptr = 0; 733 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 734 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 735 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 736 isp->isp_osinfo.default_node_wwn = 0; 737 } else { 738 isp->isp_confopts |= ISP_CFG_OWNWWNN; 739 } 740 } 741 if (isp->isp_osinfo.default_node_wwn == 0) { 742 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 743 } 744 745 isp->isp_osinfo.default_id = -1; 746 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 747 "iid", &tval) == 0) { 748 isp->isp_osinfo.default_id = tval; 749 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 750 } 751 if (isp->isp_osinfo.default_id == -1) { 752 if (IS_FC(isp)) { 753 isp->isp_osinfo.default_id = 109; 754 } else { 755 isp->isp_osinfo.default_id = 7; 756 } 757 } 758 759 isp_debug = 0; 760 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 761 "debug", &isp_debug); 762 763 /* Make sure the lock is set up. */ 764 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 765 locksetup++; 766 767 if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) { 768 device_printf(dev, "could not setup interrupt\n"); 769 goto bad; 770 } 771 772 /* 773 * Set up logging levels. 774 */ 775 if (isp_debug) { 776 isp->isp_dblev = isp_debug; 777 } else { 778 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 779 } 780 if (bootverbose) 781 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 782 783 /* 784 * Last minute checks... 785 */ 786 if (IS_23XX(isp)) { 787 isp->isp_port = pci_get_function(dev); 788 } 789 790 /* 791 * Make sure we're in reset state. 792 */ 793 ISP_LOCK(isp); 794 isp_reset(isp); 795 if (isp->isp_state != ISP_RESETSTATE) { 796 ISP_UNLOCK(isp); 797 goto bad; 798 } 799 isp_init(isp); 800 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 801 isp_uninit(isp); 802 ISP_UNLOCK(isp); 803 goto bad; 804 } 805 isp_attach(isp); 806 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 807 isp_uninit(isp); 808 ISP_UNLOCK(isp); 809 goto bad; 810 } 811 /* 812 * XXXX: Here is where we might unload the f/w module 813 * XXXX: (or decrease the reference count to it). 814 */ 815 ISP_UNLOCK(isp); 816 return (0); 817 818 bad: 819 820 if (pcs && pcs->ih) { 821 (void) bus_teardown_intr(dev, irq, pcs->ih); 822 } 823 824 if (locksetup && isp) { 825 mtx_destroy(&isp->isp_osinfo.lock); 826 } 827 828 if (irq) { 829 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 830 } 831 832 833 if (regs) { 834 (void) bus_release_resource(dev, rtp, rgd, regs); 835 } 836 837 if (pcs) { 838 if (pcs->pci_isp.isp_param) 839 free(pcs->pci_isp.isp_param, M_DEVBUF); 840 free(pcs, M_DEVBUF); 841 } 842 843 /* 844 * XXXX: Here is where we might unload the f/w module 845 * XXXX: (or decrease the reference count to it). 846 */ 847 return (ENXIO); 848 } 849 850 static void 851 isp_pci_intr(void *arg) 852 { 853 struct ispsoftc *isp = arg; 854 u_int16_t isr, sema, mbox; 855 856 ISP_LOCK(isp); 857 isp->isp_intcnt++; 858 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 859 isp->isp_intbogus++; 860 } else { 861 int iok = isp->isp_osinfo.intsok; 862 isp->isp_osinfo.intsok = 0; 863 isp_intr(isp, isr, sema, mbox); 864 isp->isp_osinfo.intsok = iok; 865 } 866 ISP_UNLOCK(isp); 867 } 868 869 870 #define IspVirt2Off(a, x) \ 871 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 872 _BLK_REG_SHFT] + ((x) & 0xff)) 873 874 #define BXR2(pcs, off) \ 875 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 876 #define BXW2(pcs, off, v) \ 877 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 878 879 880 static INLINE int 881 isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp) 882 { 883 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 884 u_int16_t val0, val1; 885 int i = 0; 886 887 do { 888 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 889 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 890 } while (val0 != val1 && ++i < 1000); 891 if (val0 != val1) { 892 return (1); 893 } 894 *rp = val0; 895 return (0); 896 } 897 898 static int 899 isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp, 900 u_int16_t *semap, u_int16_t *mbp) 901 { 902 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 903 u_int16_t isr, sema; 904 905 if (IS_2100(isp)) { 906 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 907 return (0); 908 } 909 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 910 return (0); 911 } 912 } else { 913 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 914 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 915 } 916 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 917 isr &= INT_PENDING_MASK(isp); 918 sema &= BIU_SEMA_LOCK; 919 if (isr == 0 && sema == 0) { 920 return (0); 921 } 922 *isrp = isr; 923 if ((*semap = sema) != 0) { 924 if (IS_2100(isp)) { 925 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 926 return (0); 927 } 928 } else { 929 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 930 } 931 } 932 return (1); 933 } 934 935 static int 936 isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp, 937 u_int16_t *semap, u_int16_t *mbox0p) 938 { 939 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 940 u_int32_t r2hisr; 941 942 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 943 *isrp = 0; 944 return (0); 945 } 946 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 947 IspVirt2Off(pcs, BIU_R2HSTSLO)); 948 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 949 if ((r2hisr & BIU_R2HST_INTR) == 0) { 950 *isrp = 0; 951 return (0); 952 } 953 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 954 case ISPR2HST_ROM_MBX_OK: 955 case ISPR2HST_ROM_MBX_FAIL: 956 case ISPR2HST_MBX_OK: 957 case ISPR2HST_MBX_FAIL: 958 case ISPR2HST_ASYNC_EVENT: 959 *isrp = r2hisr & 0xffff; 960 *mbox0p = (r2hisr >> 16); 961 *semap = 1; 962 return (1); 963 case ISPR2HST_RIO_16: 964 *isrp = r2hisr & 0xffff; 965 *mbox0p = ASYNC_RIO1; 966 *semap = 1; 967 return (1); 968 case ISPR2HST_FPOST: 969 *isrp = r2hisr & 0xffff; 970 *mbox0p = ASYNC_CMD_CMPLT; 971 *semap = 1; 972 return (1); 973 case ISPR2HST_FPOST_CTIO: 974 *isrp = r2hisr & 0xffff; 975 *mbox0p = ASYNC_CTIO_DONE; 976 *semap = 1; 977 return (1); 978 case ISPR2HST_RSPQ_UPDATE: 979 *isrp = r2hisr & 0xffff; 980 *mbox0p = 0; 981 *semap = 0; 982 return (1); 983 default: 984 return (0); 985 } 986 } 987 988 static u_int16_t 989 isp_pci_rd_reg(struct ispsoftc *isp, int regoff) 990 { 991 u_int16_t rv; 992 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 993 int oldconf = 0; 994 995 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 996 /* 997 * We will assume that someone has paused the RISC processor. 998 */ 999 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1000 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1001 oldconf | BIU_PCI_CONF1_SXP); 1002 } 1003 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1004 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1005 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1006 } 1007 return (rv); 1008 } 1009 1010 static void 1011 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val) 1012 { 1013 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1014 int oldconf = 0; 1015 1016 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1017 /* 1018 * We will assume that someone has paused the RISC processor. 1019 */ 1020 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1021 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1022 oldconf | BIU_PCI_CONF1_SXP); 1023 } 1024 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1025 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1026 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1027 } 1028 } 1029 1030 static u_int16_t 1031 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff) 1032 { 1033 u_int16_t rv, oc = 0; 1034 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1035 1036 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1037 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1038 u_int16_t tc; 1039 /* 1040 * We will assume that someone has paused the RISC processor. 1041 */ 1042 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1043 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1044 if (regoff & SXP_BANK1_SELECT) 1045 tc |= BIU_PCI1080_CONF1_SXP1; 1046 else 1047 tc |= BIU_PCI1080_CONF1_SXP0; 1048 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1049 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1050 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1051 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1052 oc | BIU_PCI1080_CONF1_DMA); 1053 } 1054 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1055 if (oc) { 1056 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1057 } 1058 return (rv); 1059 } 1060 1061 static void 1062 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val) 1063 { 1064 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1065 int oc = 0; 1066 1067 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1068 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1069 u_int16_t tc; 1070 /* 1071 * We will assume that someone has paused the RISC processor. 1072 */ 1073 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1074 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1075 if (regoff & SXP_BANK1_SELECT) 1076 tc |= BIU_PCI1080_CONF1_SXP1; 1077 else 1078 tc |= BIU_PCI1080_CONF1_SXP0; 1079 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1080 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1081 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1082 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1083 oc | BIU_PCI1080_CONF1_DMA); 1084 } 1085 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1086 if (oc) { 1087 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1088 } 1089 } 1090 1091 1092 struct imush { 1093 struct ispsoftc *isp; 1094 int error; 1095 }; 1096 1097 static void imc(void *, bus_dma_segment_t *, int, int); 1098 1099 static void 1100 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1101 { 1102 struct imush *imushp = (struct imush *) arg; 1103 if (error) { 1104 imushp->error = error; 1105 } else { 1106 struct ispsoftc *isp =imushp->isp; 1107 bus_addr_t addr = segs->ds_addr; 1108 1109 isp->isp_rquest_dma = addr; 1110 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1111 isp->isp_result_dma = addr; 1112 if (IS_FC(isp)) { 1113 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1114 FCPARAM(isp)->isp_scdma = addr; 1115 } 1116 } 1117 } 1118 1119 /* 1120 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1121 */ 1122 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1123 1124 static int 1125 isp_pci_mbxdma(struct ispsoftc *isp) 1126 { 1127 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1128 caddr_t base; 1129 u_int32_t len; 1130 int i, error, ns; 1131 bus_size_t alim, slim, xlim; 1132 struct imush im; 1133 1134 /* 1135 * Already been here? If so, leave... 1136 */ 1137 if (isp->isp_rquest) { 1138 return (0); 1139 } 1140 1141 #ifdef ISP_DAC_SUPPORTED 1142 alim = BUS_SPACE_UNRESTRICTED; 1143 xlim = BUS_SPACE_MAXADDR_32BIT; 1144 #else 1145 xlim = alim = BUS_SPACE_MAXADDR_32BIT; 1146 #endif 1147 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1148 slim = BUS_SPACE_MAXADDR_32BIT; 1149 } else { 1150 slim = BUS_SPACE_MAXADDR_24BIT; 1151 } 1152 1153 ISP_UNLOCK(isp); 1154 if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim, 1155 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1156 busdma_lock_mutex, &Giant, &pcs->dmat)) { 1157 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1158 ISP_LOCK(isp); 1159 return(1); 1160 } 1161 1162 1163 len = sizeof (XS_T **) * isp->isp_maxcmds; 1164 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1165 if (isp->isp_xflist == NULL) { 1166 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1167 ISP_LOCK(isp); 1168 return (1); 1169 } 1170 #ifdef ISP_TARGET_MODE 1171 len = sizeof (void **) * isp->isp_maxcmds; 1172 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1173 if (isp->isp_tgtlist == NULL) { 1174 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1175 ISP_LOCK(isp); 1176 return (1); 1177 } 1178 #endif 1179 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1180 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1181 if (pcs->dmaps == NULL) { 1182 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1183 free(isp->isp_xflist, M_DEVBUF); 1184 #ifdef ISP_TARGET_MODE 1185 free(isp->isp_tgtlist, M_DEVBUF); 1186 #endif 1187 ISP_LOCK(isp); 1188 return (1); 1189 } 1190 1191 /* 1192 * Allocate and map the request, result queues, plus FC scratch area. 1193 */ 1194 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1195 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1196 if (IS_FC(isp)) { 1197 len += ISP2100_SCRLEN; 1198 } 1199 1200 ns = (len / PAGE_SIZE) + 1; 1201 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, xlim, xlim, 1202 NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant, 1203 &isp->isp_cdmat)) { 1204 isp_prt(isp, ISP_LOGERR, 1205 "cannot create a dma tag for control spaces"); 1206 free(pcs->dmaps, M_DEVBUF); 1207 free(isp->isp_xflist, M_DEVBUF); 1208 #ifdef ISP_TARGET_MODE 1209 free(isp->isp_tgtlist, M_DEVBUF); 1210 #endif 1211 ISP_LOCK(isp); 1212 return (1); 1213 } 1214 1215 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1216 &isp->isp_cdmap) != 0) { 1217 isp_prt(isp, ISP_LOGERR, 1218 "cannot allocate %d bytes of CCB memory", len); 1219 bus_dma_tag_destroy(isp->isp_cdmat); 1220 free(isp->isp_xflist, M_DEVBUF); 1221 #ifdef ISP_TARGET_MODE 1222 free(isp->isp_tgtlist, M_DEVBUF); 1223 #endif 1224 free(pcs->dmaps, M_DEVBUF); 1225 ISP_LOCK(isp); 1226 return (1); 1227 } 1228 1229 for (i = 0; i < isp->isp_maxcmds; i++) { 1230 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1231 if (error) { 1232 isp_prt(isp, ISP_LOGERR, 1233 "error %d creating per-cmd DMA maps", error); 1234 while (--i >= 0) { 1235 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1236 } 1237 goto bad; 1238 } 1239 } 1240 1241 im.isp = isp; 1242 im.error = 0; 1243 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1244 if (im.error) { 1245 isp_prt(isp, ISP_LOGERR, 1246 "error %d loading dma map for control areas", im.error); 1247 goto bad; 1248 } 1249 1250 isp->isp_rquest = base; 1251 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1252 isp->isp_result = base; 1253 if (IS_FC(isp)) { 1254 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1255 FCPARAM(isp)->isp_scratch = base; 1256 } 1257 ISP_LOCK(isp); 1258 return (0); 1259 1260 bad: 1261 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1262 bus_dma_tag_destroy(isp->isp_cdmat); 1263 free(isp->isp_xflist, M_DEVBUF); 1264 #ifdef ISP_TARGET_MODE 1265 free(isp->isp_tgtlist, M_DEVBUF); 1266 #endif 1267 free(pcs->dmaps, M_DEVBUF); 1268 ISP_LOCK(isp); 1269 isp->isp_rquest = NULL; 1270 return (1); 1271 } 1272 1273 typedef struct { 1274 struct ispsoftc *isp; 1275 void *cmd_token; 1276 void *rq; 1277 u_int16_t *nxtip; 1278 u_int16_t optr; 1279 u_int error; 1280 } mush_t; 1281 1282 #define MUSHERR_NOQENTRIES -2 1283 1284 #ifdef ISP_TARGET_MODE 1285 /* 1286 * We need to handle DMA for target mode differently from initiator mode. 1287 * 1288 * DMA mapping and construction and submission of CTIO Request Entries 1289 * and rendevous for completion are very tightly coupled because we start 1290 * out by knowing (per platform) how much data we have to move, but we 1291 * don't know, up front, how many DMA mapping segments will have to be used 1292 * cover that data, so we don't know how many CTIO Request Entries we 1293 * will end up using. Further, for performance reasons we may want to 1294 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1295 * 1296 * The standard vector still goes through isp_pci_dmasetup, but the callback 1297 * for the DMA mapping routines comes here instead with the whole transfer 1298 * mapped and a pointer to a partially filled in already allocated request 1299 * queue entry. We finish the job. 1300 */ 1301 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1302 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1303 1304 #define STATUS_WITH_DATA 1 1305 1306 static void 1307 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1308 { 1309 mush_t *mp; 1310 struct ccb_scsiio *csio; 1311 struct ispsoftc *isp; 1312 struct isp_pcisoftc *pcs; 1313 bus_dmamap_t *dp; 1314 ct_entry_t *cto, *qe; 1315 u_int8_t scsi_status; 1316 u_int16_t curi, nxti, handle; 1317 u_int32_t sflags; 1318 int32_t resid; 1319 int nth_ctio, nctios, send_status; 1320 1321 mp = (mush_t *) arg; 1322 if (error) { 1323 mp->error = error; 1324 return; 1325 } 1326 1327 isp = mp->isp; 1328 csio = mp->cmd_token; 1329 cto = mp->rq; 1330 curi = isp->isp_reqidx; 1331 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1332 1333 cto->ct_xfrlen = 0; 1334 cto->ct_seg_count = 0; 1335 cto->ct_header.rqs_entry_count = 1; 1336 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1337 1338 if (nseg == 0) { 1339 cto->ct_header.rqs_seqno = 1; 1340 isp_prt(isp, ISP_LOGTDEBUG1, 1341 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1342 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1343 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1344 cto->ct_scsi_status, cto->ct_resid); 1345 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1346 isp_put_ctio(isp, cto, qe); 1347 return; 1348 } 1349 1350 nctios = nseg / ISP_RQDSEG; 1351 if (nseg % ISP_RQDSEG) { 1352 nctios++; 1353 } 1354 1355 /* 1356 * Save syshandle, and potentially any SCSI status, which we'll 1357 * reinsert on the last CTIO we're going to send. 1358 */ 1359 1360 handle = cto->ct_syshandle; 1361 cto->ct_syshandle = 0; 1362 cto->ct_header.rqs_seqno = 0; 1363 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1364 1365 if (send_status) { 1366 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1367 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1368 /* 1369 * Preserve residual. 1370 */ 1371 resid = cto->ct_resid; 1372 1373 /* 1374 * Save actual SCSI status. 1375 */ 1376 scsi_status = cto->ct_scsi_status; 1377 1378 #ifndef STATUS_WITH_DATA 1379 sflags |= CT_NO_DATA; 1380 /* 1381 * We can't do a status at the same time as a data CTIO, so 1382 * we need to synthesize an extra CTIO at this level. 1383 */ 1384 nctios++; 1385 #endif 1386 } else { 1387 sflags = scsi_status = resid = 0; 1388 } 1389 1390 cto->ct_resid = 0; 1391 cto->ct_scsi_status = 0; 1392 1393 pcs = (struct isp_pcisoftc *)isp; 1394 dp = &pcs->dmaps[isp_handle_index(handle)]; 1395 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1396 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1397 } else { 1398 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1399 } 1400 1401 nxti = *mp->nxtip; 1402 1403 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1404 int seglim; 1405 1406 seglim = nseg; 1407 if (seglim) { 1408 int seg; 1409 1410 if (seglim > ISP_RQDSEG) 1411 seglim = ISP_RQDSEG; 1412 1413 for (seg = 0; seg < seglim; seg++, nseg--) { 1414 /* 1415 * Unlike normal initiator commands, we don't 1416 * do any swizzling here. 1417 */ 1418 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1419 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1420 cto->ct_xfrlen += dm_segs->ds_len; 1421 dm_segs++; 1422 } 1423 cto->ct_seg_count = seg; 1424 } else { 1425 /* 1426 * This case should only happen when we're sending an 1427 * extra CTIO with final status. 1428 */ 1429 if (send_status == 0) { 1430 isp_prt(isp, ISP_LOGWARN, 1431 "tdma_mk ran out of segments"); 1432 mp->error = EINVAL; 1433 return; 1434 } 1435 } 1436 1437 /* 1438 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1439 * ct_tagtype, and ct_timeout have been carried over 1440 * unchanged from what our caller had set. 1441 * 1442 * The dataseg fields and the seg_count fields we just got 1443 * through setting. The data direction we've preserved all 1444 * along and only clear it if we're now sending status. 1445 */ 1446 1447 if (nth_ctio == nctios - 1) { 1448 /* 1449 * We're the last in a sequence of CTIOs, so mark 1450 * this CTIO and save the handle to the CCB such that 1451 * when this CTIO completes we can free dma resources 1452 * and do whatever else we need to do to finish the 1453 * rest of the command. We *don't* give this to the 1454 * firmware to work on- the caller will do that. 1455 */ 1456 1457 cto->ct_syshandle = handle; 1458 cto->ct_header.rqs_seqno = 1; 1459 1460 if (send_status) { 1461 cto->ct_scsi_status = scsi_status; 1462 cto->ct_flags |= sflags; 1463 cto->ct_resid = resid; 1464 } 1465 if (send_status) { 1466 isp_prt(isp, ISP_LOGTDEBUG1, 1467 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 1468 "scsi status %x resid %d", 1469 cto->ct_fwhandle, csio->ccb_h.target_lun, 1470 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 1471 cto->ct_scsi_status, cto->ct_resid); 1472 } else { 1473 isp_prt(isp, ISP_LOGTDEBUG1, 1474 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 1475 cto->ct_fwhandle, csio->ccb_h.target_lun, 1476 cto->ct_iid, cto->ct_tag_val, 1477 cto->ct_flags); 1478 } 1479 isp_put_ctio(isp, cto, qe); 1480 ISP_TDQE(isp, "last tdma_mk", curi, cto); 1481 if (nctios > 1) { 1482 MEMORYBARRIER(isp, SYNC_REQUEST, 1483 curi, QENTRY_LEN); 1484 } 1485 } else { 1486 ct_entry_t *oqe = qe; 1487 1488 /* 1489 * Make sure syshandle fields are clean 1490 */ 1491 cto->ct_syshandle = 0; 1492 cto->ct_header.rqs_seqno = 0; 1493 1494 isp_prt(isp, ISP_LOGTDEBUG1, 1495 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 1496 cto->ct_fwhandle, csio->ccb_h.target_lun, 1497 cto->ct_iid, cto->ct_flags); 1498 1499 /* 1500 * Get a new CTIO 1501 */ 1502 qe = (ct_entry_t *) 1503 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1504 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 1505 if (nxti == mp->optr) { 1506 isp_prt(isp, ISP_LOGTDEBUG0, 1507 "Queue Overflow in tdma_mk"); 1508 mp->error = MUSHERR_NOQENTRIES; 1509 return; 1510 } 1511 1512 /* 1513 * Now that we're done with the old CTIO, 1514 * flush it out to the request queue. 1515 */ 1516 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 1517 isp_put_ctio(isp, cto, oqe); 1518 if (nth_ctio != 0) { 1519 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 1520 QENTRY_LEN); 1521 } 1522 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 1523 1524 /* 1525 * Reset some fields in the CTIO so we can reuse 1526 * for the next one we'll flush to the request 1527 * queue. 1528 */ 1529 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1530 cto->ct_header.rqs_entry_count = 1; 1531 cto->ct_header.rqs_flags = 0; 1532 cto->ct_status = 0; 1533 cto->ct_scsi_status = 0; 1534 cto->ct_xfrlen = 0; 1535 cto->ct_resid = 0; 1536 cto->ct_seg_count = 0; 1537 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1538 } 1539 } 1540 *mp->nxtip = nxti; 1541 } 1542 1543 /* 1544 * We don't have to do multiple CTIOs here. Instead, we can just do 1545 * continuation segments as needed. This greatly simplifies the code 1546 * improves performance. 1547 */ 1548 1549 static void 1550 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1551 { 1552 mush_t *mp; 1553 struct ccb_scsiio *csio; 1554 struct ispsoftc *isp; 1555 ct2_entry_t *cto, *qe; 1556 u_int16_t curi, nxti; 1557 int segcnt; 1558 1559 mp = (mush_t *) arg; 1560 if (error) { 1561 mp->error = error; 1562 return; 1563 } 1564 1565 isp = mp->isp; 1566 csio = mp->cmd_token; 1567 cto = mp->rq; 1568 1569 curi = isp->isp_reqidx; 1570 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1571 1572 if (nseg == 0) { 1573 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1574 isp_prt(isp, ISP_LOGWARN, 1575 "dma2_tgt_fc, a status CTIO2 without MODE1 " 1576 "set (0x%x)", cto->ct_flags); 1577 mp->error = EINVAL; 1578 return; 1579 } 1580 /* 1581 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1582 * flags to NO DATA and clear relative offset flags. 1583 * We preserve the ct_resid and the response area. 1584 */ 1585 cto->ct_header.rqs_seqno = 1; 1586 cto->ct_seg_count = 0; 1587 cto->ct_reloff = 0; 1588 isp_prt(isp, ISP_LOGTDEBUG1, 1589 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1590 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 1591 cto->ct_iid, cto->ct_flags, cto->ct_status, 1592 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1593 isp_put_ctio2(isp, cto, qe); 1594 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 1595 return; 1596 } 1597 1598 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1599 isp_prt(isp, ISP_LOGERR, 1600 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 1601 "(0x%x)", cto->ct_flags); 1602 mp->error = EINVAL; 1603 return; 1604 } 1605 1606 1607 nxti = *mp->nxtip; 1608 1609 /* 1610 * Set up the CTIO2 data segments. 1611 */ 1612 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg; 1613 cto->ct_seg_count++, segcnt++) { 1614 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base = 1615 dm_segs[segcnt].ds_addr; 1616 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count = 1617 dm_segs[segcnt].ds_len; 1618 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1619 isp_prt(isp, ISP_LOGTDEBUG1, 1620 "isp_send_ctio2: ent0[%d]0x%jx:%ju", 1621 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr, 1622 (uintmax_t)dm_segs[segcnt].ds_len); 1623 } 1624 1625 while (segcnt < nseg) { 1626 u_int16_t curip; 1627 int seg; 1628 ispcontreq_t local, *crq = &local, *qep; 1629 1630 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1631 curip = nxti; 1632 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 1633 if (nxti == mp->optr) { 1634 ISP_UNLOCK(isp); 1635 isp_prt(isp, ISP_LOGTDEBUG0, 1636 "tdma_mkfc: request queue overflow"); 1637 mp->error = MUSHERR_NOQENTRIES; 1638 return; 1639 } 1640 cto->ct_header.rqs_entry_count++; 1641 MEMZERO((void *)crq, sizeof (*crq)); 1642 crq->req_header.rqs_entry_count = 1; 1643 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1644 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG; 1645 segcnt++, seg++) { 1646 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr; 1647 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len; 1648 isp_prt(isp, ISP_LOGTDEBUG1, 1649 "isp_send_ctio2: ent%d[%d]%jx:%ju", 1650 cto->ct_header.rqs_entry_count-1, seg, 1651 (uintmax_t)dm_segs[segcnt].ds_addr, 1652 (uintmax_t)dm_segs[segcnt].ds_len); 1653 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1654 cto->ct_seg_count++; 1655 } 1656 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 1657 isp_put_cont_req(isp, crq, qep); 1658 ISP_TDQE(isp, "cont entry", curi, qep); 1659 } 1660 1661 /* 1662 * No do final twiddling for the CTIO itself. 1663 */ 1664 cto->ct_header.rqs_seqno = 1; 1665 isp_prt(isp, ISP_LOGTDEBUG1, 1666 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 1667 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 1668 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 1669 cto->ct_resid); 1670 isp_put_ctio2(isp, cto, qe); 1671 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 1672 *mp->nxtip = nxti; 1673 } 1674 #endif 1675 1676 static void dma2(void *, bus_dma_segment_t *, int, int); 1677 1678 #ifdef PAE 1679 #define LOWD(x) ((uint32_t) x) 1680 #define HIWD(x) ((uint32_t) (x >> 32)) 1681 1682 static void 1683 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1684 { 1685 mush_t *mp; 1686 struct ispsoftc *isp; 1687 struct ccb_scsiio *csio; 1688 struct isp_pcisoftc *pcs; 1689 bus_dmamap_t *dp; 1690 bus_dma_segment_t *eseg; 1691 ispreq64_t *rq; 1692 int seglim, datalen; 1693 u_int16_t nxti; 1694 1695 mp = (mush_t *) arg; 1696 if (error) { 1697 mp->error = error; 1698 return; 1699 } 1700 1701 if (nseg < 1) { 1702 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1703 mp->error = EFAULT; 1704 return; 1705 } 1706 csio = mp->cmd_token; 1707 isp = mp->isp; 1708 rq = mp->rq; 1709 pcs = (struct isp_pcisoftc *)mp->isp; 1710 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1711 nxti = *mp->nxtip; 1712 1713 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1714 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1715 } else { 1716 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1717 } 1718 datalen = XS_XFRLEN(csio); 1719 1720 /* 1721 * We're passed an initial partially filled in entry that 1722 * has most fields filled in except for data transfer 1723 * related values. 1724 * 1725 * Our job is to fill in the initial request queue entry and 1726 * then to start allocating and filling in continuation entries 1727 * until we've covered the entire transfer. 1728 */ 1729 1730 if (IS_FC(isp)) { 1731 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 1732 seglim = ISP_RQDSEG_T3; 1733 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 1734 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1735 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1736 } else { 1737 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1738 } 1739 } else { 1740 rq->req_header.rqs_entry_type = RQSTYPE_A64; 1741 if (csio->cdb_len > 12) { 1742 seglim = 0; 1743 } else { 1744 seglim = ISP_RQDSEG_A64; 1745 } 1746 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1747 rq->req_flags |= REQFLAG_DATA_IN; 1748 } else { 1749 rq->req_flags |= REQFLAG_DATA_OUT; 1750 } 1751 } 1752 1753 eseg = dm_segs + nseg; 1754 1755 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1756 if (IS_FC(isp)) { 1757 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 1758 rq3->req_dataseg[rq3->req_seg_count].ds_base = 1759 LOWD(dm_segs->ds_addr); 1760 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 1761 HIWD(dm_segs->ds_addr); 1762 rq3->req_dataseg[rq3->req_seg_count].ds_count = 1763 dm_segs->ds_len; 1764 } else { 1765 rq->req_dataseg[rq->req_seg_count].ds_base = 1766 LOWD(dm_segs->ds_addr); 1767 rq->req_dataseg[rq->req_seg_count].ds_basehi = 1768 HIWD(dm_segs->ds_addr); 1769 rq->req_dataseg[rq->req_seg_count].ds_count = 1770 dm_segs->ds_len; 1771 } 1772 datalen -= dm_segs->ds_len; 1773 rq->req_seg_count++; 1774 dm_segs++; 1775 } 1776 1777 while (datalen > 0 && dm_segs != eseg) { 1778 u_int16_t onxti; 1779 ispcontreq64_t local, *crq = &local, *cqe; 1780 1781 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1782 onxti = nxti; 1783 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 1784 if (nxti == mp->optr) { 1785 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 1786 mp->error = MUSHERR_NOQENTRIES; 1787 return; 1788 } 1789 rq->req_header.rqs_entry_count++; 1790 MEMZERO((void *)crq, sizeof (*crq)); 1791 crq->req_header.rqs_entry_count = 1; 1792 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 1793 1794 seglim = 0; 1795 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 1796 crq->req_dataseg[seglim].ds_base = 1797 LOWD(dm_segs->ds_addr); 1798 crq->req_dataseg[seglim].ds_basehi = 1799 HIWD(dm_segs->ds_addr); 1800 crq->req_dataseg[seglim].ds_count = 1801 dm_segs->ds_len; 1802 rq->req_seg_count++; 1803 dm_segs++; 1804 seglim++; 1805 datalen -= dm_segs->ds_len; 1806 } 1807 isp_put_cont64_req(isp, crq, cqe); 1808 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1809 } 1810 *mp->nxtip = nxti; 1811 } 1812 #else 1813 static void 1814 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1815 { 1816 mush_t *mp; 1817 struct ispsoftc *isp; 1818 struct ccb_scsiio *csio; 1819 struct isp_pcisoftc *pcs; 1820 bus_dmamap_t *dp; 1821 bus_dma_segment_t *eseg; 1822 ispreq_t *rq; 1823 int seglim, datalen; 1824 u_int16_t nxti; 1825 1826 mp = (mush_t *) arg; 1827 if (error) { 1828 mp->error = error; 1829 return; 1830 } 1831 1832 if (nseg < 1) { 1833 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1834 mp->error = EFAULT; 1835 return; 1836 } 1837 csio = mp->cmd_token; 1838 isp = mp->isp; 1839 rq = mp->rq; 1840 pcs = (struct isp_pcisoftc *)mp->isp; 1841 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1842 nxti = *mp->nxtip; 1843 1844 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1845 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1846 } else { 1847 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1848 } 1849 1850 datalen = XS_XFRLEN(csio); 1851 1852 /* 1853 * We're passed an initial partially filled in entry that 1854 * has most fields filled in except for data transfer 1855 * related values. 1856 * 1857 * Our job is to fill in the initial request queue entry and 1858 * then to start allocating and filling in continuation entries 1859 * until we've covered the entire transfer. 1860 */ 1861 1862 if (IS_FC(isp)) { 1863 seglim = ISP_RQDSEG_T2; 1864 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 1865 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1866 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1867 } else { 1868 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1869 } 1870 } else { 1871 if (csio->cdb_len > 12) { 1872 seglim = 0; 1873 } else { 1874 seglim = ISP_RQDSEG; 1875 } 1876 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1877 rq->req_flags |= REQFLAG_DATA_IN; 1878 } else { 1879 rq->req_flags |= REQFLAG_DATA_OUT; 1880 } 1881 } 1882 1883 eseg = dm_segs + nseg; 1884 1885 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1886 if (IS_FC(isp)) { 1887 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1888 rq2->req_dataseg[rq2->req_seg_count].ds_base = 1889 dm_segs->ds_addr; 1890 rq2->req_dataseg[rq2->req_seg_count].ds_count = 1891 dm_segs->ds_len; 1892 } else { 1893 rq->req_dataseg[rq->req_seg_count].ds_base = 1894 dm_segs->ds_addr; 1895 rq->req_dataseg[rq->req_seg_count].ds_count = 1896 dm_segs->ds_len; 1897 } 1898 datalen -= dm_segs->ds_len; 1899 rq->req_seg_count++; 1900 dm_segs++; 1901 } 1902 1903 while (datalen > 0 && dm_segs != eseg) { 1904 u_int16_t onxti; 1905 ispcontreq_t local, *crq = &local, *cqe; 1906 1907 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1908 onxti = nxti; 1909 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 1910 if (nxti == mp->optr) { 1911 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 1912 mp->error = MUSHERR_NOQENTRIES; 1913 return; 1914 } 1915 rq->req_header.rqs_entry_count++; 1916 MEMZERO((void *)crq, sizeof (*crq)); 1917 crq->req_header.rqs_entry_count = 1; 1918 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1919 1920 seglim = 0; 1921 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 1922 crq->req_dataseg[seglim].ds_base = 1923 dm_segs->ds_addr; 1924 crq->req_dataseg[seglim].ds_count = 1925 dm_segs->ds_len; 1926 rq->req_seg_count++; 1927 dm_segs++; 1928 seglim++; 1929 datalen -= dm_segs->ds_len; 1930 } 1931 isp_put_cont_req(isp, crq, cqe); 1932 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1933 } 1934 *mp->nxtip = nxti; 1935 } 1936 #endif 1937 1938 static int 1939 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 1940 u_int16_t *nxtip, u_int16_t optr) 1941 { 1942 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1943 ispreq_t *qep; 1944 bus_dmamap_t *dp = NULL; 1945 mush_t mush, *mp; 1946 void (*eptr)(void *, bus_dma_segment_t *, int, int); 1947 1948 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 1949 #ifdef ISP_TARGET_MODE 1950 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1951 if (IS_FC(isp)) { 1952 eptr = tdma_mkfc; 1953 } else { 1954 eptr = tdma_mk; 1955 } 1956 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1957 (csio->dxfer_len == 0)) { 1958 mp = &mush; 1959 mp->isp = isp; 1960 mp->cmd_token = csio; 1961 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 1962 mp->nxtip = nxtip; 1963 mp->optr = optr; 1964 mp->error = 0; 1965 (*eptr)(mp, NULL, 0, 0); 1966 goto mbxsync; 1967 } 1968 } else 1969 #endif 1970 eptr = dma2; 1971 1972 1973 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1974 (csio->dxfer_len == 0)) { 1975 rq->req_seg_count = 1; 1976 goto mbxsync; 1977 } 1978 1979 /* 1980 * Do a virtual grapevine step to collect info for 1981 * the callback dma allocation that we have to use... 1982 */ 1983 mp = &mush; 1984 mp->isp = isp; 1985 mp->cmd_token = csio; 1986 mp->rq = rq; 1987 mp->nxtip = nxtip; 1988 mp->optr = optr; 1989 mp->error = 0; 1990 1991 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1992 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1993 int error, s; 1994 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1995 s = splsoftvm(); 1996 error = bus_dmamap_load(pcs->dmat, *dp, 1997 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 1998 if (error == EINPROGRESS) { 1999 bus_dmamap_unload(pcs->dmat, *dp); 2000 mp->error = EINVAL; 2001 isp_prt(isp, ISP_LOGERR, 2002 "deferred dma allocation not supported"); 2003 } else if (error && mp->error == 0) { 2004 #ifdef DIAGNOSTIC 2005 isp_prt(isp, ISP_LOGERR, 2006 "error %d in dma mapping code", error); 2007 #endif 2008 mp->error = error; 2009 } 2010 splx(s); 2011 } else { 2012 /* Pointer to physical buffer */ 2013 struct bus_dma_segment seg; 2014 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 2015 seg.ds_len = csio->dxfer_len; 2016 (*eptr)(mp, &seg, 1, 0); 2017 } 2018 } else { 2019 struct bus_dma_segment *segs; 2020 2021 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2022 isp_prt(isp, ISP_LOGERR, 2023 "Physical segment pointers unsupported"); 2024 mp->error = EINVAL; 2025 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2026 isp_prt(isp, ISP_LOGERR, 2027 "Virtual segment addresses unsupported"); 2028 mp->error = EINVAL; 2029 } else { 2030 /* Just use the segments provided */ 2031 segs = (struct bus_dma_segment *) csio->data_ptr; 2032 (*eptr)(mp, segs, csio->sglist_cnt, 0); 2033 } 2034 } 2035 if (mp->error) { 2036 int retval = CMD_COMPLETE; 2037 if (mp->error == MUSHERR_NOQENTRIES) { 2038 retval = CMD_EAGAIN; 2039 } else if (mp->error == EFBIG) { 2040 XS_SETERR(csio, CAM_REQ_TOO_BIG); 2041 } else if (mp->error == EINVAL) { 2042 XS_SETERR(csio, CAM_REQ_INVALID); 2043 } else { 2044 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 2045 } 2046 return (retval); 2047 } 2048 mbxsync: 2049 switch (rq->req_header.rqs_entry_type) { 2050 case RQSTYPE_REQUEST: 2051 isp_put_request(isp, rq, qep); 2052 break; 2053 case RQSTYPE_CMDONLY: 2054 isp_put_extended_request(isp, (ispextreq_t *)rq, 2055 (ispextreq_t *)qep); 2056 break; 2057 case RQSTYPE_T2RQS: 2058 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2059 break; 2060 case RQSTYPE_A64: 2061 case RQSTYPE_T3RQS: 2062 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2063 break; 2064 } 2065 return (CMD_QUEUED); 2066 } 2067 2068 static void 2069 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle) 2070 { 2071 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2072 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 2073 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2074 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 2075 } else { 2076 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 2077 } 2078 bus_dmamap_unload(pcs->dmat, *dp); 2079 } 2080 2081 2082 static void 2083 isp_pci_reset1(struct ispsoftc *isp) 2084 { 2085 /* Make sure the BIOS is disabled */ 2086 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2087 /* and enable interrupts */ 2088 ENABLE_INTS(isp); 2089 } 2090 2091 static void 2092 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg) 2093 { 2094 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2095 if (msg) 2096 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2097 else 2098 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2099 if (IS_SCSI(isp)) 2100 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2101 else 2102 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2103 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2104 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2105 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2106 2107 2108 if (IS_SCSI(isp)) { 2109 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2110 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2111 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2112 ISP_READ(isp, CDMA_FIFO_STS)); 2113 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2114 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2115 ISP_READ(isp, DDMA_FIFO_STS)); 2116 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2117 ISP_READ(isp, SXP_INTERRUPT), 2118 ISP_READ(isp, SXP_GROSS_ERR), 2119 ISP_READ(isp, SXP_PINS_CTRL)); 2120 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2121 } 2122 printf(" mbox regs: %x %x %x %x %x\n", 2123 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2124 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2125 ISP_READ(isp, OUTMAILBOX4)); 2126 printf(" PCI Status Command/Status=%x\n", 2127 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2128 } 2129