xref: /freebsd/sys/dev/isp/isp_pci.c (revision b601c69bdbe8755d26570261d7fd4c02ee4eff74)
1 /* $FreeBSD$ */
2 /*
3  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4  * FreeBSD Version.
5  *
6  *---------------------------------------
7  * Copyright (c) 1997, 1998, 1999 by Matthew Jacob
8  * NASA/Ames Research Center
9  * All rights reserved.
10  *---------------------------------------
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice immediately at the beginning of the file, without modification,
17  *    this list of conditions, and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. The name of the author may not be used to endorse or promote products
22  *    derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
28  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/module.h>
41 #include <sys/bus.h>
42 
43 #include <pci/pcireg.h>
44 #include <pci/pcivar.h>
45 
46 #include <machine/bus_memio.h>
47 #include <machine/bus_pio.h>
48 #include <machine/bus.h>
49 #include <machine/resource.h>
50 #include <machine/clock.h>
51 #include <sys/rman.h>
52 #include <sys/malloc.h>
53 
54 #include <dev/isp/isp_freebsd.h>
55 
56 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
57 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
58 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
59 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
60 static int isp_pci_mbxdma __P((struct ispsoftc *));
61 static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
62 	ispreq_t *, u_int16_t *, u_int16_t));
63 static void
64 isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));
65 
66 static void isp_pci_reset1 __P((struct ispsoftc *));
67 static void isp_pci_dumpregs __P((struct ispsoftc *));
68 
69 #ifndef	ISP_CODE_ORG
70 #define	ISP_CODE_ORG		0x1000
71 #endif
72 
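/*
 * Per-chip dispatch tables: register accessors, DMA setup/teardown,
 * reset and register-dump hooks, plus firmware and config defaults.
 */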
73 static struct ispmdvec mdvec = {
74 	isp_pci_rd_reg,
75 	isp_pci_wr_reg,
76 	isp_pci_mbxdma,
77 	isp_pci_dmasetup,
78 	isp_pci_dmateardown,
79 	NULL,
80 	isp_pci_reset1,
81 	isp_pci_dumpregs,
82 	NULL,
83 	0,
84 	ISP_CODE_ORG,
85 	0,
86 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
87 	0
88 };
89 
90 static struct ispmdvec mdvec_1080 = {
91 	isp_pci_rd_reg_1080,
92 	isp_pci_wr_reg_1080,
93 	isp_pci_mbxdma,
94 	isp_pci_dmasetup,
95 	isp_pci_dmateardown,
96 	NULL,
97 	isp_pci_reset1,
98 	isp_pci_dumpregs,
99 	NULL,
100 	0,
101 	ISP_CODE_ORG,
102 	0,
103 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
104 	0
105 };
106 
107 static struct ispmdvec mdvec_12160 = {
108 	isp_pci_rd_reg_1080,
109 	isp_pci_wr_reg_1080,
110 	isp_pci_mbxdma,
111 	isp_pci_dmasetup,
112 	isp_pci_dmateardown,
113 	NULL,
114 	isp_pci_reset1,
115 	isp_pci_dumpregs,
116 	NULL,
117 	0,
118 	0,
119 	0,
120 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
121 	0
122 };
123 
124 static struct ispmdvec mdvec_2100 = {
125 	isp_pci_rd_reg,
126 	isp_pci_wr_reg,
127 	isp_pci_mbxdma,
128 	isp_pci_dmasetup,
129 	isp_pci_dmateardown,
130 	NULL,
131 	isp_pci_reset1,
132 	isp_pci_dumpregs,
133 	NULL,
134 	0,
135 	ISP_CODE_ORG,
136 	0,
137 	0,
138 	0
139 };
140 
141 static struct ispmdvec mdvec_2200 = {
142 	isp_pci_rd_reg,
143 	isp_pci_wr_reg,
144 	isp_pci_mbxdma,
145 	isp_pci_dmasetup,
146 	isp_pci_dmateardown,
147 	NULL,
148 	isp_pci_reset1,
149 	isp_pci_dumpregs,
150 	NULL,
151 	0,
152 	ISP_CODE_ORG,
153 	0,
154 	0,
155 	0
156 };
157 
158 #ifndef	PCIM_CMD_INVEN
159 #define	PCIM_CMD_INVEN			0x10
160 #endif
161 #ifndef	PCIM_CMD_BUSMASTEREN
162 #define	PCIM_CMD_BUSMASTEREN		0x0004
163 #endif
164 #ifndef	PCIM_CMD_PERRESPEN
165 #define	PCIM_CMD_PERRESPEN		0x0040
166 #endif
167 #ifndef	PCIM_CMD_SEREN
168 #define	PCIM_CMD_SEREN			0x0100
169 #endif
170 
171 #ifndef	PCIR_COMMAND
172 #define	PCIR_COMMAND			0x04
173 #endif
174 
175 #ifndef	PCIR_CACHELNSZ
176 #define	PCIR_CACHELNSZ			0x0c
177 #endif
178 
179 #ifndef	PCIR_LATTIMER
180 #define	PCIR_LATTIMER			0x0d
181 #endif
182 
183 #ifndef	PCIR_ROMADDR
184 #define	PCIR_ROMADDR			0x30
185 #endif
186 
187 #ifndef	PCI_VENDOR_QLOGIC
188 #define	PCI_VENDOR_QLOGIC		0x1077
189 #endif
190 
191 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
192 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
193 #endif
194 
195 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
196 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
197 #endif
198 
199 #ifndef	PCI_PRODUCT_QLOGIC_ISP12160
200 #define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
201 #endif
202 
203 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
204 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
205 #endif
206 
207 #ifndef	PCI_PRODUCT_QLOGIC_ISP1280
208 #define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
209 #endif
210 
211 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
212 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
213 #endif
214 
215 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
216 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
217 #endif
218 
219 #define	PCI_QLOGIC_ISP1020	\
220 	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
221 
222 #define	PCI_QLOGIC_ISP1080	\
223 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
224 
225 #define	PCI_QLOGIC_ISP12160	\
226 	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
227 
228 #define	PCI_QLOGIC_ISP1240	\
229 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
230 
231 #define	PCI_QLOGIC_ISP1280	\
232 	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
233 
234 #define	PCI_QLOGIC_ISP2100	\
235 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
236 
237 #define	PCI_QLOGIC_ISP2200	\
238 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
239 
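/* Base address registers: BAR0 (0x10) is the I/O map, BAR1 (0x14) memory. */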
240 #define	IO_MAP_REG	0x10
241 #define	MEM_MAP_REG	0x14
242 
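/* Defaults: latency timer in PCI clocks, cache line size in 32-bit words. */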
243 #define	PCI_DFLT_LTNCY	0x40
244 #define	PCI_DFLT_LNSZ	0x10
245 
246 static int isp_pci_probe (device_t);
247 static int isp_pci_attach (device_t);
248 
249 /* This distinguishing define is not right, but it does work */
250 #ifdef __alpha__
251 #define IO_SPACE_MAPPING	ALPHA_BUS_SPACE_IO
252 #define MEM_SPACE_MAPPING	ALPHA_BUS_SPACE_MEM
253 #else
254 #define IO_SPACE_MAPPING	I386_BUS_SPACE_IO
255 #define MEM_SPACE_MAPPING	I386_BUS_SPACE_MEM
256 #endif
257 
258 struct isp_pcisoftc {
259 	struct ispsoftc			pci_isp;
260 	device_t			pci_dev;
261 	struct resource *		pci_reg;
262 	bus_space_tag_t			pci_st;
263 	bus_space_handle_t		pci_sh;
264 	void *				ih;
265 	int16_t				pci_poff[_NREG_BLKS];
266 	bus_dma_tag_t			parent_dmat;
267 	bus_dma_tag_t			cntrol_dmat;
268 	bus_dmamap_t			cntrol_dmap;
269 	bus_dmamap_t			*dmaps;
270 };
271 ispfwfunc *isp_get_firmware_p = NULL;
272 
273 static device_method_t isp_pci_methods[] = {
274 	/* Device interface */
275 	DEVMETHOD(device_probe,		isp_pci_probe),
276 	DEVMETHOD(device_attach,	isp_pci_attach),
277 	{ 0, 0 }
278 };
279 
280 static driver_t isp_pci_driver = {
281 	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
282 };
283 static devclass_t isp_devclass;
284 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
285 MODULE_VERSION(isp, 1);
286 
287 static int
288 isp_pci_probe(device_t dev)
289 {
290 	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
291 	case PCI_QLOGIC_ISP1020:
292 		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
293 		break;
294 	case PCI_QLOGIC_ISP1080:
295 		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
296 		break;
297 	case PCI_QLOGIC_ISP1240:
298 		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
299 		break;
300 	case PCI_QLOGIC_ISP1280:
301 		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
302 		break;
303 	case PCI_QLOGIC_ISP12160:
304 		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
305 		break;
306 	case PCI_QLOGIC_ISP2100:
307 		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
308 		break;
309 	case PCI_QLOGIC_ISP2200:
310 		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
311 		break;
312 	default:
313 		return (ENXIO);
314 	}
315 	if (device_get_unit(dev) == 0) {
316 		CFGPRINTF("Qlogic ISP Driver, FreeBSD Version %d.%d, "
317 		    "Core Version %d.%d\n",
318 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
319 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
320 	}
321 	/*
322 	 * XXXX: Here is where we might load the f/w module
323 	 * XXXX: (or increase a reference count to it).
324 	 */
325 	return (0);
326 }
327 
328 static int
329 isp_pci_attach(device_t dev)
330 {
331 	struct resource *regs, *irq;
332 	int unit, bitmap, rtp, rgd, iqd, m1, m2, s;
333 	u_int32_t data, cmd, linesz, psize, basetype;
334 	struct isp_pcisoftc *pcs;
335 	struct ispsoftc *isp;
336 	struct ispmdvec *mdvp;
337 	bus_size_t lim;
338 
339 	/*
340 	 * Figure out if we're supposed to skip this one.
341 	 */
342 	unit = device_get_unit(dev);
343 	if (getenv_int("isp_disable", &bitmap)) {
344 		if (bitmap & (1 << unit)) {
345 			device_printf(dev, "not configuring\n");
346 			return (ENODEV);
347 		}
348 	}
349 
350 	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
351 	if (pcs == NULL) {
352 		device_printf(dev, "cannot allocate softc\n");
353 		return (ENOMEM);
354 	}
355 	bzero(pcs, sizeof (struct isp_pcisoftc));
356 
357 	/*
358 	 * Figure out which we should try first - memory mapping or i/o mapping?
359 	 */
360 #ifdef	__alpha__
361 	m1 = PCIM_CMD_MEMEN;
362 	m2 = PCIM_CMD_PORTEN;
363 #else
364 	m1 = PCIM_CMD_PORTEN;
365 	m2 = PCIM_CMD_MEMEN;
366 #endif
367 	bitmap = 0;
368 	if (getenv_int("isp_mem_map", &bitmap)) {
369 		if (bitmap & (1 << unit)) {
370 			m1 = PCIM_CMD_MEMEN;
371 			m2 = PCIM_CMD_PORTEN;
372 		}
373 	}
374 	bitmap = 0;
375 	if (getenv_int("isp_io_map", &bitmap)) {
376 		if (bitmap & (1 << unit)) {
377 			m1 = PCIM_CMD_PORTEN;
378 			m2 = PCIM_CMD_MEMEN;
379 		}
380 	}
381 
382 	linesz = PCI_DFLT_LNSZ;
383 	irq = regs = NULL;
384 	rgd = rtp = iqd = 0;
385 
386 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
387 	if (cmd & m1) {
388 		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
389 		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
390 		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
391 	}
392 	if (regs == NULL && (cmd & m2)) {
393 		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
394 		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
395 		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
396 	}
397 	if (regs == NULL) {
398 		device_printf(dev, "unable to map any ports\n");
399 		goto bad;
400 	}
401 	if (bootverbose)
402 		printf("isp%d: using %s space register mapping\n", unit,
403 		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
404 	pcs->pci_dev = dev;
405 	pcs->pci_reg = regs;
406 	pcs->pci_st = rman_get_bustag(regs);
407 	pcs->pci_sh = rman_get_bushandle(regs);
408 
409 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
410 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
411 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
412 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
413 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
414 	mdvp = &mdvec;
415 	basetype = ISP_HA_SCSI_UNKNOWN;
416 	psize = sizeof (sdparam);
417 	lim = BUS_SPACE_MAXSIZE_32BIT;
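	/*
	 * The 1020/1040 can only handle 24-bit DMA segment sizes; the
	 * later parts get the full 32 bits.
	 */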
418 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
419 		mdvp = &mdvec;
420 		basetype = ISP_HA_SCSI_UNKNOWN;
421 		psize = sizeof (sdparam);
422 		lim = BUS_SPACE_MAXSIZE_24BIT;
423 	}
424 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
425 		mdvp = &mdvec_1080;
426 		basetype = ISP_HA_SCSI_1080;
427 		psize = sizeof (sdparam);
428 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
429 		    ISP1080_DMA_REGS_OFF;
430 	}
431 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
432 		mdvp = &mdvec_1080;
433 		basetype = ISP_HA_SCSI_1240;
434 		psize = 2 * sizeof (sdparam);
435 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
436 		    ISP1080_DMA_REGS_OFF;
437 	}
438 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
439 		mdvp = &mdvec_1080;
440 		basetype = ISP_HA_SCSI_1280;
441 		psize = 2 * sizeof (sdparam);
442 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
443 		    ISP1080_DMA_REGS_OFF;
444 	}
445 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
446 		mdvp = &mdvec_12160;
447 		basetype = ISP_HA_SCSI_12160;
448 		psize = 2 * sizeof (sdparam);
449 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
450 		    ISP1080_DMA_REGS_OFF;
451 	}
452 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
453 		mdvp = &mdvec_2100;
454 		basetype = ISP_HA_FC_2100;
455 		psize = sizeof (fcparam);
456 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
457 		    PCI_MBOX_REGS2100_OFF;
458 		if (pci_get_revid(dev) < 3) {
459 			/*
460 			 * XXX: Need to get the actual revision
461 			 * XXX: number of the 2100 FB. At any rate,
462 			 * XXX: lower cache line size for early revision
463 			 * XXX: boards.
464 			 */
465 			linesz = 1;
466 		}
467 	}
468 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
469 		mdvp = &mdvec_2200;
470 		basetype = ISP_HA_FC_2200;
471 		psize = sizeof (fcparam);
472 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
473 		    PCI_MBOX_REGS2100_OFF;
474 	}
475 	isp = &pcs->pci_isp;
476 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
477 	if (isp->isp_param == NULL) {
478 		device_printf(dev, "cannot allocate parameter data\n");
479 		goto bad;
480 	}
481 	bzero(isp->isp_param, psize);
482 	isp->isp_mdvec = mdvp;
483 	isp->isp_type = basetype;
484 	isp->isp_revision = pci_get_revid(dev);
485 	(void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
486 	isp->isp_osinfo.unit = unit;
487 
488 	/*
489 	 * Try to find firmware for this device.
490 	 */
491 
492 	if (isp_get_firmware_p) {
493 		int device = (int) pci_get_device(dev);
494 #ifdef	ISP_TARGET_MODE
495 		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
496 #else
497 		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
498 #endif
499 	}
500 
505 	s = splbio();
506 	/*
507 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
508 	 * are set.
509 	 */
510 	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
511 		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
512 	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
513 
514 	/*
515 	 * Make sure the Cache Line Size register is set sensibly.
516 	 */
517 	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
518 	if (data != linesz) {
519 		data = linesz;
520 		CFGPRINTF("%s: set PCI line size to %d\n", isp->isp_name, data);
521 		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
522 	}
523 
524 	/*
525 	 * Make sure the Latency Timer is sane.
526 	 */
527 	data = pci_read_config(dev, PCIR_LATTIMER, 1);
528 	if (data < PCI_DFLT_LTNCY) {
529 		data = PCI_DFLT_LTNCY;
530 		CFGPRINTF("%s: set PCI latency to %d\n", isp->isp_name, data);
531 		pci_write_config(dev, PCIR_LATTIMER, data, 1);
532 	}
533 
534 	/*
535 	 * Make sure we've disabled the ROM.
536 	 */
537 	data = pci_read_config(dev, PCIR_ROMADDR, 4);
538 	data &= ~1;
539 	pci_write_config(dev, PCIR_ROMADDR, data, 4);
540 
542 	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
543 	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
544 	    255, lim, 0, &pcs->parent_dmat) != 0) {
545 		splx(s);
546 		device_printf(dev, "could not create master dma tag\n");
547 		goto bad;
548 	}
551 
552 	iqd = 0;
553 	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
554 	    1, RF_ACTIVE | RF_SHAREABLE);
555 	if (irq == NULL) {
		splx(s);
556 		device_printf(dev, "could not allocate interrupt\n");
557 		goto bad;
558 	}
559 
560 	if (getenv_int("isp_no_fwload", &bitmap)) {
561 		if (bitmap & (1 << unit))
562 			isp->isp_confopts |= ISP_CFG_NORELOAD;
563 	}
564 	if (getenv_int("isp_fwload", &bitmap)) {
565 		if (bitmap & (1 << unit))
566 			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
567 	}
568 	if (getenv_int("isp_no_nvram", &bitmap)) {
569 		if (bitmap & (1 << unit))
570 			isp->isp_confopts |= ISP_CFG_NONVRAM;
571 	}
572 	if (getenv_int("isp_nvram", &bitmap)) {
573 		if (bitmap & (1 << unit))
574 			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
575 	}
576 	if (getenv_int("isp_fcduplex", &bitmap)) {
577 		if (bitmap & (1 << unit))
578 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
579 	}
580 	if (getenv_int("isp_no_fcduplex", &bitmap)) {
581 		if (bitmap & (1 << unit))
582 			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
583 	}
584 	if (getenv_int("isp_nport", &bitmap)) {
585 		if (bitmap & (1 << unit))
586 			isp->isp_confopts |= ISP_CFG_NPORT;
587 	}
588 	/*
589 	 * Look for overriding WWN. This is a Node WWN so it binds to
590 	 * all FC instances. A Port WWN will be constructed from it
591 	 * as appropriate.
592 	 */
593 	if (!getenv_quad("isp_wwn", (quad_t *) &isp->isp_osinfo.default_wwn)) {
594 		int i;
595 		u_int64_t seed = (u_int64_t) (intptr_t) isp;
596 
597 		seed <<= 16;
598 		seed &= ((1LL << 48) - 1LL);
599 		/*
600 		 * This isn't very random, but it's the best we can do for
601 		 * the real edge case of cards that don't have WWNs. If
602 		 * you recompile a new vers.c, you'll get a different WWN.
603 		 */
604 		for (i = 0; version[i] != 0; i++) {
605 			seed += version[i];
606 		}
607 		/*
608 		 * Make sure the top nibble has something vaguely sensible.
609 		 */
610 		isp->isp_osinfo.default_wwn |= (4LL << 60) | seed;
611 	} else {
612 		isp->isp_confopts |= ISP_CFG_OWNWWN;
613 	}
614 	(void) getenv_int("isp_debug", &isp_debug);
615 #ifdef	ISP_TARGET_MODE
616 	(void) getenv_int("isp_tdebug", &isp_tdebug);
617 #endif
618 	if (bus_setup_intr(dev, irq, INTR_TYPE_CAM, (void (*)(void *))isp_intr,
619 	    isp, &pcs->ih)) {
620 		splx(s);
621 		device_printf(dev, "could not setup interrupt\n");
622 		goto bad;
623 	}
624 
625 	/*
626 	 * Make sure we're in reset state.
627 	 */
628 	isp_reset(isp);
629 	if (isp->isp_state != ISP_RESETSTATE) {
630 		splx(s);
631 		goto bad;
632 	}
633 	isp_init(isp);
634 	if (isp->isp_state != ISP_INITSTATE) {
635 		/* If we're a Fibre Channel Card, we allow deferred attach */
636 		if (IS_SCSI(isp)) {
637 			isp_uninit(isp);
638 			splx(s);
639 			goto bad;
640 		}
641 	}
642 	isp_attach(isp);
643 	if (isp->isp_state != ISP_RUNSTATE) {
644 		/* If we're a Fibre Channel Card, we allow deferred attach */
645 		if (IS_SCSI(isp)) {
646 			isp_uninit(isp);
647 			splx(s);
648 			goto bad;
649 		}
650 	}
651 	splx(s);
652 	/*
653 	 * XXXX: Here is where we might unload the f/w module
654 	 * XXXX: (or decrease the reference count to it).
655 	 */
656 	return (0);
657 
658 bad:
659 
660 	if (pcs && pcs->ih) {
661 		(void) bus_teardown_intr(dev, irq, pcs->ih);
662 	}
663 
664 	if (irq) {
665 		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
666 	}
667 	if (regs) {
668 		(void) bus_release_resource(dev, rtp, rgd, regs);
669 	}
670 	if (pcs) {
671 		if (pcs->pci_isp.isp_param)
672 			free(pcs->pci_isp.isp_param, M_DEVBUF);
673 		free(pcs, M_DEVBUF);
674 	}
675 	/*
676 	 * XXXX: Here is where we might unload the f/w module
677 	 * XXXX: (or decrease the reference count to it).
678 	 */
679 	return (ENXIO);
680 }
681 
682 static u_int16_t
683 isp_pci_rd_reg(isp, regoff)
684 	struct ispsoftc *isp;
685 	int regoff;
686 {
687 	u_int16_t rv;
688 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
689 	int offset, oldconf = 0;
690 
691 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
692 		/*
693 		 * We will assume that someone has paused the RISC processor.
694 		 */
695 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
696 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
697 	}
698 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
699 	offset += (regoff & 0xff);
700 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
701 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
702 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
703 	}
704 	return (rv);
705 }
706 
707 static void
708 isp_pci_wr_reg(isp, regoff, val)
709 	struct ispsoftc *isp;
710 	int regoff;
711 	u_int16_t val;
712 {
713 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
714 	int offset, oldconf = 0;
715 
716 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
717 		/*
718 		 * We will assume that someone has paused the RISC processor.
719 		 */
720 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
721 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
722 	}
723 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
724 	offset += (regoff & 0xff);
725 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
726 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
727 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
728 	}
729 }
730 
731 static u_int16_t
732 isp_pci_rd_reg_1080(isp, regoff)
733 	struct ispsoftc *isp;
734 	int regoff;
735 {
736 	u_int16_t rv, oc = 0;
737 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
738 	int offset;
739 
740 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
741 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
742 		u_int16_t tc;
743 		/*
744 		 * We will assume that someone has paused the RISC processor.
745 		 */
746 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
747 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
748 		if (regoff & SXP_BANK1_SELECT)
749 			tc |= BIU_PCI1080_CONF1_SXP1;
750 		else
751 			tc |= BIU_PCI1080_CONF1_SXP0;
752 		isp_pci_wr_reg(isp, BIU_CONF1, tc);
753 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
754 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
755 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
756 	}
757 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
758 	offset += (regoff & 0xff);
759 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
760 	if (oc) {
761 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
762 	}
763 	return (rv);
764 }
765 
766 static void
767 isp_pci_wr_reg_1080(isp, regoff, val)
768 	struct ispsoftc *isp;
769 	int regoff;
770 	u_int16_t val;
771 {
772 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
773 	int offset, oc = 0;
774 
775 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
776 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
777 		u_int16_t tc;
778 		/*
779 		 * We will assume that someone has paused the RISC processor.
780 		 */
781 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
782 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
783 		if (regoff & SXP_BANK1_SELECT)
784 			tc |= BIU_PCI1080_CONF1_SXP1;
785 		else
786 			tc |= BIU_PCI1080_CONF1_SXP0;
787 		isp_pci_wr_reg(isp, BIU_CONF1, tc);
788 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
789 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
790 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
791 	}
792 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
793 	offset += (regoff & 0xff);
794 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
795 	if (oc) {
796 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
797 	}
798 }
799 
800 static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
801 static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
802 static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
803 
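/* Callback argument for loading the control-space DMA maps below. */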
804 struct imush {
805 	struct ispsoftc *isp;
806 	int error;
807 };
808 
809 static void
810 isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
811 {
812 	struct imush *imushp = (struct imush *) arg;
813 	if (error) {
814 		imushp->error = error;
815 	} else {
816 		imushp->isp->isp_rquest_dma = segs->ds_addr;
817 	}
818 }
819 
820 static void
821 isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
822 {
823 	struct imush *imushp = (struct imush *) arg;
824 	if (error) {
825 		imushp->error = error;
826 	} else {
827 		imushp->isp->isp_result_dma = segs->ds_addr;
828 	}
829 }
830 
831 static void
832 isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
833 {
834 	struct imush *imushp = (struct imush *) arg;
835 	if (error) {
836 		imushp->error = error;
837 	} else {
838 		fcparam *fcp = imushp->isp->isp_param;
839 		fcp->isp_scdma = segs->ds_addr;
840 	}
841 }
842 
843 static int
844 isp_pci_mbxdma(struct ispsoftc *isp)
845 {
846 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
847 	caddr_t base;
848 	u_int32_t len;
849 	int i, error;
850 	bus_size_t lim;
851 	struct imush im;
852 
854 	/*
855 	 * Already been here? If so, leave...
856 	 */
857 	if (isp->isp_rquest) {
858 		return (0);
859 	}
860 
861 	len = sizeof (ISP_SCSI_XFER_T *) * isp->isp_maxcmds;
862 	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
863 	if (isp->isp_xflist == NULL) {
864 		printf("%s: can't alloc xflist array\n", isp->isp_name);
865 		return (1);
866 	}
867 	bzero(isp->isp_xflist, len);
868 	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
869 	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF,  M_WAITOK);
870 	if (pci->dmaps == NULL) {
871 		printf("%s: can't alloc dma maps\n", isp->isp_name);
872 		free(isp->isp_xflist, M_DEVBUF);
873 		return (1);
874 	}
875 
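	/*
	 * Pick the boundary for the control-space allocations: none for
	 * FC and Ultra2 parts (BUS_SPACE_MAXADDR + 1 wraps to zero, which
	 * bus_dma_tag_create() takes to mean "no boundary"), a 16MB
	 * crossing restriction for the older parts.
	 */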
876 	if (IS_FC(isp) || IS_ULTRA2(isp))
877 		lim = BUS_SPACE_MAXADDR + 1;
878 	else
879 		lim = BUS_SPACE_MAXADDR_24BIT + 1;
880 
881 	/*
882 	 * Allocate and map the request, result queues, plus FC scratch area.
883 	 */
884 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
885 	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
886 	if (IS_FC(isp)) {
887 		len += ISP2100_SCRLEN;
888 	}
889 	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
890 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
891 	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
892 		printf("%s: cannot create a dma tag for control spaces\n",
893 		    isp->isp_name);
894 		free(isp->isp_xflist, M_DEVBUF);
895 		free(pci->dmaps, M_DEVBUF);
896 		return (1);
897 	}
898 	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
899 	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
900 		printf("%s: cannot allocate %d bytes of CCB memory\n",
901 		    isp->isp_name, len);
902 		free(isp->isp_xflist, M_DEVBUF);
903 		free(pci->dmaps, M_DEVBUF);
904 		return (1);
905 	}
906 
907 	isp->isp_rquest = base;
908 	im.isp = isp;
909 	im.error = 0;
910 	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
911 	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, &im, 0);
912 	if (im.error) {
913 		printf("%s: error %d loading dma map for DMA request queue\n",
914 		    isp->isp_name, im.error);
915 		free(isp->isp_xflist, M_DEVBUF);
916 		free(pci->dmaps, M_DEVBUF);
917 		isp->isp_rquest = NULL;
918 		return (1);
919 	}
920 	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
921 	im.error = 0;
922 	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
923 	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, &im, 0);
924 	if (im.error) {
925 		printf("%s: error %d loading dma map for DMA result queue\n",
926 		    isp->isp_name, im.error);
927 		free(isp->isp_xflist, M_DEVBUF);
928 		free(pci->dmaps, M_DEVBUF);
929 		isp->isp_rquest = NULL;
930 		return (1);
931 	}
932 
933 	for (i = 0; i < isp->isp_maxcmds; i++) {
934 		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
935 		if (error) {
936 			printf("%s: error %d creating per-cmd DMA maps\n",
937 			    isp->isp_name, error);
938 			free(isp->isp_xflist, M_DEVBUF);
939 			free(pci->dmaps, M_DEVBUF);
940 			isp->isp_rquest = NULL;
941 			return (1);
942 		}
943 	}
944 
945 	if (IS_FC(isp)) {
946 		fcparam *fcp = (fcparam *) isp->isp_param;
947 		fcp->isp_scratch = base +
948 			ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN) +
949 			ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
950 		im.error = 0;
951 		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
952 		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
953 		if (im.error) {
954 			printf("%s: error %d loading FC scratch area\n",
955 			    isp->isp_name, im.error);
956 			free(isp->isp_xflist, M_DEVBUF);
957 			free(pci->dmaps, M_DEVBUF);
958 			isp->isp_rquest = NULL;
959 			return (1);
960 		}
961 	}
962 	return (0);
963 }
964 
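/*
 * Callback argument for per-command DMA map loads: everything needed to
 * finish building request queue entries once the segment list is known.
 */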
965 typedef struct {
966 	struct ispsoftc *isp;
967 	void *cmd_token;
968 	void *rq;
969 	u_int16_t *iptrp;
970 	u_int16_t optr;
971 	int error;
972 } mush_t;
973 
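/* Distinguished error: out of request queue entries; the caller retries. */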
974 #define	MUSHERR_NOQENTRIES	-2
975 
976 #ifdef	ISP_TARGET_MODE
977 /*
978  * We need to handle DMA for target mode differently from initiator mode.
979  *
980  * DMA mapping and construction and submission of CTIO Request Entries
981  * and rendezvous for completion are very tightly coupled because we start
982  * out by knowing (per platform) how much data we have to move, but we
983  * don't know, up front, how many DMA mapping segments will have to be used
984  * to cover that data, so we don't know how many CTIO Request Entries we
985  * will end up using. Further, for performance reasons we may want to
986  * (on the last CTIO for Fibre Channel), send status too (if all went well).
987  *
988  * The standard vector still goes through isp_pci_dmasetup, but the callback
989  * for the DMA mapping routines comes here instead with the whole transfer
990  * mapped and a pointer to a partially filled in already allocated request
991  * queue entry. We finish the job.
992  */
993 static void tdma_mk __P((void *, bus_dma_segment_t *, int, int));
994 static void tdma_mkfc __P((void *, bus_dma_segment_t *, int, int));
995 
996 static void
997 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
998 {
999 	mush_t *mp;
1000 	struct ccb_scsiio *csio;
1001 	struct isp_pcisoftc *pci;
1002 	bus_dmamap_t *dp;
1003 	u_int8_t scsi_status;
1004 	ct_entry_t *cto;
1005 	u_int32_t handle, totxfr, sflags;
1006 	int nctios, send_status;
1007 	int32_t resid;
1008 
1009 	mp = (mush_t *) arg;
1010 	if (error) {
1011 		mp->error = error;
1012 		return;
1013 	}
1014 	csio = mp->cmd_token;
1015 	cto = mp->rq;
1016 
1017 	cto->ct_xfrlen = 0;
1018 	cto->ct_seg_count = 0;
1019 	cto->ct_header.rqs_entry_count = 1;
1020 	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1021 
1022 	if (nseg == 0) {
1023 		cto->ct_header.rqs_seqno = 1;
1024 		ISP_TDQE(mp->isp, "tdma_mk[no data]", *mp->iptrp, cto);
1025 		if (isp_tdebug) {
1026 			printf("%s:CTIO lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1027 			    "0x%x res %d\n", mp->isp->isp_name,
1028 			    csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags,
1029 			    cto->ct_status, cto->ct_scsi_status, cto->ct_resid);
1030 		}
1031 		ISP_SWIZ_CTIO(mp->isp, cto, cto);
1032 		return;
1033 	}
1034 
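	/*
	 * Each CTIO can carry at most ISP_RQDSEG data segments, so
	 * compute how many CTIOs this transfer needs, rounding up.
	 */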
1035 	nctios = nseg / ISP_RQDSEG;
1036 	if (nseg % ISP_RQDSEG) {
1037 		nctios++;
1038 	}
1039 
1040 	/*
1041 	 * Save handle, and potentially any SCSI status, which we'll reinsert
1042 	 * on the last CTIO we're going to send.
1043 	 */
1044 	handle = cto->ct_reserved;
1045 	cto->ct_reserved = 0;
1046 	cto->ct_header.rqs_seqno = 0;
1047 	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1048 
1049 	if (send_status) {
1050 		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1051 		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1052 		/*
1053 		 * Preserve residual.
1054 		 */
1055 		resid = cto->ct_resid;
1056 
1057 		/*
1058 		 * Save actual SCSI status.
1059 		 */
1060 		scsi_status = cto->ct_scsi_status;
1061 
1062 		/*
1063 		 * We can't do a status at the same time as a data CTIO, so
1064 		 * we need to synthesize an extra CTIO at this level.
1065 		 */
1066 		nctios++;
1067 	} else {
1068 		sflags = scsi_status = resid = 0;
1069 	}
1070 
1071 	totxfr = cto->ct_resid = 0;
1072 	cto->ct_scsi_status = 0;
1073 
1074 	pci = (struct isp_pcisoftc *)mp->isp;
1075 	dp = &pci->dmaps[isp_handle_index(handle)];
1076 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1077 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1078 	} else {
1079 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1080 	}
1081 
1083 	while (nctios--) {
1084 		int seglim;
1085 
1086 		seglim = nseg;
1087 		if (seglim) {
1088 			int seg;
1089 
1090 			if (seglim > ISP_RQDSEG)
1091 				seglim = ISP_RQDSEG;
1092 
1093 			for (seg = 0; seg < seglim; seg++, nseg--) {
1094 				/*
1095 				 * Unlike normal initiator commands, we don't
1096 				 * do any swizzling here.
1097 				 */
1098 				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
1099 				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
1100 				cto->ct_xfrlen += dm_segs->ds_len;
1101 				totxfr += dm_segs->ds_len;
1102 				dm_segs++;
1103 			}
1104 			cto->ct_seg_count = seg;
1105 		} else {
1106 			/*
1107 			 * This case should only happen when we're sending an
1108 			 * extra CTIO with final status.
1109 			 */
1110 			if (send_status == 0) {
1111 				printf("%s: tdma_mk ran out of segments\n",
1112 				       mp->isp->isp_name);
1113 				mp->error = EINVAL;
1114 				return;
1115 			}
1116 		}
1117 
1118 		/*
1119 		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
1120 		 * ct_tagtype, and ct_timeout have been carried over
1121 		 * unchanged from what our caller had set.
1122 		 *
1123 		 * The dataseg fields and the seg_count fields we just got
1124 		 * through setting. The data direction we've preserved all
1125 		 * along and only clear it if we're now sending status.
1126 		 */
1127 
1128 		if (nctios == 0) {
1129 			/*
1130 			 * We're the last in a sequence of CTIOs, so mark
1131 			 * this CTIO and save the handle to the CCB such that
1132 			 * when this CTIO completes we can free dma resources
1133 			 * and do whatever else we need to do to finish the
1134 			 * rest of the command.
1135 			 */
1136 			cto->ct_reserved = handle;
1137 			cto->ct_header.rqs_seqno = 1;
1138 
1139 			if (send_status) {
1140 				cto->ct_scsi_status = scsi_status;
1141 				cto->ct_flags |= sflags | CT_NO_DATA;
1142 				cto->ct_resid = resid;
1143 			}
1144 			if (isp_tdebug && send_status) {
1145 				printf("%s:CTIO lun%d for ID%d ct_flags 0x%x "
1146 				    "scsi_status 0x%x res %d\n",
1147 				    mp->isp->isp_name, csio->ccb_h.target_lun,
1148 				    cto->ct_iid, cto->ct_flags,
1149 				    cto->ct_scsi_status, cto->ct_resid);
1150 			} else if (isp_tdebug) {
1151 				printf("%s:CTIO lun%d for ID%d ct_flags 0x%x\n",
1152 				    mp->isp->isp_name, csio->ccb_h.target_lun,
1153 				    cto->ct_iid, cto->ct_flags);
1154 			}
1155 			ISP_TDQE(mp->isp, "last tdma_mk", *mp->iptrp, cto);
1156 			ISP_SWIZ_CTIO(mp->isp, cto, cto);
1157 		} else {
1158 			ct_entry_t     *octo = cto;
1159 
1160 			/*
1161 			 * Make sure handle fields are clean
1162 			 */
1163 			cto->ct_reserved = 0;
1164 			cto->ct_header.rqs_seqno = 0;
1165 
1166 			if (isp_tdebug) {
1167 				printf("%s:CTIO lun%d for ID%d ct_flags 0x%x\n",
1168 				    mp->isp->isp_name, csio->ccb_h.target_lun,
1169 				    cto->ct_iid, cto->ct_flags);
1170 			}
1171 			ISP_TDQE(mp->isp, "tdma_mk", *mp->iptrp, cto);
1172 
1173 			/*
1174 			 * Get a new CTIO
1175 			 */
1176 			cto = (ct_entry_t *)
1177 			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1178 			*mp->iptrp =
1179 			    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN);
1180 			if (*mp->iptrp == mp->optr) {
1181 				printf("%s: Queue Overflow in tdma_mk\n",
1182 				    mp->isp->isp_name);
1183 				mp->error = MUSHERR_NOQENTRIES;
1184 				return;
1185 			}
1186 			/*
1187 			 * Fill in the new CTIO with info from the old one.
1188 			 */
1189 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1190 			cto->ct_header.rqs_entry_count = 1;
1191 			cto->ct_header.rqs_flags = 0;
1192 			cto->ct_lun = octo->ct_lun;
1193 			cto->ct_iid = octo->ct_iid;
1194 			cto->ct_reserved2 = octo->ct_reserved2;
1195 			cto->ct_tgt = octo->ct_tgt;
1196 			cto->ct_flags = octo->ct_flags;
1197 			cto->ct_status = 0;
1198 			cto->ct_scsi_status = 0;
1199 			cto->ct_tag_val = octo->ct_tag_val;
1200 			cto->ct_tag_type = octo->ct_tag_type;
1201 			cto->ct_xfrlen = 0;
1202 			cto->ct_resid = 0;
1203 			cto->ct_timeout = octo->ct_timeout;
1204 			cto->ct_seg_count = 0;
1205 			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1206 			/*
1207 			 * Now swizzle the old one for the consumption of the
1208 			 * chip.
1209 			 */
1210 			ISP_SWIZ_CTIO(mp->isp, octo, octo);
1211 		}
1212 	}
1213 }
1214 
1215 static void
1216 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1217 {
1218 	mush_t *mp;
1219 	struct ccb_scsiio *csio;
1220 	struct isp_pcisoftc *pci;
1221 	bus_dmamap_t *dp;
1222 	ct2_entry_t *cto;
1223 	u_int16_t scsi_status, send_status, send_sense;
1224 	u_int32_t handle, totxfr, datalen;
1225 	u_int8_t sense[QLTM_SENSELEN];
1226 	int nctios;
1227 
1228 	mp = (mush_t *) arg;
1229 	if (error) {
1230 		mp->error = error;
1231 		return;
1232 	}
1233 
1234 	csio = mp->cmd_token;
1235 	cto = mp->rq;
1236 
1237 	if (nseg == 0) {
1238 		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1239 			printf("%s: dma2_tgt_fc, a status CTIO2 without MODE1 "
1240 			    "set (0x%x)\n", mp->isp->isp_name, cto->ct_flags);
1241 			mp->error = EINVAL;
1242 			return;
1243 		}
1244 		cto->ct_header.rqs_entry_count = 1;
1245 		cto->ct_header.rqs_seqno = 1;
1246 		/* ct_reserved contains the handle set by caller */
1247 		/*
1248 		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1249 		 * flags to NO DATA and clear relative offset flags.
1250 		 * We preserve the ct_resid and the response area.
1251 		 */
1252 		cto->ct_flags |= CT2_NO_DATA;
1253 		if (cto->ct_resid > 0)
1254 			cto->ct_flags |= CT2_DATA_UNDER;
1255 		else if (cto->ct_resid < 0)
1256 			cto->ct_flags |= CT2_DATA_OVER;
1257 		cto->ct_seg_count = 0;
1258 		cto->ct_reloff = 0;
1259 		ISP_TDQE(mp->isp, "dma2_tgt_fc[no data]", *mp->iptrp, cto);
1260 		if (isp_tdebug) {
1261 			scsi_status = cto->rsp.m1.ct_scsi_status;
1262 			printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x "
1263 			    "sts 0x%x ssts 0x%x res %d\n", mp->isp->isp_name,
1264 			    cto->ct_rxid, csio->ccb_h.target_lun, cto->ct_iid,
1265 			    cto->ct_flags, cto->ct_status,
1266 			    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1267 		}
1268 		ISP_SWIZ_CTIO2(mp->isp, cto, cto);
1269 		return;
1270 	}
1271 
1272 	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1273 		printf("%s: dma2_tgt_fc, a data CTIO2 without MODE0 set "
1274 		    "(0x%x)\n", mp->isp->isp_name, cto->ct_flags);
1275 		mp->error = EINVAL;
1276 		return;
1277 	}
1278 
1280 	nctios = nseg / ISP_RQDSEG_T2;
1281 	if (nseg % ISP_RQDSEG_T2) {
1282 		nctios++;
1283 	}
1284 
1285 	/*
1286 	 * Save the handle, status, reloff, and residual. We'll reinsert the
1287 	 * handle into the last CTIO2 we're going to send, and reinsert status
1288 	 * and residual (and possibly sense data) if that's to be sent as well.
1289 	 *
1290 	 * We preserve ct_reloff and adjust it for each data CTIO2 we send past
1291 	 * the first one. This is needed so that the FCP DATA IUs being sent
1292 	 * out have the correct offset (they can arrive at the other end out
1293 	 * of order).
1294 	 */
1295 
1296 	handle = cto->ct_reserved;
1297 	cto->ct_reserved = 0;
1298 
1299 	if ((send_status = (cto->ct_flags & CT2_SENDSTATUS)) != 0) {
1300 		cto->ct_flags &= ~CT2_SENDSTATUS;
1301 
1302 		/*
1303 		 * Preserve residual, which is actually the total count.
1304 		 */
1305 		datalen = cto->ct_resid;
1306 
1307 		/*
1308 		 * Save actual SCSI status. We'll reinsert the
1309 		 * CT2_SNSLEN_VALID later if appropriate.
1310 		 */
1311 		scsi_status = cto->rsp.m0.ct_scsi_status & 0xff;
1312 		send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID;
1313 
1314 		/*
1315 		 * If we're sending status and have a CHECK CONDITION and
1316 		 * have sense data, we send one more CTIO2 with just the
1317 		 * status and sense data. The upper layers have stashed
1318 		 * the sense data in the dataseg structure for us.
1319 		 */
1320 
1321 		if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND &&
1322 		    send_sense) {
1323 			bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN);
1324 			nctios++;
1325 		}
1326 	} else {
1327 		scsi_status = send_sense = datalen = 0;
1328 	}
1329 
1330 	totxfr = cto->ct_resid = 0;
1332 	bzero(&cto->rsp, sizeof (cto->rsp));
1333 
1334 	pci = (struct isp_pcisoftc *)mp->isp;
1335 	dp = &pci->dmaps[isp_handle_index(handle)];
1336 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1337 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1338 	} else {
1339 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1340 	}
1341 
1342 	while (nctios--) {
1343 		int seg, seglim;
1344 
1345 		seglim = nseg;
1346 		if (seglim) {
1347 			if (seglim > ISP_RQDSEG_T2)
1348 				seglim = ISP_RQDSEG_T2;
1349 
1350 			for (seg = 0; seg < seglim; seg++, nseg--) {
1351 				cto->rsp.m0.ct_dataseg[seg].ds_base =
1352 				    dm_segs->ds_addr;
1353 				cto->rsp.m0.ct_dataseg[seg].ds_count =
1354 				    dm_segs->ds_len;
1355 				cto->rsp.m0.ct_xfrlen += dm_segs->ds_len;
1356 				totxfr += dm_segs->ds_len;
1357 				dm_segs++;
1358 			}
1359 			cto->ct_seg_count = seg;
1360 		} else {
1361 			/*
1362 			 * This case should only happen when we're sending a
1363 			 * synthesized MODE1 final status with sense data.
1364 			 */
1365 			if (send_sense == 0) {
1366 				printf("%s: dma2_tgt_fc ran out of segments, "
1367 				    "no SENSE DATA\n", mp->isp->isp_name);
1368 				mp->error = EINVAL;
1369 				return;
1370 			}
1371 		}
1372 
1373 		/*
1374 		 * At this point, the fields ct_lun, ct_iid, ct_rxid,
1375 		 * ct_timeout have been carried over unchanged from what
1376 		 * our caller had set.
1377 		 *
1378 		 * The field ct_reloff is either what the caller set, or
1379 		 * what we've added to below.
1380 		 *
1381 		 * The dataseg fields and the seg_count fields we just got
1382 		 * through setting. The data direction we've preserved all
1383 		 * along and only clear it if we're sending a MODE1 status
1384 		 * as the last CTIO.
1385 		 *
1386 		 */
1387 
1388 		if (nctios == 0) {
1389 
1390 			/*
1391 			 * We're the last in a sequence of CTIO2s, so mark this
1392 			 * CTIO2 and save the handle to the CCB such that when
1393 			 * this CTIO2 completes we can free dma resources and
1394 			 * do whatever else we need to do to finish the rest
1395 			 * of the command.
1396 			 */
1397 
1398 			cto->ct_reserved = handle;
1399 			cto->ct_header.rqs_seqno = 1;
1400 
1401 			if (send_status) {
1402 				if (send_sense) {
1403 					bcopy(sense, cto->rsp.m1.ct_resp,
1404 					    QLTM_SENSELEN);
1405 					cto->rsp.m1.ct_senselen =
1406 					    QLTM_SENSELEN;
1407 					scsi_status |= CT2_SNSLEN_VALID;
1408 					cto->rsp.m1.ct_scsi_status =
1409 					    scsi_status;
1410 					cto->ct_flags &= CT2_FLAG_MMASK;
1411 					cto->ct_flags |= CT2_FLAG_MODE1 |
1412 					    CT2_NO_DATA | CT2_SENDSTATUS;
1413 				} else {
1414 					cto->rsp.m0.ct_scsi_status =
1415 					    scsi_status;
1416 					cto->ct_flags |= CT2_SENDSTATUS;
1417 				}
1418 				/*
1419 				 * Get 'real' residual and set flags based
1420 				 * on it.
1421 				 */
1422 				cto->ct_resid = datalen - totxfr;
1423 				if (cto->ct_resid > 0)
1424 					cto->ct_flags |= CT2_DATA_UNDER;
1425 				else if (cto->ct_resid < 0)
1426 					cto->ct_flags |= CT2_DATA_OVER;
1427 			}
1428 			ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto);
1429 			if (isp_tdebug) {
1430 				printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs "
1431 				    "0x%x sts 0x%x ssts 0x%x res %d\n",
1432 				    mp->isp->isp_name, cto->ct_rxid,
1433 				    csio->ccb_h.target_lun, (int) cto->ct_iid,
1434 				    cto->ct_flags, cto->ct_status,
1435 				    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1436 			}
1437 			ISP_SWIZ_CTIO2(mp->isp, cto, cto);
1438 		} else {
1439 			ct2_entry_t *octo = cto;
1440 
1441 			/*
1442 			 * Make sure handle fields are clean
1443 			 */
1444 			cto->ct_reserved = 0;
1445 			cto->ct_header.rqs_seqno = 0;
1446 
1447 			ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto);
1448 			if (isp_tdebug) {
1449 				printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs "
1450 				    "0x%x\n", mp->isp->isp_name, cto->ct_rxid,
1451 				    csio->ccb_h.target_lun, (int) cto->ct_iid,
1452 				    cto->ct_flags);
1453 			}
1454 			/*
1455 			 * Get a new CTIO2
1456 			 */
1457 			cto = (ct2_entry_t *)
1458 			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1459 			*mp->iptrp =
1460 			    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN);
1461 			if (*mp->iptrp == mp->optr) {
1462 				printf("%s: Queue Overflow in dma2_tgt_fc\n",
1463 				    mp->isp->isp_name);
1464 				mp->error = MUSHERR_NOQENTRIES;
1465 				return;
1466 			}
1467 
1468 			/*
1469 			 * Fill in the new CTIO2 with info from the old one.
1470 			 */
1471 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1472 			cto->ct_header.rqs_entry_count = 1;
1473 			cto->ct_header.rqs_flags = 0;
1474 			/* ct_header.rqs_seqno && ct_reserved done later */
1475 			cto->ct_lun = octo->ct_lun;
1476 			cto->ct_iid = octo->ct_iid;
1477 			cto->ct_rxid = octo->ct_rxid;
1478 			cto->ct_flags = octo->ct_flags;
1479 			cto->ct_status = 0;
1480 			cto->ct_resid = 0;
1481 			cto->ct_timeout = octo->ct_timeout;
1482 			cto->ct_seg_count = 0;
1483 			/*
1484 			 * Adjust the new relative offset by the amount which
1485 			 * is recorded in the data segment of the old CTIO2 we
1486 			 * just finished filling out.
1487 			 */
1488 			cto->ct_reloff = octo->ct_reloff +
			    octo->rsp.m0.ct_xfrlen;
1489 			bzero(&cto->rsp, sizeof (cto->rsp));
1490 			ISP_SWIZ_CTIO2(mp->isp, cto, cto);
1491 		}
1492 	}
1493 }
1494 #endif
1495 
1496 static void dma2 __P((void *, bus_dma_segment_t *, int, int));
1497 
1498 static void
1499 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1500 {
1501 	mush_t *mp;
1502 	struct ccb_scsiio *csio;
1503 	struct isp_pcisoftc *pci;
1504 	bus_dmamap_t *dp;
1505 	bus_dma_segment_t *eseg;
1506 	ispreq_t *rq;
1507 	ispcontreq_t *crq;
1508 	int seglim, datalen;
1509 
1510 	mp = (mush_t *) arg;
1511 	if (error) {
1512 		mp->error = error;
1513 		return;
1514 	}
1515 
1516 	if (nseg < 1) {
1517 		printf("%s: bad segment count (%d)\n", mp->isp->isp_name, nseg);
1518 		mp->error = EFAULT;
1519 		return;
1520 	}
1521 	csio = mp->cmd_token;
1522 	rq = mp->rq;
1523 	pci = (struct isp_pcisoftc *)mp->isp;
1524 	dp = &pci->dmaps[isp_handle_index(rq->req_handle)];
1525 
1526 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1527 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1528 	} else {
1529 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1530 	}
1531 
1532 	datalen = XS_XFRLEN(csio);
1533 
1534 	/*
1535 	 * We're passed an initial partially filled in entry that
1536 	 * has most fields filled in except for data transfer
1537 	 * related values.
1538 	 *
1539 	 * Our job is to fill in the initial request queue entry and
1540 	 * then to start allocating and filling in continuation entries
1541 	 * until we've covered the entire transfer.
1542 	 */
1543 
1544 	if (IS_FC(mp->isp)) {
1545 		seglim = ISP_RQDSEG_T2;
1546 		((ispreqt2_t *)rq)->req_totalcnt = datalen;
1547 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1548 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
1549 		} else {
1550 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
1551 		}
1552 	} else {
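		/*
		 * Extended (> 12 byte) CDBs take over the room normally
		 * used for data segments in the request entry, so all
		 * segments have to go into continuation entries.
		 */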
1553 		if (csio->cdb_len > 12) {
1554 			seglim = 0;
1555 		} else {
1556 			seglim = ISP_RQDSEG;
1557 		}
1558 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1559 			rq->req_flags |= REQFLAG_DATA_IN;
1560 		} else {
1561 			rq->req_flags |= REQFLAG_DATA_OUT;
1562 		}
1563 	}
1564 
1565 	eseg = dm_segs + nseg;
1566 
1567 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
1568 		if (IS_FC(mp->isp)) {
1569 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1570 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
1571 			    dm_segs->ds_addr;
1572 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
1573 			    dm_segs->ds_len;
1574 		} else {
1575 			rq->req_dataseg[rq->req_seg_count].ds_base =
1576 				dm_segs->ds_addr;
1577 			rq->req_dataseg[rq->req_seg_count].ds_count =
1578 				dm_segs->ds_len;
1579 		}
1580 		datalen -= dm_segs->ds_len;
1581 #if	0
1582 		if (IS_FC(mp->isp)) {
1583 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1584 			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
1585 			    mp->isp->isp_name, rq->req_seg_count,
1586 			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
1587 			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
1588 		} else {
1589 			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
1590 			    mp->isp->isp_name, rq->req_seg_count,
1591 			    rq->req_dataseg[rq->req_seg_count].ds_count,
1592 			    rq->req_dataseg[rq->req_seg_count].ds_base);
1593 		}
1594 #endif
1595 		rq->req_seg_count++;
1596 		dm_segs++;
1597 	}
1598 
1599 	while (datalen > 0 && dm_segs != eseg) {
1600 		crq = (ispcontreq_t *)
1601 		    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1602 		*mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN);
1603 		if (*mp->iptrp == mp->optr) {
1604 #if	0
1605 			printf("%s: Request Queue Overflow++\n",
1606 			    mp->isp->isp_name);
1607 #endif
1608 			mp->error = MUSHERR_NOQENTRIES;
1609 			return;
1610 		}
1611 		rq->req_header.rqs_entry_count++;
1612 		bzero((void *)crq, sizeof (*crq));
1613 		crq->req_header.rqs_entry_count = 1;
1614 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1615 
1616 		seglim = 0;
1617 		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
1618 			crq->req_dataseg[seglim].ds_base =
1619 			    dm_segs->ds_addr;
1620 			crq->req_dataseg[seglim].ds_count =
1621 			    dm_segs->ds_len;
1622 #if	0
1623 			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
1624 			    mp->isp->isp_name, rq->req_header.rqs_entry_count-1,
1625 			    seglim, crq->req_dataseg[seglim].ds_count,
1626 			    crq->req_dataseg[seglim].ds_base);
1627 #endif
1628 			datalen -= dm_segs->ds_len;
1629 			rq->req_seg_count++;
1630 			dm_segs++;
1631 			seglim++;
1632 		}
1633 	}
1634 }
1635 
1636 static int
1637 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
1638 	u_int16_t *iptrp, u_int16_t optr)
1639 {
1640 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1641 	bus_dmamap_t *dp = NULL;
1642 	mush_t mush, *mp;
1643 	void (*eptr) __P((void *, bus_dma_segment_t *, int, int));
1644 
1645 #ifdef	ISP_TARGET_MODE
1646 	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1647 		if (IS_FC(isp)) {
1648 			eptr = tdma_mkfc;
1649 		} else {
1650 			eptr = tdma_mk;
1651 		}
1652 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1653 		    (csio->dxfer_len == 0)) {
1654 			rq->req_seg_count = 1;
1655 			mp = &mush;
1656 			mp->isp = isp;
1657 			mp->cmd_token = csio;
1658 			mp->rq = rq;
1659 			mp->iptrp = iptrp;
1660 			mp->optr = optr;
1661 			mp->error = 0;
1662 			(*eptr)(mp, NULL, 0, 0);
1663 			goto exit;
1664 		}
1665 	} else
1666 #endif
1667 	eptr = dma2;
1668 
1669 	/*
1670 	 * NB: if we need to do request queue entry swizzling,
1671 	 * NB: this is where it would need to be done for cmds
1672 	 * NB: that move no data. For commands that move data,
1673 	 * NB: swizzling would take place in those functions.
1674 	 */
1675 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1676 	    (csio->dxfer_len == 0)) {
1677 		rq->req_seg_count = 1;
1678 		return (CMD_QUEUED);
1679 	}
1680 
1681 	/*
1682 	 * Do a virtual grapevine step to collect info for
1683 	 * the callback dma allocation that we have to use...
1684 	 */
1685 	mp = &mush;
1686 	mp->isp = isp;
1687 	mp->cmd_token = csio;
1688 	mp->rq = rq;
1689 	mp->iptrp = iptrp;
1690 	mp->optr = optr;
1691 	mp->error = 0;
1692 
1693 	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1694 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1695 			int error, s;
1696 			dp = &pci->dmaps[isp_handle_index(rq->req_handle)];
1697 			s = splsoftvm();
1698 			error = bus_dmamap_load(pci->parent_dmat, *dp,
1699 			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
1700 			if (error == EINPROGRESS) {
1701 				bus_dmamap_unload(pci->parent_dmat, *dp);
1702 				mp->error = EINVAL;
1703 				printf("%s: deferred dma allocation not "
1704 				    "supported\n", isp->isp_name);
1705 			} else if (error && mp->error == 0) {
1706 #ifdef	DIAGNOSTIC
1707 				printf("%s: error %d in dma mapping code\n",
1708 				    isp->isp_name, error);
1709 #endif
1710 				mp->error = error;
1711 			}
1712 			splx(s);
1713 		} else {
1714 			/* Pointer to physical buffer */
1715 			struct bus_dma_segment seg;
1716 			seg.ds_addr = (bus_addr_t)csio->data_ptr;
1717 			seg.ds_len = csio->dxfer_len;
1718 			(*eptr)(mp, &seg, 1, 0);
1719 		}
1720 	} else {
1721 		struct bus_dma_segment *segs;
1722 
1723 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1724 			printf("%s: Physical segment pointers unsupported\n",
1725 				isp->isp_name);
1726 			mp->error = EINVAL;
1727 		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1728 			printf("%s: Virtual segment addresses unsupported\n",
1729 				isp->isp_name);
1730 			mp->error = EINVAL;
1731 		} else {
1732 			/* Just use the segments provided */
1733 			segs = (struct bus_dma_segment *) csio->data_ptr;
1734 			(*eptr)(mp, segs, csio->sglist_cnt, 0);
1735 		}
1736 	}
1737 #ifdef	ISP_TARGET_MODE
1738 exit:
1739 #endif
1740 	if (mp->error) {
1741 		int retval = CMD_COMPLETE;
1742 		if (mp->error == MUSHERR_NOQENTRIES) {
1743 			retval = CMD_EAGAIN;
1744 		} else if (mp->error == EFBIG) {
1745 			XS_SETERR(csio, CAM_REQ_TOO_BIG);
1746 		} else if (mp->error == EINVAL) {
1747 			XS_SETERR(csio, CAM_REQ_INVALID);
1748 		} else {
1749 			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
1750 		}
1751 		return (retval);
1752 	} else {
1753 		/*
1754 		 * Check to see if we weren't cancelled while sleeping on
1755 		 * getting DMA resources...
1756 		 */
1757 		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1758 			if (dp) {
1759 				bus_dmamap_unload(pci->parent_dmat, *dp);
1760 			}
1761 			return (CMD_COMPLETE);
1762 		}
1763 		return (CMD_QUEUED);
1764 	}
1765 }
1766 
1767 static void
1768 isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *xs, u_int32_t handle)
1769 {
1770 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1771 	bus_dmamap_t *dp = &pci->dmaps[isp_handle_index(handle)];
1772 	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1773 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
1774 	} else {
1775 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
1776 	}
1777 	bus_dmamap_unload(pci->parent_dmat, *dp);
1778 }
1779 
1780 
1781 static void
1782 isp_pci_reset1(struct ispsoftc *isp)
1783 {
1784 	/* Make sure the BIOS is disabled */
1785 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1786 	/* and enable interrupts */
1787 	ENABLE_INTS(isp);
1788 }
1789 
1790 static void
1791 isp_pci_dumpregs(struct ispsoftc *isp)
1792 {
1793 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1794 	printf("%s: PCI Command/Status=%x\n", pci->pci_isp.isp_name,
1795 	    pci_read_config(pci->pci_dev, PCIR_COMMAND, 4));
1796 }
1797