xref: /freebsd/sys/dev/isp/isp_pci.c (revision a79b71281cd63ad7a6cc43a6d5673a2510b51630)
1 /* $FreeBSD$ */
2 /*
3  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4  * FreeBSD Version.
5  *
6  *---------------------------------------
7  * Copyright (c) 1997, 1998, 1999 by Matthew Jacob
8  * NASA/Ames Research Center
9  * All rights reserved.
10  *---------------------------------------
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice immediately at the beginning of the file, without modification,
17  *    this list of conditions, and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. The name of the author may not be used to endorse or promote products
22  *    derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
28  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/module.h>
41 #include <sys/bus.h>
42 
43 #include <pci/pcireg.h>
44 #include <pci/pcivar.h>
45 
46 #include <machine/bus_memio.h>
47 #include <machine/bus_pio.h>
48 #include <machine/bus.h>
49 #include <machine/resource.h>
50 #include <machine/clock.h>
51 #include <sys/rman.h>
52 #include <sys/malloc.h>
53 
54 #include <dev/isp/isp_freebsd.h>
55 
56 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
57 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
58 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
59 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
60 static int isp_pci_mbxdma __P((struct ispsoftc *));
61 static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
62 	ispreq_t *, u_int16_t *, u_int16_t));
63 static void
64 isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));
65 
66 static void isp_pci_reset1 __P((struct ispsoftc *));
67 static void isp_pci_dumpregs __P((struct ispsoftc *));
68 
69 #ifndef	ISP_CODE_ORG
70 #define	ISP_CODE_ORG		0x1000
71 #endif
72 
/*
 * Bus vector for 1020/1040-class SCSI adapters.  Initializer order must
 * match struct ispmdvec (declared in isp_freebsd.h / ispvar.h); only the
 * slots identifiable from this file are annotated.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,		/* register read */
	isp_pci_wr_reg,		/* register write */
	isp_pci_mbxdma,		/* allocate/map request+result queue DMA */
	isp_pci_dmasetup,	/* per-command DMA setup */
	isp_pci_dmateardown,	/* per-command DMA teardown */
	NULL,
	isp_pci_reset1,		/* post-reset fixup */
	isp_pci_dumpregs,	/* diagnostic register dump */
	NULL,			/* presumably dv_ispfw: f/w image, filled via isp_get_firmware_p -- see attach */
	0,
	ISP_CODE_ORG,		/* f/w load origin -- TODO confirm slot meaning */
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,	/* BIU CONF1 setting */
	0
};
89 
/*
 * Bus vector for 1080/1240/1280-class adapters: same layout as mdvec but
 * with the bank-switching 1080 register accessors.
 */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,	/* register read (SXP/DMA bank aware) */
	isp_pci_wr_reg_1080,	/* register write (SXP/DMA bank aware) */
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,			/* presumably dv_ispfw: f/w image slot */
	0,
	ISP_CODE_ORG,		/* f/w load origin -- TODO confirm slot meaning */
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
106 
107 static struct ispmdvec mdvec_12160 = {
108 	isp_pci_rd_reg_1080,
109 	isp_pci_wr_reg_1080,
110 	isp_pci_mbxdma,
111 	isp_pci_dmasetup,
112 	isp_pci_dmateardown,
113 	NULL,
114 	isp_pci_reset1,
115 	isp_pci_dumpregs,
116 	NULL,
117 	0,
118 	NULL,
119 	0,
120 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
121 	0
122 };
123 
/*
 * Bus vector for the 2100 Fibre Channel adapter; no BIU CONF1 burst/FIFO
 * bits are set (slot 13 is 0, unlike the parallel-SCSI vectors).
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,			/* presumably dv_ispfw: f/w image slot */
	0,
	ISP_CODE_ORG,		/* f/w load origin -- TODO confirm slot meaning */
	0,
	0,
	0
};
140 
/*
 * Bus vector for the 2200 Fibre Channel adapter; identical to mdvec_2100
 * in content (the firmware image slot gets its own value via
 * isp_get_firmware_p at attach time).
 */
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,			/* presumably dv_ispfw: f/w image slot */
	0,
	ISP_CODE_ORG,		/* f/w load origin -- TODO confirm slot meaning */
	0,
	0,
	0
};
157 
158 #ifndef	PCIM_CMD_INVEN
159 #define	PCIM_CMD_INVEN			0x10
160 #endif
161 #ifndef	PCIM_CMD_BUSMASTEREN
162 #define	PCIM_CMD_BUSMASTEREN		0x0004
163 #endif
164 #ifndef	PCIM_CMD_PERRESPEN
165 #define	PCIM_CMD_PERRESPEN		0x0040
166 #endif
167 #ifndef	PCIM_CMD_SEREN
168 #define	PCIM_CMD_SEREN			0x0100
169 #endif
170 
171 #ifndef	PCIR_COMMAND
172 #define	PCIR_COMMAND			0x04
173 #endif
174 
175 #ifndef	PCIR_CACHELNSZ
176 #define	PCIR_CACHELNSZ			0x0c
177 #endif
178 
179 #ifndef	PCIR_LATTIMER
180 #define	PCIR_LATTIMER			0x0d
181 #endif
182 
183 #ifndef	PCIR_ROMADDR
184 #define	PCIR_ROMADDR			0x30
185 #endif
186 
187 #ifndef	PCI_VENDOR_QLOGIC
188 #define	PCI_VENDOR_QLOGIC		0x1077
189 #endif
190 
191 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
192 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
193 #endif
194 
195 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
196 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
197 #endif
198 
199 #ifndef	PCI_PRODUCT_QLOGIC_ISP12160
200 #define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
201 #endif
202 
203 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
204 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
205 #endif
206 
207 #ifndef	PCI_PRODUCT_QLOGIC_ISP1280
208 #define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
209 #endif
210 
211 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
212 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
213 #endif
214 
215 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
216 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
217 #endif
218 
219 #define	PCI_QLOGIC_ISP1020	\
220 	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
221 
222 #define	PCI_QLOGIC_ISP1080	\
223 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
224 
225 #define	PCI_QLOGIC_ISP12160	\
226 	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
227 
228 #define	PCI_QLOGIC_ISP1240	\
229 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
230 
231 #define	PCI_QLOGIC_ISP1280	\
232 	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
233 
234 #define	PCI_QLOGIC_ISP2100	\
235 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
236 
237 #define	PCI_QLOGIC_ISP2200	\
238 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
239 
240 #define	IO_MAP_REG	0x10
241 #define	MEM_MAP_REG	0x14
242 
243 #define	PCI_DFLT_LTNCY	0x40
244 #define	PCI_DFLT_LNSZ	0x10
245 
246 static int isp_pci_probe (device_t);
247 static int isp_pci_attach (device_t);
248 
249 /* This distinguishing define is not right, but it does work */
250 #ifdef __alpha__
251 #define IO_SPACE_MAPPING	ALPHA_BUS_SPACE_IO
252 #define MEM_SPACE_MAPPING	ALPHA_BUS_SPACE_MEM
253 #else
254 #define IO_SPACE_MAPPING	I386_BUS_SPACE_IO
255 #define MEM_SPACE_MAPPING	I386_BUS_SPACE_MEM
256 #endif
257 
/*
 * Per-instance PCI softc.  pci_isp MUST remain the first member: the
 * register accessors below cast a struct ispsoftc * directly to a
 * struct isp_pcisoftc *.
 */
struct isp_pcisoftc {
	struct ispsoftc			pci_isp;	/* common softc; must be first */
	device_t			pci_dev;	/* newbus handle */
	struct resource *		pci_reg;	/* mapped register window (I/O or memory) */
	bus_space_tag_t			pci_st;		/* bus space tag for pci_reg */
	bus_space_handle_t		pci_sh;		/* bus space handle for pci_reg */
	void *				ih;		/* interrupt cookie from bus_setup_intr */
	int16_t				pci_poff[_NREG_BLKS];	/* per-block register offsets */
	bus_dma_tag_t			parent_dmat;	/* parent tag for all DMA */
	bus_dma_tag_t			cntrol_dmat;	/* tag for queues + FC scratch */
	bus_dmamap_t			cntrol_dmap;	/* map for queues + FC scratch */
	bus_dmamap_t			*dmaps;		/* per-command maps (isp_maxcmds of them) */
};
/* Hook optionally set by a firmware module; called at attach to fetch f/w. */
ispfwfunc *isp_get_firmware_p = NULL;
272 
/* newbus glue: probe/attach methods, driver and devclass registration. */
static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);
286 
287 static int
288 isp_pci_probe(device_t dev)
289 {
290         switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
291 	case PCI_QLOGIC_ISP1020:
292 		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
293 		break;
294 	case PCI_QLOGIC_ISP1080:
295 		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
296 		break;
297 	case PCI_QLOGIC_ISP1240:
298 		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
299 		break;
300 	case PCI_QLOGIC_ISP1280:
301 		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
302 		break;
303 	case PCI_QLOGIC_ISP12160:
304 		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
305 		break;
306 	case PCI_QLOGIC_ISP2100:
307 		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
308 		break;
309 	case PCI_QLOGIC_ISP2200:
310 		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
311 		break;
312 	default:
313 		return (ENXIO);
314 	}
315 	if (device_get_unit(dev) == 0) {
316 		CFGPRINTF("Qlogic ISP Driver, FreeBSD Version %d.%d, "
317 		    "Core Version %d.%d\n",
318 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
319 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
320 	}
321 	/*
322 	 * XXXX: Here is where we might load the f/w module
323 	 * XXXX: (or increase a reference count to it).
324 	 */
325 	return (0);
326 }
327 
328 static int
329 isp_pci_attach(device_t dev)
330 {
331 	struct resource *regs, *irq;
332 	int unit, bitmap, rtp, rgd, iqd, m1, m2;
333 	u_int32_t data, cmd, linesz, psize, basetype;
334 	struct isp_pcisoftc *pcs;
335 	struct ispsoftc *isp;
336 	struct ispmdvec *mdvp;
337 	bus_size_t lim;
338 	ISP_LOCKVAL_DECL;
339 
340 	/*
341 	 * Figure out if we're supposed to skip this one.
342 	 */
343 	unit = device_get_unit(dev);
344 	if (getenv_int("isp_disable", &bitmap)) {
345 		if (bitmap & (1 << unit)) {
346 			device_printf(dev, "not configuring\n");
347 			return (ENODEV);
348 		}
349 	}
350 
351 	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
352 	if (pcs == NULL) {
353 		device_printf(dev, "cannot allocate softc\n");
354 		return (ENOMEM);
355 	}
356 	bzero(pcs, sizeof (struct isp_pcisoftc));
357 
358 	/*
359 	 * Figure out which we should try first - memory mapping or i/o mapping?
360 	 */
361 #ifdef	__alpha__
362 	m1 = PCIM_CMD_MEMEN;
363 	m2 = PCIM_CMD_PORTEN;
364 #else
365 	m1 = PCIM_CMD_PORTEN;
366 	m2 = PCIM_CMD_MEMEN;
367 #endif
368 	bitmap = 0;
369 	if (getenv_int("isp_mem_map", &bitmap)) {
370 		if (bitmap & (1 << unit)) {
371 			m1 = PCIM_CMD_MEMEN;
372 			m2 = PCIM_CMD_PORTEN;
373 		}
374 	}
375 	bitmap = 0;
376 	if (getenv_int("isp_io_map", &bitmap)) {
377 		if (bitmap & (1 << unit)) {
378 			m1 = PCIM_CMD_PORTEN;
379 			m2 = PCIM_CMD_MEMEN;
380 		}
381 	}
382 
383 	linesz = PCI_DFLT_LNSZ;
384 	irq = regs = NULL;
385 	rgd = rtp = iqd = 0;
386 
387 	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
388 	if (cmd & m1) {
389 		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
390 		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
391 		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
392 	}
393 	if (regs == NULL && (cmd & m2)) {
394 		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
395 		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
396 		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
397 	}
398 	if (regs == NULL) {
399 		device_printf(dev, "unable to map any ports\n");
400 		goto bad;
401 	}
402 	if (bootverbose)
403 		printf("isp%d: using %s space register mapping\n", unit,
404 		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
405 	pcs->pci_dev = dev;
406 	pcs->pci_reg = regs;
407 	pcs->pci_st = rman_get_bustag(regs);
408 	pcs->pci_sh = rman_get_bushandle(regs);
409 
410 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
411 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
412 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
413 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
414 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
415 	/*
416  	 * GCC!
417 	 */
418 	mdvp = &mdvec;
419 	basetype = ISP_HA_SCSI_UNKNOWN;
420 	psize = sizeof (sdparam);
421 	lim = BUS_SPACE_MAXSIZE_32BIT;
422 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
423 		mdvp = &mdvec;
424 		basetype = ISP_HA_SCSI_UNKNOWN;
425 		psize = sizeof (sdparam);
426 		lim = BUS_SPACE_MAXSIZE_24BIT;
427 	}
428 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
429 		mdvp = &mdvec_1080;
430 		basetype = ISP_HA_SCSI_1080;
431 		psize = sizeof (sdparam);
432 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
433 		    ISP1080_DMA_REGS_OFF;
434 	}
435 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
436 		mdvp = &mdvec_1080;
437 		basetype = ISP_HA_SCSI_1240;
438 		psize = 2 * sizeof (sdparam);
439 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
440 		    ISP1080_DMA_REGS_OFF;
441 	}
442 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
443 		mdvp = &mdvec_1080;
444 		basetype = ISP_HA_SCSI_1280;
445 		psize = 2 * sizeof (sdparam);
446 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
447 		    ISP1080_DMA_REGS_OFF;
448 	}
449 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
450 		mdvp = &mdvec_12160;
451 		basetype = ISP_HA_SCSI_12160;
452 		psize = 2 * sizeof (sdparam);
453 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
454 		    ISP1080_DMA_REGS_OFF;
455 	}
456 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
457 		mdvp = &mdvec_2100;
458 		basetype = ISP_HA_FC_2100;
459 		psize = sizeof (fcparam);
460 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
461 		    PCI_MBOX_REGS2100_OFF;
462 		if (pci_get_revid(dev) < 3) {
463 			/*
464 			 * XXX: Need to get the actual revision
465 			 * XXX: number of the 2100 FB. At any rate,
466 			 * XXX: lower cache line size for early revision
467 			 * XXX; boards.
468 			 */
469 			linesz = 1;
470 		}
471 	}
472 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
473 		mdvp = &mdvec_2200;
474 		basetype = ISP_HA_FC_2200;
475 		psize = sizeof (fcparam);
476 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
477 		    PCI_MBOX_REGS2100_OFF;
478 	}
479 	isp = &pcs->pci_isp;
480 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
481 	if (isp->isp_param == NULL) {
482 		device_printf(dev, "cannot allocate parameter data\n");
483 		goto bad;
484 	}
485 	bzero(isp->isp_param, psize);
486 	isp->isp_mdvec = mdvp;
487 	isp->isp_type = basetype;
488 	isp->isp_revision = pci_get_revid(dev);
489 	(void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
490 	isp->isp_osinfo.unit = unit;
491 
492 	/*
493 	 * Try and find firmware for this device.
494 	 */
495 
496 	if (isp_get_firmware_p) {
497 		int device = (int) pci_get_device(dev);
498 #ifdef	ISP_TARGET_MODE
499 		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
500 #else
501 		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
502 #endif
503 	}
504 
505 	/*
506 	 *
507 	 */
508 
509 	ISP_LOCK(isp);
510 	/*
511 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
512 	 * are set.
513 	 */
514 	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
515 		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
516 	pci_write_config(dev, PCIR_COMMAND, cmd, 1);
517 
518 	/*
519 	 * Make sure the Cache Line Size register is set sensibly.
520 	 */
521 	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
522 	if (data != linesz) {
523 		data = PCI_DFLT_LNSZ;
524 		CFGPRINTF("%s: set PCI line size to %d\n", isp->isp_name, data);
525 		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
526 	}
527 
528 	/*
529 	 * Make sure the Latency Timer is sane.
530 	 */
531 	data = pci_read_config(dev, PCIR_LATTIMER, 1);
532 	if (data < PCI_DFLT_LTNCY) {
533 		data = PCI_DFLT_LTNCY;
534 		CFGPRINTF("%s: set PCI latency to %d\n", isp->isp_name, data);
535 		pci_write_config(dev, PCIR_LATTIMER, data, 1);
536 	}
537 
538 	/*
539 	 * Make sure we've disabled the ROM.
540 	 */
541 	data = pci_read_config(dev, PCIR_ROMADDR, 4);
542 	data &= ~1;
543 	pci_write_config(dev, PCIR_ROMADDR, data, 4);
544 	ISP_UNLOCK(isp);
545 
546 	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
547 	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
548 	    255, lim, 0, &pcs->parent_dmat) != 0) {
549 		printf("%s: could not create master dma tag\n", isp->isp_name);
550 		free(isp->isp_param, M_DEVBUF);
551 		free(pcs, M_DEVBUF);
552 		return (ENXIO);
553 	}
554 
555 	iqd = 0;
556 	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
557 	    1, RF_ACTIVE | RF_SHAREABLE);
558 	if (irq == NULL) {
559 		device_printf(dev, "could not allocate interrupt\n");
560 		goto bad;
561 	}
562 
563 	if (getenv_int("isp_no_fwload", &bitmap)) {
564 		if (bitmap & (1 << unit))
565 			isp->isp_confopts |= ISP_CFG_NORELOAD;
566 	}
567 	if (getenv_int("isp_fwload", &bitmap)) {
568 		if (bitmap & (1 << unit))
569 			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
570 	}
571 	if (getenv_int("isp_no_nvram", &bitmap)) {
572 		if (bitmap & (1 << unit))
573 			isp->isp_confopts |= ISP_CFG_NONVRAM;
574 	}
575 	if (getenv_int("isp_nvram", &bitmap)) {
576 		if (bitmap & (1 << unit))
577 			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
578 	}
579 	if (getenv_int("isp_fcduplex", &bitmap)) {
580 		if (bitmap & (1 << unit))
581 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
582 	}
583 	if (getenv_int("isp_no_fcduplex", &bitmap)) {
584 		if (bitmap & (1 << unit))
585 			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
586 	}
587 	if (getenv_int("isp_nport", &bitmap)) {
588 		if (bitmap & (1 << unit))
589 			isp->isp_confopts |= ISP_CFG_NPORT;
590 	}
591 	/*
592 	 * Look for overriding WWN. This is a Node WWN so it binds to
593 	 * all FC instances. A Port WWN will be constructed from it
594 	 * as appropriate.
595 	 */
596 	if (!getenv_quad("isp_wwn", (quad_t *) &isp->isp_osinfo.default_wwn)) {
597 		int i;
598 		u_int64_t seed = (u_int64_t) (intptr_t) isp;
599 
600 		seed <<= 16;
601 		seed &= ((1LL << 48) - 1LL);
602 		/*
603 		 * This isn't very random, but it's the best we can do for
604 		 * the real edge case of cards that don't have WWNs. If
605 		 * you recompile a new vers.c, you'll get a different WWN.
606 		 */
607 		for (i = 0; version[i] != 0; i++) {
608 			seed += version[i];
609 		}
610 		/*
611 		 * Make sure the top nibble has something vaguely sensible.
612 		 */
613 		isp->isp_osinfo.default_wwn |= (4LL << 60) | seed;
614 	} else {
615 		isp->isp_confopts |= ISP_CFG_OWNWWN;
616 	}
617 	(void) getenv_int("isp_debug", &isp_debug);
618 #ifdef	ISP_TARGET_MODE
619 	(void) getenv_int("isp_tdebug", &isp_tdebug);
620 #endif
621 	ISP_LOCK(isp);
622 	if (bus_setup_intr(dev, irq, INTR_TYPE_CAM, (void (*)(void *))isp_intr,
623 	    isp, &pcs->ih)) {
624 		ISP_UNLOCK(isp);
625 		device_printf(dev, "could not setup interrupt\n");
626 		goto bad;
627 	}
628 
629 	isp_reset(isp);
630 	if (isp->isp_state != ISP_RESETSTATE) {
631 		ISP_UNLOCK(isp);
632 		goto bad;
633 	}
634 	isp_init(isp);
635 	if (isp->isp_state != ISP_INITSTATE) {
636 		/* If we're a Fibre Channel Card, we allow deferred attach */
637 		if (IS_SCSI(isp)) {
638 			isp_uninit(isp);
639 			ISP_UNLOCK(isp);
640 			goto bad;
641 		}
642 	}
643 	isp_attach(isp);
644 	if (isp->isp_state != ISP_RUNSTATE) {
645 		/* If we're a Fibre Channel Card, we allow deferred attach */
646 		if (IS_SCSI(isp)) {
647 			isp_uninit(isp);
648 			ISP_UNLOCK(isp);
649 			goto bad;
650 		}
651 	}
652 	ISP_UNLOCK(isp);
653 	/*
654 	 * XXXX: Here is where we might unload the f/w module
655 	 * XXXX: (or decrease the reference count to it).
656 	 */
657 	return (0);
658 
659 bad:
660 
661 	if (pcs && pcs->ih) {
662 		(void) bus_teardown_intr(dev, irq, pcs->ih);
663 	}
664 
665 	if (irq) {
666 		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
667 	}
668 	if (regs) {
669 		(void) bus_release_resource(dev, rtp, rgd, regs);
670 	}
671 	if (pcs) {
672 		if (pcs->pci_isp.isp_param)
673 			free(pcs->pci_isp.isp_param, M_DEVBUF);
674 		free(pcs, M_DEVBUF);
675 	}
676 	/*
677 	 * XXXX: Here is where we might unload the f/w module
678 	 * XXXX: (or decrease the reference count to it).
679 	 */
680 	return (ENXIO);
681 }
682 
683 static u_int16_t
684 isp_pci_rd_reg(isp, regoff)
685 	struct ispsoftc *isp;
686 	int regoff;
687 {
688 	u_int16_t rv;
689 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
690 	int offset, oldconf = 0;
691 
692 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
693 		/*
694 		 * We will assume that someone has paused the RISC processor.
695 		 */
696 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
697 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
698 	}
699 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
700 	offset += (regoff & 0xff);
701 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
702 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
703 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
704 	}
705 	return (rv);
706 }
707 
708 static void
709 isp_pci_wr_reg(isp, regoff, val)
710 	struct ispsoftc *isp;
711 	int regoff;
712 	u_int16_t val;
713 {
714 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
715 	int offset, oldconf = 0;
716 
717 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
718 		/*
719 		 * We will assume that someone has paused the RISC processor.
720 		 */
721 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
722 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
723 	}
724 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
725 	offset += (regoff & 0xff);
726 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
727 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
728 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
729 	}
730 }
731 
732 static u_int16_t
733 isp_pci_rd_reg_1080(isp, regoff)
734 	struct ispsoftc *isp;
735 	int regoff;
736 {
737 	u_int16_t rv, oc = 0;
738 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
739 	int offset;
740 
741 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
742 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
743 		u_int16_t tc;
744 		/*
745 		 * We will assume that someone has paused the RISC processor.
746 		 */
747 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
748 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
749 		if (regoff & SXP_BANK1_SELECT)
750 			tc |= BIU_PCI1080_CONF1_SXP1;
751 		else
752 			tc |= BIU_PCI1080_CONF1_SXP0;
753 		isp_pci_wr_reg(isp, BIU_CONF1, tc);
754 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
755 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
756 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
757 	}
758 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
759 	offset += (regoff & 0xff);
760 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
761 	if (oc) {
762 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
763 	}
764 	return (rv);
765 }
766 
767 static void
768 isp_pci_wr_reg_1080(isp, regoff, val)
769 	struct ispsoftc *isp;
770 	int regoff;
771 	u_int16_t val;
772 {
773 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
774 	int offset, oc = 0;
775 
776 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
777 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
778 		u_int16_t tc;
779 		/*
780 		 * We will assume that someone has paused the RISC processor.
781 		 */
782 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
783 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
784 		if (regoff & SXP_BANK1_SELECT)
785 			tc |= BIU_PCI1080_CONF1_SXP1;
786 		else
787 			tc |= BIU_PCI1080_CONF1_SXP0;
788 		isp_pci_wr_reg(isp, BIU_CONF1, tc);
789 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
790 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
791 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
792 	}
793 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
794 	offset += (regoff & 0xff);
795 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
796 	if (oc) {
797 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
798 	}
799 }
800 
801 static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
802 static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
803 static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
804 
/* Callback argument for the control-space bus_dmamap_load() callbacks. */
struct imush {
	struct ispsoftc *isp;	/* instance whose DMA address is being filled in */
	int error;		/* set non-zero by the callback on load failure */
};
809 
810 static void
811 isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
812 {
813 	struct imush *imushp = (struct imush *) arg;
814 	if (error) {
815 		imushp->error = error;
816 	} else {
817 		imushp->isp->isp_rquest_dma = segs->ds_addr;
818 	}
819 }
820 
821 static void
822 isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
823 {
824 	struct imush *imushp = (struct imush *) arg;
825 	if (error) {
826 		imushp->error = error;
827 	} else {
828 		imushp->isp->isp_result_dma = segs->ds_addr;
829 	}
830 }
831 
832 static void
833 isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
834 {
835 	struct imush *imushp = (struct imush *) arg;
836 	if (error) {
837 		imushp->error = error;
838 	} else {
839 		fcparam *fcp = imushp->isp->isp_param;
840 		fcp->isp_scdma = segs->ds_addr;
841 	}
842 }
843 
/*
 * Allocate and map the DMA resources shared by all commands: the command
 * handle array, per-command DMA maps, the request and result queues, and
 * (for Fibre Channel) the mailbox scratch area.  All three control areas
 * live in one contiguous allocation; base offsets are carved out below.
 *
 * Returns 0 on success (or if already done), 1 on any failure.
 *
 * NOTE(review): the later failure paths free only isp_xflist and dmaps;
 * the control dma tag/map/memory and any per-command maps already created
 * are not released, and isp_xflist is left dangling (not NULLed) -- a
 * retry after a partial failure would leak.  Confirm whether attach ever
 * retries before relying on that.
 */
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error;
	bus_size_t lim;
	struct imush im;


	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	/* Array mapping command handles back to their ISP_SCSI_XFER_T. */
	len = sizeof (ISP_SCSI_XFER_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		printf("%s: can't alloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, len);
	/* One DMA map per outstanding command. */
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF,  M_WAITOK);
	if (pci->dmaps == NULL) {
		printf("%s: can't alloc dma maps\n", isp->isp_name);
		free(isp->isp_xflist, M_DEVBUF);
		return (1);
	}

	/*
	 * Alignment boundary for the control-space tag: FC and Ultra2
	 * parts can address a full 32-bit space, older SCSI parts only
	 * 24 bits.  NOTE(review): BUS_SPACE_MAXADDR + 1 may wrap to 0 if
	 * bus_size_t is 32 bits -- confirm intended on this platform.
	 */
	if (IS_FC(isp) || IS_ULTRA2(isp))
		lim = BUS_SPACE_MAXADDR + 1;
	else
		lim = BUS_SPACE_MAXADDR_24BIT + 1;

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}
	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
		printf("%s: cannot create a dma tag for control spaces\n",
		    isp->isp_name);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}
	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
		printf("%s: cannot allocate %d bytes of CCB memory\n",
		    isp->isp_name, len);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}

	/* Request queue sits at the start of the control allocation. */
	isp->isp_rquest = base;
	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, &im, 0);
	if (im.error) {
		printf("%s: error %d loading dma map for DMA request queue\n",
		    isp->isp_name, im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}
	/* Result queue immediately follows the request queue. */
	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, &im, 0);
	if (im.error) {
		printf("%s: error %d loading dma map for DMA result queue\n",
		    isp->isp_name, im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}

	/* Create the per-command maps on the parent (data transfer) tag. */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
		if (error) {
			printf("%s: error %d creating per-cmd DMA maps\n",
			    isp->isp_name, error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}

	/* FC scratch area follows both queues in the same allocation. */
	if (IS_FC(isp)) {
		fcparam *fcp = (fcparam *) isp->isp_param;
		fcp->isp_scratch = base +
			ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN) +
			ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
		im.error = 0;
		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
		if (im.error) {
			printf("%s: error %d loading FC scratch area\n",
			    isp->isp_name, im.error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}
	return (0);
}
965 
/*
 * Argument bundle for the per-command DMA-setup callbacks (initiator and
 * target mode): carries the command being mapped, the partially built
 * request queue entry, and queue pointer state.
 */
typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;	/* the command being mapped (a ccb_scsiio in target mode) */
	void *rq;		/* partially filled-in request queue entry */
	u_int16_t *iptrp;	/* presumably the request queue in-pointer -- TODO confirm */
	u_int16_t optr;		/* presumably the request queue out-pointer -- TODO confirm */
	u_int error;		/* set non-zero on mapping failure */
} mush_t;
974 
975 #define	MUSHERR_NOQENTRIES	-2
976 
977 #ifdef	ISP_TARGET_MODE
978 /*
979  * We need to handle DMA for target mode differently from initiator mode.
980  *
981  * DMA mapping and construction and submission of CTIO Request Entries
982  * and rendevous for completion are very tightly coupled because we start
983  * out by knowing (per platform) how much data we have to move, but we
984  * don't know, up front, how many DMA mapping segments will have to be used
985  * cover that data, so we don't know how many CTIO Request Entries we
986  * will end up using. Further, for performance reasons we may want to
987  * (on the last CTIO for Fibre Channel), send status too (if all went well).
988  *
989  * The standard vector still goes through isp_pci_dmasetup, but the callback
990  * for the DMA mapping routines comes here instead with the whole transfer
991  * mapped and a pointer to a partially filled in already allocated request
992  * queue entry. We finish the job.
993  */
994 static void dma2_tgt __P((void *, bus_dma_segment_t *, int, int));
995 static void dma2_tgt_fc __P((void *, bus_dma_segment_t *, int, int));
996 
997 static void
998 dma2_tgt(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
999 {
1000 	mush_t *mp;
1001 	struct ccb_scsiio *csio;
1002 	struct isp_pcisoftc *pci;
1003 	bus_dmamap_t *dp;
1004 	u_int8_t scsi_status, send_status;
1005 	ct_entry_t *cto;
1006 	u_int32_t handle;
1007 	int nctios;
1008 
1009 	mp = (mush_t *) arg;
1010 	if (error) {
1011 		mp->error = error;
1012 		return;
1013 	}
1014 
1015 	csio = mp->cmd_token;
1016 	cto = mp->rq;
1017 
1018 	cto->ct_xfrlen = 0;
1019 	cto->ct_resid = 0;
1020 	cto->ct_seg_count = 0;
1021 	bzero(cto->ct_dataseg, sizeof (cto->ct_dataseg));
1022 	if (nseg == 0) {
1023 	 	cto->ct_header.rqs_entry_count = 1;
1024 		ISP_TDQE(mp->isp, "dma2_tgt[no data]", *mp->iptrp, cto);
1025 		if (isp_tdebug) {
1026 			printf("%s:CTIO lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1027 			    "0x%x res %u\n", mp->isp->isp_name,
1028 			    csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags,
1029 			    cto->ct_status, cto->ct_scsi_status, cto->ct_resid);
1030 		}
1031 		ISP_SWIZ_CTIO(isp, cto, cto);
1032 		return;
1033 	}
1034 
1035 	/*
1036 	 * Save handle, and potentially any SCSI status, which
1037 	 * we'll reinsert on the last CTIO we're going to send.
1038 	 */
1039 	handle = cto->ct_reserved;
1040 	cto->ct_reserved = 0;
1041 	scsi_status = cto->ct_scsi_status;
1042 	cto->ct_scsi_status = 0;
1043 	send_status = cto->ct_flags & CT_SENDSTATUS;
1044 	cto->ct_flags &= ~CT_SENDSTATUS;
1045 
1046 	nctios = nseg / ISP_RQDSEG;
1047 	if (nseg % ISP_RQDSEG) {
1048 		nctios++;
1049 	}
1050 
1051 	pci = (struct isp_pcisoftc *)mp->isp;
1052 	dp = &pci->dmaps[handle - 1];
1053 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1054 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1055 	} else {
1056 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1057 	}
1058 
1059 
1060 	while (nctios--) {
1061 		int seg, seglim;
1062 
1063 		seglim = nseg;
1064 		if (seglim > ISP_RQDSEG)
1065 			seglim = ISP_RQDSEG;
1066 
1067 		for (seg = 0; seg < seglim; seg++) {
1068 			cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
1069 			cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
1070 			cto->ct_xfrlen += dm_segs->ds_len;
1071 			dm_segs++;
1072 		}
1073 
1074 		cto->ct_seg_count = seg;
1075 		cto->ct_flags &= CT_DATAMASK;
1076 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1077 			cto->ct_flags |= CT_DATA_IN;
1078 		} else {
1079 			cto->ct_flags |= CT_DATA_OUT;
1080 		}
1081 
1082 		if (nctios == 0) {
1083 			/*
1084 			 * We're the last in a sequence of CTIOs, so mark this
1085 			 * CTIO and save the handle to the CCB such that when
1086 			 * this CTIO completes we can free dma resources and
1087 			 * do whatever else we need to do to finish the rest
1088 			 * of the command.
1089 			 */
1090 			cto->ct_header.rqs_seqno = 1;
1091 			cto->ct_reserved = handle;
1092 			cto->ct_scsi_status = scsi_status;
1093 			cto->ct_flags |= send_status;
1094 			ISP_TDQE(mp->isp, "last dma2_tgt", *mp->iptrp, cto);
1095 			if (isp_tdebug) {
1096 				printf("%s:CTIO lun %d->iid%d flgs 0x%x sts "
1097 				    "0x%x ssts 0x%x res %u\n",
1098 				    mp->isp->isp_name, csio->ccb_h.target_lun,
1099 				    cto->ct_iid, cto->ct_flags, cto->ct_status,
1100 				    cto->ct_scsi_status, cto->ct_resid);
1101 			}
1102 			ISP_SWIZ_CTIO(isp, cto, cto);
1103 		} else {
1104 			ct_entry_t *octo = cto;
1105 			cto->ct_reserved = 0;
1106 			cto->ct_header.rqs_seqno = 0;
1107 			ISP_TDQE(mp->isp, "dma2_tgt", *mp->iptrp, cto);
1108 			if (isp_tdebug) {
1109 				printf("%s:CTIO lun %d->iid%d flgs 0x%x res"
1110 				    " %u\n", mp->isp->isp_name,
1111 				    csio->ccb_h.target_lun, cto->ct_iid,
1112 				    cto->ct_flags, cto->ct_resid);
1113 			}
1114 			cto = (ct_entry_t *)
1115 			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1116 			*mp->iptrp =
1117 			    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN);
1118 			if (*mp->iptrp == mp->optr) {
1119 				printf("%s: Queue Overflow in dma2_tgt\n",
1120 				    mp->isp->isp_name);
1121 				mp->error = MUSHERR_NOQENTRIES;
1122 				return;
1123 			}
1124 			/*
1125 			 * Fill in the new CTIO with info from the old one.
1126 			 */
1127 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1128 			cto->ct_header.rqs_entry_count = 1;
1129 			cto->ct_header.rqs_flags = 0;
1130 			/* ct_header.rqs_seqno && ct_reserved filled in later */
1131 			cto->ct_lun = octo->ct_lun;
1132 			cto->ct_iid = octo->ct_iid;
1133 			cto->ct_reserved2 = octo->ct_reserved2;
1134 			cto->ct_tgt = octo->ct_tgt;
1135 			cto->ct_flags = octo->ct_flags & ~CT_DATAMASK;
1136 			cto->ct_status = 0;
1137 			cto->ct_scsi_status = 0;
1138 			cto->ct_tag_val = octo->ct_tag_val;
1139 			cto->ct_tag_type = octo->ct_tag_type;
1140 			cto->ct_xfrlen = 0;
1141 			cto->ct_resid = 0;
1142 			cto->ct_timeout = octo->ct_timeout;
1143 			cto->ct_seg_count = 0;
1144 			bzero(cto->ct_dataseg, sizeof (cto->ct_dataseg));
1145 			ISP_SWIZ_CTIO(isp, octo, octo);
1146 		}
1147 	}
1148 }
1149 
1150 static void
1151 dma2_tgt_fc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1152 {
1153 	mush_t *mp;
1154 	struct ccb_scsiio *csio;
1155 	struct isp_pcisoftc *pci;
1156 	bus_dmamap_t *dp;
1157 	ct2_entry_t *cto;
1158 	u_int16_t scsi_status, send_status, send_sense;
1159 	u_int32_t handle, totxfr;
1160 	u_int8_t sense[QLTM_SENSELEN];
1161 	int nctios;
1162 	int32_t resid;
1163 
1164 	mp = (mush_t *) arg;
1165 	if (error) {
1166 		mp->error = error;
1167 		return;
1168 	}
1169 
1170 	csio = mp->cmd_token;
1171 	cto = mp->rq;
1172 
1173 	if (nseg == 0) {
1174 		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1175 			printf("%s: dma2_tgt_fc, a status CTIO2 without MODE1 "
1176 			    "set (0x%x)\n", mp->isp->isp_name, cto->ct_flags);
1177 			mp->error = EINVAL;
1178 			return;
1179 		}
1180 	 	cto->ct_header.rqs_entry_count = 1;
1181 		/* ct_reserved contains the handle set by caller */
1182 		/*
1183 		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1184 		 * flags to NO DATA and clear relative offset flags.
1185 		 * We preserve the ct_resid and the response area.
1186 		 */
1187 		cto->ct_flags |= CT2_NO_DATA;
1188 		cto->ct_seg_count = 0;
1189 		cto->ct_reloff = 0;
1190 		ISP_TDQE(mp->isp, "dma2_tgt_fc[no data]", *mp->iptrp, cto);
1191 		if (isp_tdebug) {
1192 			scsi_status = cto->rsp.m1.ct_scsi_status;
1193 			printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x "
1194 			    "sts 0x%x ssts 0x%x res %u\n", mp->isp->isp_name,
1195 			    cto->ct_rxid, csio->ccb_h.target_lun, cto->ct_iid,
1196 			    cto->ct_flags, cto->ct_status,
1197 			    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1198 		}
1199 		ISP_SWIZ_CTIO2(isp, cto, cto);
1200 		return;
1201 	}
1202 
1203 	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1204 		printf("%s: dma2_tgt_fc, a data CTIO2 without MODE0 set "
1205 		    "(0x%x)\n\n", mp->isp->isp_name, cto->ct_flags);
1206 		mp->error = EINVAL;
1207 		return;
1208 	}
1209 
1210 
1211 	nctios = nseg / ISP_RQDSEG_T2;
1212 	if (nseg % ISP_RQDSEG_T2) {
1213 		nctios++;
1214 	}
1215 
1216 	/*
1217 	 * Save the handle, status, reloff, and residual. We'll reinsert the
1218 	 * handle into the last CTIO2 we're going to send, and reinsert status
1219 	 * and residual (and possibly sense data) if that's to be sent as well.
1220 	 *
1221 	 * We preserve ct_reloff and adjust it for each data CTIO2 we send past
1222 	 * the first one. This is needed so that the FCP DATA IUs being sent
1223 	 * out have the correct offset (they can arrive at the other end out
1224 	 * of order).
1225 	 */
1226 
1227 	handle = cto->ct_reserved;
1228 	cto->ct_reserved = 0;
1229 
1230 	if ((send_status = (cto->ct_flags & CT2_SENDSTATUS)) != 0) {
1231 		cto->ct_flags &= ~CT2_SENDSTATUS;
1232 
1233 		/*
1234 		 * Preserve residual.
1235 		 */
1236 		resid = cto->ct_resid;
1237 
1238 		/*
1239 		 * Save actual SCSI status. We'll reinsert the
1240 		 * CT2_SNSLEN_VALID later if appropriate.
1241 		 */
1242 		scsi_status = cto->rsp.m0.ct_scsi_status & 0xff;
1243 		send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID;
1244 
1245 		/*
1246 		 * If we're sending status and have a CHECK CONDTION and
1247 		 * have sense data,  we send one more CTIO2 with just the
1248 		 * status and sense data. The upper layers have stashed
1249 		 * the sense data in the dataseg structure for us.
1250 		 */
1251 
1252 		if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND &&
1253 		    send_sense) {
1254 			bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN);
1255 			nctios++;
1256 		}
1257 	} else {
1258 		scsi_status = send_sense = resid = 0;
1259 	}
1260 
1261 	totxfr = cto->ct_resid = 0;
1262 	cto->rsp.m0.ct_scsi_status = 0;
1263 	bzero(&cto->rsp, sizeof (cto->rsp));
1264 
1265 	pci = (struct isp_pcisoftc *)mp->isp;
1266 	dp = &pci->dmaps[handle - 1];
1267 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1268 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1269 	} else {
1270 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1271 	}
1272 
1273 	while (nctios--) {
1274 		int seg, seglim;
1275 
1276 		seglim = nseg;
1277 		if (seglim) {
1278 			if (seglim > ISP_RQDSEG_T2)
1279 				seglim = ISP_RQDSEG_T2;
1280 
1281 			for (seg = 0; seg < seglim; seg++) {
1282 				cto->rsp.m0.ct_dataseg[seg].ds_base =
1283 				    dm_segs->ds_addr;
1284 				cto->rsp.m0.ct_dataseg[seg].ds_count =
1285 				    dm_segs->ds_len;
1286 				cto->rsp.m0.ct_xfrlen += dm_segs->ds_len;
1287 				totxfr += dm_segs->ds_len;
1288 				dm_segs++;
1289 			}
1290 			cto->ct_seg_count = seg;
1291 		} else {
1292 			/*
1293 			 * This case should only happen when we're sending a
1294 			 * synthesized MODE1 final status with sense data.
1295 			 */
1296 			if (send_sense == 0) {
1297 				printf("%s: dma2_tgt_fc ran out of segments, "
1298 				    "no SENSE DATA\n", mp->isp->isp_name);
1299 				mp->error = EINVAL;
1300 				return;
1301 			}
1302 		}
1303 
1304 		/*
1305 		 * At this point, the fields ct_lun, ct_iid, ct_rxid,
1306 		 * ct_timeout have been carried over unchanged from what
1307 		 * our caller had set.
1308 		 *
1309 		 * The field ct_reloff is either what the caller set, or
1310 		 * what we've added to below.
1311 		 *
1312 		 * The dataseg fields and the seg_count fields we just got
1313 		 * through setting. The data direction we've preserved all
1314 		 * along and only clear it if we're sending a MODE1 status
1315 		 * as the last CTIO.
1316 		 *
1317 		 */
1318 
1319 		if (nctios == 0) {
1320 
1321 			/*
1322 			 * We're the last in a sequence of CTIO2s, so mark this
1323 			 * CTIO2 and save the handle to the CCB such that when
1324 			 * this CTIO2 completes we can free dma resources and
1325 			 * do whatever else we need to do to finish the rest
1326 			 * of the command.
1327 			 */
1328 
1329 			cto->ct_reserved = handle;
1330 			cto->ct_header.rqs_seqno = 1;
1331 
1332 			if (send_status) {
1333 				if (send_sense) {
1334 					bcopy(sense, cto->rsp.m1.ct_resp,
1335 					    QLTM_SENSELEN);
1336 					cto->rsp.m1.ct_senselen =
1337 					    QLTM_SENSELEN;
1338 					scsi_status |= CT2_SNSLEN_VALID;
1339 					cto->rsp.m1.ct_scsi_status =
1340 					    scsi_status;
1341 					cto->ct_flags &= CT2_FLAG_MMASK;
1342 					cto->ct_flags |= CT2_FLAG_MODE1 |
1343 					    CT2_NO_DATA| CT2_SENDSTATUS;
1344 				} else {
1345 					cto->rsp.m0.ct_scsi_status =
1346 					    scsi_status;
1347 					cto->ct_flags |= CT2_SENDSTATUS;
1348 				}
1349 				cto->ct_resid = resid - totxfr;
1350 			}
1351 			ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto);
1352 			if (isp_tdebug) {
1353 				printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs"
1354 				    "0x%x sts 0x%x ssts 0x%x res %u\n",
1355 				    mp->isp->isp_name, cto->ct_rxid,
1356 				    csio->ccb_h.target_lun, (int) cto->ct_iid,
1357 				    cto->ct_flags, cto->ct_status,
1358 				    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1359 			}
1360 			ISP_SWIZ_CTIO2(isp, cto, cto);
1361 		} else {
1362 			ct2_entry_t *octo = cto;
1363 
1364 			/*
1365 			 * Make sure handle fields are clean
1366 			 */
1367 			cto->ct_reserved = 0;
1368 			cto->ct_header.rqs_seqno = 0;
1369 
1370 			ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto);
1371 			if (isp_tdebug) {
1372 				printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs"
1373 				    "0x%x\n", mp->isp->isp_name, cto->ct_rxid,
1374 				    csio->ccb_h.target_lun, (int) cto->ct_iid,
1375 				    cto->ct_flags);
1376 			}
1377 			/*
1378 			 * Get a new CTIO2
1379 			 */
1380 			cto = (ct2_entry_t *)
1381 			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1382 			*mp->iptrp =
1383 			    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN);
1384 			if (*mp->iptrp == mp->optr) {
1385 				printf("%s: Queue Overflow in dma2_tgt_fc\n",
1386 				    mp->isp->isp_name);
1387 				mp->error = MUSHERR_NOQENTRIES;
1388 				return;
1389 			}
1390 
1391 			/*
1392 			 * Fill in the new CTIO2 with info from the old one.
1393 			 */
1394 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1395 			cto->ct_header.rqs_entry_count = 1;
1396 			cto->ct_header.rqs_flags = 0;
1397 			/* ct_header.rqs_seqno && ct_reserved done later */
1398 			cto->ct_lun = octo->ct_lun;
1399 			cto->ct_iid = octo->ct_iid;
1400 			cto->ct_rxid = octo->ct_rxid;
1401 			cto->ct_flags = octo->ct_flags;
1402 			cto->ct_status = 0;
1403 			cto->ct_resid = 0;
1404 			cto->ct_timeout = octo->ct_timeout;
1405 			cto->ct_seg_count = 0;
1406 			/*
1407 			 * Adjust the new relative offset by the amount which
1408 			 * is recorded in the data segment of the old CTIO2 we
1409 			 * just finished filling out.
1410 			 */
1411 			cto->ct_reloff += octo->rsp.m0.ct_xfrlen;
1412 			bzero(&cto->rsp, sizeof (cto->rsp));
1413 			ISP_SWIZ_CTIO2(isp, cto, cto);
1414 		}
1415 	}
1416 }
1417 #endif
1418 
1419 static void dma2 __P((void *, bus_dma_segment_t *, int, int));
1420 
1421 static void
1422 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1423 {
1424 	mush_t *mp;
1425 	struct ccb_scsiio *csio;
1426 	struct isp_pcisoftc *pci;
1427 	bus_dmamap_t *dp;
1428 	bus_dma_segment_t *eseg;
1429 	ispreq_t *rq;
1430 	ispcontreq_t *crq;
1431 	int seglim, datalen;
1432 
1433 	mp = (mush_t *) arg;
1434 	if (error) {
1435 		mp->error = error;
1436 		return;
1437 	}
1438 
1439 	if (nseg < 1) {
1440 		printf("%s: bad segment count (%d)\n", mp->isp->isp_name, nseg);
1441 		mp->error = EFAULT;
1442 		return;
1443 	}
1444 	csio = mp->cmd_token;
1445 	rq = mp->rq;
1446 	pci = (struct isp_pcisoftc *)mp->isp;
1447 	dp = &pci->dmaps[rq->req_handle - 1];
1448 
1449 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1450 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1451 	} else {
1452 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1453 	}
1454 
1455 	datalen = XS_XFRLEN(csio);
1456 
1457 	/*
1458 	 * We're passed an initial partially filled in entry that
1459 	 * has most fields filled in except for data transfer
1460 	 * related values.
1461 	 *
1462 	 * Our job is to fill in the initial request queue entry and
1463 	 * then to start allocating and filling in continuation entries
1464 	 * until we've covered the entire transfer.
1465 	 */
1466 
1467 	if (IS_FC(mp->isp)) {
1468 		seglim = ISP_RQDSEG_T2;
1469 		((ispreqt2_t *)rq)->req_totalcnt = datalen;
1470 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1471 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
1472 		} else {
1473 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
1474 		}
1475 	} else {
1476 		if (csio->cdb_len > 12) {
1477 			seglim = 0;
1478 		} else {
1479 			seglim = ISP_RQDSEG;
1480 		}
1481 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1482 			rq->req_flags |= REQFLAG_DATA_IN;
1483 		} else {
1484 			rq->req_flags |= REQFLAG_DATA_OUT;
1485 		}
1486 	}
1487 
1488 	eseg = dm_segs + nseg;
1489 
1490 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
1491 		if (IS_FC(mp->isp)) {
1492 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1493 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
1494 			    dm_segs->ds_addr;
1495 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
1496 			    dm_segs->ds_len;
1497 		} else {
1498 			rq->req_dataseg[rq->req_seg_count].ds_base =
1499 				dm_segs->ds_addr;
1500 			rq->req_dataseg[rq->req_seg_count].ds_count =
1501 				dm_segs->ds_len;
1502 		}
1503 		datalen -= dm_segs->ds_len;
1504 #if	0
1505 		if (IS_FC(mp->isp)) {
1506 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1507 			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
1508 			    mp->isp->isp_name, rq->req_seg_count,
1509 			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
1510 			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
1511 		} else {
1512 			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
1513 			    mp->isp->isp_name, rq->req_seg_count,
1514 			    rq->req_dataseg[rq->req_seg_count].ds_count,
1515 			    rq->req_dataseg[rq->req_seg_count].ds_base);
1516 		}
1517 #endif
1518 		rq->req_seg_count++;
1519 		dm_segs++;
1520 	}
1521 
1522 	while (datalen > 0 && dm_segs != eseg) {
1523 		crq = (ispcontreq_t *)
1524 		    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1525 		*mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN);
1526 		if (*mp->iptrp == mp->optr) {
1527 #if	0
1528 			printf("%s: Request Queue Overflow++\n",
1529 			    mp->isp->isp_name);
1530 #endif
1531 			mp->error = MUSHERR_NOQENTRIES;
1532 			return;
1533 		}
1534 		rq->req_header.rqs_entry_count++;
1535 		bzero((void *)crq, sizeof (*crq));
1536 		crq->req_header.rqs_entry_count = 1;
1537 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1538 
1539 		seglim = 0;
1540 		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
1541 			crq->req_dataseg[seglim].ds_base =
1542 			    dm_segs->ds_addr;
1543 			crq->req_dataseg[seglim].ds_count =
1544 			    dm_segs->ds_len;
1545 #if	0
1546 			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
1547 			    mp->isp->isp_name, rq->req_header.rqs_entry_count-1,
1548 			    seglim, crq->req_dataseg[seglim].ds_count,
1549 			    crq->req_dataseg[seglim].ds_base);
1550 #endif
1551 			rq->req_seg_count++;
1552 			dm_segs++;
1553 			seglim++;
1554 			datalen -= dm_segs->ds_len;
1555 		}
1556 	}
1557 }
1558 
/*
 * Set up DMA mappings for a CCB and fill in the request queue entry (or
 * entries) via the appropriate callback: dma2() for initiator commands,
 * dma2_tgt()/dma2_tgt_fc() for continued target I/O (when compiled with
 * ISP_TARGET_MODE).
 *
 * The bus_dma callbacks cannot return a value, so the "mush" structure
 * carries context in and an error code back out ("virtual grapevine").
 *
 * Returns CMD_QUEUED on success, CMD_EAGAIN if the request queue is
 * full, or CMD_COMPLETE with a CAM error set in the CCB on failure.
 */
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
	u_int16_t *iptrp, u_int16_t optr)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr) __P((void *, bus_dma_segment_t *, int, int));

#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		/* Target mode: pick the CTIO builder for this bus type. */
		if (IS_FC(isp)) {
			eptr = dma2_tgt_fc;
		} else {
			eptr = dma2_tgt;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
			/*
			 * No data moves: invoke the builder directly with a
			 * zero-length segment list (status-only CTIO) and
			 * skip the bus_dmamap_load machinery entirely.
			 */
			rq->req_seg_count = 1;
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;
			mp->iptrp = iptrp;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto exit;
		}
	} else
#endif
	eptr = dma2;

	/*
	 * NB: if we need to do request queue entry swizzling,
	 * NB: this is where it would need to be done for cmds
	 * NB: that move no data. For commands that move data,
	 * NB: swizzling would take place in those functions.
	 */
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		rq->req_seg_count = 1;
		return (CMD_QUEUED);
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->iptrp = iptrp;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			/* Single virtual buffer: map it ourselves. */
			int error, s;
			dp = &pci->dmaps[rq->req_handle - 1];
			/* Block soft VM interrupts around the load. */
			s = splsoftvm();
			error = bus_dmamap_load(pci->parent_dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				/*
				 * This driver cannot handle a deferred
				 * (asynchronous) callback; undo the load.
				 */
				bus_dmamap_unload(pci->parent_dmat, *dp);
				mp->error = EINVAL;
				printf("%s: deferred dma allocation not "
				    "supported\n", isp->isp_name);
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				printf("%s: error %d in dma mapping code\n",
				    isp->isp_name, error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			printf("%s: Physical segment pointers unsupported",
				isp->isp_name);
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			printf("%s: Virtual segment addresses unsupported",
				isp->isp_name);
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
#ifdef	ISP_TARGET_MODE
exit:
#endif
	if (mp->error) {
		/* Map the grapevine error to a CAM status / return code. */
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	} else {
		/*
		 * Check to see if we weren't cancelled while sleeping on
		 * getting DMA resources...
		 */
		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			if (dp) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
			}
			return (CMD_COMPLETE);
		}
		return (CMD_QUEUED);
	}
}
1687 
1688 static void
1689 isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *xs, u_int32_t handle)
1690 {
1691 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1692 	bus_dmamap_t *dp = &pci->dmaps[handle - 1];
1693 	KASSERT((handle > 0 && handle <= isp->isp_maxcmds),
1694 	    ("bad handle in isp_pci_dmateardonw"));
1695 	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1696 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
1697 	} else {
1698 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
1699 	}
1700 	bus_dmamap_unload(pci->parent_dmat, *dp);
1701 }
1702 
1703 
1704 static void
1705 isp_pci_reset1(struct ispsoftc *isp)
1706 {
1707 	/* Make sure the BIOS is disabled */
1708 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1709 }
1710 
1711 static void
1712 isp_pci_dumpregs(struct ispsoftc *isp)
1713 {
1714 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1715 	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
1716 	    pci_read_config(pci->pci_dev, PCIR_COMMAND, 1));
1717 }
1718