/* $FreeBSD$ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, XS_T *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void
isp_pci_dmateardown __P((struct ispsoftc *, XS_T *, u_int32_t));

static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *, const char *));

#ifndef	ISP_CODE_ORG
#define	ISP_CODE_ORG		0x1000
#endif
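
/*
 * Bus dispatch vectors, one per chip family. The core driver calls
 * through these for register access, DMA setup and teardown, reset and
 * register dumps; the SCSI variants also carry configuration bits
 * (burst enable, 64 byte FIFO threshold) in their final member.
 */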

static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			parent_dmat;
	bus_dma_tag_t			cntrol_dmat;
	bus_dmamap_t			cntrol_dmap;
	bus_dmamap_t			*dmaps;
};
ispfwfunc *isp_get_firmware_p = NULL;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	bus_size_t lim;
#ifdef	ISP_SMPLOCK
	int locksetup = 0;
#endif

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	unit = device_get_unit(dev);
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			device_printf(dev, "not configuring\n");
			return (ENODEV);
		}
	}

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
	if (pcs == NULL) {
		device_printf(dev, "cannot allocate softc\n");
		return (ENOMEM);
	}
	bzero(pcs, sizeof (struct isp_pcisoftc));

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
#ifdef	__alpha__
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;
#else
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
#endif
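	/*
	 * The default order is platform dependent: alpha prefers memory
	 * space (I/O space access is comparatively expensive there), while
	 * i386 traditionally tries I/O space first. Either default may be
	 * overridden per unit by the isp_mem_map and isp_io_map tunables
	 * checked below.
	 */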
	bitmap = 0;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_MEMEN;
			m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_PORTEN;
			m2 = PCIM_CMD_MEMEN;
		}
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		printf("isp%d: using %s space register mapping\n", unit,
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	bzero(isp->isp_param, psize);
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	(void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
	isp->isp_osinfo.unit = unit;

	/*
	 * Try and find firmware for this device.
	 */

	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);
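	/* (bit 0 of the expansion ROM BAR is its decode enable) */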
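	/*
	 * Create the parent DMA tag: byte aligned, no boundary, 32-bit
	 * addressable, up to 255 segments, with total mapping size and
	 * maximum segment size capped by the chip's DMA limit from above.
	 */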
	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
	    255, lim, 0, &pcs->parent_dmat) != 0) {
		printf("%s: could not create master dma tag\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
		return (ENXIO);
	}

	iqd = 0;
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}
	/*
	 * Look for overriding WWN. This is a Node WWN so it binds to
	 * all FC instances. A Port WWN will be constructed from it
	 * as appropriate.
	 */
	if (!getenv_quad("isp_wwn", (quad_t *) &isp->isp_osinfo.default_wwn)) {
		int i;
		u_int64_t seed = (u_int64_t) (intptr_t) isp;

		seed <<= 16;
		seed &= ((1LL << 48) - 1LL);
		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs. If
		 * you recompile a new vers.c, you'll get a different WWN.
		 */
		for (i = 0; version[i] != 0; i++) {
			seed += version[i];
		}
		/*
		 * Make sure the top nibble has something vaguely sensible
		 * (NAA == Locally Administered)
		 */
		isp->isp_osinfo.default_wwn |= (3LL << 60) | seed;
	} else {
		isp->isp_confopts |= ISP_CFG_OWNWWN;
	}
	isp_debug = 0;
	(void) getenv_int("isp_debug", &isp_debug);
	if (bus_setup_intr(dev, irq, INTR_TYPE_CAM, (void (*)(void *))isp_intr,
	    isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG;

#ifdef	ISP_SMPLOCK
	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", MTX_DEF);
	locksetup++;
#endif

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);

	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

#ifdef	ISP_SMPLOCK
	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
#endif

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
}

static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
}

static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
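
/*
 * The callbacks below are handed to bus_dmamap_load() for the control
 * structures; each records the bus address of its area (request queue,
 * result queue, FC scratch) or latches a load error in the imush cookie.
 */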

struct imush {
	struct ispsoftc *isp;
	int error;
};

static void
isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_rquest_dma = segs->ds_addr;
	}
}

static void
isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_result_dma = segs->ds_addr;
	}
}

static void
isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		fcparam *fcp = imushp->isp->isp_param;
		fcp->isp_scdma = segs->ds_addr;
	}
}

static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error;
	bus_size_t lim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	bzero(isp->isp_xflist, len);
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma maps");
		free(isp->isp_xflist, M_DEVBUF);
		return (1);
	}

	if (IS_FC(isp) || IS_ULTRA2(isp))
		lim = BUS_SPACE_MAXADDR + 1;
	else
		lim = BUS_SPACE_MAXADDR_24BIT + 1;
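	/*
	 * lim is handed to bus_dma_tag_create() below as the boundary
	 * argument: for the older 24-bit parts this gives a 16MB boundary,
	 * while BUS_SPACE_MAXADDR + 1 wraps to zero, which busdma treats
	 * as "no boundary restriction".
	 */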

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}
	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
		printf("%s: cannot create a dma tag for control spaces\n",
		    isp->isp_name);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}
	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
		printf("%s: cannot allocate %d bytes of CCB memory\n",
		    isp->isp_name, len);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}

	isp->isp_rquest = base;
	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), isp_map_rquest, &im, 0);
	if (im.error) {
		printf("%s: error %d loading dma map for DMA request queue\n",
		    isp->isp_name, im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}
	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)), isp_map_result, &im, 0);
	if (im.error) {
		printf("%s: error %d loading dma map for DMA result queue\n",
		    isp->isp_name, im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
		if (error) {
			printf("%s: error %d creating per-cmd DMA maps\n",
			    isp->isp_name, error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}

	if (IS_FC(isp)) {
		fcparam *fcp = (fcparam *) isp->isp_param;
		fcp->isp_scratch = base +
			ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) +
			ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		im.error = 0;
		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
		if (im.error) {
			printf("%s: error %d loading FC scratch area\n",
			    isp->isp_name, im.error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}
	return (0);
}

typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2
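
/*
 * A mush_t carries the state isp_pci_dmasetup() must pass through to the
 * busdma load callbacks, which only get a single opaque argument. The
 * MUSHERR_NOQENTRIES code flags the case where the request queue filled
 * up while continuation entries were being built; the caller translates
 * it into CMD_EAGAIN so the command can be resubmitted later.
 */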

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk __P((void *, bus_dma_segment_t *, int, int));
static void tdma_mkfc __P((void *, bus_dma_segment_t *, int, int));

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	u_int8_t scsi_status;
	ct_entry_t *cto;
	u_int32_t handle, totxfr, sflags;
	int nctios, send_status;
	int32_t resid;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	cto = mp->rq;

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		ISP_TDQE(mp->isp, "tdma_mk[no data]", *mp->iptrp, cto);
		isp_prt(mp->isp, ISP_LOGTDEBUG1,
		    "CTIO lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x res %d",
		    csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags,
		    cto->ct_status, cto->ct_scsi_status, cto->ct_resid);
		ISP_SWIZ_CTIO(mp->isp, cto, cto);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save handle, and potentially any SCSI status, which we'll reinsert
	 * on the last CTIO we're going to send.
	 */
	handle = cto->ct_reserved;
	cto->ct_reserved = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
	} else {
		sflags = scsi_status = resid = 0;
	}

	totxfr = cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pci = (struct isp_pcisoftc *)mp->isp;
	dp = &pci->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	while (nctios--) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				totxfr += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				printf("%s: tdma_mk ran out of segments\n",
				       mp->isp->isp_name);
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nctios == 0) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command.
			 */
			cto->ct_reserved = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags | CT_NO_DATA;
				cto->ct_resid = resid;
				isp_prt(mp->isp, ISP_LOGTDEBUG1,
				    "CTIO lun%d for ID %d ct_flags 0x%x scsi "
				    "status %x resid %d",
				    csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(mp->isp, ISP_LOGTDEBUG1,
				    "CTIO lun%d for ID%d ct_flags 0x%x",
				    csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_flags);
			}
			ISP_TDQE(mp->isp, "last tdma_mk", *mp->iptrp, cto);
			ISP_SWIZ_CTIO(mp->isp, cto, cto);
		} else {
			ct_entry_t *octo = cto;

			/*
			 * Make sure handle fields are clean
			 */
			cto->ct_reserved = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(mp->isp, ISP_LOGTDEBUG1,
			    "CTIO lun%d for ID%d ct_flags 0x%x",
			    csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags);
			ISP_TDQE(mp->isp, "tdma_mk", *mp->iptrp, cto);

			/*
			 * Get a new CTIO
			 */
			cto = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
			*mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp,
			    RQUEST_QUEUE_LEN(mp->isp));
			if (*mp->iptrp == mp->optr) {
				printf("%s: Queue Overflow in tdma_mk\n",
				    mp->isp->isp_name);
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}
			/*
			 * Fill in the new CTIO with info from the old one.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_lun = octo->ct_lun;
			cto->ct_iid = octo->ct_iid;
			cto->ct_reserved2 = octo->ct_reserved2;
			cto->ct_tgt = octo->ct_tgt;
			cto->ct_flags = octo->ct_flags;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_tag_val = octo->ct_tag_val;
			cto->ct_tag_type = octo->ct_tag_type;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_timeout = octo->ct_timeout;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
			/*
			 * Now swizzle the old one for the consumption of the
			 * chip.
			 */
			ISP_SWIZ_CTIO(mp->isp, octo, octo);
		}
	}
}

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	ct2_entry_t *cto;
	u_int16_t scsi_status, send_status, send_sense;
	u_int32_t handle, totxfr, datalen;
	u_int8_t sense[QLTM_SENSELEN];
	int nctios;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	csio = mp->cmd_token;
	cto = mp->rq;

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			printf("%s: dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)\n", mp->isp->isp_name, cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_header.rqs_seqno = 1;
		/* ct_reserved contains the handle set by caller */
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_flags |= CT2_NO_DATA;
		if (cto->ct_resid > 0)
			cto->ct_flags |= CT2_DATA_UNDER;
		else if (cto->ct_resid < 0)
			cto->ct_flags |= CT2_DATA_OVER;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		ISP_TDQE(mp->isp, "dma2_tgt_fc[no data]", *mp->iptrp, cto);
		isp_prt(mp->isp, ISP_LOGTDEBUG1,
		    "CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		ISP_SWIZ_CTIO2(mp->isp, cto, cto);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		printf("%s: dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)\n", mp->isp->isp_name, cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nctios = nseg / ISP_RQDSEG_T2;
	if (nseg % ISP_RQDSEG_T2) {
		nctios++;
	}

	/*
	 * Save the handle, status, reloff, and residual. We'll reinsert the
	 * handle into the last CTIO2 we're going to send, and reinsert status
	 * and residual (and possibly sense data) if that's to be sent as well.
	 *
	 * We preserve ct_reloff and adjust it for each data CTIO2 we send past
	 * the first one. This is needed so that the FCP DATA IUs being sent
	 * out have the correct offset (they can arrive at the other end out
	 * of order).
	 */

	handle = cto->ct_reserved;
	cto->ct_reserved = 0;

	if ((send_status = (cto->ct_flags & CT2_SENDSTATUS)) != 0) {
		cto->ct_flags &= ~CT2_SENDSTATUS;

		/*
		 * Preserve residual, which is actually the total count.
		 */
		datalen = cto->ct_resid;

		/*
		 * Save actual SCSI status. We'll reinsert the
		 * CT2_SNSLEN_VALID later if appropriate.
		 */
		scsi_status = cto->rsp.m0.ct_scsi_status & 0xff;
		send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID;

		/*
		 * If we're sending status and have a CHECK CONDITION and
		 * have sense data, we send one more CTIO2 with just the
		 * status and sense data. The upper layers have stashed
		 * the sense data in the dataseg structure for us.
		 */

		if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND &&
		    send_sense) {
			bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN);
			nctios++;
		}
	} else {
		scsi_status = send_sense = datalen = 0;
	}

	totxfr = cto->ct_resid = 0;
	cto->rsp.m0.ct_scsi_status = 0;
	bzero(&cto->rsp, sizeof (cto->rsp));

	pci = (struct isp_pcisoftc *)mp->isp;
	dp = &pci->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	while (nctios--) {
		int seg, seglim;

		seglim = nseg;
		if (seglim) {
			if (seglim > ISP_RQDSEG_T2)
				seglim = ISP_RQDSEG_T2;

			for (seg = 0; seg < seglim; seg++) {
				cto->rsp.m0.ct_dataseg[seg].ds_base =
				    dm_segs->ds_addr;
				cto->rsp.m0.ct_dataseg[seg].ds_count =
				    dm_segs->ds_len;
				cto->rsp.m0.ct_xfrlen += dm_segs->ds_len;
				totxfr += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending a
			 * synthesized MODE1 final status with sense data.
			 */
			if (send_sense == 0) {
				printf("%s: dma2_tgt_fc ran out of segments, "
				    "no SENSE DATA\n", mp->isp->isp_name);
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_rxid,
		 * ct_timeout have been carried over unchanged from what
		 * our caller had set.
		 *
		 * The field ct_reloff is either what the caller set, or
		 * what we've added to below.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're sending a MODE1 status
		 * as the last CTIO.
		 *
		 */

		if (nctios == 0) {

			/*
			 * We're the last in a sequence of CTIO2s, so mark this
			 * CTIO2 and save the handle to the CCB such that when
			 * this CTIO2 completes we can free dma resources and
			 * do whatever else we need to do to finish the rest
			 * of the command.
			 */

			cto->ct_reserved = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				if (send_sense) {
					bcopy(sense, cto->rsp.m1.ct_resp,
					    QLTM_SENSELEN);
					cto->rsp.m1.ct_senselen =
					    QLTM_SENSELEN;
					scsi_status |= CT2_SNSLEN_VALID;
					cto->rsp.m1.ct_scsi_status =
					    scsi_status;
					cto->ct_flags &= CT2_FLAG_MMASK;
					cto->ct_flags |= CT2_FLAG_MODE1 |
					    CT2_NO_DATA | CT2_SENDSTATUS;
				} else {
					cto->rsp.m0.ct_scsi_status =
					    scsi_status;
					cto->ct_flags |= CT2_SENDSTATUS;
				}
				/*
				 * Get 'real' residual and set flags based
				 * on it.
				 */
				cto->ct_resid = datalen - totxfr;
				if (cto->ct_resid > 0)
					cto->ct_flags |= CT2_DATA_UNDER;
				else if (cto->ct_resid < 0)
					cto->ct_flags |= CT2_DATA_OVER;
			}
			ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto);
			isp_prt(mp->isp, ISP_LOGTDEBUG1,
			    "CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x sts 0x%x"
			    " ssts 0x%x res %d", cto->ct_rxid,
			    csio->ccb_h.target_lun, (int) cto->ct_iid,
			    cto->ct_flags, cto->ct_status,
			    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
			ISP_SWIZ_CTIO2(mp->isp, cto, cto);
		} else {
			ct2_entry_t *octo = cto;

			/*
			 * Make sure handle fields are clean
			 */
			cto->ct_reserved = 0;
			cto->ct_header.rqs_seqno = 0;

			ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto);
			isp_prt(mp->isp, ISP_LOGTDEBUG1,
			    "CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x",
			    cto->ct_rxid, csio->ccb_h.target_lun,
			    (int) cto->ct_iid, cto->ct_flags);
			/*
			 * Get a new CTIO2
			 */
			cto = (ct2_entry_t *)
			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
			*mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp,
			    RQUEST_QUEUE_LEN(mp->isp));
			if (*mp->iptrp == mp->optr) {
				printf("%s: Queue Overflow in dma2_tgt_fc\n",
				    mp->isp->isp_name);
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Fill in the new CTIO2 with info from the old one.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			/* ct_header.rqs_seqno && ct_reserved done later */
			cto->ct_lun = octo->ct_lun;
			cto->ct_iid = octo->ct_iid;
			cto->ct_rxid = octo->ct_rxid;
			cto->ct_flags = octo->ct_flags;
			cto->ct_status = 0;
			cto->ct_resid = 0;
			cto->ct_timeout = octo->ct_timeout;
			cto->ct_seg_count = 0;
			/*
			 * Adjust the new relative offset by the amount which
			 * is recorded in the data segment of the old CTIO2 we
			 * just finished filling out.
			 */
			cto->ct_reloff += octo->rsp.m0.ct_xfrlen;
			bzero(&cto->rsp, sizeof (cto->rsp));
			ISP_SWIZ_CTIO2(mp->isp, cto, cto);
		}
	}
}
#endif

static void dma2 __P((void *, bus_dma_segment_t *, int, int));
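
/*
 * dma2 is the busdma load callback for initiator commands: it fills in
 * the data segments of the partially built request queue entry and then
 * allocates and fills however many RQSTYPE_DATASEG continuation entries
 * the rest of the S/G list requires.
 */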

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	ispcontreq_t *crq;
	int seglim, datalen;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		printf("%s: bad segment count (%d)\n", mp->isp->isp_name, nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	rq = mp->rq;
	pci = (struct isp_pcisoftc *)mp->isp;
	dp = &pci->dmaps[isp_handle_index(rq->req_handle)];

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(mp->isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(mp->isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
				dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
				dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
#if	0
		if (IS_FC(mp->isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    mp->isp->isp_name, rq->req_seg_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
		} else {
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    mp->isp->isp_name, rq->req_seg_count,
			    rq->req_dataseg[rq->req_seg_count].ds_count,
			    rq->req_dataseg[rq->req_seg_count].ds_base);
		}
#endif
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
		*mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(mp->isp));
		if (*mp->iptrp == mp->optr) {
#if	0
			printf("%s: Request Queue Overflow++\n",
			    mp->isp->isp_name);
#endif
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
#if	0
			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
			    mp->isp->isp_name, rq->req_header.rqs_entry_count-1,
			    seglim, crq->req_dataseg[seglim].ds_count,
			    crq->req_dataseg[seglim].ds_base);
#endif
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
	}
}

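/*
 * isp_pci_dmasetup returns CMD_QUEUED on success, CMD_EAGAIN when the
 * request queue is currently too full to take the continuation entries,
 * or CMD_COMPLETE with a CAM error already set in the CCB when the
 * mapping fails outright.
 */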
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
	u_int16_t *iptrp, u_int16_t optr)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr) __P((void *, bus_dma_segment_t *, int, int));

#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			rq->req_seg_count = 1;
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;
			mp->iptrp = iptrp;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto exit;
		}
	} else
#endif
	eptr = dma2;

	/*
	 * NB: if we need to do request queue entry swizzling,
	 * NB: this is where it would need to be done for cmds
	 * NB: that move no data. For commands that move data,
	 * NB: swizzling would take place in those functions.
	 */
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		return (CMD_QUEUED);
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->iptrp = iptrp;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pci->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pci->parent_dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
				mp->error = EINVAL;
				printf("%s: deferred dma allocation not "
				    "supported\n", isp->isp_name);
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				printf("%s: error %d in dma mapping code\n",
				    isp->isp_name, error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			printf("%s: Physical segment pointers unsupported",
				isp->isp_name);
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			printf("%s: Virtual segment addresses unsupported",
				isp->isp_name);
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
#ifdef	ISP_TARGET_MODE
exit:
#endif
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	} else {
		/*
		 * Check to see if we weren't cancelled while sleeping on
		 * getting DMA resources...
		 */
		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			if (dp) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
			}
			return (CMD_COMPLETE);
		}
		return (CMD_QUEUED);
	}
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int32_t handle)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pci->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pci->parent_dmat, *dp);
}

static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", isp->isp_name, msg);
	if (IS_SCSI(isp))
		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
			ISP_READ(isp, CDMA_FIFO_STS));
		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
			ISP_READ(isp, DDMA_FIFO_STS));
		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
			ISP_READ(isp, SXP_INTERRUPT),
			ISP_READ(isp, SXP_GROSS_ERR),
			ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf("    PCI Status Command/Status=%x\n",
	    pci_read_config(pci->pci_dev, PCIR_COMMAND, 1));
}