xref: /freebsd/sys/dev/isp/isp_pci.c (revision afcf05e46aacbe58f9fd8338ab6ecf9b7d06d004)
1 /* $FreeBSD$ */
2 /*
3  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4  * FreeBSD Version.
5  *
6  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice immediately at the beginning of the file, without modification,
13  *    this list of conditions, and the following disclaimer.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/bus.h>
35 
36 #include <pci/pcireg.h>
37 #include <pci/pcivar.h>
38 
39 #include <machine/bus_memio.h>
40 #include <machine/bus_pio.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
43 #include <sys/rman.h>
44 #include <sys/malloc.h>
45 
46 #include <dev/isp/isp_freebsd.h>
47 
48 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
49 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
50 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
51 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
52 static int isp_pci_mbxdma __P((struct ispsoftc *));
53 static int isp_pci_dmasetup __P((struct ispsoftc *, XS_T *,
54 	ispreq_t *, u_int16_t *, u_int16_t));
55 static void
56 isp_pci_dmateardown __P((struct ispsoftc *, XS_T *, u_int32_t));
57 
58 static void isp_pci_reset1 __P((struct ispsoftc *));
59 static void isp_pci_dumpregs __P((struct ispsoftc *, const char *));
60 
61 #ifndef	ISP_CODE_ORG
62 #define	ISP_CODE_ORG		0x1000
63 #endif
64 
65 static struct ispmdvec mdvec = {
66 	isp_pci_rd_reg,
67 	isp_pci_wr_reg,
68 	isp_pci_mbxdma,
69 	isp_pci_dmasetup,
70 	isp_pci_dmateardown,
71 	NULL,
72 	isp_pci_reset1,
73 	isp_pci_dumpregs,
74 	NULL,
75 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
76 };
77 
78 static struct ispmdvec mdvec_1080 = {
79 	isp_pci_rd_reg_1080,
80 	isp_pci_wr_reg_1080,
81 	isp_pci_mbxdma,
82 	isp_pci_dmasetup,
83 	isp_pci_dmateardown,
84 	NULL,
85 	isp_pci_reset1,
86 	isp_pci_dumpregs,
87 	NULL,
88 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
89 };
90 
91 static struct ispmdvec mdvec_12160 = {
92 	isp_pci_rd_reg_1080,
93 	isp_pci_wr_reg_1080,
94 	isp_pci_mbxdma,
95 	isp_pci_dmasetup,
96 	isp_pci_dmateardown,
97 	NULL,
98 	isp_pci_reset1,
99 	isp_pci_dumpregs,
100 	NULL,
101 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
102 };
103 
104 static struct ispmdvec mdvec_2100 = {
105 	isp_pci_rd_reg,
106 	isp_pci_wr_reg,
107 	isp_pci_mbxdma,
108 	isp_pci_dmasetup,
109 	isp_pci_dmateardown,
110 	NULL,
111 	isp_pci_reset1,
112 	isp_pci_dumpregs
113 };
114 
115 static struct ispmdvec mdvec_2200 = {
116 	isp_pci_rd_reg,
117 	isp_pci_wr_reg,
118 	isp_pci_mbxdma,
119 	isp_pci_dmasetup,
120 	isp_pci_dmateardown,
121 	NULL,
122 	isp_pci_reset1,
123 	isp_pci_dumpregs
124 };
125 
126 #ifndef	PCIM_CMD_INVEN
127 #define	PCIM_CMD_INVEN			0x10
128 #endif
129 #ifndef	PCIM_CMD_BUSMASTEREN
130 #define	PCIM_CMD_BUSMASTEREN		0x0004
131 #endif
132 #ifndef	PCIM_CMD_PERRESPEN
133 #define	PCIM_CMD_PERRESPEN		0x0040
134 #endif
135 #ifndef	PCIM_CMD_SEREN
136 #define	PCIM_CMD_SEREN			0x0100
137 #endif
138 
139 #ifndef	PCIR_COMMAND
140 #define	PCIR_COMMAND			0x04
141 #endif
142 
143 #ifndef	PCIR_CACHELNSZ
144 #define	PCIR_CACHELNSZ			0x0c
145 #endif
146 
147 #ifndef	PCIR_LATTIMER
148 #define	PCIR_LATTIMER			0x0d
149 #endif
150 
151 #ifndef	PCIR_ROMADDR
152 #define	PCIR_ROMADDR			0x30
153 #endif
154 
155 #ifndef	PCI_VENDOR_QLOGIC
156 #define	PCI_VENDOR_QLOGIC		0x1077
157 #endif
158 
159 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
160 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
161 #endif
162 
163 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
164 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
165 #endif
166 
167 #ifndef	PCI_PRODUCT_QLOGIC_ISP12160
168 #define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
169 #endif
170 
171 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
172 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
173 #endif
174 
175 #ifndef	PCI_PRODUCT_QLOGIC_ISP1280
176 #define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
177 #endif
178 
179 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
180 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
181 #endif
182 
183 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
184 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
185 #endif
186 
187 #define	PCI_QLOGIC_ISP1020	\
188 	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
189 
190 #define	PCI_QLOGIC_ISP1080	\
191 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
192 
193 #define	PCI_QLOGIC_ISP12160	\
194 	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
195 
196 #define	PCI_QLOGIC_ISP1240	\
197 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
198 
199 #define	PCI_QLOGIC_ISP1280	\
200 	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
201 
202 #define	PCI_QLOGIC_ISP2100	\
203 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
204 
205 #define	PCI_QLOGIC_ISP2200	\
206 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
207 
208 /*
209  * Odd case for some AMI raid cards... We need to *not* attach to this.
210  */
211 #define	AMI_RAID_SUBVENDOR_ID	0x101e
212 
213 #define	IO_MAP_REG	0x10
214 #define	MEM_MAP_REG	0x14
215 
216 #define	PCI_DFLT_LTNCY	0x40
217 #define	PCI_DFLT_LNSZ	0x10
218 
219 static int isp_pci_probe (device_t);
220 static int isp_pci_attach (device_t);
221 
222 struct isp_pcisoftc {
223 	struct ispsoftc			pci_isp;
224 	device_t			pci_dev;
225 	struct resource *		pci_reg;
226 	bus_space_tag_t			pci_st;
227 	bus_space_handle_t		pci_sh;
228 	void *				ih;
229 	int16_t				pci_poff[_NREG_BLKS];
230 	bus_dma_tag_t			parent_dmat;
231 	bus_dma_tag_t			cntrol_dmat;
232 	bus_dmamap_t			cntrol_dmap;
233 	bus_dmamap_t			*dmaps;
234 };
235 ispfwfunc *isp_get_firmware_p = NULL;
236 
237 static device_method_t isp_pci_methods[] = {
238 	/* Device interface */
239 	DEVMETHOD(device_probe,		isp_pci_probe),
240 	DEVMETHOD(device_attach,	isp_pci_attach),
241 	{ 0, 0 }
242 };
243 static void isp_pci_intr __P((void *));
244 
245 static driver_t isp_pci_driver = {
246 	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
247 };
248 static devclass_t isp_devclass;
249 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
250 MODULE_VERSION(isp, 1);
251 
252 static int
253 isp_pci_probe(device_t dev)
254 {
255         switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
256 	case PCI_QLOGIC_ISP1020:
257 		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
258 		break;
259 	case PCI_QLOGIC_ISP1080:
260 		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
261 		break;
262 	case PCI_QLOGIC_ISP1240:
263 		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
264 		break;
265 	case PCI_QLOGIC_ISP1280:
266 		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
267 		break;
268 	case PCI_QLOGIC_ISP12160:
269 		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
270 			return (ENXIO);
271 		}
272 		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
273 		break;
274 	case PCI_QLOGIC_ISP2100:
275 		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
276 		break;
277 	case PCI_QLOGIC_ISP2200:
278 		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
279 		break;
280 	default:
281 		return (ENXIO);
282 	}
283 	if (device_get_unit(dev) == 0 && bootverbose) {
284 		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
285 		    "Core Version %d.%d\n",
286 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
287 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
288 	}
289 	/*
290 	 * XXXX: Here is where we might load the f/w module
291 	 * XXXX: (or increase a reference count to it).
292 	 */
293 	return (0);
294 }
295 
296 static int
297 isp_pci_attach(device_t dev)
298 {
299 	struct resource *regs, *irq;
300 	int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
301 	u_int32_t data, cmd, linesz, psize, basetype;
302 	struct isp_pcisoftc *pcs;
303 	struct ispsoftc *isp = NULL;
304 	struct ispmdvec *mdvp;
305 	bus_size_t lim;
306 	char *sptr;
307 #ifdef	ISP_SMPLOCK
308 	int locksetup = 0;
309 #endif
310 
311 	/*
312 	 * Figure out if we're supposed to skip this one.
313 	 * If we are, we actually go to ISP_ROLE_NONE.
314 	 */
315 
316 	tval = 0;
317 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
318 	    "disable", &tval) == 0 && tval) {
319 		device_printf(dev, "device is disabled\n");
320 		/* but return 0 so the !$)$)*!$*) unit isn't reused */
321 		return (0);
322 	}
323 
324 	role = 0;
325 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
326 	    "role", &role) == 0 &&
327 	    ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) {
328 		device_printf(dev, "setting role to 0x%x\n", role);
329 	} else {
330 #ifdef	ISP_TARGET_MODE
331 		role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET;
332 #else
333 		role = ISP_DEFAULT_ROLES;
334 #endif
335 	}
336 
337 	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO);
338 	if (pcs == NULL) {
339 		device_printf(dev, "cannot allocate softc\n");
340 		return (ENOMEM);
341 	}
342 
343 	/*
344 	 * Figure out which we should try first - memory mapping or i/o mapping?
345 	 */
346 #ifdef	__alpha__
347 	m1 = PCIM_CMD_MEMEN;
348 	m2 = PCIM_CMD_PORTEN;
349 #else
350 	m1 = PCIM_CMD_PORTEN;
351 	m2 = PCIM_CMD_MEMEN;
352 #endif
353 
354 	tval = 0;
355         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
356             "prefer_iomap", &tval) == 0 && tval != 0) {
357 		m1 = PCIM_CMD_PORTEN;
358 		m2 = PCIM_CMD_MEMEN;
359 	}
360 	tval = 0;
361         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
362             "prefer_memmap", &tval) == 0 && tval != 0) {
363 		m1 = PCIM_CMD_MEMEN;
364 		m2 = PCIM_CMD_PORTEN;
365 	}
366 
367 	linesz = PCI_DFLT_LNSZ;
368 	irq = regs = NULL;
369 	rgd = rtp = iqd = 0;
370 
371 	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
372 	if (cmd & m1) {
373 		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
374 		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
375 		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
376 	}
377 	if (regs == NULL && (cmd & m2)) {
378 		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
379 		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
380 		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
381 	}
382 	if (regs == NULL) {
383 		device_printf(dev, "unable to map any ports\n");
384 		goto bad;
385 	}
386 	if (bootverbose)
387 		device_printf(dev, "using %s space register mapping\n",
388 		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
389 	pcs->pci_dev = dev;
390 	pcs->pci_reg = regs;
391 	pcs->pci_st = rman_get_bustag(regs);
392 	pcs->pci_sh = rman_get_bushandle(regs);
393 
394 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
395 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
396 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
397 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
398 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
399 	mdvp = &mdvec;
400 	basetype = ISP_HA_SCSI_UNKNOWN;
401 	psize = sizeof (sdparam);
402 	lim = BUS_SPACE_MAXSIZE_32BIT;
403 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
404 		mdvp = &mdvec;
405 		basetype = ISP_HA_SCSI_UNKNOWN;
406 		psize = sizeof (sdparam);
407 		lim = BUS_SPACE_MAXSIZE_24BIT;
408 	}
409 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
410 		mdvp = &mdvec_1080;
411 		basetype = ISP_HA_SCSI_1080;
412 		psize = sizeof (sdparam);
413 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
414 		    ISP1080_DMA_REGS_OFF;
415 	}
416 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
417 		mdvp = &mdvec_1080;
418 		basetype = ISP_HA_SCSI_1240;
419 		psize = 2 * sizeof (sdparam);
420 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
421 		    ISP1080_DMA_REGS_OFF;
422 	}
423 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
424 		mdvp = &mdvec_1080;
425 		basetype = ISP_HA_SCSI_1280;
426 		psize = 2 * sizeof (sdparam);
427 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
428 		    ISP1080_DMA_REGS_OFF;
429 	}
430 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
431 		mdvp = &mdvec_12160;
432 		basetype = ISP_HA_SCSI_12160;
433 		psize = 2 * sizeof (sdparam);
434 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
435 		    ISP1080_DMA_REGS_OFF;
436 	}
437 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
438 		mdvp = &mdvec_2100;
439 		basetype = ISP_HA_FC_2100;
440 		psize = sizeof (fcparam);
441 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
442 		    PCI_MBOX_REGS2100_OFF;
443 		if (pci_get_revid(dev) < 3) {
444 			/*
445 			 * XXX: Need to get the actual revision
446 			 * XXX: number of the 2100 FB. At any rate,
447 			 * XXX: lower cache line size for early revision
448 			 * XXX; boards.
449 			 */
450 			linesz = 1;
451 		}
452 	}
453 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
454 		mdvp = &mdvec_2200;
455 		basetype = ISP_HA_FC_2200;
456 		psize = sizeof (fcparam);
457 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
458 		    PCI_MBOX_REGS2100_OFF;
459 	}
460 	isp = &pcs->pci_isp;
461 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
462 	if (isp->isp_param == NULL) {
463 		device_printf(dev, "cannot allocate parameter data\n");
464 		goto bad;
465 	}
466 	isp->isp_mdvec = mdvp;
467 	isp->isp_type = basetype;
468 	isp->isp_revision = pci_get_revid(dev);
469 	isp->isp_role = role;
470 	isp->isp_dev = dev;
471 
472 	/*
473 	 * Try and find firmware for this device.
474 	 */
475 
476 	if (isp_get_firmware_p) {
477 		int device = (int) pci_get_device(dev);
478 #ifdef	ISP_TARGET_MODE
479 		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
480 #else
481 		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
482 #endif
483 	}
484 
485 	/*
486 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
487 	 * are set.
488 	 */
489 	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
490 		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
491 	pci_write_config(dev, PCIR_COMMAND, cmd, 1);
492 
493 	/*
494 	 * Make sure the Cache Line Size register is set sensibly.
495 	 */
496 	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
497 	if (data != linesz) {
498 		data = PCI_DFLT_LNSZ;
499 		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
500 		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
501 	}
502 
503 	/*
504 	 * Make sure the Latency Timer is sane.
505 	 */
506 	data = pci_read_config(dev, PCIR_LATTIMER, 1);
507 	if (data < PCI_DFLT_LTNCY) {
508 		data = PCI_DFLT_LTNCY;
509 		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
510 		pci_write_config(dev, PCIR_LATTIMER, data, 1);
511 	}
512 
513 	/*
514 	 * Make sure we've disabled the ROM.
515 	 */
516 	data = pci_read_config(dev, PCIR_ROMADDR, 4);
517 	data &= ~1;
518 	pci_write_config(dev, PCIR_ROMADDR, data, 4);
519 
520 
521 	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
522 	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
523 	    255, lim, 0, &pcs->parent_dmat) != 0) {
524 		device_printf(dev, "could not create master dma tag\n");
525 		free(isp->isp_param, M_DEVBUF);
526 		free(pcs, M_DEVBUF);
527 		return (ENXIO);
528 	}
529 
530 	iqd = 0;
531 	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
532 	    1, RF_ACTIVE | RF_SHAREABLE);
533 	if (irq == NULL) {
534 		device_printf(dev, "could not allocate interrupt\n");
535 		goto bad;
536 	}
537 
538 	tval = 0;
539         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
540             "fwload_disable", &tval) == 0 && tval != 0) {
541 		isp->isp_confopts |= ISP_CFG_NORELOAD;
542 	}
543 	tval = 0;
544         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
545             "ignore_nvram", &tval) == 0 && tval != 0) {
546 		isp->isp_confopts |= ISP_CFG_NONVRAM;
547 	}
548 	tval = 0;
549         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
550             "fullduplex", &tval) == 0 && tval != 0) {
551 		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
552 	}
553 
554 	sptr = 0;
555         if (resource_string_value(device_get_name(dev), device_get_unit(dev),
556             "topology", &sptr) == 0 && sptr != 0) {
557 		if (strcmp(sptr, "lport") == 0) {
558 			isp->isp_confopts |= ISP_CFG_LPORT;
559 		} else if (strcmp(sptr, "nport") == 0) {
560 			isp->isp_confopts |= ISP_CFG_NPORT;
561 		} else if (strcmp(sptr, "lport-only") == 0) {
562 			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
563 		} else if (strcmp(sptr, "nport-only") == 0) {
564 			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
565 		}
566 	}
567 
568 	/*
569 	 * Because the resource_*_value functions can neither return
570 	 * 64 bit integer values, nor can they be directly coerced
571 	 * to interpret the right hand side of the assignment as
572 	 * you want them to interpret it, we have to force WWN
573 	 * hint replacement to specify WWN strings with a leading
574 	 * 'w' (e..g w50000000aaaa0001). Sigh.
575 	 */
576 	sptr = 0;
577 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
578             "portwwn", &sptr);
579 	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
580 		char *eptr = 0;
581 		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
582 		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
583 			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
584 			isp->isp_osinfo.default_port_wwn = 0;
585 		} else {
586 			isp->isp_confopts |= ISP_CFG_OWNWWN;
587 		}
588 	}
589 	if (isp->isp_osinfo.default_port_wwn == 0) {
590 		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
591 	}
592 
593 	sptr = 0;
594 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
595             "nodewwn", &sptr);
596 	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
597 		char *eptr = 0;
598 		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
599 		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
600 			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
601 			isp->isp_osinfo.default_node_wwn = 0;
602 		} else {
603 			isp->isp_confopts |= ISP_CFG_OWNWWN;
604 		}
605 	}
606 	if (isp->isp_osinfo.default_node_wwn == 0) {
607 		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
608 	}
609 
610 	isp_debug = 0;
611         (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
612             "debug", &isp_debug);
613 
614 #ifdef	ISP_SMPLOCK
615 	/* Make sure the lock is set up. */
616 	mtx_init(&isp->isp_osinfo.lock, "isp", MTX_DEF);
617 	locksetup++;
618 
619 	if (bus_setup_intr(dev, irq, INTR_TYPE_CAM | INTR_MPSAFE | INTR_ENTROPY,
620 	    isp_pci_intr, isp, &pcs->ih)) {
621 		device_printf(dev, "could not setup interrupt\n");
622 		goto bad;
623 	}
624 #else
625 	if (bus_setup_intr(dev, irq, INTR_TYPE_CAM | INTR_ENTROPY,
626 	    isp_pci_intr, isp, &pcs->ih)) {
627 		device_printf(dev, "could not setup interrupt\n");
628 		goto bad;
629 	}
630 #endif
631 
632 	/*
633 	 * Set up logging levels.
634 	 */
635 	if (isp_debug) {
636 		isp->isp_dblev = isp_debug;
637 	} else {
638 		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
639 	}
640 	if (bootverbose)
641 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
642 
643 	/*
644 	 * Make sure we're in reset state.
645 	 */
646 	ISP_LOCK(isp);
647 	isp_reset(isp);
648 	if (isp->isp_state != ISP_RESETSTATE) {
649 		ISP_UNLOCK(isp);
650 		goto bad;
651 	}
652 	isp_init(isp);
653 	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
654 		isp_uninit(isp);
655 		ISP_UNLOCK(isp);
656 		goto bad;
657 	}
658 	isp_attach(isp);
659 	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
660 		isp_uninit(isp);
661 		ISP_UNLOCK(isp);
662 		goto bad;
663 	}
664 	/*
665 	 * XXXX: Here is where we might unload the f/w module
666 	 * XXXX: (or decrease the reference count to it).
667 	 */
668 	ISP_UNLOCK(isp);
669 	return (0);
670 
671 bad:
672 
673 	if (pcs && pcs->ih) {
674 		(void) bus_teardown_intr(dev, irq, pcs->ih);
675 	}
676 
677 #ifdef	ISP_SMPLOCK
678 	if (locksetup && isp) {
679 		mtx_destroy(&isp->isp_osinfo.lock);
680 	}
681 #endif
682 
683 	if (irq) {
684 		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
685 	}
686 
687 
688 	if (regs) {
689 		(void) bus_release_resource(dev, rtp, rgd, regs);
690 	}
691 
692 	if (pcs) {
693 		if (pcs->pci_isp.isp_param)
694 			free(pcs->pci_isp.isp_param, M_DEVBUF);
695 		free(pcs, M_DEVBUF);
696 	}
697 
698 	/*
699 	 * XXXX: Here is where we might unload the f/w module
700 	 * XXXX: (or decrease the reference count to it).
701 	 */
702 	return (ENXIO);
703 }
704 
705 static void
706 isp_pci_intr(void *arg)
707 {
708 	struct ispsoftc *isp = arg;
709 	ISP_LOCK(isp);
710 	(void) isp_intr(isp);
711 	ISP_UNLOCK(isp);
712 }
713 
714 static u_int16_t
715 isp_pci_rd_reg(isp, regoff)
716 	struct ispsoftc *isp;
717 	int regoff;
718 {
719 	u_int16_t rv;
720 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
721 	int offset, oldconf = 0;
722 
723 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
724 		/*
725 		 * We will assume that someone has paused the RISC processor.
726 		 */
727 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
728 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
729 	}
730 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
731 	offset += (regoff & 0xff);
732 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
733 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
734 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
735 	}
736 	return (rv);
737 }
738 
739 static void
740 isp_pci_wr_reg(isp, regoff, val)
741 	struct ispsoftc *isp;
742 	int regoff;
743 	u_int16_t val;
744 {
745 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
746 	int offset, oldconf = 0;
747 
748 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
749 		/*
750 		 * We will assume that someone has paused the RISC processor.
751 		 */
752 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
753 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
754 	}
755 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
756 	offset += (regoff & 0xff);
757 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
758 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
759 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
760 	}
761 }
762 
763 static u_int16_t
764 isp_pci_rd_reg_1080(isp, regoff)
765 	struct ispsoftc *isp;
766 	int regoff;
767 {
768 	u_int16_t rv, oc = 0;
769 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
770 	int offset;
771 
772 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
773 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
774 		u_int16_t tc;
775 		/*
776 		 * We will assume that someone has paused the RISC processor.
777 		 */
778 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
779 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
780 		if (regoff & SXP_BANK1_SELECT)
781 			tc |= BIU_PCI1080_CONF1_SXP1;
782 		else
783 			tc |= BIU_PCI1080_CONF1_SXP0;
784 		isp_pci_wr_reg(isp, BIU_CONF1, tc);
785 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
786 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
787 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
788 	}
789 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
790 	offset += (regoff & 0xff);
791 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
792 	if (oc) {
793 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
794 	}
795 	return (rv);
796 }
797 
798 static void
799 isp_pci_wr_reg_1080(isp, regoff, val)
800 	struct ispsoftc *isp;
801 	int regoff;
802 	u_int16_t val;
803 {
804 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
805 	int offset, oc = 0;
806 
807 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
808 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
809 		u_int16_t tc;
810 		/*
811 		 * We will assume that someone has paused the RISC processor.
812 		 */
813 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
814 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
815 		if (regoff & SXP_BANK1_SELECT)
816 			tc |= BIU_PCI1080_CONF1_SXP1;
817 		else
818 			tc |= BIU_PCI1080_CONF1_SXP0;
819 		isp_pci_wr_reg(isp, BIU_CONF1, tc);
820 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
821 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
822 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
823 	}
824 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
825 	offset += (regoff & 0xff);
826 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
827 	if (oc) {
828 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
829 	}
830 }
831 
832 static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
833 static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
834 static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
835 
836 struct imush {
837 	struct ispsoftc *isp;
838 	int error;
839 };
840 
841 static void
842 isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
843 {
844 	struct imush *imushp = (struct imush *) arg;
845 	if (error) {
846 		imushp->error = error;
847 	} else {
848 		imushp->isp->isp_rquest_dma = segs->ds_addr;
849 	}
850 }
851 
852 static void
853 isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
854 {
855 	struct imush *imushp = (struct imush *) arg;
856 	if (error) {
857 		imushp->error = error;
858 	} else {
859 		imushp->isp->isp_result_dma = segs->ds_addr;
860 	}
861 }
862 
863 static void
864 isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
865 {
866 	struct imush *imushp = (struct imush *) arg;
867 	if (error) {
868 		imushp->error = error;
869 	} else {
870 		fcparam *fcp = imushp->isp->isp_param;
871 		fcp->isp_scdma = segs->ds_addr;
872 	}
873 }
874 
875 static int
876 isp_pci_mbxdma(struct ispsoftc *isp)
877 {
878 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
879 	caddr_t base;
880 	u_int32_t len;
881 	int i, error;
882 	bus_size_t lim;
883 	struct imush im;
884 
885 
886 	/*
887 	 * Already been here? If so, leave...
888 	 */
889 	if (isp->isp_rquest) {
890 		return (0);
891 	}
892 
893 	len = sizeof (XS_T **) * isp->isp_maxcmds;
894 	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
895 	if (isp->isp_xflist == NULL) {
896 		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
897 		return (1);
898 	}
899 	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
900 	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF,  M_WAITOK);
901 	if (pci->dmaps == NULL) {
902 		isp_prt(isp, ISP_LOGERR, "can't alloc dma maps");
903 		free(isp->isp_xflist, M_DEVBUF);
904 		return (1);
905 	}
906 
907 	if (IS_FC(isp) || IS_ULTRA2(isp))
908 		lim = BUS_SPACE_MAXADDR + 1;
909 	else
910 		lim = BUS_SPACE_MAXADDR_24BIT + 1;
911 
912 	/*
913 	 * Allocate and map the request, result queues, plus FC scratch area.
914 	 */
915 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
916 	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
917 	if (IS_FC(isp)) {
918 		len += ISP2100_SCRLEN;
919 	}
920 	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
921 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
922 	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
923 		isp_prt(isp, ISP_LOGERR,
924 		    "cannot create a dma tag for control spaces");
925 		free(isp->isp_xflist, M_DEVBUF);
926 		free(pci->dmaps, M_DEVBUF);
927 		return (1);
928 	}
929 	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
930 	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
931 		isp_prt(isp, ISP_LOGERR,
932 		    "cannot allocate %d bytes of CCB memory", len);
933 		free(isp->isp_xflist, M_DEVBUF);
934 		free(pci->dmaps, M_DEVBUF);
935 		return (1);
936 	}
937 
938 	isp->isp_rquest = base;
939 	im.isp = isp;
940 	im.error = 0;
941 	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
942 	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), isp_map_rquest, &im, 0);
943 	if (im.error) {
944 		isp_prt(isp, ISP_LOGERR,
945 		    "error %d loading dma map for DMA request queue", im.error);
946 		free(isp->isp_xflist, M_DEVBUF);
947 		free(pci->dmaps, M_DEVBUF);
948 		isp->isp_rquest = NULL;
949 		return (1);
950 	}
951 	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
952 	im.error = 0;
953 	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
954 	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)), isp_map_result, &im, 0);
955 	if (im.error) {
956 		isp_prt(isp, ISP_LOGERR,
957 		    "error %d loading dma map for DMA result queue", im.error);
958 		free(isp->isp_xflist, M_DEVBUF);
959 		free(pci->dmaps, M_DEVBUF);
960 		isp->isp_rquest = NULL;
961 		return (1);
962 	}
963 
964 	for (i = 0; i < isp->isp_maxcmds; i++) {
965 		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
966 		if (error) {
967 			isp_prt(isp, ISP_LOGERR,
968 			    "error %d creating per-cmd DMA maps", error);
969 			free(isp->isp_xflist, M_DEVBUF);
970 			free(pci->dmaps, M_DEVBUF);
971 			isp->isp_rquest = NULL;
972 			return (1);
973 		}
974 	}
975 
976 	if (IS_FC(isp)) {
977 		fcparam *fcp = (fcparam *) isp->isp_param;
978 		fcp->isp_scratch = base +
979 			ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) +
980 			ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
981 		im.error = 0;
982 		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
983 		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
984 		if (im.error) {
985 			isp_prt(isp, ISP_LOGERR,
986 			    "error %d loading FC scratch area", im.error);
987 			free(isp->isp_xflist, M_DEVBUF);
988 			free(pci->dmaps, M_DEVBUF);
989 			isp->isp_rquest = NULL;
990 			return (1);
991 		}
992 	}
993 	return (0);
994 }
995 
996 typedef struct {
997 	struct ispsoftc *isp;
998 	void *cmd_token;
999 	void *rq;
1000 	u_int16_t *iptrp;
1001 	u_int16_t optr;
1002 	u_int error;
1003 } mush_t;
1004 
1005 #define	MUSHERR_NOQENTRIES	-2
1006 
1007 #ifdef	ISP_TARGET_MODE
1008 /*
1009  * We need to handle DMA for target mode differently from initiator mode.
1010  *
1011  * DMA mapping and construction and submission of CTIO Request Entries
1012  * and rendevous for completion are very tightly coupled because we start
1013  * out by knowing (per platform) how much data we have to move, but we
1014  * don't know, up front, how many DMA mapping segments will have to be used
1015  * cover that data, so we don't know how many CTIO Request Entries we
1016  * will end up using. Further, for performance reasons we may want to
1017  * (on the last CTIO for Fibre Channel), send status too (if all went well).
1018  *
1019  * The standard vector still goes through isp_pci_dmasetup, but the callback
1020  * for the DMA mapping routines comes here instead with the whole transfer
1021  * mapped and a pointer to a partially filled in already allocated request
1022  * queue entry. We finish the job.
1023  */
1024 static void tdma_mk __P((void *, bus_dma_segment_t *, int, int));
1025 static void tdma_mkfc __P((void *, bus_dma_segment_t *, int, int));
1026 
1027 static void
1028 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1029 {
1030 	mush_t *mp;
1031 	struct ccb_scsiio *csio;
1032 	struct isp_pcisoftc *pci;
1033 	bus_dmamap_t *dp;
1034 	u_int8_t scsi_status;
1035 	ct_entry_t *cto;
1036 	u_int16_t handle;
1037 	u_int32_t totxfr, sflags;
1038 	int nctios, send_status;
1039 	int32_t resid;
1040 
1041 	mp = (mush_t *) arg;
1042 	if (error) {
1043 		mp->error = error;
1044 		return;
1045 	}
1046 	csio = mp->cmd_token;
1047 	cto = mp->rq;
1048 
1049 	cto->ct_xfrlen = 0;
1050 	cto->ct_seg_count = 0;
1051 	cto->ct_header.rqs_entry_count = 1;
1052 	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1053 
1054 	if (nseg == 0) {
1055 		cto->ct_header.rqs_seqno = 1;
1056 		ISP_TDQE(mp->isp, "tdma_mk[no data]", *mp->iptrp, cto);
1057 		isp_prt(mp->isp, ISP_LOGTDEBUG1,
1058 		    "CTIO[%x] lun%d->iid%d flgs 0x%x sts 0x%x ssts 0x%x res %d",
1059 		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
1060 		    cto->ct_flags, cto->ct_status, cto->ct_scsi_status,
1061 		    cto->ct_resid);
1062 		ISP_SWIZ_CTIO(mp->isp, cto, cto);
1063 		return;
1064 	}
1065 
1066 	nctios = nseg / ISP_RQDSEG;
1067 	if (nseg % ISP_RQDSEG) {
1068 		nctios++;
1069 	}
1070 
1071 	/*
1072 	 * Save syshandle, and potentially any SCSI status, which we'll
1073 	 * reinsert on the last CTIO we're going to send.
1074 	 */
1075 	handle = cto->ct_syshandle;
1076 	cto->ct_syshandle = 0;
1077 	cto->ct_header.rqs_seqno = 0;
1078 	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1079 
1080 	if (send_status) {
1081 		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1082 		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1083 		/*
1084 		 * Preserve residual.
1085 		 */
1086 		resid = cto->ct_resid;
1087 
1088 		/*
1089 		 * Save actual SCSI status.
1090 		 */
1091 		scsi_status = cto->ct_scsi_status;
1092 
1093 		/*
1094 		 * We can't do a status at the same time as a data CTIO, so
1095 		 * we need to synthesize an extra CTIO at this level.
1096 		 */
1097 		nctios++;
1098 	} else {
1099 		sflags = scsi_status = resid = 0;
1100 	}
1101 
1102 	totxfr = cto->ct_resid = 0;
1103 	cto->ct_scsi_status = 0;
1104 
1105 	pci = (struct isp_pcisoftc *)mp->isp;
1106 	dp = &pci->dmaps[isp_handle_index(handle)];
1107 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1108 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1109 	} else {
1110 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1111 	}
1112 
1113 
1114 	while (nctios--) {
1115 		int seglim;
1116 
1117 		seglim = nseg;
1118 		if (seglim) {
1119 			int seg;
1120 
1121 			if (seglim > ISP_RQDSEG)
1122 				seglim = ISP_RQDSEG;
1123 
1124 			for (seg = 0; seg < seglim; seg++, nseg--) {
1125 				/*
1126 				 * Unlike normal initiator commands, we don't
1127 				 * do any swizzling here.
1128 				 */
1129 				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
1130 				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
1131 				cto->ct_xfrlen += dm_segs->ds_len;
1132 				totxfr += dm_segs->ds_len;
1133 				dm_segs++;
1134 			}
1135 			cto->ct_seg_count = seg;
1136 		} else {
1137 			/*
1138 			 * This case should only happen when we're sending an
1139 			 * extra CTIO with final status.
1140 			 */
1141 			if (send_status == 0) {
1142 				isp_prt(mp->isp, ISP_LOGWARN,
1143 				    "tdma_mk ran out of segments");
1144 				mp->error = EINVAL;
1145 				return;
1146 			}
1147 		}
1148 
1149 		/*
1150 		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
1151 		 * ct_tagtype, and ct_timeout have been carried over
1152 		 * unchanged from what our caller had set.
1153 		 *
1154 		 * The dataseg fields and the seg_count fields we just got
1155 		 * through setting. The data direction we've preserved all
1156 		 * along and only clear it if we're now sending status.
1157 		 */
1158 
1159 		if (nctios == 0) {
1160 			/*
1161 			 * We're the last in a sequence of CTIOs, so mark
1162 			 * this CTIO and save the handle to the CCB such that
1163 			 * when this CTIO completes we can free dma resources
1164 			 * and do whatever else we need to do to finish the
1165 			 * rest of the command.
1166 			 */
1167 			cto->ct_syshandle = handle;
1168 			cto->ct_header.rqs_seqno = 1;
1169 
1170 			if (send_status) {
1171 				cto->ct_scsi_status = scsi_status;
1172 				cto->ct_flags |= sflags | CT_NO_DATA;;
1173 				cto->ct_resid = resid;
1174 			}
1175 			if (send_status) {
1176 				isp_prt(mp->isp, ISP_LOGTDEBUG1,
1177 				    "CTIO[%x] lun%d for ID %d ct_flags 0x%x "
1178 				    "scsi status %x resid %d",
1179 				    cto->ct_fwhandle, csio->ccb_h.target_lun,
1180 				    cto->ct_iid, cto->ct_flags,
1181 				    cto->ct_scsi_status, cto->ct_resid);
1182 			} else {
1183 				isp_prt(mp->isp, ISP_LOGTDEBUG1,
1184 				    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
1185 				    cto->ct_fwhandle, csio->ccb_h.target_lun,
1186 				    cto->ct_iid, cto->ct_flags);
1187 			}
1188 			ISP_TDQE(mp->isp, "last tdma_mk", *mp->iptrp, cto);
1189 			ISP_SWIZ_CTIO(mp->isp, cto, cto);
1190 		} else {
1191 			ct_entry_t     *octo = cto;
1192 
1193 			/*
1194 			 * Make sure syshandle fields are clean
1195 			 */
1196 			cto->ct_syshandle = 0;
1197 			cto->ct_header.rqs_seqno = 0;
1198 
1199 			isp_prt(mp->isp, ISP_LOGTDEBUG1,
1200 			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
1201 			    cto->ct_fwhandle, csio->ccb_h.target_lun,
1202 			    cto->ct_iid, cto->ct_flags);
1203 			ISP_TDQE(mp->isp, "tdma_mk", *mp->iptrp, cto);
1204 
1205 			/*
1206 			 * Get a new CTIO
1207 			 */
1208 			cto = (ct_entry_t *)
1209 			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1210 			*mp->iptrp =
1211 			    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp));
1212 			if (*mp->iptrp == mp->optr) {
1213 				isp_prt(mp->isp, ISP_LOGTDEBUG0,
1214 				    "Queue Overflow in tdma_mk");
1215 				mp->error = MUSHERR_NOQENTRIES;
1216 				return;
1217 			}
1218 			/*
1219 			 * Fill in the new CTIO with info from the old one.
1220 			 */
1221 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1222 			cto->ct_header.rqs_entry_count = 1;
1223 			cto->ct_fwhandle = octo->ct_fwhandle;
1224 			cto->ct_header.rqs_flags = 0;
1225 			cto->ct_lun = octo->ct_lun;
1226 			cto->ct_iid = octo->ct_iid;
1227 			cto->ct_reserved2 = octo->ct_reserved2;
1228 			cto->ct_tgt = octo->ct_tgt;
1229 			cto->ct_flags = octo->ct_flags;
1230 			cto->ct_status = 0;
1231 			cto->ct_scsi_status = 0;
1232 			cto->ct_tag_val = octo->ct_tag_val;
1233 			cto->ct_tag_type = octo->ct_tag_type;
1234 			cto->ct_xfrlen = 0;
1235 			cto->ct_resid = 0;
1236 			cto->ct_timeout = octo->ct_timeout;
1237 			cto->ct_seg_count = 0;
1238 			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1239 			/*
1240 			 * Now swizzle the old one for the consumption of the
1241 			 * chip.
1242 			 */
1243 			ISP_SWIZ_CTIO(mp->isp, octo, octo);
1244 		}
1245 	}
1246 }
1247 
1248 static void
1249 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1250 {
1251 	mush_t *mp;
1252 	struct ccb_scsiio *csio;
1253 	struct isp_pcisoftc *pci;
1254 	bus_dmamap_t *dp;
1255 	ct2_entry_t *cto;
1256 	u_int16_t scsi_status, send_status, send_sense, handle;
1257 	u_int32_t totxfr, datalen;
1258 	u_int8_t sense[QLTM_SENSELEN];
1259 	int nctios;
1260 
1261 	mp = (mush_t *) arg;
1262 	if (error) {
1263 		mp->error = error;
1264 		return;
1265 	}
1266 
1267 	csio = mp->cmd_token;
1268 	cto = mp->rq;
1269 
1270 	if (nseg == 0) {
1271 		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1272 			isp_prt(mp->isp, ISP_LOGWARN,
1273 			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
1274 			    "set (0x%x)", cto->ct_flags);
1275 			mp->error = EINVAL;
1276 			return;
1277 		}
1278 	 	cto->ct_header.rqs_entry_count = 1;
1279 		cto->ct_header.rqs_seqno = 1;
1280 		/* ct_syshandle contains the handle set by caller */
1281 		/*
1282 		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1283 		 * flags to NO DATA and clear relative offset flags.
1284 		 * We preserve the ct_resid and the response area.
1285 		 */
1286 		cto->ct_flags |= CT2_NO_DATA;
1287 		if (cto->ct_resid > 0)
1288 			cto->ct_flags |= CT2_DATA_UNDER;
1289 		else if (cto->ct_resid < 0)
1290 			cto->ct_flags |= CT2_DATA_OVER;
1291 		cto->ct_seg_count = 0;
1292 		cto->ct_reloff = 0;
1293 		ISP_TDQE(mp->isp, "dma2_tgt_fc[no data]", *mp->iptrp, cto);
1294 		isp_prt(mp->isp, ISP_LOGTDEBUG1,
1295 		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1296 		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
1297 		    cto->ct_iid, cto->ct_flags, cto->ct_status,
1298 		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1299 		ISP_SWIZ_CTIO2(isp, cto, cto);
1300 		return;
1301 	}
1302 
1303 	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1304 		isp_prt(mp->isp, ISP_LOGWARN,
1305 		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
1306 		    "(0x%x)", cto->ct_flags);
1307 		mp->error = EINVAL;
1308 		return;
1309 	}
1310 
1311 
1312 	nctios = nseg / ISP_RQDSEG_T2;
1313 	if (nseg % ISP_RQDSEG_T2) {
1314 		nctios++;
1315 	}
1316 
1317 	/*
1318 	 * Save the handle, status, reloff, and residual. We'll reinsert the
1319 	 * handle into the last CTIO2 we're going to send, and reinsert status
1320 	 * and residual (and possibly sense data) if that's to be sent as well.
1321 	 *
1322 	 * We preserve ct_reloff and adjust it for each data CTIO2 we send past
1323 	 * the first one. This is needed so that the FCP DATA IUs being sent
1324 	 * out have the correct offset (they can arrive at the other end out
1325 	 * of order).
1326 	 */
1327 
1328 	handle = cto->ct_syshandle;
1329 	cto->ct_syshandle = 0;
1330 
1331 	if ((send_status = (cto->ct_flags & CT2_SENDSTATUS)) != 0) {
1332 		cto->ct_flags &= ~CT2_SENDSTATUS;
1333 
1334 		/*
1335 		 * Preserve residual, which is actually the total count.
1336 		 */
1337 		datalen = cto->ct_resid;
1338 
1339 		/*
1340 		 * Save actual SCSI status. We'll reinsert the
1341 		 * CT2_SNSLEN_VALID later if appropriate.
1342 		 */
1343 		scsi_status = cto->rsp.m0.ct_scsi_status & 0xff;
1344 		send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID;
1345 
1346 		/*
1347 		 * If we're sending status and have a CHECK CONDTION and
1348 		 * have sense data,  we send one more CTIO2 with just the
1349 		 * status and sense data. The upper layers have stashed
1350 		 * the sense data in the dataseg structure for us.
1351 		 */
1352 
1353 		if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND &&
1354 		    send_sense) {
1355 			bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN);
1356 			nctios++;
1357 		}
1358 	} else {
1359 		scsi_status = send_sense = datalen = 0;
1360 	}
1361 
1362 	totxfr = cto->ct_resid = 0;
1363 	cto->rsp.m0.ct_scsi_status = 0;
1364 	bzero(&cto->rsp, sizeof (cto->rsp));
1365 
1366 	pci = (struct isp_pcisoftc *)mp->isp;
1367 	dp = &pci->dmaps[isp_handle_index(handle)];
1368 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1369 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1370 	} else {
1371 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1372 	}
1373 
1374 	while (nctios--) {
1375 		int seg, seglim;
1376 
1377 		seglim = nseg;
1378 		if (seglim) {
1379 			if (seglim > ISP_RQDSEG_T2)
1380 				seglim = ISP_RQDSEG_T2;
1381 
1382 			for (seg = 0; seg < seglim; seg++) {
1383 				cto->rsp.m0.ct_dataseg[seg].ds_base =
1384 				    dm_segs->ds_addr;
1385 				cto->rsp.m0.ct_dataseg[seg].ds_count =
1386 				    dm_segs->ds_len;
1387 				cto->rsp.m0.ct_xfrlen += dm_segs->ds_len;
1388 				totxfr += dm_segs->ds_len;
1389 				dm_segs++;
1390 			}
1391 			cto->ct_seg_count = seg;
1392 		} else {
1393 			/*
1394 			 * This case should only happen when we're sending a
1395 			 * synthesized MODE1 final status with sense data.
1396 			 */
1397 			if (send_sense == 0) {
1398 				isp_prt(mp->isp, ISP_LOGWARN,
1399 				    "dma2_tgt_fc ran out of segments, "
1400 				    "no SENSE DATA");
1401 				mp->error = EINVAL;
1402 				return;
1403 			}
1404 		}
1405 
1406 		/*
1407 		 * At this point, the fields ct_lun, ct_iid, ct_rxid,
1408 		 * ct_timeout have been carried over unchanged from what
1409 		 * our caller had set.
1410 		 *
1411 		 * The field ct_reloff is either what the caller set, or
1412 		 * what we've added to below.
1413 		 *
1414 		 * The dataseg fields and the seg_count fields we just got
1415 		 * through setting. The data direction we've preserved all
1416 		 * along and only clear it if we're sending a MODE1 status
1417 		 * as the last CTIO.
1418 		 *
1419 		 */
1420 
1421 		if (nctios == 0) {
1422 
1423 			/*
1424 			 * We're the last in a sequence of CTIO2s, so mark this
1425 			 * CTIO2 and save the handle to the CCB such that when
1426 			 * this CTIO2 completes we can free dma resources and
1427 			 * do whatever else we need to do to finish the rest
1428 			 * of the command.
1429 			 */
1430 
1431 			cto->ct_syshandle = handle;
1432 			cto->ct_header.rqs_seqno = 1;
1433 
1434 			if (send_status) {
1435 				if (send_sense) {
1436 					bcopy(sense, cto->rsp.m1.ct_resp,
1437 					    QLTM_SENSELEN);
1438 					cto->rsp.m1.ct_senselen =
1439 					    QLTM_SENSELEN;
1440 					scsi_status |= CT2_SNSLEN_VALID;
1441 					cto->rsp.m1.ct_scsi_status =
1442 					    scsi_status;
1443 					cto->ct_flags &= CT2_FLAG_MMASK;
1444 					cto->ct_flags |= CT2_FLAG_MODE1 |
1445 					    CT2_NO_DATA| CT2_SENDSTATUS;
1446 				} else {
1447 					cto->rsp.m0.ct_scsi_status =
1448 					    scsi_status;
1449 					cto->ct_flags |= CT2_SENDSTATUS;
1450 				}
1451 				/*
1452 				 * Get 'real' residual and set flags based
1453 				 * on it.
1454 				 */
1455 				cto->ct_resid = datalen - totxfr;
1456 				if (cto->ct_resid > 0)
1457 					cto->ct_flags |= CT2_DATA_UNDER;
1458 				else if (cto->ct_resid < 0)
1459 					cto->ct_flags |= CT2_DATA_OVER;
1460 			}
1461 			ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto);
1462 			isp_prt(mp->isp, ISP_LOGTDEBUG1,
1463 			    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x"
1464 			    " ssts 0x%x res %d", cto->ct_rxid,
1465 			    csio->ccb_h.target_lun, (int) cto->ct_iid,
1466 			    cto->ct_flags, cto->ct_status,
1467 			    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1468 			ISP_SWIZ_CTIO2(isp, cto, cto);
1469 		} else {
1470 			ct2_entry_t *octo = cto;
1471 
1472 			/*
1473 			 * Make sure handle fields are clean
1474 			 */
1475 			cto->ct_syshandle = 0;
1476 			cto->ct_header.rqs_seqno = 0;
1477 
1478 			ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto);
1479 			isp_prt(mp->isp, ISP_LOGTDEBUG1,
1480 			    "CTIO2[%x] lun %d->iid%d flgs 0x%x",
1481 			    cto->ct_rxid, csio->ccb_h.target_lun,
1482 			    (int) cto->ct_iid, cto->ct_flags);
1483 			/*
1484 			 * Get a new CTIO2
1485 			 */
1486 			cto = (ct2_entry_t *)
1487 			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1488 			*mp->iptrp =
1489 			    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp));
1490 			if (*mp->iptrp == mp->optr) {
1491 				isp_prt(mp->isp, ISP_LOGWARN,
1492 				    "Queue Overflow in dma2_tgt_fc");
1493 				mp->error = MUSHERR_NOQENTRIES;
1494 				return;
1495 			}
1496 
1497 			/*
1498 			 * Fill in the new CTIO2 with info from the old one.
1499 			 */
1500 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1501 			cto->ct_header.rqs_entry_count = 1;
1502 			cto->ct_header.rqs_flags = 0;
1503 			/* ct_header.rqs_seqno && ct_syshandle done later */
1504 			cto->ct_fwhandle = octo->ct_fwhandle;
1505 			cto->ct_lun = octo->ct_lun;
1506 			cto->ct_iid = octo->ct_iid;
1507 			cto->ct_rxid = octo->ct_rxid;
1508 			cto->ct_flags = octo->ct_flags;
1509 			cto->ct_status = 0;
1510 			cto->ct_resid = 0;
1511 			cto->ct_timeout = octo->ct_timeout;
1512 			cto->ct_seg_count = 0;
1513 			/*
1514 			 * Adjust the new relative offset by the amount which
1515 			 * is recorded in the data segment of the old CTIO2 we
1516 			 * just finished filling out.
1517 			 */
1518 			cto->ct_reloff += octo->rsp.m0.ct_xfrlen;
1519 			bzero(&cto->rsp, sizeof (cto->rsp));
1520 			ISP_SWIZ_CTIO2(isp, cto, cto);
1521 		}
1522 	}
1523 }
1524 #endif
1525 
1526 static void dma2 __P((void *, bus_dma_segment_t *, int, int));
1527 
1528 static void
1529 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1530 {
1531 	mush_t *mp;
1532 	struct ccb_scsiio *csio;
1533 	struct isp_pcisoftc *pci;
1534 	bus_dmamap_t *dp;
1535 	bus_dma_segment_t *eseg;
1536 	ispreq_t *rq;
1537 	ispcontreq_t *crq;
1538 	int seglim, datalen;
1539 
1540 	mp = (mush_t *) arg;
1541 	if (error) {
1542 		mp->error = error;
1543 		return;
1544 	}
1545 
1546 	if (nseg < 1) {
1547 		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
1548 		mp->error = EFAULT;
1549 		return;
1550 	}
1551 	csio = mp->cmd_token;
1552 	rq = mp->rq;
1553 	pci = (struct isp_pcisoftc *)mp->isp;
1554 	dp = &pci->dmaps[isp_handle_index(rq->req_handle)];
1555 
1556 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1557 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1558 	} else {
1559 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1560 	}
1561 
1562 	datalen = XS_XFRLEN(csio);
1563 
1564 	/*
1565 	 * We're passed an initial partially filled in entry that
1566 	 * has most fields filled in except for data transfer
1567 	 * related values.
1568 	 *
1569 	 * Our job is to fill in the initial request queue entry and
1570 	 * then to start allocating and filling in continuation entries
1571 	 * until we've covered the entire transfer.
1572 	 */
1573 
1574 	if (IS_FC(mp->isp)) {
1575 		seglim = ISP_RQDSEG_T2;
1576 		((ispreqt2_t *)rq)->req_totalcnt = datalen;
1577 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1578 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
1579 		} else {
1580 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
1581 		}
1582 	} else {
1583 		if (csio->cdb_len > 12) {
1584 			seglim = 0;
1585 		} else {
1586 			seglim = ISP_RQDSEG;
1587 		}
1588 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1589 			rq->req_flags |= REQFLAG_DATA_IN;
1590 		} else {
1591 			rq->req_flags |= REQFLAG_DATA_OUT;
1592 		}
1593 	}
1594 
1595 	eseg = dm_segs + nseg;
1596 
1597 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
1598 		if (IS_FC(mp->isp)) {
1599 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1600 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
1601 			    dm_segs->ds_addr;
1602 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
1603 			    dm_segs->ds_len;
1604 		} else {
1605 			rq->req_dataseg[rq->req_seg_count].ds_base =
1606 				dm_segs->ds_addr;
1607 			rq->req_dataseg[rq->req_seg_count].ds_count =
1608 				dm_segs->ds_len;
1609 		}
1610 		datalen -= dm_segs->ds_len;
1611 #if	0
1612 		if (IS_FC(mp->isp)) {
1613 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1614 			device_printf(mp->isp->isp_dev,
1615 			    "seg0[%d] cnt 0x%x paddr 0x%08x\n",
1616 			    rq->req_seg_count,
1617 			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
1618 			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
1619 		} else {
1620 			device_printf(mp->isp->isp_dev,
1621 			    "seg0[%d] cnt 0x%x paddr 0x%08x\n",
1622 			    rq->req_seg_count,
1623 			    rq->req_dataseg[rq->req_seg_count].ds_count,
1624 			    rq->req_dataseg[rq->req_seg_count].ds_base);
1625 		}
1626 #endif
1627 		rq->req_seg_count++;
1628 		dm_segs++;
1629 	}
1630 
1631 	while (datalen > 0 && dm_segs != eseg) {
1632 		crq = (ispcontreq_t *)
1633 		    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1634 		*mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp));
1635 		if (*mp->iptrp == mp->optr) {
1636 			isp_prt(mp->isp,
1637 			    ISP_LOGDEBUG0, "Request Queue Overflow++");
1638 			mp->error = MUSHERR_NOQENTRIES;
1639 			return;
1640 		}
1641 		rq->req_header.rqs_entry_count++;
1642 		bzero((void *)crq, sizeof (*crq));
1643 		crq->req_header.rqs_entry_count = 1;
1644 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1645 
1646 		seglim = 0;
1647 		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
1648 			crq->req_dataseg[seglim].ds_base =
1649 			    dm_segs->ds_addr;
1650 			crq->req_dataseg[seglim].ds_count =
1651 			    dm_segs->ds_len;
1652 #if	0
1653 			device_printf(mp->isp->isp_dev,
1654 			    "seg%d[%d] cnt 0x%x paddr 0x%08x\n",
1655 			    rq->req_header.rqs_entry_count-1,
1656 			    seglim, crq->req_dataseg[seglim].ds_count,
1657 			    crq->req_dataseg[seglim].ds_base);
1658 #endif
1659 			rq->req_seg_count++;
1660 			dm_segs++;
1661 			seglim++;
1662 			datalen -= dm_segs->ds_len;
1663 		}
1664 	}
1665 }
1666 
1667 static int
1668 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
1669 	u_int16_t *iptrp, u_int16_t optr)
1670 {
1671 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1672 	bus_dmamap_t *dp = NULL;
1673 	mush_t mush, *mp;
1674 	void (*eptr) __P((void *, bus_dma_segment_t *, int, int));
1675 
1676 #ifdef	ISP_TARGET_MODE
1677 	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1678 		if (IS_FC(isp)) {
1679 			eptr = tdma_mkfc;
1680 		} else {
1681 			eptr = tdma_mk;
1682 		}
1683 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1684 		    (csio->dxfer_len == 0)) {
1685 			rq->req_seg_count = 1;
1686 			mp = &mush;
1687 			mp->isp = isp;
1688 			mp->cmd_token = csio;
1689 			mp->rq = rq;
1690 			mp->iptrp = iptrp;
1691 			mp->optr = optr;
1692 			mp->error = 0;
1693 			(*eptr)(mp, NULL, 0, 0);
1694 			goto exit;
1695 		}
1696 	} else
1697 #endif
1698 	eptr = dma2;
1699 
1700 	/*
1701 	 * NB: if we need to do request queue entry swizzling,
1702 	 * NB: this is where it would need to be done for cmds
1703 	 * NB: that move no data. For commands that move data,
1704 	 * NB: swizzling would take place in those functions.
1705 	 */
1706 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1707 	    (csio->dxfer_len == 0)) {
1708 		rq->req_seg_count = 1;
1709 		return (CMD_QUEUED);
1710 	}
1711 
1712 	/*
1713 	 * Do a virtual grapevine step to collect info for
1714 	 * the callback dma allocation that we have to use...
1715 	 */
1716 	mp = &mush;
1717 	mp->isp = isp;
1718 	mp->cmd_token = csio;
1719 	mp->rq = rq;
1720 	mp->iptrp = iptrp;
1721 	mp->optr = optr;
1722 	mp->error = 0;
1723 
1724 	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1725 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1726 			int error, s;
1727 			dp = &pci->dmaps[isp_handle_index(rq->req_handle)];
1728 			s = splsoftvm();
1729 			error = bus_dmamap_load(pci->parent_dmat, *dp,
1730 			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
1731 			if (error == EINPROGRESS) {
1732 				bus_dmamap_unload(pci->parent_dmat, *dp);
1733 				mp->error = EINVAL;
1734 				isp_prt(isp, ISP_LOGERR,
1735 				    "deferred dma allocation not supported");
1736 			} else if (error && mp->error == 0) {
1737 #ifdef	DIAGNOSTIC
1738 				isp_prt(isp, ISP_LOGERR,
1739 				    "error %d in dma mapping code", error);
1740 #endif
1741 				mp->error = error;
1742 			}
1743 			splx(s);
1744 		} else {
1745 			/* Pointer to physical buffer */
1746 			struct bus_dma_segment seg;
1747 			seg.ds_addr = (bus_addr_t)csio->data_ptr;
1748 			seg.ds_len = csio->dxfer_len;
1749 			(*eptr)(mp, &seg, 1, 0);
1750 		}
1751 	} else {
1752 		struct bus_dma_segment *segs;
1753 
1754 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1755 			isp_prt(isp, ISP_LOGERR,
1756 			    "Physical segment pointers unsupported");
1757 			mp->error = EINVAL;
1758 		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1759 			isp_prt(isp, ISP_LOGERR,
1760 			    "Virtual segment addresses unsupported");
1761 			mp->error = EINVAL;
1762 		} else {
1763 			/* Just use the segments provided */
1764 			segs = (struct bus_dma_segment *) csio->data_ptr;
1765 			(*eptr)(mp, segs, csio->sglist_cnt, 0);
1766 		}
1767 	}
1768 #ifdef	ISP_TARGET_MODE
1769 exit:
1770 #endif
1771 	if (mp->error) {
1772 		int retval = CMD_COMPLETE;
1773 		if (mp->error == MUSHERR_NOQENTRIES) {
1774 			retval = CMD_EAGAIN;
1775 		} else if (mp->error == EFBIG) {
1776 			XS_SETERR(csio, CAM_REQ_TOO_BIG);
1777 		} else if (mp->error == EINVAL) {
1778 			XS_SETERR(csio, CAM_REQ_INVALID);
1779 		} else {
1780 			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
1781 		}
1782 		return (retval);
1783 	} else {
1784 		/*
1785 		 * Check to see if we weren't cancelled while sleeping on
1786 		 * getting DMA resources...
1787 		 */
1788 		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1789 			if (dp) {
1790 				bus_dmamap_unload(pci->parent_dmat, *dp);
1791 			}
1792 			return (CMD_COMPLETE);
1793 		}
1794 		return (CMD_QUEUED);
1795 	}
1796 }
1797 
1798 static void
1799 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int32_t handle)
1800 {
1801 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1802 	bus_dmamap_t *dp = &pci->dmaps[isp_handle_index(handle)];
1803 	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1804 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
1805 	} else {
1806 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
1807 	}
1808 	bus_dmamap_unload(pci->parent_dmat, *dp);
1809 }
1810 
1811 
1812 static void
1813 isp_pci_reset1(struct ispsoftc *isp)
1814 {
1815 	/* Make sure the BIOS is disabled */
1816 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1817 	/* and enable interrupts */
1818 	ENABLE_INTS(isp);
1819 }
1820 
1821 static void
1822 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
1823 {
1824 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1825 	if (msg)
1826 		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
1827 	else
1828 		printf("%s:\n", device_get_nameunit(isp->isp_dev));
1829 	if (IS_SCSI(isp))
1830 		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
1831 	else
1832 		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
1833 	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
1834 	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
1835 	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
1836 
1837 
1838 	if (IS_SCSI(isp)) {
1839 		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
1840 		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1841 			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
1842 			ISP_READ(isp, CDMA_FIFO_STS));
1843 		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1844 			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
1845 			ISP_READ(isp, DDMA_FIFO_STS));
1846 		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1847 			ISP_READ(isp, SXP_INTERRUPT),
1848 			ISP_READ(isp, SXP_GROSS_ERR),
1849 			ISP_READ(isp, SXP_PINS_CTRL));
1850 		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
1851 	}
1852 	printf("    mbox regs: %x %x %x %x %x\n",
1853 	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
1854 	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
1855 	    ISP_READ(isp, OUTMAILBOX4));
1856 	printf("    PCI Status Command/Status=%x\n",
1857 	    pci_read_config(pci->pci_dev, PCIR_COMMAND, 1));
1858 }
1859