/*-
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#ifdef	ISP_TARGET_MODE
#ifdef	PAE
#error	"PAE and ISP_TARGET_MODE not supported yet"
#endif
#endif

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14
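
/*
 * The two register map offsets above are the first two PCI Base Address
 * Registers: BAR0 (config offset 0x10) is the I/O port window and BAR1
 * (config offset 0x14) is the memory window.
 */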

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
extern ispfwfunc *isp_get_firmware_p;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	const char *sptr;
	int locksetup = 0;

	/*
	 * Figure out if we're supposed to skip this one.
	 */

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "device is disabled\n");
		/* but return 0 so the !$)$)*!$*) unit isn't reused */
		return (0);
	}

	role = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &role) == 0 && role != -1) {
		role &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
		device_printf(dev, "setting role to 0x%x\n", role);
	} else {
#ifdef	ISP_TARGET_MODE
		role = ISP_ROLE_TARGET;
#else
		role = ISP_DEFAULT_ROLES;
#endif
	}
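
	/*
	 * Both tunables above come in through the kernel environment /
	 * device hints mechanism (resource_int_value(9)); a minimal
	 * sketch of the corresponding /boot/device.hints entries,
	 * assuming unit 0:
	 *
	 *	hint.isp.0.disable="1"
	 *	hint.isp.0.role="2"
	 */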

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pcs == NULL) {
		device_printf(dev, "cannot allocate softc\n");
		return (ENOMEM);
	}

	/*
	 * Which should we try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first on alpha and i/o first elsewhere,
	 * but now we just try memory first everywhere.
	 */
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_PORTEN;
		m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_MEMEN;
		m2 = PCIM_CMD_PORTEN;
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_role = role;
	isp->isp_dev = dev;

	/*
	 * Try and find firmware for this device.
	 */

	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef	ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values nor be directly coerced to interpret
	 * the right hand side of the assignment the way you want them
	 * to, we have to force WWN hint replacement to specify WWN
	 * strings with a leading 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		if (IS_FC(isp)) {
			isp->isp_osinfo.default_id = 109;
		} else {
			isp->isp_osinfo.default_id = 7;
		}
	}

	isp_debug = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &isp_debug);

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Last minute checks...
	 */
	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}

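/*
 * Register access helpers. IspVirt2Off translates one of the driver's
 * "virtual" register offsets into an actual PCI register offset by
 * looking up the register block's base in pci_poff[] and adding the
 * low byte of the offset; e.g. a MBOX_BLOCK register resolves to
 * pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] plus its offset within the
 * block. BXR2/BXW2 then do 16 bit bus_space accesses at that offset.
 */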
#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)

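/*
 * Debounced register read, used on the ISP2100 where back-to-back
 * reads of some BIU registers may disagree: read the register twice,
 * retrying (up to 1000 times) until two successive reads match, and
 * fail if they never do.
 */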
static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

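	/*
	 * The 2300 combines interrupt status and outgoing mailbox 0 in
	 * the 32 bit BIU_R2HSTSLO register: the low 16 bits carry the
	 * RISC-to-host status decoded in the switch below and the high
	 * 16 bits carry mailbox 0, so one read can fill in both *isrp
	 * and *mbox0p.
	 */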
	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}

struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

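/*
 * imc is the bus_dmamap_load callback for the single contiguous
 * control-space allocation made in isp_pci_mbxdma below. The loaded
 * region is carved up in order: request queue first, then result
 * queue, then (on FC cards only) the scratch area.
 */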
static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
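/*
 * For example, with the usual MAXPHYS of 128KiB and 4KiB pages this
 * works out to 33 segments: 32 page-sized pieces plus one more in
 * case the buffer doesn't start on a page boundary.
 */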

static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim, xlim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef	ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
	xlim = BUS_SPACE_MAXADDR_32BIT;
#else
	xlim = alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}
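
	/*
	 * slim is handed to bus_dma_tag_create() below both as the
	 * maximum segment size and, as slim+1, the boundary a segment
	 * may not cross; the 24 bit value for the older non-Ultra2
	 * parts presumably reflects their narrower DMA counters.
	 */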

	ISP_UNLOCK(isp);
	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
	    busdma_lock_mutex, &Giant, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}

	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
#ifdef	ISP_TARGET_MODE
	len = sizeof (void **) * isp->isp_maxcmds;
	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		ISP_LOCK(isp);
		return (1);
	}
#endif
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, xlim, xlim,
	    NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant,
	    &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

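/*
 * mush_t carries state from isp_pci_dmasetup() into the bus_dma
 * callbacks below: the softc, the CCB being mapped, the partially
 * built request queue entry, in/out request queue index pointers,
 * and a slot for the callback to report errors (including the
 * special MUSHERR_NOQENTRIES queue-full condition).
 */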
typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
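
/*
 * For the parallel SCSI CTIO path each request entry holds at most
 * ISP_RQDSEG data segments, so tdma_mk needs nseg / ISP_RQDSEG CTIOs,
 * rounded up (and, when STATUS_WITH_DATA is not defined, one more
 * CTIO just to carry the final status).
 */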
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%llx:%lld",
		    cto->ct_seg_count, (long long)dm_segs[segcnt].ds_addr,
		    (long long)dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]0x%llx:%lld",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (long long) dm_segs[segcnt].ds_addr,
			    (long long) dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

#ifdef	PAE
#define	LOWD(x)		((uint32_t) x)
#define	HIWD(x)		((uint32_t) (x >> 32))

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
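
	/*
	 * In this PAE build bus addresses may exceed 32 bits, so the
	 * 64 bit capable A64/Type 3 request entries are used and each
	 * segment address below is split into ds_base/ds_basehi with
	 * LOWD()/HIWD().
	 */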

	if (IS_FC(isp)) {
		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
		seglim = ISP_RQDSEG_T3;
		((ispreqt3_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		rq->req_header.rqs_entry_type = RQSTYPE_A64;
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG_A64;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
			rq3->req_dataseg[rq3->req_seg_count].ds_base =
			    LOWD(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    LOWD(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    LOWD(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			datalen -= dm_segs->ds_len;
			rq->req_seg_count++;
			dm_segs++;
			seglim++;
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
#else
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
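
	/*
	 * Once the seglim segments that fit into the request entry
	 * itself are consumed, the remainder spill into RQSTYPE_DATASEG
	 * continuation entries below, each taking up to ISP_CDSEG more
	 * segments.
	 */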
1812 
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
				dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
				dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

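	/*
	 * Whatever didn't fit in the request entry itself goes into
	 * continuation entries. If allocating the next entry would
	 * collide with the queue's output pointer, give up with
	 * MUSHERR_NOQENTRIES so the command can be retried later.
	 */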
	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len; /* consume before advancing */
			dm_segs++;
			seglim++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
#endif

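/*
 * Set up DMA for a SCSI I/O CCB and complete the data transfer portion
 * of the request queue entry. Returns CMD_QUEUED on success, CMD_EAGAIN
 * if the request queue is currently full, or CMD_COMPLETE with an error
 * already set in the CCB.
 */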
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
	u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Gather up the state the busdma callback will need, since the
	 * actual segment setup happens in the callback (eptr) invoked
	 * by bus_dmamap_load below.
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

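	/*
	 * The segment callback must run synchronously; a deferred
	 * (EINPROGRESS) completion is rejected below because the
	 * request queue entry has already been claimed and the caller
	 * expects an immediate answer.
	 */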
	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
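	/*
	 * Copy (and byte swizzle, where needed) the finished entry into
	 * its actual slot on the request queue, using the put routine
	 * that matches the entry type.
	 */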
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	case RQSTYPE_A64:
	case RQSTYPE_T3RQS:
		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

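/*
 * Post-command DMA cleanup: sync the map in the direction the data
 * moved, then unload it so the map can be reused.
 */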
static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}

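/*
 * Post-reset fixup: keep the chip's BIOS access disabled and re-enable
 * interrupt delivery.
 */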
static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

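/*
 * Debugging aid: dump the interesting BIU, DMA, SXP and mailbox
 * registers. The SCSI DMA and SXP registers are only sampled with the
 * RISC processor paused, hence the PAUSE/RELEASE bracket below.
 */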
static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
			ISP_READ(isp, CDMA_FIFO_STS));
		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
			ISP_READ(isp, DDMA_FIFO_STS));
		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
			ISP_READ(isp, SXP_INTERRUPT),
			ISP_READ(isp, SXP_GROSS_ERR),
			ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	/* read 4 bytes so both Command (low 16) and Status (high 16) show */
	printf("    PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));
}