xref: /freebsd/sys/dev/isp/isp_pci.c (revision 2b743a9e9ddc6736208dc8ca1ce06ce64ad20a19)
1 /*-
2  *
3  * Copyright (c) 1997-2006 by Matthew Jacob
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice immediately at the beginning of the file, without modification,
11  *    this list of conditions, and the following disclaimer.
12  * 2. The name of the author may not be used to endorse or promote products
13  *    derived from this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  */
28 /*
29  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
30  * FreeBSD Version.
31  */
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/module.h>
39 #if __FreeBSD_version >= 700000
40 #include <sys/linker.h>
41 #include <sys/firmware.h>
42 #endif
43 #include <sys/bus.h>
44 #if __FreeBSD_version < 500000
45 #include <pci/pcireg.h>
46 #include <pci/pcivar.h>
47 #include <machine/bus_memio.h>
48 #include <machine/bus_pio.h>
49 #else
50 #include <sys/stdint.h>
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
53 #endif
54 #include <machine/bus.h>
55 #include <machine/resource.h>
56 #include <sys/rman.h>
57 #include <sys/malloc.h>
58 
59 #include <dev/isp/isp_freebsd.h>
60 
61 #if __FreeBSD_version < 500000
62 #define	BUS_PROBE_DEFAULT	0
63 #endif
64 
65 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
66 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
67 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
68 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
69 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
70 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
71 static int
72 isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
73 static int
74 isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
75 static int
76 isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
77 static int isp_pci_mbxdma(ispsoftc_t *);
78 static int
79 isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t);
80 static void
81 isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t);
82 
83 
84 static void isp_pci_reset0(ispsoftc_t *);
85 static void isp_pci_reset1(ispsoftc_t *);
86 static void isp_pci_dumpregs(ispsoftc_t *, const char *);
87 
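/*
 * Per-family method vectors. A minimal annotation: each table supplies the
 * register read/write, interrupt-status, mailbox DMA, DMA setup/teardown
 * and reset/dump entry points the core driver calls via isp->isp_mdvec.
 */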
88 static struct ispmdvec mdvec = {
89 	isp_pci_rd_isr,
90 	isp_pci_rd_reg,
91 	isp_pci_wr_reg,
92 	isp_pci_mbxdma,
93 	isp_pci_dmasetup,
94 	isp_pci_dmateardown,
95 	isp_pci_reset0,
96 	isp_pci_reset1,
97 	isp_pci_dumpregs,
98 	NULL,
99 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
100 };
101 
102 static struct ispmdvec mdvec_1080 = {
103 	isp_pci_rd_isr,
104 	isp_pci_rd_reg_1080,
105 	isp_pci_wr_reg_1080,
106 	isp_pci_mbxdma,
107 	isp_pci_dmasetup,
108 	isp_pci_dmateardown,
109 	isp_pci_reset0,
110 	isp_pci_reset1,
111 	isp_pci_dumpregs,
112 	NULL,
113 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
114 };
115 
116 static struct ispmdvec mdvec_12160 = {
117 	isp_pci_rd_isr,
118 	isp_pci_rd_reg_1080,
119 	isp_pci_wr_reg_1080,
120 	isp_pci_mbxdma,
121 	isp_pci_dmasetup,
122 	isp_pci_dmateardown,
123 	isp_pci_reset0,
124 	isp_pci_reset1,
125 	isp_pci_dumpregs,
126 	NULL,
127 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
128 };
129 
130 static struct ispmdvec mdvec_2100 = {
131 	isp_pci_rd_isr,
132 	isp_pci_rd_reg,
133 	isp_pci_wr_reg,
134 	isp_pci_mbxdma,
135 	isp_pci_dmasetup,
136 	isp_pci_dmateardown,
137 	isp_pci_reset0,
138 	isp_pci_reset1,
139 	isp_pci_dumpregs
140 };
141 
142 static struct ispmdvec mdvec_2200 = {
143 	isp_pci_rd_isr,
144 	isp_pci_rd_reg,
145 	isp_pci_wr_reg,
146 	isp_pci_mbxdma,
147 	isp_pci_dmasetup,
148 	isp_pci_dmateardown,
149 	isp_pci_reset0,
150 	isp_pci_reset1,
151 	isp_pci_dumpregs
152 };
153 
154 static struct ispmdvec mdvec_2300 = {
155 	isp_pci_rd_isr_2300,
156 	isp_pci_rd_reg,
157 	isp_pci_wr_reg,
158 	isp_pci_mbxdma,
159 	isp_pci_dmasetup,
160 	isp_pci_dmateardown,
161 	isp_pci_reset0,
162 	isp_pci_reset1,
163 	isp_pci_dumpregs
164 };
165 
166 static struct ispmdvec mdvec_2400 = {
167 	isp_pci_rd_isr_2400,
168 	isp_pci_rd_reg_2400,
169 	isp_pci_wr_reg_2400,
170 	isp_pci_mbxdma,
171 	isp_pci_dmasetup,
172 	isp_pci_dmateardown,
173 	isp_pci_reset0,
174 	isp_pci_reset1,
175 	NULL
176 };
177 
178 #ifndef	PCIM_CMD_INVEN
179 #define	PCIM_CMD_INVEN			0x10
180 #endif
181 #ifndef	PCIM_CMD_BUSMASTEREN
182 #define	PCIM_CMD_BUSMASTEREN		0x0004
183 #endif
184 #ifndef	PCIM_CMD_PERRESPEN
185 #define	PCIM_CMD_PERRESPEN		0x0040
186 #endif
187 #ifndef	PCIM_CMD_SEREN
188 #define	PCIM_CMD_SEREN			0x0100
189 #endif
190 #ifndef	PCIM_CMD_INTX_DISABLE
191 #define	PCIM_CMD_INTX_DISABLE		0x0400
192 #endif
193 
194 #ifndef	PCIR_COMMAND
195 #define	PCIR_COMMAND			0x04
196 #endif
197 
198 #ifndef	PCIR_CACHELNSZ
199 #define	PCIR_CACHELNSZ			0x0c
200 #endif
201 
202 #ifndef	PCIR_LATTIMER
203 #define	PCIR_LATTIMER			0x0d
204 #endif
205 
206 #ifndef	PCIR_ROMADDR
207 #define	PCIR_ROMADDR			0x30
208 #endif
209 
210 #ifndef	PCI_VENDOR_QLOGIC
211 #define	PCI_VENDOR_QLOGIC		0x1077
212 #endif
213 
214 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
215 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
216 #endif
217 
218 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
219 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
220 #endif
221 
222 #ifndef	PCI_PRODUCT_QLOGIC_ISP10160
223 #define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
224 #endif
225 
226 #ifndef	PCI_PRODUCT_QLOGIC_ISP12160
227 #define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
228 #endif
229 
230 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
231 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
232 #endif
233 
234 #ifndef	PCI_PRODUCT_QLOGIC_ISP1280
235 #define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
236 #endif
237 
238 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
239 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
240 #endif
241 
242 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
243 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
244 #endif
245 
246 #ifndef	PCI_PRODUCT_QLOGIC_ISP2300
247 #define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
248 #endif
249 
250 #ifndef	PCI_PRODUCT_QLOGIC_ISP2312
251 #define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
252 #endif
253 
254 #ifndef	PCI_PRODUCT_QLOGIC_ISP2322
255 #define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
256 #endif
257 
258 #ifndef	PCI_PRODUCT_QLOGIC_ISP2422
259 #define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
260 #endif
261 
262 #ifndef	PCI_PRODUCT_QLOGIC_ISP2432
263 #define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
264 #endif
265 
266 #ifndef	PCI_PRODUCT_QLOGIC_ISP6312
267 #define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
268 #endif
269 
270 #ifndef	PCI_PRODUCT_QLOGIC_ISP6322
271 #define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
272 #endif
273 
274 
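/*
 * Composite IDs laid out the way pci_get_devid() reports them:
 * (device ID << 16) | vendor ID.
 */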
275 #define	PCI_QLOGIC_ISP1020	\
276 	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
277 
278 #define	PCI_QLOGIC_ISP1080	\
279 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
280 
281 #define	PCI_QLOGIC_ISP10160	\
282 	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
283 
284 #define	PCI_QLOGIC_ISP12160	\
285 	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
286 
287 #define	PCI_QLOGIC_ISP1240	\
288 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
289 
290 #define	PCI_QLOGIC_ISP1280	\
291 	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
292 
293 #define	PCI_QLOGIC_ISP2100	\
294 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
295 
296 #define	PCI_QLOGIC_ISP2200	\
297 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
298 
299 #define	PCI_QLOGIC_ISP2300	\
300 	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
301 
302 #define	PCI_QLOGIC_ISP2312	\
303 	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
304 
305 #define	PCI_QLOGIC_ISP2322	\
306 	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
307 
308 #define	PCI_QLOGIC_ISP2422	\
309 	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
310 
311 #define	PCI_QLOGIC_ISP2432	\
312 	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
313 
314 #define	PCI_QLOGIC_ISP6312	\
315 	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
316 
317 #define	PCI_QLOGIC_ISP6322	\
318 	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
319 
320 /*
321  * Odd case for some AMI raid cards... We need to *not* attach to this.
322  */
323 #define	AMI_RAID_SUBVENDOR_ID	0x101e
324 
325 #define	IO_MAP_REG	0x10
326 #define	MEM_MAP_REG	0x14
327 
328 #define	PCI_DFLT_LTNCY	0x40
329 #define	PCI_DFLT_LNSZ	0x10
330 
331 static int isp_pci_probe (device_t);
332 static int isp_pci_attach (device_t);
333 static int isp_pci_detach (device_t);
334 
335 
336 struct isp_pcisoftc {
337 	ispsoftc_t			pci_isp;
338 	device_t			pci_dev;
339 	struct resource *		pci_reg;
340 	bus_space_tag_t			pci_st;
341 	bus_space_handle_t		pci_sh;
342 	void *				ih;
343 	int16_t				pci_poff[_NREG_BLKS];
344 	bus_dma_tag_t			dmat;
345 	bus_dmamap_t			*dmaps;
346 };
347 
348 
349 static device_method_t isp_pci_methods[] = {
350 	/* Device interface */
351 	DEVMETHOD(device_probe,		isp_pci_probe),
352 	DEVMETHOD(device_attach,	isp_pci_attach),
353 	DEVMETHOD(device_detach,	isp_pci_detach),
354 	{ 0, 0 }
355 };
356 static void isp_pci_intr(void *);
357 
358 static driver_t isp_pci_driver = {
359 	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
360 };
361 static devclass_t isp_devclass;
362 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
363 #if __FreeBSD_version < 700000
364 extern ispfwfunc *isp_get_firmware_p;
365 #endif
366 
367 static int
368 isp_pci_probe(device_t dev)
369 {
370 	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
371 	case PCI_QLOGIC_ISP1020:
372 		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
373 		break;
374 	case PCI_QLOGIC_ISP1080:
375 		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
376 		break;
377 	case PCI_QLOGIC_ISP1240:
378 		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
379 		break;
380 	case PCI_QLOGIC_ISP1280:
381 		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
382 		break;
383 	case PCI_QLOGIC_ISP10160:
384 		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
385 		break;
386 	case PCI_QLOGIC_ISP12160:
387 		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
388 			return (ENXIO);
389 		}
390 		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
391 		break;
392 	case PCI_QLOGIC_ISP2100:
393 		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
394 		break;
395 	case PCI_QLOGIC_ISP2200:
396 		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
397 		break;
398 	case PCI_QLOGIC_ISP2300:
399 		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
400 		break;
401 	case PCI_QLOGIC_ISP2312:
402 		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
403 		break;
404 	case PCI_QLOGIC_ISP2322:
405 		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
406 		break;
407 	case PCI_QLOGIC_ISP2422:
408 		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
409 		break;
410 	case PCI_QLOGIC_ISP2432:
411 		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
412 		break;
413 	case PCI_QLOGIC_ISP6312:
414 		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
415 		break;
416 	case PCI_QLOGIC_ISP6322:
417 		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
418 		break;
419 	default:
420 		return (ENXIO);
421 	}
422 	if (isp_announced == 0 && bootverbose) {
423 		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
424 		    "Core Version %d.%d\n",
425 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
426 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
427 		isp_announced++;
428 	}
429 	/*
430 	 * XXXX: Here is where we might load the f/w module
431 	 * XXXX: (or increase a reference count to it).
432 	 */
433 	return (BUS_PROBE_DEFAULT);
434 }
435 
436 #if __FreeBSD_version < 500000
437 static void
438 isp_get_generic_options(device_t dev, ispsoftc_t *isp)
439 {
440 	uint64_t wwn;
441 	int bitmap, unit;
442 
443 	unit = device_get_unit(dev);
444 	if (getenv_int("isp_disable", &bitmap)) {
445 		if (bitmap & (1 << unit)) {
446 			isp->isp_osinfo.disabled = 1;
447 			return;
448 		}
449 	}
450 	if (getenv_int("isp_no_fwload", &bitmap)) {
451 		if (bitmap & (1 << unit))
452 			isp->isp_confopts |= ISP_CFG_NORELOAD;
453 	}
454 	if (getenv_int("isp_fwload", &bitmap)) {
455 		if (bitmap & (1 << unit))
456 			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
457 	}
458 	if (getenv_int("isp_no_nvram", &bitmap)) {
459 		if (bitmap & (1 << unit))
460 			isp->isp_confopts |= ISP_CFG_NONVRAM;
461 	}
462 	if (getenv_int("isp_nvram", &bitmap)) {
463 		if (bitmap & (1 << unit))
464 			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
465 	}
466 
467 	bitmap = 0;
468 	(void) getenv_int("isp_debug", &bitmap);
469 	if (bitmap) {
470 		isp->isp_dblev = bitmap;
471 	} else {
472 		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
473 	}
474 	if (bootverbose) {
475 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
476 	}
477 
478 	bitmap = 0;
479 	if (getenv_int("role", &bitmap)) {
480 		isp->isp_role = bitmap;
481 	} else {
482 		isp->isp_role = ISP_DEFAULT_ROLES;
483 	}
484 
485 }
486 
487 static void
488 isp_get_pci_options(device_t dev, int *m1, int *m2)
489 {
490 	int bitmap;
491 	int unit = device_get_unit(dev);
492 
493 	*m1 = PCIM_CMD_MEMEN;
494 	*m2 = PCIM_CMD_PORTEN;
495 	if (getenv_int("isp_mem_map", &bitmap)) {
496 		if (bitmap & (1 << unit)) {
497 			*m1 = PCIM_CMD_MEMEN;
498 			*m2 = PCIM_CMD_PORTEN;
499 		}
500 	}
501 	bitmap = 0;
502 	if (getenv_int("isp_io_map", &bitmap)) {
503 		if (bitmap & (1 << unit)) {
504 			*m1 = PCIM_CMD_PORTEN;
505 			*m2 = PCIM_CMD_MEMEN;
506 		}
507 	}
508 }
509 
510 static void
511 isp_get_specific_options(device_t dev, ispsoftc_t *isp)
512 {
513 	uint64_t wwn;
	int bitmap;
	int unit = device_get_unit(dev);

514 	callout_handle_init(&isp->isp_osinfo.ldt);
515 	callout_handle_init(&isp->isp_osinfo.gdt);
516 
517 	if (IS_SCSI(isp)) {
518 		return;
519 	}
520 
521 	if (getenv_int("isp_fcduplex", &bitmap)) {
522 		if (bitmap & (1 << unit))
523 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
524 	}
525 	if (getenv_int("isp_no_fcduplex", &bitmap)) {
526 		if (bitmap & (1 << unit))
527 			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
528 	}
529 	if (getenv_int("isp_nport", &bitmap)) {
530 		if (bitmap & (1 << unit))
531 			isp->isp_confopts |= ISP_CFG_NPORT;
532 	}
533 
534 	/*
535 	 * Because the resource_*_value functions can neither return
536 	 * 64 bit integer values, nor can they be directly coerced
537 	 * to interpret the right hand side of the assignment as
538 	 * you want them to interpret it, we have to force WWN
539 	 * hint replacement to specify WWN strings with a leading
540 	 * 'w' (e.g. w50000000aaaa0001). Sigh.
541 	 */
542 	if (getenv_quad("isp_portwwn", &wwn)) {
543 		isp->isp_osinfo.default_port_wwn = wwn;
544 		isp->isp_confopts |= ISP_CFG_OWNWWPN;
545 	}
546 	if (isp->isp_osinfo.default_port_wwn == 0) {
547 		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
548 	}
549 
550 	if (getenv_quad("isp_nodewwn", &wwn)) {
551 		isp->isp_osinfo.default_node_wwn = wwn;
552 		isp->isp_confopts |= ISP_CFG_OWNWWNN;
553 	}
554 	if (isp->isp_osinfo.default_node_wwn == 0) {
555 		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
556 	}
557 
558 	bitmap = 0;
559 	(void) getenv_int("isp_fabric_hysteresis", &bitmap);
560 	if (bitmap >= 0 && bitmap < 256) {
561 		isp->isp_osinfo.hysteresis = bitmap;
562 	} else {
563 		isp->isp_osinfo.hysteresis = isp_fabric_hysteresis;
564 	}
565 
566 	bitmap = 0;
567 	(void) getenv_int("isp_loop_down_limit", &bitmap);
568 	if (bitmap >= 0 && bitmap < 0xffff) {
569 		isp->isp_osinfo.loop_down_limit = bitmap;
570 	} else {
571 		isp->isp_osinfo.loop_down_limit = isp_loop_down_limit;
572 	}
573 
574 	bitmap = 0;
575 	(void) getenv_int("isp_gone_device_time", &bitmap);
576 	if (bitmap >= 0 && bitmap < 0xffff) {
577 		isp->isp_osinfo.gone_device_time = bitmap;
578 	} else {
579 		isp->isp_osinfo.gone_device_time = isp_gone_device_time;
580 	}
581 #ifdef	ISP_FW_CRASH_DUMP
582 	bitmap = 0;
583 	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
584 			if (bitmap & (1 << unit)) {
585 			size_t amt = 0;
586 			if (IS_2200(isp)) {
587 				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
588 			} else if (IS_23XX(isp)) {
589 				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
590 			}
591 			if (amt) {
592 				FCPARAM(isp)->isp_dump_data =
593 				    malloc(amt, M_DEVBUF, M_WAITOK);
594 				memset(FCPARAM(isp)->isp_dump_data, 0, amt);
595 			} else {
596 				device_printf(dev,
597 				    "f/w crash dumps not supported for card\n");
598 			}
599 		}
600 	}
601 #endif
602 }
603 #else
604 static void
605 isp_get_generic_options(device_t dev, ispsoftc_t *isp)
606 {
607 	int tval;
608 
609 	/*
610 	 * Figure out if we're supposed to skip this one.
611 	 */
612 	tval = 0;
613 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
614 	    "disable", &tval) == 0 && tval) {
615 		device_printf(dev, "disabled at user request\n");
616 		isp->isp_osinfo.disabled = 1;
617 		return;
618 	}
619 
620 	tval = -1;
621 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
622 	    "role", &tval) == 0 && tval != -1) {
623 		tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
624 		isp->isp_role = tval;
625 		device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
626 	} else {
627 #ifdef	ISP_TARGET_MODE
628 		isp->isp_role = ISP_ROLE_TARGET;
629 #else
630 		isp->isp_role = ISP_DEFAULT_ROLES;
631 #endif
632 	}
633 
634 	tval = 0;
635 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
636 	    "fwload_disable", &tval) == 0 && tval != 0) {
637 		isp->isp_confopts |= ISP_CFG_NORELOAD;
638 	}
639 	tval = 0;
640 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
641 	    "ignore_nvram", &tval) == 0 && tval != 0) {
642 		isp->isp_confopts |= ISP_CFG_NONVRAM;
643 	}
644 
645 	tval = 0;
646 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
647 	    "debug", &tval);
648 	if (tval) {
649 		isp->isp_dblev = tval;
650 	} else {
651 		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
652 	}
653 	if (bootverbose) {
654 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
655 	}
656 
657 }
658 
659 static void
660 isp_get_pci_options(device_t dev, int *m1, int *m2)
661 {
662 	int tval;
663 	/*
664 	 * Which should we try first: memory mapping or i/o mapping?
665 	 *
666 	 * We used to try memory first followed by i/o on alpha, otherwise
667 	 * the reverse, but we should just try memory first all the time now.
668 	 */
669 	*m1 = PCIM_CMD_MEMEN;
670 	*m2 = PCIM_CMD_PORTEN;
671 
672 	tval = 0;
673 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
674 	    "prefer_iomap", &tval) == 0 && tval != 0) {
675 		*m1 = PCIM_CMD_PORTEN;
676 		*m2 = PCIM_CMD_MEMEN;
677 	}
678 	tval = 0;
679 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
680 	    "prefer_memmap", &tval) == 0 && tval != 0) {
681 		*m1 = PCIM_CMD_MEMEN;
682 		*m2 = PCIM_CMD_PORTEN;
683 	}
684 }
685 
686 static void
687 isp_get_specific_options(device_t dev, ispsoftc_t *isp)
688 {
689 	const char *sptr;
690 	int tval;
691 
692 	isp->isp_osinfo.default_id = -1;
693 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
694 	    "iid", &tval) == 0) {
695 		isp->isp_osinfo.default_id = tval;
696 		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
697 	}
698 	if (isp->isp_osinfo.default_id == -1) {
699 		if (IS_FC(isp)) {
700 			isp->isp_osinfo.default_id = 109;
701 		} else {
702 			isp->isp_osinfo.default_id = 7;
703 		}
704 	}
705 
706 	callout_handle_init(&isp->isp_osinfo.ldt);
707 	callout_handle_init(&isp->isp_osinfo.gdt);
708 
709 	if (IS_SCSI(isp)) {
710 		return;
711 	}
712 
713 	tval = 0;
714 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
715 	    "fullduplex", &tval) == 0 && tval != 0) {
716 		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
717 	}
718 #ifdef	ISP_FW_CRASH_DUMP
719 	tval = 0;
720 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
721 	    "fw_dump_enable", &tval) == 0 && tval != 0) {
722 		size_t amt = 0;
723 		if (IS_2200(isp)) {
724 			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
725 		} else if (IS_23XX(isp)) {
726 			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
727 		}
728 		if (amt) {
729 			FCPARAM(isp)->isp_dump_data =
730 			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
731 		} else {
732 			device_printf(dev,
733 			    "f/w crash dumps not supported for this model\n");
734 		}
735 	}
736 #endif
737 	sptr = 0;
738 	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
739 	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
740 		if (strcmp(sptr, "lport") == 0) {
741 			isp->isp_confopts |= ISP_CFG_LPORT;
742 		} else if (strcmp(sptr, "nport") == 0) {
743 			isp->isp_confopts |= ISP_CFG_NPORT;
744 		} else if (strcmp(sptr, "lport-only") == 0) {
745 			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
746 		} else if (strcmp(sptr, "nport-only") == 0) {
747 			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
748 		}
749 	}
750 
751 	/*
752 	 * Because the resource_*_value functions can neither return
753 	 * 64 bit integer values, nor can they be directly coerced
754 	 * to interpret the right hand side of the assignment as
755 	 * you want them to interpret it, we have to force WWN
756 	 * hint replacement to specify WWN strings with a leading
757 	 * 'w' (e.g. w50000000aaaa0001). Sigh.
758 	 */
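	/* e.g. via a loader hint: hint.isp.0.portwwn="w50000000aaaa0001" */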
759 	sptr = 0;
760 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
761 	    "portwwn", (const char **) &sptr);
762 	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
763 		char *eptr = 0;
764 		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
765 		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
766 			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
767 			isp->isp_osinfo.default_port_wwn = 0;
768 		} else {
769 			isp->isp_confopts |= ISP_CFG_OWNWWPN;
770 		}
771 	}
772 	if (isp->isp_osinfo.default_port_wwn == 0) {
773 		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
774 	}
775 
776 	sptr = 0;
777 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
778 	    "nodewwn", (const char **) &sptr);
779 	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
780 		char *eptr = 0;
781 		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
782 		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
783 			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
784 			isp->isp_osinfo.default_node_wwn = 0;
785 		} else {
786 			isp->isp_confopts |= ISP_CFG_OWNWWNN;
787 		}
788 	}
789 	if (isp->isp_osinfo.default_node_wwn == 0) {
790 		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
791 	}
792 
793 
794 	tval = 0;
795 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
796 	    "hysteresis", &tval);
797 	if (tval >= 0 && tval < 256) {
798 		isp->isp_osinfo.hysteresis = tval;
799 	} else {
800 		isp->isp_osinfo.hysteresis = isp_fabric_hysteresis;
801 	}
802 
803 	tval = -1;
804 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
805 	    "loop_down_limit", &tval);
806 	if (tval >= 0 && tval < 0xffff) {
807 		isp->isp_osinfo.loop_down_limit = tval;
808 	} else {
809 		isp->isp_osinfo.loop_down_limit = isp_loop_down_limit;
810 	}
811 
812 	tval = -1;
813 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
814 	    "gone_device_time", &tval);
815 	if (tval >= 0 && tval < 0xffff) {
816 		isp->isp_osinfo.gone_device_time = tval;
817 	} else {
818 		isp->isp_osinfo.gone_device_time = isp_gone_device_time;
819 	}
820 }
821 #endif
822 
823 static int
824 isp_pci_attach(device_t dev)
825 {
826 	struct resource *regs, *irq;
827 	int rtp, rgd, iqd, m1, m2;
828 	uint32_t data, cmd, linesz, psize, basetype;
829 	struct isp_pcisoftc *pcs;
830 	ispsoftc_t *isp = NULL;
831 	struct ispmdvec *mdvp;
832 #if __FreeBSD_version >= 500000
833 	int locksetup = 0;
834 #endif
835 
836 	pcs = device_get_softc(dev);
837 	if (pcs == NULL) {
838 		device_printf(dev, "cannot get softc\n");
839 		return (ENOMEM);
840 	}
841 	memset(pcs, 0, sizeof (*pcs));
842 	pcs->pci_dev = dev;
843 	isp = &pcs->pci_isp;
844 
845 	/*
846 	 * Get Generic Options
847 	 */
848 	isp_get_generic_options(dev, isp);
849 
850 	/*
851 	 * Check to see if options have us disabled
852 	 */
853 	if (isp->isp_osinfo.disabled) {
854 		/*
855 		 * But return zero to preserve unit numbering
856 		 */
857 		return (0);
858 	}
859 
860 	/*
861 	 * Get PCI options- which in this case are just mapping preferences.
862 	 */
863 	isp_get_pci_options(dev, &m1, &m2);
864 
865 	linesz = PCI_DFLT_LNSZ;
866 	irq = regs = NULL;
867 	rgd = rtp = iqd = 0;
868 
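	/*
	 * Only attempt a mapping whose decode bit is already on in the
	 * command register: the preferred type (m1) first, then (m2).
	 */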
869 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
870 	if (cmd & m1) {
871 		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
872 		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
873 		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
874 	}
875 	if (regs == NULL && (cmd & m2)) {
876 		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
877 		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
878 		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
879 	}
880 	if (regs == NULL) {
881 		device_printf(dev, "unable to map any ports\n");
882 		goto bad;
883 	}
884 	if (bootverbose) {
885 		device_printf(dev, "using %s space register mapping\n",
886 		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
887 	}
888 	pcs->pci_dev = dev;
889 	pcs->pci_reg = regs;
890 	pcs->pci_st = rman_get_bustag(regs);
891 	pcs->pci_sh = rman_get_bushandle(regs);
892 
893 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
894 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
895 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
896 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
897 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
898 	mdvp = &mdvec;
899 	basetype = ISP_HA_SCSI_UNKNOWN;
900 	psize = sizeof (sdparam);
901 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
902 		mdvp = &mdvec;
903 		basetype = ISP_HA_SCSI_UNKNOWN;
904 		psize = sizeof (sdparam);
905 	}
906 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
907 		mdvp = &mdvec_1080;
908 		basetype = ISP_HA_SCSI_1080;
909 		psize = sizeof (sdparam);
910 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
911 		    ISP1080_DMA_REGS_OFF;
912 	}
913 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
914 		mdvp = &mdvec_1080;
915 		basetype = ISP_HA_SCSI_1240;
916 		psize = 2 * sizeof (sdparam);
917 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
918 		    ISP1080_DMA_REGS_OFF;
919 	}
920 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
921 		mdvp = &mdvec_1080;
922 		basetype = ISP_HA_SCSI_1280;
923 		psize = 2 * sizeof (sdparam);
924 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
925 		    ISP1080_DMA_REGS_OFF;
926 	}
927 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
928 		mdvp = &mdvec_12160;
929 		basetype = ISP_HA_SCSI_10160;
930 		psize = sizeof (sdparam);
931 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
932 		    ISP1080_DMA_REGS_OFF;
933 	}
934 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
935 		mdvp = &mdvec_12160;
936 		basetype = ISP_HA_SCSI_12160;
937 		psize = 2 * sizeof (sdparam);
938 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
939 		    ISP1080_DMA_REGS_OFF;
940 	}
941 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
942 		mdvp = &mdvec_2100;
943 		basetype = ISP_HA_FC_2100;
944 		psize = sizeof (fcparam);
945 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
946 		    PCI_MBOX_REGS2100_OFF;
947 		if (pci_get_revid(dev) < 3) {
948 			/*
949 			 * XXX: Need to get the actual revision
950 			 * XXX: number of the 2100 FB. At any rate,
951 			 * XXX: lower cache line size for early revision
952 			 * XXX: boards.
953 			 */
954 			linesz = 1;
955 		}
956 	}
957 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
958 		mdvp = &mdvec_2200;
959 		basetype = ISP_HA_FC_2200;
960 		psize = sizeof (fcparam);
961 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
962 		    PCI_MBOX_REGS2100_OFF;
963 	}
964 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
965 		mdvp = &mdvec_2300;
966 		basetype = ISP_HA_FC_2300;
967 		psize = sizeof (fcparam);
968 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
969 		    PCI_MBOX_REGS2300_OFF;
970 	}
971 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
972 	    pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
973 		mdvp = &mdvec_2300;
974 		basetype = ISP_HA_FC_2312;
975 		psize = sizeof (fcparam);
976 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
977 		    PCI_MBOX_REGS2300_OFF;
978 	}
979 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
980 	    pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
981 		mdvp = &mdvec_2300;
982 		basetype = ISP_HA_FC_2322;
983 		psize = sizeof (fcparam);
984 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
985 		    PCI_MBOX_REGS2300_OFF;
986 	}
987 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422 ||
988 	    pci_get_devid(dev) == PCI_QLOGIC_ISP2432) {
989 		mdvp = &mdvec_2400;
990 		basetype = ISP_HA_FC_2400;
991 		psize = sizeof (fcparam);
992 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
993 		    PCI_MBOX_REGS2400_OFF;
994 	}
995 	isp = &pcs->pci_isp;
996 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
997 	if (isp->isp_param == NULL) {
998 		device_printf(dev, "cannot allocate parameter data\n");
999 		goto bad;
1000 	}
1001 	isp->isp_mdvec = mdvp;
1002 	isp->isp_type = basetype;
1003 	isp->isp_revision = pci_get_revid(dev);
1004 	isp->isp_dev = dev;
1005 
1006 	/*
1007 	 * Now that we know who we are (roughly) get/set specific options
1008 	 */
1009 	isp_get_specific_options(dev, isp);
1010 
1011 #if __FreeBSD_version >= 700000
1012 	/*
1013 	 * Try and find firmware for this device.
1014 	 */
1015 	{
1016 		char fwname[32];
1017 		unsigned int did = pci_get_device(dev);
1018 
1019 		/*
1020 		 * Map a few pci ids to fw names
1021 		 */
1022 		switch (did) {
1023 		case PCI_PRODUCT_QLOGIC_ISP1020:
1024 			did = 0x1040;
1025 			break;
1026 		case PCI_PRODUCT_QLOGIC_ISP1240:
1027 			did = 0x1080;
1028 			break;
1029 		case PCI_PRODUCT_QLOGIC_ISP10160:
1030 		case PCI_PRODUCT_QLOGIC_ISP12160:
1031 			did = 0x12160;
1032 			break;
1033 		case PCI_PRODUCT_QLOGIC_ISP6312:
1034 		case PCI_PRODUCT_QLOGIC_ISP2312:
1035 			did = 0x2300;
1036 			break;
1037 		case PCI_PRODUCT_QLOGIC_ISP6322:
1038 			did = 0x2322;
1039 			break;
1040 		case PCI_PRODUCT_QLOGIC_ISP2422:
1041 		case PCI_PRODUCT_QLOGIC_ISP2432:
1042 			did = 0x2400;
1043 			break;
1044 		default:
1045 			break;
1046 		}
1047 
1048 		isp->isp_osinfo.fw = NULL;
1049 		if (isp->isp_role & ISP_ROLE_TARGET) {
1050 			snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
1051 			isp->isp_osinfo.fw = firmware_get(fwname);
1052 		}
1053 		if (isp->isp_osinfo.fw == NULL) {
1054 			snprintf(fwname, sizeof (fwname), "isp_%04x", did);
1055 			isp->isp_osinfo.fw = firmware_get(fwname);
1056 		}
1057 		if (isp->isp_osinfo.fw != NULL) {
1058 			union {
1059 				const void *fred;
1060 				uint16_t *bob;
1061 			} u;
1062 			u.fred = isp->isp_osinfo.fw->data;
1063 			isp->isp_mdvec->dv_ispfw = u.bob;
1064 		}
1065 	}
1066 #else
1067 	if (isp_get_firmware_p) {
1068 		int device = (int) pci_get_device(dev);
1069 #ifdef	ISP_TARGET_MODE
1070 		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
1071 #else
1072 		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
1073 #endif
1074 	}
1075 #endif
1076 
1077 	/*
1078 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
1079 	 * are set.
1080 	 */
1081 	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
1082 		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
1083 
1084 	if (IS_2300(isp)) {	/* per QLogic errata */
1085 		cmd &= ~PCIM_CMD_INVEN;
1086 	}
1087 
1088 	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
1089 		cmd &= ~PCIM_CMD_INTX_DISABLE;
1090 	}
1091 
1092 #ifdef	WE_KNEW_WHAT_WE_WERE_DOING
1093 	if (IS_24XX(isp)) {
1094 		int reg;
1095 
1096 		cmd &= ~PCIM_CMD_INTX_DISABLE;
1097 
1098 		/*
1099 		 * Is this a PCI-X card? If so, set max read byte count.
1100 		 */
1101 		if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
1102 			uint16_t pxcmd;
1103 			reg += 2;
1104 
1105 			pxcmd = pci_read_config(dev, reg, 2);
1106 			pxcmd &= ~0xc;
1107 			pxcmd |= 0x8;
1108 			pci_write_config(dev, reg, 2, pxcmd);
1109 		}
1110 
1111 		/*
1112 		 * Is this a PCI Express card? If so, set max read byte count.
1113 		 */
1114 		if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
1115 			uint16_t pectl;
1116 
1117 			reg += 0x8;
1118 			pectl = pci_read_config(dev, reg, 2);
1119 			pectl &= ~0x7000;
1120 			pectl |= 0x4000;
1121 			pci_write_config(dev, reg, 2, pectl);
1122 		}
1123 	}
1124 #else
1125 	if (IS_24XX(isp)) {
1126 		cmd &= ~PCIM_CMD_INTX_DISABLE;
1127 	}
1128 #endif
1129 
1130 	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
1131 
1132 	/*
1133 	 * Make sure the Cache Line Size register is set sensibly.
1134 	 */
1135 	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
1136 	if (data != linesz) {
1137 		data = PCI_DFLT_LNSZ;
1138 		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
1139 		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
1140 	}
1141 
1142 	/*
1143 	 * Make sure the Latency Timer is sane.
1144 	 */
1145 	data = pci_read_config(dev, PCIR_LATTIMER, 1);
1146 	if (data < PCI_DFLT_LTNCY) {
1147 		data = PCI_DFLT_LTNCY;
1148 		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
1149 		pci_write_config(dev, PCIR_LATTIMER, data, 1);
1150 	}
1151 
1152 	/*
1153 	 * Make sure we've disabled the ROM.
1154 	 */
1155 	data = pci_read_config(dev, PCIR_ROMADDR, 4);
1156 	data &= ~1;
1157 	pci_write_config(dev, PCIR_ROMADDR, data, 4);
1158 
1159 	iqd = 0;
1160 	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
1161 	    RF_ACTIVE | RF_SHAREABLE);
1162 	if (irq == NULL) {
1163 		device_printf(dev, "could not allocate interrupt\n");
1164 		goto bad;
1165 	}
1166 
1167 #if __FreeBSD_version >= 500000
1168 	/* Make sure the lock is set up. */
1169 	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
1170 	locksetup++;
1171 #endif
1172 
1173 	if (isp_setup_intr(dev, irq, ISP_IFLAGS, NULL, isp_pci_intr, isp,
1174 	    &pcs->ih)) {
1175 		device_printf(dev, "could not setup interrupt\n");
1176 		goto bad;
1177 	}
1178 
1179 	/*
1180 	 * Last minute checks...
1181 	 */
1182 	if (IS_23XX(isp) || IS_24XX(isp)) {
1183 		isp->isp_port = pci_get_function(dev);
1184 	}
1185 
1186 	if (IS_23XX(isp)) {
1187 		/*
1188 		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
1189 		 */
1190 		isp->isp_touched = 1;
1191 	}
1192 
1193 	/*
1194 	 * Make sure we're in reset state.
1195 	 */
1196 	ISP_LOCK(isp);
1197 	isp_reset(isp);
1198 	if (isp->isp_state != ISP_RESETSTATE) {
1199 		ISP_UNLOCK(isp);
1200 		goto bad;
1201 	}
1202 	isp_init(isp);
1203 	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
1204 		isp_uninit(isp);
1205 		ISP_UNLOCK(isp);
1206 		goto bad;
1207 	}
1208 	isp_attach(isp);
1209 	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
1210 		isp_uninit(isp);
1211 		ISP_UNLOCK(isp);
1212 		goto bad;
1213 	}
1214 	/*
1215 	 * XXXX: Here is where we might unload the f/w module
1216 	 * XXXX: (or decrease the reference count to it).
1217 	 */
1218 	ISP_UNLOCK(isp);
1219 
1220 	return (0);
1221 
1222 bad:
1223 
1224 	if (pcs && pcs->ih) {
1225 		(void) bus_teardown_intr(dev, irq, pcs->ih);
1226 	}
1227 
1228 #if __FreeBSD_version >= 500000
1229 	if (locksetup && isp) {
1230 		mtx_destroy(&isp->isp_osinfo.lock);
1231 	}
1232 #endif
1233 
1234 	if (irq) {
1235 		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
1236 	}
1237 
1238 
1239 	if (regs) {
1240 		(void) bus_release_resource(dev, rtp, rgd, regs);
1241 	}
1242 
1243 	if (pcs) {
1244 		if (pcs->pci_isp.isp_param) {
1245 #ifdef	ISP_FW_CRASH_DUMP
1246 			if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
1247 				free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
1248 			}
1249 #endif
1250 			free(pcs->pci_isp.isp_param, M_DEVBUF);
1251 		}
1252 	}
1253 
1254 	/*
1255 	 * XXXX: Here is where we might unload the f/w module
1256 	 * XXXX: (or decrease the reference count to it).
1257 	 */
1258 	return (ENXIO);
1259 }
1260 
1261 static int
1262 isp_pci_detach(device_t dev)
1263 {
1264 	struct isp_pcisoftc *pcs;
1265 	ispsoftc_t *isp;
1266 
1267 	pcs = device_get_softc(dev);
1268 	if (pcs == NULL) {
1269 		return (ENXIO);
1270 	}
1271 	isp = (ispsoftc_t *) pcs;
1272 	ISP_DISABLE_INTS(isp);
1273 	return (0);
1274 }
1275 
1276 static void
1277 isp_pci_intr(void *arg)
1278 {
1279 	ispsoftc_t *isp = arg;
1280 	uint32_t isr;
1281 	uint16_t sema, mbox;
1282 
1283 	ISP_LOCK(isp);
1284 	isp->isp_intcnt++;
1285 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
1286 		isp->isp_intbogus++;
1287 	} else {
1288 		isp_intr(isp, isr, sema, mbox);
1289 	}
1290 	ISP_UNLOCK(isp);
1291 }
1292 
1293 
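/*
 * Register access plumbing: virtual register offsets encode a block
 * selector in their upper bits; IspVirt2Off() maps that block through
 * pci_poff[] to the BAR-relative offset. BXR/BXW are bus_space shorthand.
 */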
1294 #define	IspVirt2Off(a, x)	\
1295 	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
1296 	_BLK_REG_SHFT] + ((x) & 0xfff))
1297 
1298 #define	BXR2(pcs, off)		\
1299 	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
1300 #define	BXW2(pcs, off, v)	\
1301 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
1302 #define	BXR4(pcs, off)		\
1303 	bus_space_read_4(pcs->pci_st, pcs->pci_sh, off)
1304 #define	BXW4(pcs, off, v)	\
1305 	bus_space_write_4(pcs->pci_st, pcs->pci_sh, off, v)
1306 
1307 
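/*
 * Debounced read for the ISP2100: keep reading the register until two
 * successive reads agree, giving up (and failing) after 1000 attempts.
 */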
1308 static __inline int
1309 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
1310 {
1311 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1312 	uint32_t val0, val1;
1313 	int i = 0;
1314 
1315 	do {
1316 		val0 = BXR2(pcs, IspVirt2Off(isp, off));
1317 		val1 = BXR2(pcs, IspVirt2Off(isp, off));
1318 	} while (val0 != val1 && ++i < 1000);
1319 	if (val0 != val1) {
1320 		return (1);
1321 	}
1322 	*rp = val0;
1323 	return (0);
1324 }
1325 
1326 static int
1327 isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp,
1328     uint16_t *semap, uint16_t *mbp)
1329 {
1330 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1331 	uint16_t isr, sema;
1332 
1333 	if (IS_2100(isp)) {
1334 		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
1335 			return (0);
1336 		}
1337 		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
1338 			return (0);
1339 		}
1340 	} else {
1341 		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
1342 		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
1343 	}
1344 	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
1345 	isr &= INT_PENDING_MASK(isp);
1346 	sema &= BIU_SEMA_LOCK;
1347 	if (isr == 0 && sema == 0) {
1348 		return (0);
1349 	}
1350 	*isrp = isr;
1351 	if ((*semap = sema) != 0) {
1352 		if (IS_2100(isp)) {
1353 			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
1354 				return (0);
1355 			}
1356 		} else {
1357 			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
1358 		}
1359 	}
1360 	return (1);
1361 }
1362 
1363 static int
1364 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp,
1365     uint16_t *semap, uint16_t *mbox0p)
1366 {
1367 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1368 	uint32_t hccr;
1369 	uint32_t r2hisr;
1370 
1371 	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
1372 		*isrp = 0;
1373 		return (0);
1374 	}
1375 	r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU_R2HSTSLO));
1376 	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1377 	if ((r2hisr & BIU_R2HST_INTR) == 0) {
1378 		*isrp = 0;
1379 		return (0);
1380 	}
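	/* The low 16 bits carry the interrupt status; the high 16, mailbox 0. */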
1381 	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
1382 	case ISPR2HST_ROM_MBX_OK:
1383 	case ISPR2HST_ROM_MBX_FAIL:
1384 	case ISPR2HST_MBX_OK:
1385 	case ISPR2HST_MBX_FAIL:
1386 	case ISPR2HST_ASYNC_EVENT:
1387 		*isrp = r2hisr & 0xffff;
1388 		*mbox0p = (r2hisr >> 16);
1389 		*semap = 1;
1390 		return (1);
1391 	case ISPR2HST_RIO_16:
1392 		*isrp = r2hisr & 0xffff;
1393 		*mbox0p = ASYNC_RIO1;
1394 		*semap = 1;
1395 		return (1);
1396 	case ISPR2HST_FPOST:
1397 		*isrp = r2hisr & 0xffff;
1398 		*mbox0p = ASYNC_CMD_CMPLT;
1399 		*semap = 1;
1400 		return (1);
1401 	case ISPR2HST_FPOST_CTIO:
1402 		*isrp = r2hisr & 0xffff;
1403 		*mbox0p = ASYNC_CTIO_DONE;
1404 		*semap = 1;
1405 		return (1);
1406 	case ISPR2HST_RSPQ_UPDATE:
1407 		*isrp = r2hisr & 0xffff;
1408 		*mbox0p = 0;
1409 		*semap = 0;
1410 		return (1);
1411 	default:
1412 		hccr = ISP_READ(isp, HCCR);
1413 		if (hccr & HCCR_PAUSE) {
1414 			ISP_WRITE(isp, HCCR, HCCR_RESET);
1415 			isp_prt(isp, ISP_LOGERR,
1416 			    "RISC paused at interrupt (%x->%x)", hccr,
1417 			    ISP_READ(isp, HCCR));
1418 			ISP_WRITE(isp, BIU_ICR, 0);
1419 		} else {
1420 			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n",
1421 			    r2hisr);
1422 		}
1423 		return (0);
1424 	}
1425 }
1426 
1427 static int
1428 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp,
1429     uint16_t *semap, uint16_t *mbox0p)
1430 {
1431 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1432 	uint32_t r2hisr;
1433 
1434 	r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO));
1435 	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1436 	if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
1437 		*isrp = 0;
1438 		return (0);
1439 	}
1440 	switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
1441 	case ISP2400R2HST_ROM_MBX_OK:
1442 	case ISP2400R2HST_ROM_MBX_FAIL:
1443 	case ISP2400R2HST_MBX_OK:
1444 	case ISP2400R2HST_MBX_FAIL:
1445 	case ISP2400R2HST_ASYNC_EVENT:
1446 		*isrp = r2hisr & 0xffff;
1447 		*mbox0p = (r2hisr >> 16);
1448 		*semap = 1;
1449 		return (1);
1450 	case ISP2400R2HST_RSPQ_UPDATE:
1451 	case ISP2400R2HST_ATIO_RSPQ_UPDATE:
1452 	case ISP2400R2HST_ATIO_RQST_UPDATE:
1453 		*isrp = r2hisr & 0xffff;
1454 		*mbox0p = 0;
1455 		*semap = 0;
1456 		return (1);
1457 	default:
1458 		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
1459 		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1460 		return (0);
1461 	}
1462 }
1463 
1464 static uint32_t
1465 isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
1466 {
1467 	uint32_t rv;
1468 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1469 	int oldconf = 0;
1470 
1471 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1472 		/*
1473 		 * We will assume that someone has paused the RISC processor.
1474 		 */
1475 		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1476 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1477 		    oldconf | BIU_PCI_CONF1_SXP);
1478 	}
1479 	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1480 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1481 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1482 	}
1483 	return (rv);
1484 }
1485 
1486 static void
1487 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
1488 {
1489 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1490 	int oldconf = 0;
1491 	volatile int junk;
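	/*
	 * The reads into 'junk' below appear to be there to flush posted
	 * PCI writes on the 2100 before proceeding.
	 */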
1492 
1493 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1494 		/*
1495 		 * We will assume that someone has paused the RISC processor.
1496 		 */
1497 		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1498 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1499 		    oldconf | BIU_PCI_CONF1_SXP);
1500 		if (IS_2100(isp)) {
1501 			junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1502 		}
1503 	}
1504 	BXW2(pcs, IspVirt2Off(isp, regoff), val);
1505 	if (IS_2100(isp)) {
1506 		junk = BXR2(pcs, IspVirt2Off(isp, regoff));
1507 	}
1508 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1509 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1510 		if (IS_2100(isp)) {
1511 			junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1512 		}
1513 	}
1514 }
1515 
1516 static uint32_t
1517 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
1518 {
1519 	uint32_t rv, oc = 0;
1520 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1521 
1522 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1523 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1524 		uint32_t tc;
1525 		/*
1526 		 * We will assume that someone has paused the RISC processor.
1527 		 */
1528 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1529 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1530 		if (regoff & SXP_BANK1_SELECT)
1531 			tc |= BIU_PCI1080_CONF1_SXP1;
1532 		else
1533 			tc |= BIU_PCI1080_CONF1_SXP0;
1534 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1535 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1536 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1537 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1538 		    oc | BIU_PCI1080_CONF1_DMA);
1539 	}
1540 	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1541 	if (oc) {
1542 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1543 	}
1544 	return (rv);
1545 }
1546 
1547 static void
1548 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
1549 {
1550 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1551 	int oc = 0;
1552 	volatile int junk;
1553 
1554 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1555 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1556 		uint32_t tc;
1557 		/*
1558 		 * We will assume that someone has paused the RISC processor.
1559 		 */
1560 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1561 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1562 		if (regoff & SXP_BANK1_SELECT)
1563 			tc |= BIU_PCI1080_CONF1_SXP1;
1564 		else
1565 			tc |= BIU_PCI1080_CONF1_SXP0;
1566 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1567 		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1568 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1569 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1570 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1571 		    oc | BIU_PCI1080_CONF1_DMA);
1572 		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1573 	}
1574 	BXW2(pcs, IspVirt2Off(isp, regoff), val);
1575 	junk = BXR2(pcs, IspVirt2Off(isp, regoff));
1576 	if (oc) {
1577 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1578 		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1579 	}
1580 }
1581 
1582 static uint32_t
1583 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
1584 {
1585 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1586 	uint32_t rv;
1587 	int block = regoff & _BLK_REG_MASK;
1588 
1589 	switch (block) {
1590 	case BIU_BLOCK:
1591 		break;
1592 	case MBOX_BLOCK:
1593 		return (BXR2(pcs, IspVirt2Off(pcs, regoff)));
1594 	case SXP_BLOCK:
1595 		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
1596 		return (0xffffffff);
1597 	case RISC_BLOCK:
1598 		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
1599 		return (0xffffffff);
1600 	case DMA_BLOCK:
1601 		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
1602 		return (0xffffffff);
1603 	default:
1604 		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
1605 		return (0xffffffff);
1606 	}
1607 
1608 
1609 	switch (regoff) {
1610 	case BIU2400_FLASH_ADDR:
1611 	case BIU2400_FLASH_DATA:
1612 	case BIU2400_ICR:
1613 	case BIU2400_ISR:
1614 	case BIU2400_CSR:
1615 	case BIU2400_REQINP:
1616 	case BIU2400_REQOUTP:
1617 	case BIU2400_RSPINP:
1618 	case BIU2400_RSPOUTP:
1619 	case BIU2400_PRI_RQINP:
1620 	case BIU2400_PRI_RSPINP:
1621 	case BIU2400_ATIO_RSPINP:
1622 	case BIU2400_ATIO_REQINP:
1623 	case BIU2400_HCCR:
1624 	case BIU2400_GPIOD:
1625 	case BIU2400_GPIOE:
1626 	case BIU2400_HSEMA:
1627 		rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
1628 		break;
1629 	case BIU2400_R2HSTSLO:
1630 		rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
1631 		break;
1632 	case BIU2400_R2HSTSHI:
1633 		rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16;
1634 		break;
1635 	default:
1636 		isp_prt(isp, ISP_LOGERR,
1637 		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
1638 		rv = 0xffffffff;
1639 		break;
1640 	}
1641 	return (rv);
1642 }
1643 
1644 static void
1645 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
1646 {
1647 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1648 	int block = regoff & _BLK_REG_MASK;
1649 	volatile int junk;
1650 
1651 	switch (block) {
1652 	case BIU_BLOCK:
1653 		break;
1654 	case MBOX_BLOCK:
1655 		BXW2(pcs, IspVirt2Off(pcs, regoff), val);
1656 		junk = BXR2(pcs, IspVirt2Off(pcs, regoff));
1657 		return;
1658 	case SXP_BLOCK:
1659 		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
1660 		return;
1661 	case RISC_BLOCK:
1662 		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
1663 		return;
1664 	case DMA_BLOCK:
1665 		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
1666 		return;
1667 	default:
1668 		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
1669 		    regoff);
1670 		break;
1671 	}
1672 
1673 	switch (regoff) {
1674 	case BIU2400_FLASH_ADDR:
1675 	case BIU2400_FLASH_DATA:
1676 	case BIU2400_ICR:
1677 	case BIU2400_ISR:
1678 	case BIU2400_CSR:
1679 	case BIU2400_REQINP:
1680 	case BIU2400_REQOUTP:
1681 	case BIU2400_RSPINP:
1682 	case BIU2400_RSPOUTP:
1683 	case BIU2400_PRI_RQINP:
1684 	case BIU2400_PRI_RSPINP:
1685 	case BIU2400_ATIO_RSPINP:
1686 	case BIU2400_ATIO_REQINP:
1687 	case BIU2400_HCCR:
1688 	case BIU2400_GPIOD:
1689 	case BIU2400_GPIOE:
1690 	case BIU2400_HSEMA:
1691 		BXW4(pcs, IspVirt2Off(pcs, regoff), val);
1692 		junk = BXR4(pcs, IspVirt2Off(pcs, regoff));
1693 		break;
1694 	default:
1695 		isp_prt(isp, ISP_LOGERR,
1696 		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
1697 		break;
1698 	}
1699 }
1700 
1701 
1702 struct imush {
1703 	ispsoftc_t *isp;
1704 	int error;
1705 };
1706 
1707 static void imc(void *, bus_dma_segment_t *, int, int);
1708 
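/*
 * bus_dmamap_load() callback for the control area: carve the single
 * contiguous segment into request queue, result queue and, for FC,
 * the scratch area, recording each bus address.
 */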
1709 static void
1710 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1711 {
1712 	struct imush *imushp = (struct imush *) arg;
1713 	if (error) {
1714 		imushp->error = error;
1715 	} else {
1716 		ispsoftc_t *isp = imushp->isp;
1717 		bus_addr_t addr = segs->ds_addr;
1718 
1719 		isp->isp_rquest_dma = addr;
1720 		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1721 		isp->isp_result_dma = addr;
1722 		if (IS_FC(isp)) {
1723 			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1724 			FCPARAM(isp)->isp_scdma = addr;
1725 		}
1726 	}
1727 }
1728 
1729 static int
1730 isp_pci_mbxdma(ispsoftc_t *isp)
1731 {
1732 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1733 	caddr_t base;
1734 	uint32_t len;
1735 	int i, error, ns;
1736 	bus_size_t slim;	/* segment size */
1737 	bus_addr_t llim;	/* low limit of unavailable dma */
1738 	bus_addr_t hlim;	/* high limit of unavailable dma */
1739 	struct imush im;
1740 
1741 	/*
1742 	 * Already been here? If so, leave...
1743 	 */
1744 	if (isp->isp_rquest) {
1745 		return (0);
1746 	}
1747 
1748 	if (isp->isp_maxcmds == 0) {
1749 		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
1750 		return (1);
1751 	}
1752 
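	/*
	 * Ultra2, 1240 and FC chips get 4GB segment sizes and the full
	 * address range; older chips are limited to 32-bit addresses
	 * and 16MB segments.
	 */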
1753 	hlim = BUS_SPACE_MAXADDR;
1754 	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1755 		slim = (bus_size_t) (1ULL << 32);
1756 		llim = BUS_SPACE_MAXADDR;
1757 	} else {
1758 		llim = BUS_SPACE_MAXADDR_32BIT;
1759 		slim = (1 << 24);
1760 	}
1761 
1762 	/*
1763 	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
1764 	 */
1765 #ifdef	ISP_TARGET_MODE
1766 	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
1767 		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
1768 		return (1);
1769 	}
1770 #endif
1771 
1772 	ISP_UNLOCK(isp);
1773 	if (isp_dma_tag_create(BUS_DMA_ROOTARG(pcs->pci_dev), 1, slim, llim,
1774 	    hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
1775 	    &pcs->dmat)) {
1776 		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1777 		ISP_LOCK(isp);
1778 		return (1);
1779 	}
1780 
1781 
1782 	len = sizeof (XS_T **) * isp->isp_maxcmds;
1783 	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1784 	if (isp->isp_xflist == NULL) {
1785 		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
1786 		ISP_LOCK(isp);
1787 		return (1);
1788 	}
1789 #ifdef	ISP_TARGET_MODE
1790 	len = sizeof (void **) * isp->isp_maxcmds;
1791 	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1792 	if (isp->isp_tgtlist == NULL) {
1793 		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
1794 		ISP_LOCK(isp);
1795 		return (1);
1796 	}
1797 #endif
1798 	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
1799 	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
1800 	if (pcs->dmaps == NULL) {
1801 		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
1802 		free(isp->isp_xflist, M_DEVBUF);
1803 #ifdef	ISP_TARGET_MODE
1804 		free(isp->isp_tgtlist, M_DEVBUF);
1805 #endif
1806 		ISP_LOCK(isp);
1807 		return (1);
1808 	}
1809 
1810 	/*
1811 	 * Allocate and map the request, result queues, plus FC scratch area.
1812 	 */
1813 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1814 	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1815 	if (IS_FC(isp)) {
1816 		len += ISP2100_SCRLEN;
1817 	}
1818 
1819 	ns = (len / PAGE_SIZE) + 1;
1820 	/*
1821 	 * Create a tag for the control spaces- force it to within 32 bits.
1822 	 */
1823 	if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
1824 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1825 	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
1826 		isp_prt(isp, ISP_LOGERR,
1827 		    "cannot create a dma tag for control spaces");
1828 		free(pcs->dmaps, M_DEVBUF);
1829 		free(isp->isp_xflist, M_DEVBUF);
1830 #ifdef	ISP_TARGET_MODE
1831 		free(isp->isp_tgtlist, M_DEVBUF);
1832 #endif
1833 		ISP_LOCK(isp);
1834 		return (1);
1835 	}
1836 
1837 	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
1838 	    &isp->isp_cdmap) != 0) {
1839 		isp_prt(isp, ISP_LOGERR,
1840 		    "cannot allocate %d bytes of CCB memory", len);
1841 		bus_dma_tag_destroy(isp->isp_cdmat);
1842 		free(isp->isp_xflist, M_DEVBUF);
1843 #ifdef	ISP_TARGET_MODE
1844 		free(isp->isp_tgtlist, M_DEVBUF);
1845 #endif
1846 		free(pcs->dmaps, M_DEVBUF);
1847 		ISP_LOCK(isp);
1848 		return (1);
1849 	}
1850 
1851 	for (i = 0; i < isp->isp_maxcmds; i++) {
1852 		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
1853 		if (error) {
1854 			isp_prt(isp, ISP_LOGERR,
1855 			    "error %d creating per-cmd DMA maps", error);
1856 			while (--i >= 0) {
1857 				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
1858 			}
1859 			goto bad;
1860 		}
1861 	}
1862 
1863 	im.isp = isp;
1864 	im.error = 0;
1865 	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
1866 	if (im.error) {
1867 		isp_prt(isp, ISP_LOGERR,
1868 		    "error %d loading dma map for control areas", im.error);
1869 		goto bad;
1870 	}
1871 
1872 	isp->isp_rquest = base;
1873 	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1874 	isp->isp_result = base;
1875 	if (IS_FC(isp)) {
1876 		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1877 		FCPARAM(isp)->isp_scratch = base;
1878 	}
1879 	ISP_LOCK(isp);
1880 	return (0);
1881 
1882 bad:
1883 	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
1884 	bus_dma_tag_destroy(isp->isp_cdmat);
1885 	free(isp->isp_xflist, M_DEVBUF);
1886 #ifdef	ISP_TARGET_MODE
1887 	free(isp->isp_tgtlist, M_DEVBUF);
1888 #endif
1889 	free(pcs->dmaps, M_DEVBUF);
1890 	ISP_LOCK(isp);
1891 	isp->isp_rquest = NULL;
1892 	return (1);
1893 }
1894 
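/*
 * State threaded through the DMA-map callbacks: the command, its
 * partially built request entry, and where to post the next request
 * queue index or an error.
 */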
1895 typedef struct {
1896 	ispsoftc_t *isp;
1897 	void *cmd_token;
1898 	void *rq;
1899 	uint32_t *nxtip;
1900 	uint32_t optr;
1901 	int error;
1902 } mush_t;
1903 
1904 #define	MUSHERR_NOQENTRIES	-2
1905 
1906 #ifdef	ISP_TARGET_MODE
1907 /*
1908  * We need to handle DMA for target mode differently from initiator mode.
1909  *
1910  * DMA mapping and construction and submission of CTIO Request Entries
1911  * and rendezvous for completion are very tightly coupled because we start
1912  * out by knowing (per platform) how much data we have to move, but we
1913  * don't know, up front, how many DMA mapping segments will have to be used
1914  * to cover that data, so we don't know how many CTIO Request Entries we
1915  * will end up using. Further, for performance reasons we may want to
1916  * (on the last CTIO for Fibre Channel), send status too (if all went well).
1917  *
1918  * The standard vector still goes through isp_pci_dmasetup, but the callback
1919  * for the DMA mapping routines comes here instead with the whole transfer
1920  * mapped and a pointer to a partially filled in already allocated request
1921  * queue entry. We finish the job.
1922  */
1923 static void tdma_mk(void *, bus_dma_segment_t *, int, int);
1924 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);
1925 
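/*
 * With STATUS_WITH_DATA defined, SCSI status rides along in the final
 * data CTIO; without it, an extra status-only CTIO is synthesized (see
 * the #ifndef STATUS_WITH_DATA code below).
 */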
1926 #define	STATUS_WITH_DATA	1
1927 
1928 static void
1929 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1930 {
1931 	mush_t *mp;
1932 	struct ccb_scsiio *csio;
1933 	ispsoftc_t *isp;
1934 	struct isp_pcisoftc *pcs;
1935 	bus_dmamap_t *dp;
1936 	ct_entry_t *cto, *qe;
1937 	uint8_t scsi_status;
1938 	uint32_t curi, nxti, handle;
1939 	uint32_t sflags;
1940 	int32_t resid;
1941 	int nth_ctio, nctios, send_status;
1942 
1943 	mp = (mush_t *) arg;
1944 	if (error) {
1945 		mp->error = error;
1946 		return;
1947 	}
1948 
1949 	isp = mp->isp;
1950 	csio = mp->cmd_token;
1951 	cto = mp->rq;
1952 	curi = isp->isp_reqidx;
1953 	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1954 
1955 	cto->ct_xfrlen = 0;
1956 	cto->ct_seg_count = 0;
1957 	cto->ct_header.rqs_entry_count = 1;
1958 	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1959 
1960 	if (nseg == 0) {
1961 		cto->ct_header.rqs_seqno = 1;
1962 		isp_prt(isp, ISP_LOGTDEBUG1,
1963 		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
1964 		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
1965 		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
1966 		    cto->ct_scsi_status, cto->ct_resid);
1967 		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
1968 		isp_put_ctio(isp, cto, qe);
1969 		return;
1970 	}
1971 
1972 	nctios = nseg / ISP_RQDSEG;
1973 	if (nseg % ISP_RQDSEG) {
1974 		nctios++;
1975 	}
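	/*
	 * Example (with a hypothetical ISP_RQDSEG of 4): 10 segments
	 * would need ceil(10 / 4) == 3 CTIOs, the first two carrying
	 * 4 segments each and the last carrying 2.
	 */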
1976 
1977 	/*
1978 	 * Save syshandle, and potentially any SCSI status, which we'll
1979 	 * reinsert on the last CTIO we're going to send.
1980 	 */
1981 
1982 	handle = cto->ct_syshandle;
1983 	cto->ct_syshandle = 0;
1984 	cto->ct_header.rqs_seqno = 0;
1985 	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1986 
1987 	if (send_status) {
1988 		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1989 		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1990 		/*
1991 		 * Preserve residual.
1992 		 */
1993 		resid = cto->ct_resid;
1994 
1995 		/*
1996 		 * Save actual SCSI status.
1997 		 */
1998 		scsi_status = cto->ct_scsi_status;
1999 
2000 #ifndef	STATUS_WITH_DATA
2001 		sflags |= CT_NO_DATA;
2002 		/*
2003 		 * We can't do a status at the same time as a data CTIO, so
2004 		 * we need to synthesize an extra CTIO at this level.
2005 		 */
2006 		nctios++;
2007 #endif
2008 	} else {
2009 		sflags = scsi_status = resid = 0;
2010 	}
2011 
2012 	cto->ct_resid = 0;
2013 	cto->ct_scsi_status = 0;
2014 
2015 	pcs = (struct isp_pcisoftc *)isp;
2016 	dp = &pcs->dmaps[isp_handle_index(handle)];
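	/* The command handle doubles as the index of its per-command DMA map. */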
2017 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2018 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2019 	} else {
2020 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2021 	}
2022 
2023 	nxti = *mp->nxtip;
2024 
2025 	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
2026 		int seglim;
2027 
2028 		seglim = nseg;
2029 		if (seglim) {
2030 			int seg;
2031 
2032 			if (seglim > ISP_RQDSEG)
2033 				seglim = ISP_RQDSEG;
2034 
2035 			for (seg = 0; seg < seglim; seg++, nseg--) {
2036 				/*
2037 				 * Unlike normal initiator commands, we don't
2038 				 * do any swizzling here.
2039 				 */
2040 				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
2041 				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
2042 				cto->ct_xfrlen += dm_segs->ds_len;
2043 				dm_segs++;
2044 			}
2045 			cto->ct_seg_count = seg;
2046 		} else {
2047 			/*
2048 			 * This case should only happen when we're sending an
2049 			 * extra CTIO with final status.
2050 			 */
2051 			if (send_status == 0) {
2052 				isp_prt(isp, ISP_LOGWARN,
2053 				    "tdma_mk ran out of segments");
2054 				mp->error = EINVAL;
2055 				return;
2056 			}
2057 		}
2058 
2059 		/*
2060 		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
2061 		 * ct_tagtype, and ct_timeout have been carried over
2062 		 * unchanged from what our caller had set.
2063 		 *
		 * We have just finished setting the dataseg and seg_count
		 * fields. The data direction has been preserved all along;
		 * we only clear it if we're now sending status.
2067 		 */
2068 
2069 		if (nth_ctio == nctios - 1) {
2070 			/*
2071 			 * We're the last in a sequence of CTIOs, so mark
2072 			 * this CTIO and save the handle to the CCB such that
2073 			 * when this CTIO completes we can free dma resources
2074 			 * and do whatever else we need to do to finish the
2075 			 * rest of the command. We *don't* give this to the
2076 			 * firmware to work on- the caller will do that.
2077 			 */
2078 
2079 			cto->ct_syshandle = handle;
2080 			cto->ct_header.rqs_seqno = 1;
2081 
			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
2101 			isp_put_ctio(isp, cto, qe);
2102 			ISP_TDQE(isp, "last tdma_mk", curi, cto);
2103 			if (nctios > 1) {
2104 				MEMORYBARRIER(isp, SYNC_REQUEST,
2105 				    curi, QENTRY_LEN);
2106 			}
2107 		} else {
2108 			ct_entry_t *oqe = qe;
2109 
2110 			/*
2111 			 * Make sure syshandle fields are clean
2112 			 */
2113 			cto->ct_syshandle = 0;
2114 			cto->ct_header.rqs_seqno = 0;
2115 
2116 			isp_prt(isp, ISP_LOGTDEBUG1,
2117 			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
2118 			    cto->ct_fwhandle, csio->ccb_h.target_lun,
2119 			    cto->ct_iid, cto->ct_flags);
2120 
2121 			/*
2122 			 * Get a new CTIO
2123 			 */
2124 			qe = (ct_entry_t *)
2125 			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2126 			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
2127 			if (nxti == mp->optr) {
2128 				isp_prt(isp, ISP_LOGTDEBUG0,
2129 				    "Queue Overflow in tdma_mk");
2130 				mp->error = MUSHERR_NOQENTRIES;
2131 				return;
2132 			}
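			/*
			 * (The request queue is a ring; it is considered
			 * full when advancing the producer index would
			 * make it equal the consumer index mp->optr,
			 * which always leaves one entry unused.)
			 */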
2133 
2134 			/*
2135 			 * Now that we're done with the old CTIO,
2136 			 * flush it out to the request queue.
2137 			 */
			ISP_TDQE(isp, "tdma_mk", curi, cto);
2139 			isp_put_ctio(isp, cto, oqe);
2140 			if (nth_ctio != 0) {
2141 				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
2142 				    QENTRY_LEN);
2143 			}
2144 			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));
2145 
2146 			/*
2147 			 * Reset some fields in the CTIO so we can reuse
2148 			 * for the next one we'll flush to the request
2149 			 * queue.
2150 			 */
2151 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
2152 			cto->ct_header.rqs_entry_count = 1;
2153 			cto->ct_header.rqs_flags = 0;
2154 			cto->ct_status = 0;
2155 			cto->ct_scsi_status = 0;
2156 			cto->ct_xfrlen = 0;
2157 			cto->ct_resid = 0;
2158 			cto->ct_seg_count = 0;
2159 			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
2160 		}
2161 	}
2162 	*mp->nxtip = nxti;
2163 }
2164 
2165 /*
2166  * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code and
 * improves performance.
2169  */
2170 
2171 static void
2172 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2173 {
2174 	mush_t *mp;
2175 	struct ccb_scsiio *csio;
2176 	ispsoftc_t *isp;
2177 	ct2_entry_t *cto, *qe;
2178 	uint32_t curi, nxti;
2179 	ispds_t *ds;
2180 	ispds64_t *ds64;
2181 	int segcnt, seglim;
2182 
2183 	mp = (mush_t *) arg;
2184 	if (error) {
2185 		mp->error = error;
2186 		return;
2187 	}
2188 
2189 	isp = mp->isp;
2190 	csio = mp->cmd_token;
2191 	cto = mp->rq;
2192 
2193 	curi = isp->isp_reqidx;
2194 	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
2195 
2196 	if (nseg == 0) {
2197 		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
2198 			isp_prt(isp, ISP_LOGWARN,
			    "tdma_mkfc, a status CTIO2 without MODE1 "
2200 			    "set (0x%x)", cto->ct_flags);
2201 			mp->error = EINVAL;
2202 			return;
2203 		}
2204 		/*
2205 		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
2206 		 * flags to NO DATA and clear relative offset flags.
2207 		 * We preserve the ct_resid and the response area.
2208 		 */
2209 		cto->ct_header.rqs_seqno = 1;
2210 		cto->ct_seg_count = 0;
2211 		cto->ct_reloff = 0;
2212 		isp_prt(isp, ISP_LOGTDEBUG1,
2213 		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
2214 		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
2215 		    cto->ct_iid, cto->ct_flags, cto->ct_status,
2216 		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
2217 		if (FCPARAM(isp)->isp_2klogin) {
2218 			isp_put_ctio2e(isp,
2219 			    (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
2220 		} else {
2221 			isp_put_ctio2(isp, cto, qe);
2222 		}
	ISP_TDQE(isp, "tdma_mkfc[no data]", curi, qe);
2224 		return;
2225 	}
2226 
2227 	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
2228 		isp_prt(isp, ISP_LOGERR,
		    "tdma_mkfc, a data CTIO2 without MODE0 set "
2230 		    "(0x%x)", cto->ct_flags);
2231 		mp->error = EINVAL;
2232 		return;
2233 	}
2234 
2236 	nxti = *mp->nxtip;
2237 
2238 	/*
	 * Check to see whether we need DAC (64 bit) addressing or not.
	 *
	 * Any segment address over the 4GB boundary causes this
	 * to happen.
2243 	 */
2244 	segcnt = nseg;
2245 	if (sizeof (bus_addr_t) > 4) {
2246 		for (segcnt = 0; segcnt < nseg; segcnt++) {
2247 			uint64_t addr = dm_segs[segcnt].ds_addr;
2248 			if (addr >= 0x100000000LL) {
2249 				break;
2250 			}
2251 		}
2252 	}
2253 	if (segcnt != nseg) {
2254 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
2255 		seglim = ISP_RQDSEG_T3;
2256 		ds64 = &cto->rsp.m0.u.ct_dataseg64[0];
2257 		ds = NULL;
2258 	} else {
2259 		seglim = ISP_RQDSEG_T2;
2260 		ds64 = NULL;
2261 		ds = &cto->rsp.m0.u.ct_dataseg[0];
2262 	}
2263 	cto->ct_seg_count = 0;
2264 
2265 	/*
2266 	 * Set up the CTIO2 data segments.
2267 	 */
2268 	for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
2269 	    cto->ct_seg_count++, segcnt++) {
2270 		if (ds64) {
2271 			ds64->ds_basehi =
2272 			    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
2273 			ds64->ds_base = dm_segs[segcnt].ds_addr;
2274 			ds64->ds_count = dm_segs[segcnt].ds_len;
2275 			ds64++;
2276 		} else {
2277 			ds->ds_base = dm_segs[segcnt].ds_addr;
2278 			ds->ds_count = dm_segs[segcnt].ds_len;
2279 			ds++;
2280 		}
2281 		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
2282 #if __FreeBSD_version < 500000
2283 		isp_prt(isp, ISP_LOGTDEBUG1,
2284 		    "isp_send_ctio2: ent0[%d]0x%llx:%llu",
2285 		    cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
2286 		    (uint64_t)dm_segs[segcnt].ds_len);
2287 #else
2288 		isp_prt(isp, ISP_LOGTDEBUG1,
2289 		    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
2290 		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
2291 		    (uintmax_t)dm_segs[segcnt].ds_len);
2292 #endif
2293 	}
2294 
2295 	while (segcnt < nseg) {
2296 		uint32_t curip;
2297 		int seg;
2298 		ispcontreq_t local, *crq = &local, *qep;
2299 
2300 		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2301 		curip = nxti;
2302 		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
2303 		if (nxti == mp->optr) {
2305 			isp_prt(isp, ISP_LOGTDEBUG0,
2306 			    "tdma_mkfc: request queue overflow");
2307 			mp->error = MUSHERR_NOQENTRIES;
2308 			return;
2309 		}
2310 		cto->ct_header.rqs_entry_count++;
2311 		MEMZERO((void *)crq, sizeof (*crq));
2312 		crq->req_header.rqs_entry_count = 1;
2313 		if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
2314 			seglim = ISP_CDSEG64;
2315 			ds = NULL;
2316 			ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
2317 			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
2318 		} else {
2319 			seglim = ISP_CDSEG;
2320 			ds = &crq->req_dataseg[0];
2321 			ds64 = NULL;
2322 			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
2323 		}
2324 		for (seg = 0; segcnt < nseg && seg < seglim;
2325 		    segcnt++, seg++) {
2326 			if (ds64) {
2327 				ds64->ds_basehi =
2328 				  ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
2329 				ds64->ds_base = dm_segs[segcnt].ds_addr;
2330 				ds64->ds_count = dm_segs[segcnt].ds_len;
2331 				ds64++;
2332 			} else {
2333 				ds->ds_base = dm_segs[segcnt].ds_addr;
2334 				ds->ds_count = dm_segs[segcnt].ds_len;
2335 				ds++;
2336 			}
2337 #if __FreeBSD_version < 500000
2338 			isp_prt(isp, ISP_LOGTDEBUG1,
2339 			    "isp_send_ctio2: ent%d[%d]%llx:%llu",
2340 			    cto->ct_header.rqs_entry_count-1, seg,
2341 			    (uint64_t)dm_segs[segcnt].ds_addr,
2342 			    (uint64_t)dm_segs[segcnt].ds_len);
2343 #else
2344 			isp_prt(isp, ISP_LOGTDEBUG1,
2345 			    "isp_send_ctio2: ent%d[%d]%jx:%ju",
2346 			    cto->ct_header.rqs_entry_count-1, seg,
2347 			    (uintmax_t)dm_segs[segcnt].ds_addr,
2348 			    (uintmax_t)dm_segs[segcnt].ds_len);
2349 #endif
2350 			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
2351 			cto->ct_seg_count++;
2352 		}
2353 		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
2354 		isp_put_cont_req(isp, crq, qep);
2355 		ISP_TDQE(isp, "cont entry", curi, qep);
2356 	}
2357 
2358 	/*
	 * Now do final twiddling for the CTIO itself.
2360 	 */
2361 	cto->ct_header.rqs_seqno = 1;
2362 	isp_prt(isp, ISP_LOGTDEBUG1,
2363 	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
2364 	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
2365 	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
2366 	    cto->ct_resid);
2367 	if (FCPARAM(isp)->isp_2klogin) {
2368 		isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
2369 	} else {
2370 		isp_put_ctio2(isp, cto, qe);
2371 	}
	ISP_TDQE(isp, "last tdma_mkfc", curi, qe);
2373 	*mp->nxtip = nxti;
2374 }
2375 #endif
2376 
2377 static void dma_2400(void *, bus_dma_segment_t *, int, int);
2378 static void dma2_a64(void *, bus_dma_segment_t *, int, int);
2379 static void dma2(void *, bus_dma_segment_t *, int, int);
2380 
2381 static void
2382 dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2383 {
2384 	mush_t *mp;
2385 	ispsoftc_t *isp;
2386 	struct ccb_scsiio *csio;
2387 	struct isp_pcisoftc *pcs;
2388 	bus_dmamap_t *dp;
2389 	bus_dma_segment_t *eseg;
2390 	ispreqt7_t *rq;
2391 	int seglim, datalen;
2392 	uint32_t nxti;
2393 
2394 	mp = (mush_t *) arg;
2395 	if (error) {
2396 		mp->error = error;
2397 		return;
2398 	}
2399 
2400 	if (nseg < 1) {
2401 		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2402 		mp->error = EFAULT;
2403 		return;
2404 	}
2405 
2406 	csio = mp->cmd_token;
2407 	isp = mp->isp;
2408 	rq = mp->rq;
2409 	pcs = (struct isp_pcisoftc *)mp->isp;
2410 	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2411 	nxti = *mp->nxtip;
2412 
2413 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2414 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2415 	} else {
2416 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2417 	}
2418 	datalen = XS_XFRLEN(csio);
2419 
2420 	/*
2421 	 * We're passed an initial partially filled in entry that
2422 	 * has most fields filled in except for data transfer
2423 	 * related values.
2424 	 *
2425 	 * Our job is to fill in the initial request queue entry and
2426 	 * then to start allocating and filling in continuation entries
2427 	 * until we've covered the entire transfer.
2428 	 */
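
	/*
	 * Sketch of the resulting layout on the request queue (actual
	 * per-entry segment counts are hardware dependent):
	 *
	 *	[T7 request: 1 dataseg] ->
	 *	[A64 continuation: up to ISP_CDSEG64 datasegs] -> ...
	 *
	 * until the whole transfer length is covered.
	 */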
2429 
2430 	rq->req_header.rqs_entry_type = RQSTYPE_T7RQS;
2431 	rq->req_dl = datalen;
2432 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2433 		rq->req_alen_datadir = 0x2;
2434 	} else {
2435 		rq->req_alen_datadir = 0x1;
2436 	}
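	/* (For the 24XX: 0x2 == data in to host, 0x1 == data out to device.) */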
2437 
2438 	eseg = dm_segs + nseg;
2439 
2440 	rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr);
2441 	rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr);
2442 	rq->req_dataseg.ds_count = dm_segs->ds_len;
2443 
2444 	datalen -= dm_segs->ds_len;
2445 
2446 	dm_segs++;
2447 	rq->req_seg_count++;
2448 
2449 	while (datalen > 0 && dm_segs != eseg) {
2450 		uint32_t onxti;
2451 		ispcontreq64_t local, *crq = &local, *cqe;
2452 
2453 		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2454 		onxti = nxti;
2455 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2456 		if (nxti == mp->optr) {
2457 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2458 			mp->error = MUSHERR_NOQENTRIES;
2459 			return;
2460 		}
2461 		rq->req_header.rqs_entry_count++;
2462 		MEMZERO((void *)crq, sizeof (*crq));
2463 		crq->req_header.rqs_entry_count = 1;
2464 		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
2465 
2466 		seglim = 0;
2467 		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
2468 			crq->req_dataseg[seglim].ds_base =
2469 			    DMA_LO32(dm_segs->ds_addr);
2470 			crq->req_dataseg[seglim].ds_basehi =
2471 			    DMA_HI32(dm_segs->ds_addr);
2472 			crq->req_dataseg[seglim].ds_count =
2473 			    dm_segs->ds_len;
			datalen -= dm_segs->ds_len;
			rq->req_seg_count++;
			dm_segs++;
			seglim++;
2478 		}
2479 		if (isp->isp_dblev & ISP_LOGDEBUG1) {
2480 			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
2481 		}
2482 		isp_put_cont64_req(isp, crq, cqe);
2483 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2484 	}
2485 	*mp->nxtip = nxti;
2486 }
2487 
2488 static void
2489 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2490 {
2491 	mush_t *mp;
2492 	ispsoftc_t *isp;
2493 	struct ccb_scsiio *csio;
2494 	struct isp_pcisoftc *pcs;
2495 	bus_dmamap_t *dp;
2496 	bus_dma_segment_t *eseg;
2497 	ispreq64_t *rq;
2498 	int seglim, datalen;
2499 	uint32_t nxti;
2500 
2501 	mp = (mush_t *) arg;
2502 	if (error) {
2503 		mp->error = error;
2504 		return;
2505 	}
2506 
2507 	if (nseg < 1) {
2508 		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2509 		mp->error = EFAULT;
2510 		return;
2511 	}
2512 	csio = mp->cmd_token;
2513 	isp = mp->isp;
2514 	rq = mp->rq;
2515 	pcs = (struct isp_pcisoftc *)mp->isp;
2516 	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2517 	nxti = *mp->nxtip;
2518 
2519 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2520 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2521 	} else {
2522 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2523 	}
2524 	datalen = XS_XFRLEN(csio);
2525 
2526 	/*
2527 	 * We're passed an initial partially filled in entry that
2528 	 * has most fields filled in except for data transfer
2529 	 * related values.
2530 	 *
2531 	 * Our job is to fill in the initial request queue entry and
2532 	 * then to start allocating and filling in continuation entries
2533 	 * until we've covered the entire transfer.
2534 	 */
2535 
2536 	if (IS_FC(isp)) {
2537 		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
2538 		seglim = ISP_RQDSEG_T3;
2539 		((ispreqt3_t *)rq)->req_totalcnt = datalen;
2540 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2541 			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
2542 		} else {
2543 			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
2544 		}
2545 	} else {
2546 		rq->req_header.rqs_entry_type = RQSTYPE_A64;
2547 		if (csio->cdb_len > 12) {
2548 			seglim = 0;
2549 		} else {
2550 			seglim = ISP_RQDSEG_A64;
2551 		}
2552 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2553 			rq->req_flags |= REQFLAG_DATA_IN;
2554 		} else {
2555 			rq->req_flags |= REQFLAG_DATA_OUT;
2556 		}
2557 	}
2558 
2559 	eseg = dm_segs + nseg;
2560 
2561 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
2562 		if (IS_FC(isp)) {
2563 			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
2564 			rq3->req_dataseg[rq3->req_seg_count].ds_base =
2565 			    DMA_LO32(dm_segs->ds_addr);
2566 			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
2567 			    DMA_HI32(dm_segs->ds_addr);
2568 			rq3->req_dataseg[rq3->req_seg_count].ds_count =
2569 			    dm_segs->ds_len;
2570 		} else {
2571 			rq->req_dataseg[rq->req_seg_count].ds_base =
2572 			    DMA_LO32(dm_segs->ds_addr);
2573 			rq->req_dataseg[rq->req_seg_count].ds_basehi =
2574 			    DMA_HI32(dm_segs->ds_addr);
2575 			rq->req_dataseg[rq->req_seg_count].ds_count =
2576 			    dm_segs->ds_len;
2577 		}
2578 		datalen -= dm_segs->ds_len;
2579 		rq->req_seg_count++;
2580 		dm_segs++;
2581 	}
2582 
2583 	while (datalen > 0 && dm_segs != eseg) {
2584 		uint32_t onxti;
2585 		ispcontreq64_t local, *crq = &local, *cqe;
2586 
2587 		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2588 		onxti = nxti;
2589 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2590 		if (nxti == mp->optr) {
2591 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2592 			mp->error = MUSHERR_NOQENTRIES;
2593 			return;
2594 		}
2595 		rq->req_header.rqs_entry_count++;
2596 		MEMZERO((void *)crq, sizeof (*crq));
2597 		crq->req_header.rqs_entry_count = 1;
2598 		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
2599 
2600 		seglim = 0;
2601 		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
2602 			crq->req_dataseg[seglim].ds_base =
2603 			    DMA_LO32(dm_segs->ds_addr);
2604 			crq->req_dataseg[seglim].ds_basehi =
2605 			    DMA_HI32(dm_segs->ds_addr);
2606 			crq->req_dataseg[seglim].ds_count =
2607 			    dm_segs->ds_len;
			datalen -= dm_segs->ds_len;
			rq->req_seg_count++;
			dm_segs++;
			seglim++;
2612 		}
2613 		if (isp->isp_dblev & ISP_LOGDEBUG1) {
2614 			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
2615 		}
2616 		isp_put_cont64_req(isp, crq, cqe);
2617 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2618 	}
2619 	*mp->nxtip = nxti;
2620 }
2621 
2622 static void
2623 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2624 {
2625 	mush_t *mp;
2626 	ispsoftc_t *isp;
2627 	struct ccb_scsiio *csio;
2628 	struct isp_pcisoftc *pcs;
2629 	bus_dmamap_t *dp;
2630 	bus_dma_segment_t *eseg;
2631 	ispreq_t *rq;
2632 	int seglim, datalen;
2633 	uint32_t nxti;
2634 
2635 	mp = (mush_t *) arg;
2636 	if (error) {
2637 		mp->error = error;
2638 		return;
2639 	}
2640 
2641 	if (nseg < 1) {
2642 		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2643 		mp->error = EFAULT;
2644 		return;
2645 	}
2646 	csio = mp->cmd_token;
2647 	isp = mp->isp;
2648 	rq = mp->rq;
2649 	pcs = (struct isp_pcisoftc *)mp->isp;
2650 	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2651 	nxti = *mp->nxtip;
2652 
2653 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2654 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2655 	} else {
2656 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2657 	}
2658 
2659 	datalen = XS_XFRLEN(csio);
2660 
2661 	/*
2662 	 * We're passed an initial partially filled in entry that
2663 	 * has most fields filled in except for data transfer
2664 	 * related values.
2665 	 *
2666 	 * Our job is to fill in the initial request queue entry and
2667 	 * then to start allocating and filling in continuation entries
2668 	 * until we've covered the entire transfer.
2669 	 */
2670 
2671 	if (IS_FC(isp)) {
2672 		seglim = ISP_RQDSEG_T2;
2673 		((ispreqt2_t *)rq)->req_totalcnt = datalen;
2674 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2675 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
2676 		} else {
2677 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
2678 		}
2679 	} else {
2680 		if (csio->cdb_len > 12) {
2681 			seglim = 0;
2682 		} else {
2683 			seglim = ISP_RQDSEG;
2684 		}
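		/*
		 * (A CDB longer than 12 bytes needs the extended request
		 * format, which has no room for data segments in the
		 * first entry, so all segments go to continuations.)
		 */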
2685 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2686 			rq->req_flags |= REQFLAG_DATA_IN;
2687 		} else {
2688 			rq->req_flags |= REQFLAG_DATA_OUT;
2689 		}
2690 	}
2691 
2692 	eseg = dm_segs + nseg;
2693 
2694 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
2695 		if (IS_FC(isp)) {
2696 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
2697 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
2698 			    DMA_LO32(dm_segs->ds_addr);
2699 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
2700 			    dm_segs->ds_len;
2701 		} else {
2702 			rq->req_dataseg[rq->req_seg_count].ds_base =
2703 				DMA_LO32(dm_segs->ds_addr);
2704 			rq->req_dataseg[rq->req_seg_count].ds_count =
2705 				dm_segs->ds_len;
2706 		}
2707 		datalen -= dm_segs->ds_len;
2708 		rq->req_seg_count++;
2709 		dm_segs++;
2710 	}
2711 
2712 	while (datalen > 0 && dm_segs != eseg) {
2713 		uint32_t onxti;
2714 		ispcontreq_t local, *crq = &local, *cqe;
2715 
2716 		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2717 		onxti = nxti;
2718 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2719 		if (nxti == mp->optr) {
2720 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2721 			mp->error = MUSHERR_NOQENTRIES;
2722 			return;
2723 		}
2724 		rq->req_header.rqs_entry_count++;
2725 		MEMZERO((void *)crq, sizeof (*crq));
2726 		crq->req_header.rqs_entry_count = 1;
2727 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
2728 
2729 		seglim = 0;
2730 		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
2731 			crq->req_dataseg[seglim].ds_base =
2732 			    DMA_LO32(dm_segs->ds_addr);
2733 			crq->req_dataseg[seglim].ds_count =
2734 			    dm_segs->ds_len;
			datalen -= dm_segs->ds_len;
			rq->req_seg_count++;
			dm_segs++;
			seglim++;
2739 		}
2740 		if (isp->isp_dblev & ISP_LOGDEBUG1) {
2741 			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
2742 		}
2743 		isp_put_cont_req(isp, crq, cqe);
2744 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2745 	}
2746 	*mp->nxtip = nxti;
2747 }
2748 
2749 /*
2750  * We enter with ISP_LOCK held
2751  */
2752 static int
2753 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
2754 	uint32_t *nxtip, uint32_t optr)
2755 {
2756 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2757 	ispreq_t *qep;
2758 	bus_dmamap_t *dp = NULL;
2759 	mush_t mush, *mp;
2760 	void (*eptr)(void *, bus_dma_segment_t *, int, int);
2761 
2762 	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
2763 #ifdef	ISP_TARGET_MODE
2764 	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
2765 		if (IS_FC(isp)) {
2766 			eptr = tdma_mkfc;
2767 		} else {
2768 			eptr = tdma_mk;
2769 		}
2770 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
2771 		    (csio->dxfer_len == 0)) {
2772 			mp = &mush;
2773 			mp->isp = isp;
2774 			mp->cmd_token = csio;
2775 			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
2776 			mp->nxtip = nxtip;
2777 			mp->optr = optr;
2778 			mp->error = 0;
2779 			ISPLOCK_2_CAMLOCK(isp);
2780 			(*eptr)(mp, NULL, 0, 0);
2781 			CAMLOCK_2_ISPLOCK(isp);
2782 			goto mbxsync;
2783 		}
2784 	} else
2785 #endif
2786 	if (IS_24XX(isp)) {
2787 		eptr = dma_2400;
2788 	} else if (sizeof (bus_addr_t) > 4) {
2789 		eptr = dma2_a64;
2790 	} else {
2791 		eptr = dma2;
2792 	}
2793 
2795 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
2796 	    (csio->dxfer_len == 0)) {
2797 		rq->req_seg_count = 1;
2798 		goto mbxsync;
2799 	}
2800 
2801 	/*
	 * Do a virtual grapevine step to collect the info needed by
	 * the dma allocation callback that we have to use...
2804 	 */
2805 	mp = &mush;
2806 	mp->isp = isp;
2807 	mp->cmd_token = csio;
2808 	mp->rq = rq;
2809 	mp->nxtip = nxtip;
2810 	mp->optr = optr;
2811 	mp->error = 0;
2812 
2813 	ISPLOCK_2_CAMLOCK(isp);
2814 	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2815 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
2816 			int error, s;
2817 			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2818 			s = splsoftvm();
2819 			error = bus_dmamap_load(pcs->dmat, *dp,
2820 			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
2821 			if (error == EINPROGRESS) {
2822 				bus_dmamap_unload(pcs->dmat, *dp);
2823 				mp->error = EINVAL;
2824 				isp_prt(isp, ISP_LOGERR,
2825 				    "deferred dma allocation not supported");
2826 			} else if (error && mp->error == 0) {
2827 #ifdef	DIAGNOSTIC
2828 				isp_prt(isp, ISP_LOGERR,
2829 				    "error %d in dma mapping code", error);
2830 #endif
2831 				mp->error = error;
2832 			}
2833 			splx(s);
2834 		} else {
2835 			/* Pointer to physical buffer */
2836 			struct bus_dma_segment seg;
2837 			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
2838 			seg.ds_len = csio->dxfer_len;
2839 			(*eptr)(mp, &seg, 1, 0);
2840 		}
2841 	} else {
2842 		struct bus_dma_segment *segs;
2843 
2844 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2845 			isp_prt(isp, ISP_LOGERR,
2846 			    "Physical segment pointers unsupported");
2847 			mp->error = EINVAL;
2848 		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2849 			isp_prt(isp, ISP_LOGERR,
2850 			    "Virtual segment addresses unsupported");
2851 			mp->error = EINVAL;
2852 		} else {
2853 			/* Just use the segments provided */
2854 			segs = (struct bus_dma_segment *) csio->data_ptr;
2855 			(*eptr)(mp, segs, csio->sglist_cnt, 0);
2856 		}
2857 	}
2858 	CAMLOCK_2_ISPLOCK(isp);
2859 	if (mp->error) {
2860 		int retval = CMD_COMPLETE;
2861 		if (mp->error == MUSHERR_NOQENTRIES) {
2862 			retval = CMD_EAGAIN;
2863 		} else if (mp->error == EFBIG) {
2864 			XS_SETERR(csio, CAM_REQ_TOO_BIG);
2865 		} else if (mp->error == EINVAL) {
2866 			XS_SETERR(csio, CAM_REQ_INVALID);
2867 		} else {
2868 			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
2869 		}
2870 		return (retval);
2871 	}
2872 mbxsync:
2873 	if (isp->isp_dblev & ISP_LOGDEBUG1) {
2874 		isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq);
2875 	}
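	/*
	 * The isp_put_* routines copy the staging entry into the actual
	 * queue slot, swizzling from host to chip byte order as needed.
	 */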
2876 	switch (rq->req_header.rqs_entry_type) {
2877 	case RQSTYPE_REQUEST:
2878 		isp_put_request(isp, rq, qep);
2879 		break;
2880 	case RQSTYPE_CMDONLY:
2881 		isp_put_extended_request(isp, (ispextreq_t *)rq,
2882 		    (ispextreq_t *)qep);
2883 		break;
2884 	case RQSTYPE_T2RQS:
2885 		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
2886 		break;
2887 	case RQSTYPE_A64:
2888 	case RQSTYPE_T3RQS:
2889 		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
2890 		break;
2891 	case RQSTYPE_T7RQS:
2892 		isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep);
2893 		break;
2894 	}
2895 	return (CMD_QUEUED);
2896 }
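
/*
 * A sketch of how the platform dispatch code is expected to consume the
 * return value (see isp_start() in the core module):
 *
 *	switch (isp_pci_dmasetup(isp, csio, rq, &nxti, optr)) {
 *	case CMD_QUEUED:
 *		... advance the request queue in pointer to nxti ...
 *		break;
 *	case CMD_EAGAIN:
 *		... no queue entries; retry the command later ...
 *		break;
 *	default:
 *		... CMD_COMPLETE: the CCB error status is already set ...
 *	}
 */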
2897 
2898 static void
2899 isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle)
2900 {
2901 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2902 	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
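	/*
	 * Complete the sync begun in the dma callbacks: POSTREAD/POSTWRITE
	 * here pairs with the PREREAD/PREWRITE done at setup time.
	 */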
2903 	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2904 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
2905 	} else {
2906 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
2907 	}
2908 	bus_dmamap_unload(pcs->dmat, *dp);
2909 }
2910 
2912 static void
2913 isp_pci_reset0(ispsoftc_t *isp)
2914 {
2915 	ISP_DISABLE_INTS(isp);
2916 }
2917 
2918 static void
2919 isp_pci_reset1(ispsoftc_t *isp)
2920 {
2921 	if (!IS_24XX(isp)) {
2922 		/* Make sure the BIOS is disabled */
2923 		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
2924 	}
2925 	/* and enable interrupts */
2926 	ISP_ENABLE_INTS(isp);
2927 }
2928 
2929 static void
2930 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
2931 {
2932 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2933 	if (msg)
2934 		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
2935 	else
2936 		printf("%s:\n", device_get_nameunit(isp->isp_dev));
2937 	if (IS_SCSI(isp))
2938 		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
2939 	else
2940 		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
2941 	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
2942 	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
2943 	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
2944 
2946 	if (IS_SCSI(isp)) {
2947 		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
2948 		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
2949 			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
2950 			ISP_READ(isp, CDMA_FIFO_STS));
2951 		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
2952 			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
2953 			ISP_READ(isp, DDMA_FIFO_STS));
2954 		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
2955 			ISP_READ(isp, SXP_INTERRUPT),
2956 			ISP_READ(isp, SXP_GROSS_ERR),
2957 			ISP_READ(isp, SXP_PINS_CTRL));
2958 		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
2959 	}
2960 	printf("    mbox regs: %x %x %x %x %x\n",
2961 	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
2962 	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
2963 	    ISP_READ(isp, OUTMAILBOX4));
2964 	printf("    PCI Status Command/Status=%x\n",
2965 	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
2966 }
2967