xref: /freebsd/sys/dev/isp/isp_pci.c (revision 3fe92528afe8313fecf48822dde74bad5e380f48)
1 /*-
2  *
3  * Copyright (c) 1997-2006 by Matthew Jacob
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice immediately at the beginning of the file, without modification,
11  *    this list of conditions, and the following disclaimer.
12  * 2. The name of the author may not be used to endorse or promote products
13  *    derived from this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  */
28 /*
29  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
30  * FreeBSD Version.
31  */
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/module.h>
39 #if __FreeBSD_version >= 700000
40 #include <sys/linker.h>
41 #include <sys/firmware.h>
42 #endif
43 #include <sys/bus.h>
44 #if __FreeBSD_version < 500000
45 #include <pci/pcireg.h>
46 #include <pci/pcivar.h>
47 #include <machine/bus_memio.h>
48 #include <machine/bus_pio.h>
49 #else
50 #include <sys/stdint.h>
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
53 #endif
54 #include <machine/bus.h>
55 #include <machine/resource.h>
56 #include <sys/rman.h>
57 #include <sys/malloc.h>
58 
59 #include <dev/isp/isp_freebsd.h>
60 
61 #if __FreeBSD_version < 500000
62 #define	BUS_PROBE_DEFAULT	0
63 #endif
64 
65 static uint16_t isp_pci_rd_reg(ispsoftc_t *, int);
66 static void isp_pci_wr_reg(ispsoftc_t *, int, uint16_t);
67 static uint16_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
68 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint16_t);
69 static int
70 isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
71 static int
72 isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
73 static int isp_pci_mbxdma(ispsoftc_t *);
74 static int
75 isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint16_t *, uint16_t);
76 static void
77 isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint16_t);
78 
79 
80 static void isp_pci_reset1(ispsoftc_t *);
81 static void isp_pci_dumpregs(ispsoftc_t *, const char *);
82 
83 static struct ispmdvec mdvec = {
84 	isp_pci_rd_isr,
85 	isp_pci_rd_reg,
86 	isp_pci_wr_reg,
87 	isp_pci_mbxdma,
88 	isp_pci_dmasetup,
89 	isp_pci_dmateardown,
90 	NULL,
91 	isp_pci_reset1,
92 	isp_pci_dumpregs,
93 	NULL,
94 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
95 };
96 
97 static struct ispmdvec mdvec_1080 = {
98 	isp_pci_rd_isr,
99 	isp_pci_rd_reg_1080,
100 	isp_pci_wr_reg_1080,
101 	isp_pci_mbxdma,
102 	isp_pci_dmasetup,
103 	isp_pci_dmateardown,
104 	NULL,
105 	isp_pci_reset1,
106 	isp_pci_dumpregs,
107 	NULL,
108 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
109 };
110 
111 static struct ispmdvec mdvec_12160 = {
112 	isp_pci_rd_isr,
113 	isp_pci_rd_reg_1080,
114 	isp_pci_wr_reg_1080,
115 	isp_pci_mbxdma,
116 	isp_pci_dmasetup,
117 	isp_pci_dmateardown,
118 	NULL,
119 	isp_pci_reset1,
120 	isp_pci_dumpregs,
121 	NULL,
122 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
123 };
124 
125 static struct ispmdvec mdvec_2100 = {
126 	isp_pci_rd_isr,
127 	isp_pci_rd_reg,
128 	isp_pci_wr_reg,
129 	isp_pci_mbxdma,
130 	isp_pci_dmasetup,
131 	isp_pci_dmateardown,
132 	NULL,
133 	isp_pci_reset1,
134 	isp_pci_dumpregs
135 };
136 
137 static struct ispmdvec mdvec_2200 = {
138 	isp_pci_rd_isr,
139 	isp_pci_rd_reg,
140 	isp_pci_wr_reg,
141 	isp_pci_mbxdma,
142 	isp_pci_dmasetup,
143 	isp_pci_dmateardown,
144 	NULL,
145 	isp_pci_reset1,
146 	isp_pci_dumpregs
147 };
148 
149 static struct ispmdvec mdvec_2300 = {
150 	isp_pci_rd_isr_2300,
151 	isp_pci_rd_reg,
152 	isp_pci_wr_reg,
153 	isp_pci_mbxdma,
154 	isp_pci_dmasetup,
155 	isp_pci_dmateardown,
156 	NULL,
157 	isp_pci_reset1,
158 	isp_pci_dumpregs
159 };
160 
161 #ifndef	PCIM_CMD_INVEN
162 #define	PCIM_CMD_INVEN			0x10
163 #endif
164 #ifndef	PCIM_CMD_BUSMASTEREN
165 #define	PCIM_CMD_BUSMASTEREN		0x0004
166 #endif
167 #ifndef	PCIM_CMD_PERRESPEN
168 #define	PCIM_CMD_PERRESPEN		0x0040
169 #endif
170 #ifndef	PCIM_CMD_SEREN
171 #define	PCIM_CMD_SEREN			0x0100
172 #endif
173 #ifndef	PCIM_CMD_INTX_DISABLE
174 #define	PCIM_CMD_INTX_DISABLE		0x0400
175 #endif
176 
177 #ifndef	PCIR_COMMAND
178 #define	PCIR_COMMAND			0x04
179 #endif
180 
181 #ifndef	PCIR_CACHELNSZ
182 #define	PCIR_CACHELNSZ			0x0c
183 #endif
184 
185 #ifndef	PCIR_LATTIMER
186 #define	PCIR_LATTIMER			0x0d
187 #endif
188 
189 #ifndef	PCIR_ROMADDR
190 #define	PCIR_ROMADDR			0x30
191 #endif
192 
193 #ifndef	PCI_VENDOR_QLOGIC
194 #define	PCI_VENDOR_QLOGIC		0x1077
195 #endif
196 
197 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
198 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
199 #endif
200 
201 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
202 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
203 #endif
204 
205 #ifndef	PCI_PRODUCT_QLOGIC_ISP10160
206 #define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
207 #endif
208 
209 #ifndef	PCI_PRODUCT_QLOGIC_ISP12160
210 #define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
211 #endif
212 
213 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
214 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
215 #endif
216 
217 #ifndef	PCI_PRODUCT_QLOGIC_ISP1280
218 #define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
219 #endif
220 
221 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
222 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
223 #endif
224 
225 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
226 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
227 #endif
228 
229 #ifndef	PCI_PRODUCT_QLOGIC_ISP2300
230 #define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
231 #endif
232 
233 #ifndef	PCI_PRODUCT_QLOGIC_ISP2312
234 #define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
235 #endif
236 
237 #ifndef	PCI_PRODUCT_QLOGIC_ISP2322
238 #define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
239 #endif
240 
241 #ifndef	PCI_PRODUCT_QLOGIC_ISP2422
242 #define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
243 #endif
244 
245 #ifndef	PCI_PRODUCT_QLOGIC_ISP6312
246 #define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
247 #endif
248 
249 #ifndef	PCI_PRODUCT_QLOGIC_ISP6322
250 #define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
251 #endif
252 
253 
254 #define	PCI_QLOGIC_ISP1020	\
255 	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
256 
257 #define	PCI_QLOGIC_ISP1080	\
258 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
259 
260 #define	PCI_QLOGIC_ISP10160	\
261 	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
262 
263 #define	PCI_QLOGIC_ISP12160	\
264 	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
265 
266 #define	PCI_QLOGIC_ISP1240	\
267 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
268 
269 #define	PCI_QLOGIC_ISP1280	\
270 	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
271 
272 #define	PCI_QLOGIC_ISP2100	\
273 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
274 
275 #define	PCI_QLOGIC_ISP2200	\
276 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
277 
278 #define	PCI_QLOGIC_ISP2300	\
279 	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
280 
281 #define	PCI_QLOGIC_ISP2312	\
282 	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
283 
284 #define	PCI_QLOGIC_ISP2322	\
285 	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
286 
287 #define	PCI_QLOGIC_ISP2422	\
288 	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
289 
290 #define	PCI_QLOGIC_ISP6312	\
291 	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
292 
293 #define	PCI_QLOGIC_ISP6322	\
294 	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
295 
296 /*
297  * Odd case for some AMI raid cards... We need to *not* attach to this.
298  */
299 #define	AMI_RAID_SUBVENDOR_ID	0x101e
300 
301 #define	IO_MAP_REG	0x10
302 #define	MEM_MAP_REG	0x14
303 
304 #define	PCI_DFLT_LTNCY	0x40
305 #define	PCI_DFLT_LNSZ	0x10
306 
307 static int isp_pci_probe (device_t);
308 static int isp_pci_attach (device_t);
309 
310 
311 struct isp_pcisoftc {
312 	ispsoftc_t			pci_isp;
313 	device_t			pci_dev;
314 	struct resource *		pci_reg;
315 	bus_space_tag_t			pci_st;
316 	bus_space_handle_t		pci_sh;
317 	void *				ih;
318 	int16_t				pci_poff[_NREG_BLKS];
319 	bus_dma_tag_t			dmat;
320 	bus_dmamap_t			*dmaps;
321 };
322 
323 static device_method_t isp_pci_methods[] = {
324 	/* Device interface */
325 	DEVMETHOD(device_probe,		isp_pci_probe),
326 	DEVMETHOD(device_attach,	isp_pci_attach),
327 	{ 0, 0 }
328 };
329 static void isp_pci_intr(void *);
330 
331 static driver_t isp_pci_driver = {
332 	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
333 };
334 static devclass_t isp_devclass;
335 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
336 #if __FreeBSD_version >= 700000
337 MODULE_DEPEND(isp, ispfw, 1, 1, 1);
338 MODULE_DEPEND(isp, firmware, 1, 1, 1);
339 #else
340 extern ispfwfunc *isp_get_firmware_p;
341 #endif
342 
343 static int
344 isp_pci_probe(device_t dev)
345 {
346         switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
347 	case PCI_QLOGIC_ISP1020:
348 		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
349 		break;
350 	case PCI_QLOGIC_ISP1080:
351 		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
352 		break;
353 	case PCI_QLOGIC_ISP1240:
354 		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
355 		break;
356 	case PCI_QLOGIC_ISP1280:
357 		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
358 		break;
359 	case PCI_QLOGIC_ISP10160:
360 		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
361 		break;
362 	case PCI_QLOGIC_ISP12160:
363 		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
364 			return (ENXIO);
365 		}
366 		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
367 		break;
368 	case PCI_QLOGIC_ISP2100:
369 		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
370 		break;
371 	case PCI_QLOGIC_ISP2200:
372 		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
373 		break;
374 	case PCI_QLOGIC_ISP2300:
375 		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
376 		break;
377 	case PCI_QLOGIC_ISP2312:
378 		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
379 		break;
380 	case PCI_QLOGIC_ISP2322:
381 		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
382 		break;
383 #if	0
384 	case PCI_QLOGIC_ISP2422:
385 		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
386 		break;
387 #endif
388 	case PCI_QLOGIC_ISP6312:
389 		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
390 		break;
391 	case PCI_QLOGIC_ISP6322:
392 		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
393 		break;
394 	default:
395 		return (ENXIO);
396 	}
397 	if (isp_announced == 0 && bootverbose) {
398 		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
399 		    "Core Version %d.%d\n",
400 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
401 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
402 		isp_announced++;
403 	}
404 	/*
405 	 * XXXX: Here is where we might load the f/w module
406 	 * XXXX: (or increase a reference count to it).
407 	 */
408 	return (BUS_PROBE_DEFAULT);
409 }
410 
411 #if __FreeBSD_version < 500000
412 static void
413 isp_get_options(device_t dev, ispsoftc_t *isp)
414 {
415 	uint64_t wwn;
416 	int bitmap, unit;
417 
418 	unit = device_get_unit(dev);
419 	if (getenv_int("isp_disable", &bitmap)) {
420 		if (bitmap & (1 << unit)) {
421 			isp->isp_osinfo.disabled = 1;
422 			return;
423 		}
424 	}
425 
426 	if (getenv_int("isp_no_fwload", &bitmap)) {
427 		if (bitmap & (1 << unit))
428 			isp->isp_confopts |= ISP_CFG_NORELOAD;
429 	}
430 	if (getenv_int("isp_fwload", &bitmap)) {
431 		if (bitmap & (1 << unit))
432 			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
433 	}
434 	if (getenv_int("isp_no_nvram", &bitmap)) {
435 		if (bitmap & (1 << unit))
436 			isp->isp_confopts |= ISP_CFG_NONVRAM;
437 	}
438 	if (getenv_int("isp_nvram", &bitmap)) {
439 		if (bitmap & (1 << unit))
440 			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
441 	}
442 	if (getenv_int("isp_fcduplex", &bitmap)) {
443 		if (bitmap & (1 << unit))
444 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
445 	}
446 	if (getenv_int("isp_no_fcduplex", &bitmap)) {
447 		if (bitmap & (1 << unit))
448 			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
449 	}
450 	if (getenv_int("isp_nport", &bitmap)) {
451 		if (bitmap & (1 << unit))
452 			isp->isp_confopts |= ISP_CFG_NPORT;
453 	}
454 
455 	/*
456 	 * Because the resource_*_value functions can neither return
457 	 * 64 bit integer values, nor can they be directly coerced
458 	 * to interpret the right hand side of the assignment as
459 	 * you want them to interpret it, we have to force WWN
460 	 * hint replacement to specify WWN strings with a leading
461 	 * 'w' (e..g w50000000aaaa0001). Sigh.
462 	 */
463 	if (getenv_quad("isp_portwwn", &wwn)) {
464 		isp->isp_osinfo.default_port_wwn = wwn;
465 		isp->isp_confopts |= ISP_CFG_OWNWWPN;
466 	}
467 	if (isp->isp_osinfo.default_port_wwn == 0) {
468 		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
469 	}
470 
471 	if (getenv_quad("isp_nodewwn", &wwn)) {
472 		isp->isp_osinfo.default_node_wwn = wwn;
473 		isp->isp_confopts |= ISP_CFG_OWNWWNN;
474 	}
475 	if (isp->isp_osinfo.default_node_wwn == 0) {
476 		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
477 	}
478 
479 	bitmap = 0;
480 	(void) getenv_int("isp_debug", &bitmap);
481 	if (bitmap) {
482 		isp->isp_dblev = bitmap;
483 	} else {
484 		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
485 	}
486 	if (bootverbose) {
487 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
488 	}
489 
490 #ifdef	ISP_FW_CRASH_DUMP
491 	bitmap = 0;
492 	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
493 		if (bitmap & (1 << unit) {
494 			size_t amt = 0;
495 			if (IS_2200(isp)) {
496 				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
497 			} else if (IS_23XX(isp)) {
498 				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
499 			}
500 			if (amt) {
501 				FCPARAM(isp)->isp_dump_data =
502 				    malloc(amt, M_DEVBUF, M_WAITOK);
503 				memset(FCPARAM(isp)->isp_dump_data, 0, amt);
504 			} else {
505 				device_printf(dev,
506 				    "f/w crash dumps not supported for card\n");
507 			}
508 		}
509 	}
510 #endif
511 	bitmap = 0;
512 	if (getenv_int("role", &bitmap)) {
513 		isp->isp_role = bitmap;
514 	} else {
515 		isp->isp_role = ISP_DEFAULT_ROLES;
516 	}
517 }
518 
519 static void
520 isp_get_pci_options(device_t dev, int *m1, int *m2)
521 {
522 	int bitmap;
523 	int unit = device_get_unit(dev);
524 
525 	*m1 = PCIM_CMD_MEMEN;
526 	*m2 = PCIM_CMD_PORTEN;
527 	if (getenv_int("isp_mem_map", &bitmap)) {
528 		if (bitmap & (1 << unit)) {
529 			*m1 = PCIM_CMD_MEMEN;
530 			*m2 = PCIM_CMD_PORTEN;
531 		}
532 	}
533 	bitmap = 0;
534 	if (getenv_int("isp_io_map", &bitmap)) {
535 		if (bitmap & (1 << unit)) {
536 			*m1 = PCIM_CMD_PORTEN;
537 			*m2 = PCIM_CMD_MEMEN;
538 		}
539 	}
540 }
541 #else
542 static void
543 isp_get_options(device_t dev, ispsoftc_t *isp)
544 {
545 	int tval;
546 	const char *sptr;
547 	/*
548 	 * Figure out if we're supposed to skip this one.
549 	 */
550 
551 	tval = 0;
552 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
553 	    "disable", &tval) == 0 && tval) {
554 		device_printf(dev, "disabled at user request\n");
555 		isp->isp_osinfo.disabled = 1;
556 		return;
557 	}
558 
559 	tval = -1;
560 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
561 	    "role", &tval) == 0 && tval != -1) {
562 		tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
563 		isp->isp_role = tval;
564 		device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
565 	} else {
566 #ifdef	ISP_TARGET_MODE
567 		isp->isp_role = ISP_ROLE_TARGET;
568 #else
569 		isp->isp_role = ISP_DEFAULT_ROLES;
570 #endif
571 	}
572 
573 	tval = 0;
574         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
575             "fwload_disable", &tval) == 0 && tval != 0) {
576 		isp->isp_confopts |= ISP_CFG_NORELOAD;
577 	}
578 	tval = 0;
579         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
580             "ignore_nvram", &tval) == 0 && tval != 0) {
581 		isp->isp_confopts |= ISP_CFG_NONVRAM;
582 	}
583 	tval = 0;
584         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
585             "fullduplex", &tval) == 0 && tval != 0) {
586 		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
587 	}
588 #ifdef	ISP_FW_CRASH_DUMP
589 	tval = 0;
590         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
591             "fw_dump_enable", &tval) == 0 && tval != 0) {
592 		size_t amt = 0;
593 		if (IS_2200(isp)) {
594 			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
595 		} else if (IS_23XX(isp)) {
596 			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
597 		}
598 		if (amt) {
599 			FCPARAM(isp)->isp_dump_data =
600 			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
601 		} else {
602 			device_printf(dev,
603 			    "f/w crash dumps not supported for this model\n");
604 		}
605 	}
606 #endif
607 
608 	sptr = 0;
609         if (resource_string_value(device_get_name(dev), device_get_unit(dev),
610             "topology", (const char **) &sptr) == 0 && sptr != 0) {
611 		if (strcmp(sptr, "lport") == 0) {
612 			isp->isp_confopts |= ISP_CFG_LPORT;
613 		} else if (strcmp(sptr, "nport") == 0) {
614 			isp->isp_confopts |= ISP_CFG_NPORT;
615 		} else if (strcmp(sptr, "lport-only") == 0) {
616 			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
617 		} else if (strcmp(sptr, "nport-only") == 0) {
618 			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
619 		}
620 	}
621 
622 	/*
623 	 * Because the resource_*_value functions can neither return
624 	 * 64 bit integer values, nor can they be directly coerced
625 	 * to interpret the right hand side of the assignment as
626 	 * you want them to interpret it, we have to force WWN
627 	 * hint replacement to specify WWN strings with a leading
628 	 * 'w' (e..g w50000000aaaa0001). Sigh.
629 	 */
630 	sptr = 0;
631 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
632             "portwwn", (const char **) &sptr);
633 	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
634 		char *eptr = 0;
635 		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
636 		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
637 			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
638 			isp->isp_osinfo.default_port_wwn = 0;
639 		} else {
640 			isp->isp_confopts |= ISP_CFG_OWNWWPN;
641 		}
642 	}
643 	if (isp->isp_osinfo.default_port_wwn == 0) {
644 		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
645 	}
646 
647 	sptr = 0;
648 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
649             "nodewwn", (const char **) &sptr);
650 	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
651 		char *eptr = 0;
652 		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
653 		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
654 			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
655 			isp->isp_osinfo.default_node_wwn = 0;
656 		} else {
657 			isp->isp_confopts |= ISP_CFG_OWNWWNN;
658 		}
659 	}
660 	if (isp->isp_osinfo.default_node_wwn == 0) {
661 		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
662 	}
663 
664 	isp->isp_osinfo.default_id = -1;
665 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
666             "iid", &tval) == 0) {
667 		isp->isp_osinfo.default_id = tval;
668 		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
669 	}
670 	if (isp->isp_osinfo.default_id == -1) {
671 		if (IS_FC(isp)) {
672 			isp->isp_osinfo.default_id = 109;
673 		} else {
674 			isp->isp_osinfo.default_id = 7;
675 		}
676 	}
677 
678 	/*
679 	 * Set up logging levels.
680 	 */
681 	tval = 0;
682         (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
683             "debug", &tval);
684 	if (tval) {
685 		isp->isp_dblev = tval;
686 	} else {
687 		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
688 	}
689 	if (bootverbose) {
690 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
691 	}
692 
693 }
694 
695 static void
696 isp_get_pci_options(device_t dev, int *m1, int *m2)
697 {
698 	int tval;
699 	/*
700 	 * Which we should try first - memory mapping or i/o mapping?
701 	 *
702 	 * We used to try memory first followed by i/o on alpha, otherwise
703 	 * the reverse, but we should just try memory first all the time now.
704 	 */
705 	*m1 = PCIM_CMD_MEMEN;
706 	*m2 = PCIM_CMD_PORTEN;
707 
708 	tval = 0;
709         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
710             "prefer_iomap", &tval) == 0 && tval != 0) {
711 		*m1 = PCIM_CMD_PORTEN;
712 		*m2 = PCIM_CMD_MEMEN;
713 	}
714 	tval = 0;
715         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
716             "prefer_memmap", &tval) == 0 && tval != 0) {
717 		*m1 = PCIM_CMD_MEMEN;
718 		*m2 = PCIM_CMD_PORTEN;
719 	}
720 }
721 #endif
722 
723 static int
724 isp_pci_attach(device_t dev)
725 {
726 	struct resource *regs, *irq;
727 	int rtp, rgd, iqd, m1, m2;
728 	uint32_t data, cmd, linesz, psize, basetype;
729 	struct isp_pcisoftc *pcs;
730 	ispsoftc_t *isp = NULL;
731 	struct ispmdvec *mdvp;
732 #if __FreeBSD_version >= 500000
733 	int locksetup = 0;
734 #endif
735 
736 	pcs = device_get_softc(dev);
737 	if (pcs == NULL) {
738 		device_printf(dev, "cannot get softc\n");
739 		return (ENOMEM);
740 	}
741 	memset(pcs, 0, sizeof (*pcs));
742 	pcs->pci_dev = dev;
743 	isp = &pcs->pci_isp;
744 
745 	/*
746 	 * Get Generic Options
747 	 */
748 	isp_get_options(dev, isp);
749 
750 	/*
751 	 * Check to see if options have us disabled
752 	 */
753 	if (isp->isp_osinfo.disabled) {
754 		/*
755 		 * But return zero to preserve unit numbering
756 		 */
757 		return (0);
758 	}
759 
760 	/*
761 	 * Get PCI options- which in this case are just mapping preferences.
762 	 */
763 	isp_get_pci_options(dev, &m1, &m2);
764 
765 
766 	linesz = PCI_DFLT_LNSZ;
767 	irq = regs = NULL;
768 	rgd = rtp = iqd = 0;
769 
770 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
771 	if (cmd & m1) {
772 		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
773 		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
774 		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
775 	}
776 	if (regs == NULL && (cmd & m2)) {
777 		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
778 		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
779 		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
780 	}
781 	if (regs == NULL) {
782 		device_printf(dev, "unable to map any ports\n");
783 		goto bad;
784 	}
785 	if (bootverbose) {
786 		device_printf(dev, "using %s space register mapping\n",
787 		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
788 	}
789 	pcs->pci_dev = dev;
790 	pcs->pci_reg = regs;
791 	pcs->pci_st = rman_get_bustag(regs);
792 	pcs->pci_sh = rman_get_bushandle(regs);
793 
794 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
795 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
796 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
797 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
798 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
799 	mdvp = &mdvec;
800 	basetype = ISP_HA_SCSI_UNKNOWN;
801 	psize = sizeof (sdparam);
802 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
803 		mdvp = &mdvec;
804 		basetype = ISP_HA_SCSI_UNKNOWN;
805 		psize = sizeof (sdparam);
806 	}
807 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
808 		mdvp = &mdvec_1080;
809 		basetype = ISP_HA_SCSI_1080;
810 		psize = sizeof (sdparam);
811 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
812 		    ISP1080_DMA_REGS_OFF;
813 	}
814 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
815 		mdvp = &mdvec_1080;
816 		basetype = ISP_HA_SCSI_1240;
817 		psize = 2 * sizeof (sdparam);
818 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
819 		    ISP1080_DMA_REGS_OFF;
820 	}
821 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
822 		mdvp = &mdvec_1080;
823 		basetype = ISP_HA_SCSI_1280;
824 		psize = 2 * sizeof (sdparam);
825 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
826 		    ISP1080_DMA_REGS_OFF;
827 	}
828 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
829 		mdvp = &mdvec_12160;
830 		basetype = ISP_HA_SCSI_10160;
831 		psize = sizeof (sdparam);
832 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
833 		    ISP1080_DMA_REGS_OFF;
834 	}
835 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
836 		mdvp = &mdvec_12160;
837 		basetype = ISP_HA_SCSI_12160;
838 		psize = 2 * sizeof (sdparam);
839 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
840 		    ISP1080_DMA_REGS_OFF;
841 	}
842 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
843 		mdvp = &mdvec_2100;
844 		basetype = ISP_HA_FC_2100;
845 		psize = sizeof (fcparam);
846 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
847 		    PCI_MBOX_REGS2100_OFF;
848 		if (pci_get_revid(dev) < 3) {
849 			/*
850 			 * XXX: Need to get the actual revision
851 			 * XXX: number of the 2100 FB. At any rate,
852 			 * XXX: lower cache line size for early revision
853 			 * XXX; boards.
854 			 */
855 			linesz = 1;
856 		}
857 	}
858 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
859 		mdvp = &mdvec_2200;
860 		basetype = ISP_HA_FC_2200;
861 		psize = sizeof (fcparam);
862 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
863 		    PCI_MBOX_REGS2100_OFF;
864 	}
865 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
866 		mdvp = &mdvec_2300;
867 		basetype = ISP_HA_FC_2300;
868 		psize = sizeof (fcparam);
869 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
870 		    PCI_MBOX_REGS2300_OFF;
871 	}
872 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
873 	    pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
874 		mdvp = &mdvec_2300;
875 		basetype = ISP_HA_FC_2312;
876 		psize = sizeof (fcparam);
877 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
878 		    PCI_MBOX_REGS2300_OFF;
879 	}
880 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
881 	    pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
882 		mdvp = &mdvec_2300;
883 		basetype = ISP_HA_FC_2322;
884 		psize = sizeof (fcparam);
885 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
886 		    PCI_MBOX_REGS2300_OFF;
887 	}
888 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) {
889 		mdvp = &mdvec_2300;
890 		basetype = ISP_HA_FC_2422;
891 		psize = sizeof (fcparam);
892 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
893 		    PCI_MBOX_REGS2300_OFF;
894 	}
895 	isp = &pcs->pci_isp;
896 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
897 	if (isp->isp_param == NULL) {
898 		device_printf(dev, "cannot allocate parameter data\n");
899 		goto bad;
900 	}
901 	isp->isp_mdvec = mdvp;
902 	isp->isp_type = basetype;
903 	isp->isp_revision = pci_get_revid(dev);
904 	isp->isp_dev = dev;
905 
906 #if __FreeBSD_version >= 700000
907 	/*
908 	 * Try and find firmware for this device.
909 	 */
910 	{
911 		char fwname[32];
912 		unsigned int did = pci_get_device(dev);
913 
914 		/*
915 		 * Map a few pci ids to fw names
916 		 */
917 		switch (did) {
918 		case PCI_PRODUCT_QLOGIC_ISP1020:
919 			did = 0x1040;
920 			break;
921 		case PCI_PRODUCT_QLOGIC_ISP1240:
922 			did = 0x1080;
923 			break;
924 		case PCI_PRODUCT_QLOGIC_ISP10160:
925 		case PCI_PRODUCT_QLOGIC_ISP12160:
926 			did = 0x12160;
927 			break;
928 		case PCI_PRODUCT_QLOGIC_ISP6312:
929 		case PCI_PRODUCT_QLOGIC_ISP2312:
930 			did = 0x2300;
931 			break;
932 		case PCI_PRODUCT_QLOGIC_ISP6322:
933 			did = 0x2322;
934 			break;
935 		default:
936 			break;
937 		}
938 
939 		isp->isp_osinfo.fw = NULL;
940 		if (isp->isp_role & ISP_ROLE_TARGET) {
941 			snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
942 			isp->isp_osinfo.fw = firmware_get(fwname);
943 		}
944 		if (isp->isp_osinfo.fw == NULL) {
945 			snprintf(fwname, sizeof (fwname), "isp_%04x", did);
946 			isp->isp_osinfo.fw = firmware_get(fwname);
947 		}
948 		if (isp->isp_osinfo.fw != NULL) {
949 			union {
950 				const void *fred;
951 				uint16_t *bob;
952 			} u;
953 			u.fred = isp->isp_osinfo.fw->data;
954 			isp->isp_mdvec->dv_ispfw = u.bob;
955 		}
956 	}
957 #else
958 	if (isp_get_firmware_p) {
959 		int device = (int) pci_get_device(dev);
960 #ifdef	ISP_TARGET_MODE
961 		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
962 #else
963 		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
964 #endif
965 	}
966 #endif
967 
968 	/*
969 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
970 	 * are set.
971 	 */
972 	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
973 		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
974 
975 	if (IS_2300(isp)) {	/* per QLogic errata */
976 		cmd &= ~PCIM_CMD_INVEN;
977 	}
978 
979 	if (IS_23XX(isp)) {
980 		/*
981 		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
982 		 */
983 		isp->isp_touched = 1;
984 
985 	}
986 
987 	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
988 		cmd &= ~PCIM_CMD_INTX_DISABLE;
989 	}
990 
991 	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
992 
993 	/*
994 	 * Make sure the Cache Line Size register is set sensibly.
995 	 */
996 	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
997 	if (data != linesz) {
998 		data = PCI_DFLT_LNSZ;
999 		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
1000 		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
1001 	}
1002 
1003 	/*
1004 	 * Make sure the Latency Timer is sane.
1005 	 */
1006 	data = pci_read_config(dev, PCIR_LATTIMER, 1);
1007 	if (data < PCI_DFLT_LTNCY) {
1008 		data = PCI_DFLT_LTNCY;
1009 		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
1010 		pci_write_config(dev, PCIR_LATTIMER, data, 1);
1011 	}
1012 
1013 	/*
1014 	 * Make sure we've disabled the ROM.
1015 	 */
1016 	data = pci_read_config(dev, PCIR_ROMADDR, 4);
1017 	data &= ~1;
1018 	pci_write_config(dev, PCIR_ROMADDR, data, 4);
1019 
1020 	iqd = 0;
1021 	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
1022 	    RF_ACTIVE | RF_SHAREABLE);
1023 	if (irq == NULL) {
1024 		device_printf(dev, "could not allocate interrupt\n");
1025 		goto bad;
1026 	}
1027 
1028 #if __FreeBSD_version >= 500000
1029 	/* Make sure the lock is set up. */
1030 	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
1031 	locksetup++;
1032 #endif
1033 
1034 	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
1035 		device_printf(dev, "could not setup interrupt\n");
1036 		goto bad;
1037 	}
1038 
1039 	/*
1040 	 * Last minute checks...
1041 	 */
1042 	if (IS_23XX(isp)) {
1043 		isp->isp_port = pci_get_function(dev);
1044 	}
1045 
1046 	/*
1047 	 * Make sure we're in reset state.
1048 	 */
1049 	ISP_LOCK(isp);
1050 	isp_reset(isp);
1051 	if (isp->isp_state != ISP_RESETSTATE) {
1052 		ISP_UNLOCK(isp);
1053 		goto bad;
1054 	}
1055 	isp_init(isp);
1056 	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
1057 		isp_uninit(isp);
1058 		ISP_UNLOCK(isp);
1059 		goto bad;
1060 	}
1061 	isp_attach(isp);
1062 	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
1063 		isp_uninit(isp);
1064 		ISP_UNLOCK(isp);
1065 		goto bad;
1066 	}
1067 	/*
1068 	 * XXXX: Here is where we might unload the f/w module
1069 	 * XXXX: (or decrease the reference count to it).
1070 	 */
1071 	ISP_UNLOCK(isp);
1072 	return (0);
1073 
1074 bad:
1075 
1076 	if (pcs && pcs->ih) {
1077 		(void) bus_teardown_intr(dev, irq, pcs->ih);
1078 	}
1079 
1080 #if __FreeBSD_version >= 500000
1081 	if (locksetup && isp) {
1082 		mtx_destroy(&isp->isp_osinfo.lock);
1083 	}
1084 #endif
1085 
1086 	if (irq) {
1087 		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
1088 	}
1089 
1090 
1091 	if (regs) {
1092 		(void) bus_release_resource(dev, rtp, rgd, regs);
1093 	}
1094 
1095 	if (pcs) {
1096 		if (pcs->pci_isp.isp_param) {
1097 #ifdef	ISP_FW_CRASH_DUMP
1098 			if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
1099 				free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
1100 			}
1101 #endif
1102 			free(pcs->pci_isp.isp_param, M_DEVBUF);
1103 		}
1104 	}
1105 
1106 	/*
1107 	 * XXXX: Here is where we might unload the f/w module
1108 	 * XXXX: (or decrease the reference count to it).
1109 	 */
1110 	return (ENXIO);
1111 }
1112 
1113 static void
1114 isp_pci_intr(void *arg)
1115 {
1116 	ispsoftc_t *isp = arg;
1117 	uint16_t isr, sema, mbox;
1118 
1119 	ISP_LOCK(isp);
1120 	isp->isp_intcnt++;
1121 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
1122 		isp->isp_intbogus++;
1123 	} else {
1124 		int iok = isp->isp_osinfo.intsok;
1125 		isp->isp_osinfo.intsok = 0;
1126 		isp_intr(isp, isr, sema, mbox);
1127 		isp->isp_osinfo.intsok = iok;
1128 	}
1129 	ISP_UNLOCK(isp);
1130 }
1131 
1132 
1133 #define	IspVirt2Off(a, x)	\
1134 	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
1135 	_BLK_REG_SHFT] + ((x) & 0xfff))
1136 
1137 #define	BXR2(pcs, off)		\
1138 	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
1139 #define	BXW2(pcs, off, v)	\
1140 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
1141 
1142 
1143 static __inline int
1144 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
1145 {
1146 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1147 	uint16_t val0, val1;
1148 	int i = 0;
1149 
1150 	do {
1151 		val0 = BXR2(pcs, IspVirt2Off(isp, off));
1152 		val1 = BXR2(pcs, IspVirt2Off(isp, off));
1153 	} while (val0 != val1 && ++i < 1000);
1154 	if (val0 != val1) {
1155 		return (1);
1156 	}
1157 	*rp = val0;
1158 	return (0);
1159 }
1160 
1161 static int
1162 isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp,
1163     uint16_t *semap, uint16_t *mbp)
1164 {
1165 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1166 	uint16_t isr, sema;
1167 
1168 	if (IS_2100(isp)) {
1169 		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
1170 		    return (0);
1171 		}
1172 		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
1173 		    return (0);
1174 		}
1175 	} else {
1176 		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
1177 		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
1178 	}
1179 	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
1180 	isr &= INT_PENDING_MASK(isp);
1181 	sema &= BIU_SEMA_LOCK;
1182 	if (isr == 0 && sema == 0) {
1183 		return (0);
1184 	}
1185 	*isrp = isr;
1186 	if ((*semap = sema) != 0) {
1187 		if (IS_2100(isp)) {
1188 			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
1189 				return (0);
1190 			}
1191 		} else {
1192 			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
1193 		}
1194 	}
1195 	return (1);
1196 }
1197 
1198 static int
1199 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp,
1200     uint16_t *semap, uint16_t *mbox0p)
1201 {
1202 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1203 	uint16_t hccr;
1204 	uint32_t r2hisr;
1205 
1206 	if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) {
1207 		*isrp = 0;
1208 		return (0);
1209 	}
1210 	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
1211 	    IspVirt2Off(pcs, BIU_R2HSTSLO));
1212 	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1213 	if ((r2hisr & BIU_R2HST_INTR) == 0) {
1214 		*isrp = 0;
1215 		return (0);
1216 	}
1217 	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
1218 	case ISPR2HST_ROM_MBX_OK:
1219 	case ISPR2HST_ROM_MBX_FAIL:
1220 	case ISPR2HST_MBX_OK:
1221 	case ISPR2HST_MBX_FAIL:
1222 	case ISPR2HST_ASYNC_EVENT:
1223 		*isrp = r2hisr & 0xffff;
1224 		*mbox0p = (r2hisr >> 16);
1225 		*semap = 1;
1226 		return (1);
1227 	case ISPR2HST_RIO_16:
1228 		*isrp = r2hisr & 0xffff;
1229 		*mbox0p = ASYNC_RIO1;
1230 		*semap = 1;
1231 		return (1);
1232 	case ISPR2HST_FPOST:
1233 		*isrp = r2hisr & 0xffff;
1234 		*mbox0p = ASYNC_CMD_CMPLT;
1235 		*semap = 1;
1236 		return (1);
1237 	case ISPR2HST_FPOST_CTIO:
1238 		*isrp = r2hisr & 0xffff;
1239 		*mbox0p = ASYNC_CTIO_DONE;
1240 		*semap = 1;
1241 		return (1);
1242 	case ISPR2HST_RSPQ_UPDATE:
1243 		*isrp = r2hisr & 0xffff;
1244 		*mbox0p = 0;
1245 		*semap = 0;
1246 		return (1);
1247 	default:
1248 		hccr = ISP_READ(isp, HCCR);
1249 		if (hccr & HCCR_PAUSE) {
1250 			ISP_WRITE(isp, HCCR, HCCR_RESET);
1251 			isp_prt(isp, ISP_LOGERR,
1252 			    "RISC paused at interrupt (%x->%x\n", hccr,
1253 			    ISP_READ(isp, HCCR));
1254 		} else {
1255 			isp_prt(isp, ISP_LOGERR, "unknown interrerupt 0x%x\n",
1256 			    r2hisr);
1257 		}
1258 		return (0);
1259 	}
1260 }
1261 
1262 static uint16_t
1263 isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
1264 {
1265 	uint16_t rv;
1266 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1267 	int oldconf = 0;
1268 
1269 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1270 		/*
1271 		 * We will assume that someone has paused the RISC processor.
1272 		 */
1273 		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1274 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1275 		    oldconf | BIU_PCI_CONF1_SXP);
1276 	}
1277 	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1278 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1279 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1280 	}
1281 	return (rv);
1282 }
1283 
1284 static void
1285 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint16_t val)
1286 {
1287 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1288 	int oldconf = 0;
1289 
1290 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1291 		/*
1292 		 * We will assume that someone has paused the RISC processor.
1293 		 */
1294 		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1295 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1296 		    oldconf | BIU_PCI_CONF1_SXP);
1297 	}
1298 	BXW2(pcs, IspVirt2Off(isp, regoff), val);
1299 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1300 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1301 	}
1302 }
1303 
1304 static uint16_t
1305 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
1306 {
1307 	uint16_t rv, oc = 0;
1308 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1309 
1310 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1311 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1312 		uint16_t tc;
1313 		/*
1314 		 * We will assume that someone has paused the RISC processor.
1315 		 */
1316 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1317 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1318 		if (regoff & SXP_BANK1_SELECT)
1319 			tc |= BIU_PCI1080_CONF1_SXP1;
1320 		else
1321 			tc |= BIU_PCI1080_CONF1_SXP0;
1322 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1323 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1324 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1325 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1326 		    oc | BIU_PCI1080_CONF1_DMA);
1327 	}
1328 	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1329 	if (oc) {
1330 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1331 	}
1332 	return (rv);
1333 }
1334 
1335 static void
1336 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint16_t val)
1337 {
1338 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1339 	int oc = 0;
1340 
1341 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1342 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1343 		uint16_t tc;
1344 		/*
1345 		 * We will assume that someone has paused the RISC processor.
1346 		 */
1347 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1348 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1349 		if (regoff & SXP_BANK1_SELECT)
1350 			tc |= BIU_PCI1080_CONF1_SXP1;
1351 		else
1352 			tc |= BIU_PCI1080_CONF1_SXP0;
1353 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1354 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1355 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1356 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1357 		    oc | BIU_PCI1080_CONF1_DMA);
1358 	}
1359 	BXW2(pcs, IspVirt2Off(isp, regoff), val);
1360 	if (oc) {
1361 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1362 	}
1363 }
1364 
1365 
1366 struct imush {
1367 	ispsoftc_t *isp;
1368 	int error;
1369 };
1370 
1371 static void imc(void *, bus_dma_segment_t *, int, int);
1372 
1373 static void
1374 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1375 {
1376 	struct imush *imushp = (struct imush *) arg;
1377 	if (error) {
1378 		imushp->error = error;
1379 	} else {
1380 		ispsoftc_t *isp =imushp->isp;
1381 		bus_addr_t addr = segs->ds_addr;
1382 
1383 		isp->isp_rquest_dma = addr;
1384 		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1385 		isp->isp_result_dma = addr;
1386 		if (IS_FC(isp)) {
1387 			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1388 			FCPARAM(isp)->isp_scdma = addr;
1389 		}
1390 	}
1391 }
1392 
1393 /*
1394  * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
1395  */
1396 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
1397 
1398 #if __FreeBSD_version < 500000
1399 #define	BUS_DMA_ROOTARG	NULL
1400 #define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
1401 	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)
1402 #elif	__FreeBSD_version < 700020
1403 #define	BUS_DMA_ROOTARG	NULL
1404 #define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
1405 	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \
1406 	busdma_lock_mutex, &Giant, z)
1407 #else
1408 #define	BUS_DMA_ROOTARG	bus_get_dma_tag(pcs->pci_dev)
1409 #define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
1410 	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \
1411 	busdma_lock_mutex, &Giant, z)
1412 #endif
1413 
1414 static int
1415 isp_pci_mbxdma(ispsoftc_t *isp)
1416 {
1417 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1418 	caddr_t base;
1419 	uint32_t len;
1420 	int i, error, ns;
1421 	bus_size_t slim;	/* segment size */
1422 	bus_addr_t llim;	/* low limit of unavailable dma */
1423 	bus_addr_t hlim;	/* high limit of unavailable dma */
1424 	struct imush im;
1425 
1426 	/*
1427 	 * Already been here? If so, leave...
1428 	 */
1429 	if (isp->isp_rquest) {
1430 		return (0);
1431 	}
1432 
1433 	hlim = BUS_SPACE_MAXADDR;
1434 	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1435 		slim = (bus_size_t) (1ULL << 32);
1436 		llim = BUS_SPACE_MAXADDR;
1437 	} else {
1438 		llim = BUS_SPACE_MAXADDR_32BIT;
1439 		slim = (1 << 24);
1440 	}
1441 
1442 	/*
1443 	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
1444 	 */
1445 #ifdef	ISP_TARGET_MODE
1446 	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
1447 		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
1448 		return (1);
1449 	}
1450 #endif
1451 
1452 	ISP_UNLOCK(isp);
1453 	if (isp_dma_tag_create(BUS_DMA_ROOTARG, 1, slim, llim,
1454 	    hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
1455 	    &pcs->dmat)) {
1456 		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1457 		ISP_LOCK(isp);
1458 		return (1);
1459 	}
1460 
1461 
1462 	len = sizeof (XS_T **) * isp->isp_maxcmds;
1463 	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1464 	if (isp->isp_xflist == NULL) {
1465 		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
1466 		ISP_LOCK(isp);
1467 		return (1);
1468 	}
1469 #ifdef	ISP_TARGET_MODE
1470 	len = sizeof (void **) * isp->isp_maxcmds;
1471 	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1472 	if (isp->isp_tgtlist == NULL) {
1473 		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
1474 		ISP_LOCK(isp);
1475 		return (1);
1476 	}
1477 #endif
1478 	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
1479 	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF,  M_WAITOK);
1480 	if (pcs->dmaps == NULL) {
1481 		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
1482 		free(isp->isp_xflist, M_DEVBUF);
1483 #ifdef	ISP_TARGET_MODE
1484 		free(isp->isp_tgtlist, M_DEVBUF);
1485 #endif
1486 		ISP_LOCK(isp);
1487 		return (1);
1488 	}
1489 
1490 	/*
1491 	 * Allocate and map the request, result queues, plus FC scratch area.
1492 	 */
1493 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1494 	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1495 	if (IS_FC(isp)) {
1496 		len += ISP2100_SCRLEN;
1497 	}
1498 
1499 	ns = (len / PAGE_SIZE) + 1;
1500 	/*
1501 	 * Create a tag for the control spaces- force it to within 32 bits.
1502 	 */
1503 	if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
1504 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1505 	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
1506 		isp_prt(isp, ISP_LOGERR,
1507 		    "cannot create a dma tag for control spaces");
1508 		free(pcs->dmaps, M_DEVBUF);
1509 		free(isp->isp_xflist, M_DEVBUF);
1510 #ifdef	ISP_TARGET_MODE
1511 		free(isp->isp_tgtlist, M_DEVBUF);
1512 #endif
1513 		ISP_LOCK(isp);
1514 		return (1);
1515 	}
1516 
1517 	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
1518 	    &isp->isp_cdmap) != 0) {
1519 		isp_prt(isp, ISP_LOGERR,
1520 		    "cannot allocate %d bytes of CCB memory", len);
1521 		bus_dma_tag_destroy(isp->isp_cdmat);
1522 		free(isp->isp_xflist, M_DEVBUF);
1523 #ifdef	ISP_TARGET_MODE
1524 		free(isp->isp_tgtlist, M_DEVBUF);
1525 #endif
1526 		free(pcs->dmaps, M_DEVBUF);
1527 		ISP_LOCK(isp);
1528 		return (1);
1529 	}
1530 
1531 	for (i = 0; i < isp->isp_maxcmds; i++) {
1532 		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
1533 		if (error) {
1534 			isp_prt(isp, ISP_LOGERR,
1535 			    "error %d creating per-cmd DMA maps", error);
1536 			while (--i >= 0) {
1537 				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
1538 			}
1539 			goto bad;
1540 		}
1541 	}
1542 
1543 	im.isp = isp;
1544 	im.error = 0;
1545 	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
1546 	if (im.error) {
1547 		isp_prt(isp, ISP_LOGERR,
1548 		    "error %d loading dma map for control areas", im.error);
1549 		goto bad;
1550 	}
1551 
1552 	isp->isp_rquest = base;
1553 	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1554 	isp->isp_result = base;
1555 	if (IS_FC(isp)) {
1556 		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1557 		FCPARAM(isp)->isp_scratch = base;
1558 	}
1559 	ISP_LOCK(isp);
1560 	return (0);
1561 
1562 bad:
1563 	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
1564 	bus_dma_tag_destroy(isp->isp_cdmat);
1565 	free(isp->isp_xflist, M_DEVBUF);
1566 #ifdef	ISP_TARGET_MODE
1567 	free(isp->isp_tgtlist, M_DEVBUF);
1568 #endif
1569 	free(pcs->dmaps, M_DEVBUF);
1570 	ISP_LOCK(isp);
1571 	isp->isp_rquest = NULL;
1572 	return (1);
1573 }
1574 
1575 typedef struct {
1576 	ispsoftc_t *isp;
1577 	void *cmd_token;
1578 	void *rq;
1579 	uint16_t *nxtip;
1580 	uint16_t optr;
1581 	int error;
1582 } mush_t;
1583 
1584 #define	MUSHERR_NOQENTRIES	-2
1585 
1586 #ifdef	ISP_TARGET_MODE
1587 /*
1588  * We need to handle DMA for target mode differently from initiator mode.
1589  *
1590  * DMA mapping and construction and submission of CTIO Request Entries
1591  * and rendevous for completion are very tightly coupled because we start
1592  * out by knowing (per platform) how much data we have to move, but we
1593  * don't know, up front, how many DMA mapping segments will have to be used
1594  * cover that data, so we don't know how many CTIO Request Entries we
1595  * will end up using. Further, for performance reasons we may want to
1596  * (on the last CTIO for Fibre Channel), send status too (if all went well).
1597  *
1598  * The standard vector still goes through isp_pci_dmasetup, but the callback
1599  * for the DMA mapping routines comes here instead with the whole transfer
1600  * mapped and a pointer to a partially filled in already allocated request
1601  * queue entry. We finish the job.
1602  */
1603 static void tdma_mk(void *, bus_dma_segment_t *, int, int);
1604 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);
1605 
1606 #define	STATUS_WITH_DATA	1
1607 
1608 static void
1609 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1610 {
1611 	mush_t *mp;
1612 	struct ccb_scsiio *csio;
1613 	ispsoftc_t *isp;
1614 	struct isp_pcisoftc *pcs;
1615 	bus_dmamap_t *dp;
1616 	ct_entry_t *cto, *qe;
1617 	uint8_t scsi_status;
1618 	uint16_t curi, nxti, handle;
1619 	uint32_t sflags;
1620 	int32_t resid;
1621 	int nth_ctio, nctios, send_status;
1622 
1623 	mp = (mush_t *) arg;
1624 	if (error) {
1625 		mp->error = error;
1626 		return;
1627 	}
1628 
1629 	isp = mp->isp;
1630 	csio = mp->cmd_token;
1631 	cto = mp->rq;
1632 	curi = isp->isp_reqidx;
1633 	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1634 
1635 	cto->ct_xfrlen = 0;
1636 	cto->ct_seg_count = 0;
1637 	cto->ct_header.rqs_entry_count = 1;
1638 	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1639 
1640 	if (nseg == 0) {
1641 		cto->ct_header.rqs_seqno = 1;
1642 		isp_prt(isp, ISP_LOGTDEBUG1,
1643 		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
1644 		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
1645 		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
1646 		    cto->ct_scsi_status, cto->ct_resid);
1647 		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
1648 		isp_put_ctio(isp, cto, qe);
1649 		return;
1650 	}
1651 
1652 	nctios = nseg / ISP_RQDSEG;
1653 	if (nseg % ISP_RQDSEG) {
1654 		nctios++;
1655 	}
1656 
1657 	/*
1658 	 * Save syshandle, and potentially any SCSI status, which we'll
1659 	 * reinsert on the last CTIO we're going to send.
1660 	 */
1661 
1662 	handle = cto->ct_syshandle;
1663 	cto->ct_syshandle = 0;
1664 	cto->ct_header.rqs_seqno = 0;
1665 	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1666 
1667 	if (send_status) {
1668 		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1669 		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1670 		/*
1671 		 * Preserve residual.
1672 		 */
1673 		resid = cto->ct_resid;
1674 
1675 		/*
1676 		 * Save actual SCSI status.
1677 		 */
1678 		scsi_status = cto->ct_scsi_status;
1679 
1680 #ifndef	STATUS_WITH_DATA
1681 		sflags |= CT_NO_DATA;
1682 		/*
1683 		 * We can't do a status at the same time as a data CTIO, so
1684 		 * we need to synthesize an extra CTIO at this level.
1685 		 */
1686 		nctios++;
1687 #endif
1688 	} else {
1689 		sflags = scsi_status = resid = 0;
1690 	}
1691 
1692 	cto->ct_resid = 0;
1693 	cto->ct_scsi_status = 0;
1694 
1695 	pcs = (struct isp_pcisoftc *)isp;
1696 	dp = &pcs->dmaps[isp_handle_index(handle)];
1697 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1698 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1699 	} else {
1700 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1701 	}
1702 
1703 	nxti = *mp->nxtip;
1704 
1705 	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
1706 		int seglim;
1707 
1708 		seglim = nseg;
1709 		if (seglim) {
1710 			int seg;
1711 
1712 			if (seglim > ISP_RQDSEG)
1713 				seglim = ISP_RQDSEG;
1714 
1715 			for (seg = 0; seg < seglim; seg++, nseg--) {
1716 				/*
1717 				 * Unlike normal initiator commands, we don't
1718 				 * do any swizzling here.
1719 				 */
1720 				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
1721 				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
1722 				cto->ct_xfrlen += dm_segs->ds_len;
1723 				dm_segs++;
1724 			}
1725 			cto->ct_seg_count = seg;
1726 		} else {
1727 			/*
1728 			 * This case should only happen when we're sending an
1729 			 * extra CTIO with final status.
1730 			 */
1731 			if (send_status == 0) {
1732 				isp_prt(isp, ISP_LOGWARN,
1733 				    "tdma_mk ran out of segments");
1734 				mp->error = EINVAL;
1735 				return;
1736 			}
1737 		}
1738 
1739 		/*
1740 		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
1741 		 * ct_tagtype, and ct_timeout have been carried over
1742 		 * unchanged from what our caller had set.
1743 		 *
1744 		 * The dataseg fields and the seg_count fields we just got
1745 		 * through setting. The data direction we've preserved all
1746 		 * along and only clear it if we're now sending status.
1747 		 */
1748 
1749 		if (nth_ctio == nctios - 1) {
1750 			/*
1751 			 * We're the last in a sequence of CTIOs, so mark
1752 			 * this CTIO and save the handle to the CCB such that
1753 			 * when this CTIO completes we can free dma resources
1754 			 * and do whatever else we need to do to finish the
1755 			 * rest of the command. We *don't* give this to the
1756 			 * firmware to work on- the caller will do that.
1757 			 */
1758 
1759 			cto->ct_syshandle = handle;
1760 			cto->ct_header.rqs_seqno = 1;
1761 
1762 			if (send_status) {
1763 				cto->ct_scsi_status = scsi_status;
1764 				cto->ct_flags |= sflags;
1765 				cto->ct_resid = resid;
1766 			}
1767 			if (send_status) {
1768 				isp_prt(isp, ISP_LOGTDEBUG1,
1769 				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
1770 				    "scsi status %x resid %d",
1771 				    cto->ct_fwhandle, csio->ccb_h.target_lun,
1772 				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
1773 				    cto->ct_scsi_status, cto->ct_resid);
1774 			} else {
1775 				isp_prt(isp, ISP_LOGTDEBUG1,
1776 				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
1777 				    cto->ct_fwhandle, csio->ccb_h.target_lun,
1778 				    cto->ct_iid, cto->ct_tag_val,
1779 				    cto->ct_flags);
1780 			}
1781 			isp_put_ctio(isp, cto, qe);
1782 			ISP_TDQE(isp, "last tdma_mk", curi, cto);
1783 			if (nctios > 1) {
1784 				MEMORYBARRIER(isp, SYNC_REQUEST,
1785 				    curi, QENTRY_LEN);
1786 			}
1787 		} else {
1788 			ct_entry_t *oqe = qe;
1789 
1790 			/*
1791 			 * Make sure syshandle fields are clean
1792 			 */
1793 			cto->ct_syshandle = 0;
1794 			cto->ct_header.rqs_seqno = 0;
1795 
1796 			isp_prt(isp, ISP_LOGTDEBUG1,
1797 			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
1798 			    cto->ct_fwhandle, csio->ccb_h.target_lun,
1799 			    cto->ct_iid, cto->ct_flags);
1800 
1801 			/*
1802 			 * Get a new CTIO
1803 			 */
1804 			qe = (ct_entry_t *)
1805 			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1806 			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
1807 			if (nxti == mp->optr) {
1808 				isp_prt(isp, ISP_LOGTDEBUG0,
1809 				    "Queue Overflow in tdma_mk");
1810 				mp->error = MUSHERR_NOQENTRIES;
1811 				return;
1812 			}
1813 
1814 			/*
1815 			 * Now that we're done with the old CTIO,
1816 			 * flush it out to the request queue.
1817 			 */
1818 			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
1819 			isp_put_ctio(isp, cto, oqe);
1820 			if (nth_ctio != 0) {
1821 				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
1822 				    QENTRY_LEN);
1823 			}
1824 			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));
1825 
1826 			/*
1827 			 * Reset some fields in the CTIO so we can reuse
1828 			 * for the next one we'll flush to the request
1829 			 * queue.
1830 			 */
1831 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1832 			cto->ct_header.rqs_entry_count = 1;
1833 			cto->ct_header.rqs_flags = 0;
1834 			cto->ct_status = 0;
1835 			cto->ct_scsi_status = 0;
1836 			cto->ct_xfrlen = 0;
1837 			cto->ct_resid = 0;
1838 			cto->ct_seg_count = 0;
1839 			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1840 		}
1841 	}
1842 	*mp->nxtip = nxti;
1843 }
1844 
1845 /*
1846  * We don't have to do multiple CTIOs here. Instead, we can just do
1847  * continuation segments as needed. This greatly simplifies the code
1848  * improves performance.
1849  */
1850 
1851 static void
1852 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1853 {
1854 	mush_t *mp;
1855 	struct ccb_scsiio *csio;
1856 	ispsoftc_t *isp;
1857 	ct2_entry_t *cto, *qe;
1858 	uint16_t curi, nxti;
1859 	ispds_t *ds;
1860 	ispds64_t *ds64;
1861 	int segcnt, seglim;
1862 
1863 	mp = (mush_t *) arg;
1864 	if (error) {
1865 		mp->error = error;
1866 		return;
1867 	}
1868 
1869 	isp = mp->isp;
1870 	csio = mp->cmd_token;
1871 	cto = mp->rq;
1872 
1873 	curi = isp->isp_reqidx;
1874 	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1875 
1876 	if (nseg == 0) {
1877 		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1878 			isp_prt(isp, ISP_LOGWARN,
1879 			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
1880 			    "set (0x%x)", cto->ct_flags);
1881 			mp->error = EINVAL;
1882 			return;
1883 		}
1884 		/*
1885 		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1886 		 * flags to NO DATA and clear relative offset flags.
1887 		 * We preserve the ct_resid and the response area.
1888 		 */
1889 		cto->ct_header.rqs_seqno = 1;
1890 		cto->ct_seg_count = 0;
1891 		cto->ct_reloff = 0;
1892 		isp_prt(isp, ISP_LOGTDEBUG1,
1893 		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1894 		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
1895 		    cto->ct_iid, cto->ct_flags, cto->ct_status,
1896 		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1897 		if (IS_2KLOGIN(isp)) {
1898 			isp_put_ctio2e(isp,
1899 			    (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
1900 		} else {
1901 			isp_put_ctio2(isp, cto, qe);
1902 		}
1903 		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
1904 		return;
1905 	}
1906 
1907 	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1908 		isp_prt(isp, ISP_LOGERR,
1909 		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
1910 		    "(0x%x)", cto->ct_flags);
1911 		mp->error = EINVAL;
1912 		return;
1913 	}
1914 
1915 
1916 	nxti = *mp->nxtip;
1917 
1918 	/*
1919 	 * Check to see if we need to DAC addressing or not.
1920 	 *
1921 	 * Any address that's over the 4GB boundary causes this
1922 	 * to happen.
1923 	 */
1924 	segcnt = nseg;
1925 	if (sizeof (bus_addr_t) > 4) {
1926 		for (segcnt = 0; segcnt < nseg; segcnt++) {
1927 			uint64_t addr = dm_segs[segcnt].ds_addr;
1928 			if (addr >= 0x100000000LL) {
1929 				break;
1930 			}
1931 		}
1932 	}
1933 	if (segcnt != nseg) {
1934 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
1935 		seglim = ISP_RQDSEG_T3;
1936 		ds64 = &cto->rsp.m0.ct_dataseg64[0];
1937 		ds = NULL;
1938 	} else {
1939 		seglim = ISP_RQDSEG_T2;
1940 		ds64 = NULL;
1941 		ds = &cto->rsp.m0.ct_dataseg[0];
1942 	}
1943 	cto->ct_seg_count = 0;
1944 
1945 	/*
1946 	 * Set up the CTIO2 data segments.
1947 	 */
1948 	for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
1949 	    cto->ct_seg_count++, segcnt++) {
1950 		if (ds64) {
1951 			ds64->ds_basehi =
1952 			    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
1953 			ds64->ds_base = dm_segs[segcnt].ds_addr;
1954 			ds64->ds_count = dm_segs[segcnt].ds_len;
1955 			ds64++;
1956 		} else {
1957 			ds->ds_base = dm_segs[segcnt].ds_addr;
1958 			ds->ds_count = dm_segs[segcnt].ds_len;
1959 			ds++;
1960 		}
1961 		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1962 #if __FreeBSD_version < 500000
1963 		isp_prt(isp, ISP_LOGTDEBUG1,
1964 		    "isp_send_ctio2: ent0[%d]0x%llx:%llu",
1965 		    cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
1966 		    (uint64_t)dm_segs[segcnt].ds_len);
1967 #else
1968 		isp_prt(isp, ISP_LOGTDEBUG1,
1969 		    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
1970 		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
1971 		    (uintmax_t)dm_segs[segcnt].ds_len);
1972 #endif
1973 	}
1974 
1975 	while (segcnt < nseg) {
1976 		uint16_t curip;
1977 		int seg;
1978 		ispcontreq_t local, *crq = &local, *qep;
1979 
1980 		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1981 		curip = nxti;
1982 		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
1983 		if (nxti == mp->optr) {
1984 			ISP_UNLOCK(isp);
1985 			isp_prt(isp, ISP_LOGTDEBUG0,
1986 			    "tdma_mkfc: request queue overflow");
1987 			mp->error = MUSHERR_NOQENTRIES;
1988 			return;
1989 		}
1990 		cto->ct_header.rqs_entry_count++;
1991 		MEMZERO((void *)crq, sizeof (*crq));
1992 		crq->req_header.rqs_entry_count = 1;
1993 		if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
1994 			seglim = ISP_CDSEG64;
1995 			ds = NULL;
1996 			ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
1997 			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
1998 		} else {
1999 			seglim = ISP_CDSEG;
2000 			ds = &crq->req_dataseg[0];
2001 			ds64 = NULL;
2002 			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
2003 		}
2004 		for (seg = 0; segcnt < nseg && seg < seglim;
2005 		    segcnt++, seg++) {
2006 			if (ds64) {
2007 				ds64->ds_basehi =
2008 				  ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
2009 				ds64->ds_base = dm_segs[segcnt].ds_addr;
2010 				ds64->ds_count = dm_segs[segcnt].ds_len;
2011 				ds64++;
2012 			} else {
2013 				ds->ds_base = dm_segs[segcnt].ds_addr;
2014 				ds->ds_count = dm_segs[segcnt].ds_len;
2015 				ds++;
2016 			}
2017 #if __FreeBSD_version < 500000
2018 			isp_prt(isp, ISP_LOGTDEBUG1,
2019 			    "isp_send_ctio2: ent%d[%d]%llx:%llu",
2020 			    cto->ct_header.rqs_entry_count-1, seg,
2021 			    (uint64_t)dm_segs[segcnt].ds_addr,
2022 			    (uint64_t)dm_segs[segcnt].ds_len);
2023 #else
2024 			isp_prt(isp, ISP_LOGTDEBUG1,
2025 			    "isp_send_ctio2: ent%d[%d]%jx:%ju",
2026 			    cto->ct_header.rqs_entry_count-1, seg,
2027 			    (uintmax_t)dm_segs[segcnt].ds_addr,
2028 			    (uintmax_t)dm_segs[segcnt].ds_len);
2029 #endif
2030 			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
2031 			cto->ct_seg_count++;
2032 		}
2033 		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
2034 		isp_put_cont_req(isp, crq, qep);
2035 		ISP_TDQE(isp, "cont entry", curi, qep);
2036 	}
2037 
2038 	/*
2039 	 * No do final twiddling for the CTIO itself.
2040 	 */
2041 	cto->ct_header.rqs_seqno = 1;
2042 	isp_prt(isp, ISP_LOGTDEBUG1,
2043 	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
2044 	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
2045 	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
2046 	    cto->ct_resid);
2047 	if (IS_2KLOGIN(isp))
2048 		isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
2049 	else
2050 		isp_put_ctio2(isp, cto, qe);
2051 	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
2052 	*mp->nxtip = nxti;
2053 }
2054 #endif
2055 
2056 static void dma2_a64(void *, bus_dma_segment_t *, int, int);
2057 static void dma2(void *, bus_dma_segment_t *, int, int);
2058 
2059 static void
2060 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2061 {
2062 	mush_t *mp;
2063 	ispsoftc_t *isp;
2064 	struct ccb_scsiio *csio;
2065 	struct isp_pcisoftc *pcs;
2066 	bus_dmamap_t *dp;
2067 	bus_dma_segment_t *eseg;
2068 	ispreq64_t *rq;
2069 	int seglim, datalen;
2070 	uint16_t nxti;
2071 
2072 	mp = (mush_t *) arg;
2073 	if (error) {
2074 		mp->error = error;
2075 		return;
2076 	}
2077 
2078 	if (nseg < 1) {
2079 		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2080 		mp->error = EFAULT;
2081 		return;
2082 	}
2083 	csio = mp->cmd_token;
2084 	isp = mp->isp;
2085 	rq = mp->rq;
2086 	pcs = (struct isp_pcisoftc *)mp->isp;
2087 	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2088 	nxti = *mp->nxtip;
2089 
2090 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2091 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2092 	} else {
2093 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2094 	}
2095 	datalen = XS_XFRLEN(csio);
2096 
2097 	/*
2098 	 * We're passed an initial partially filled in entry that
2099 	 * has most fields filled in except for data transfer
2100 	 * related values.
2101 	 *
2102 	 * Our job is to fill in the initial request queue entry and
2103 	 * then to start allocating and filling in continuation entries
2104 	 * until we've covered the entire transfer.
2105 	 */
2106 
2107 	if (IS_FC(isp)) {
2108 		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
2109 		seglim = ISP_RQDSEG_T3;
2110 		((ispreqt3_t *)rq)->req_totalcnt = datalen;
2111 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2112 			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
2113 		} else {
2114 			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
2115 		}
2116 	} else {
2117 		rq->req_header.rqs_entry_type = RQSTYPE_A64;
2118 		if (csio->cdb_len > 12) {
2119 			seglim = 0;
2120 		} else {
2121 			seglim = ISP_RQDSEG_A64;
2122 		}
2123 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2124 			rq->req_flags |= REQFLAG_DATA_IN;
2125 		} else {
2126 			rq->req_flags |= REQFLAG_DATA_OUT;
2127 		}
2128 	}
2129 
2130 	eseg = dm_segs + nseg;
2131 
2132 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
2133 		if (IS_FC(isp)) {
2134 			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
2135 			rq3->req_dataseg[rq3->req_seg_count].ds_base =
2136 			    DMA_LO32(dm_segs->ds_addr);
2137 			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
2138 			    DMA_HI32(dm_segs->ds_addr);
2139 			rq3->req_dataseg[rq3->req_seg_count].ds_count =
2140 			    dm_segs->ds_len;
2141 		} else {
2142 			rq->req_dataseg[rq->req_seg_count].ds_base =
2143 			    DMA_LO32(dm_segs->ds_addr);
2144 			rq->req_dataseg[rq->req_seg_count].ds_basehi =
2145 			    DMA_HI32(dm_segs->ds_addr);
2146 			rq->req_dataseg[rq->req_seg_count].ds_count =
2147 			    dm_segs->ds_len;
2148 		}
2149 		datalen -= dm_segs->ds_len;
2150 		rq->req_seg_count++;
2151 		dm_segs++;
2152 	}
2153 
2154 	while (datalen > 0 && dm_segs != eseg) {
2155 		uint16_t onxti;
2156 		ispcontreq64_t local, *crq = &local, *cqe;
2157 
2158 		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2159 		onxti = nxti;
2160 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2161 		if (nxti == mp->optr) {
2162 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2163 			mp->error = MUSHERR_NOQENTRIES;
2164 			return;
2165 		}
2166 		rq->req_header.rqs_entry_count++;
2167 		MEMZERO((void *)crq, sizeof (*crq));
2168 		crq->req_header.rqs_entry_count = 1;
2169 		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
2170 
2171 		seglim = 0;
2172 		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
2173 			crq->req_dataseg[seglim].ds_base =
2174 			    DMA_LO32(dm_segs->ds_addr);
2175 			crq->req_dataseg[seglim].ds_basehi =
2176 			    DMA_HI32(dm_segs->ds_addr);
2177 			crq->req_dataseg[seglim].ds_count =
2178 			    dm_segs->ds_len;
2179 			rq->req_seg_count++;
2180 			dm_segs++;
2181 			seglim++;
2182 			datalen -= dm_segs->ds_len;
2183 		}
2184 		isp_put_cont64_req(isp, crq, cqe);
2185 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2186 	}
2187 	*mp->nxtip = nxti;
2188 }
2189 
2190 static void
2191 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2192 {
2193 	mush_t *mp;
2194 	ispsoftc_t *isp;
2195 	struct ccb_scsiio *csio;
2196 	struct isp_pcisoftc *pcs;
2197 	bus_dmamap_t *dp;
2198 	bus_dma_segment_t *eseg;
2199 	ispreq_t *rq;
2200 	int seglim, datalen;
2201 	uint16_t nxti;
2202 
2203 	mp = (mush_t *) arg;
2204 	if (error) {
2205 		mp->error = error;
2206 		return;
2207 	}
2208 
2209 	if (nseg < 1) {
2210 		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2211 		mp->error = EFAULT;
2212 		return;
2213 	}
2214 	csio = mp->cmd_token;
2215 	isp = mp->isp;
2216 	rq = mp->rq;
2217 	pcs = (struct isp_pcisoftc *)mp->isp;
2218 	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2219 	nxti = *mp->nxtip;
2220 
2221 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2222 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2223 	} else {
2224 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2225 	}
2226 
2227 	datalen = XS_XFRLEN(csio);
2228 
2229 	/*
2230 	 * We're passed an initial partially filled in entry that
2231 	 * has most fields filled in except for data transfer
2232 	 * related values.
2233 	 *
2234 	 * Our job is to fill in the initial request queue entry and
2235 	 * then to start allocating and filling in continuation entries
2236 	 * until we've covered the entire transfer.
2237 	 */
2238 
2239 	if (IS_FC(isp)) {
2240 		seglim = ISP_RQDSEG_T2;
2241 		((ispreqt2_t *)rq)->req_totalcnt = datalen;
2242 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2243 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
2244 		} else {
2245 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
2246 		}
2247 	} else {
2248 		if (csio->cdb_len > 12) {
2249 			seglim = 0;
2250 		} else {
2251 			seglim = ISP_RQDSEG;
2252 		}
2253 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2254 			rq->req_flags |= REQFLAG_DATA_IN;
2255 		} else {
2256 			rq->req_flags |= REQFLAG_DATA_OUT;
2257 		}
2258 	}
2259 
2260 	eseg = dm_segs + nseg;
2261 
2262 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
2263 		if (IS_FC(isp)) {
2264 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
2265 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
2266 			    DMA_LO32(dm_segs->ds_addr);
2267 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
2268 			    dm_segs->ds_len;
2269 		} else {
2270 			rq->req_dataseg[rq->req_seg_count].ds_base =
2271 				DMA_LO32(dm_segs->ds_addr);
2272 			rq->req_dataseg[rq->req_seg_count].ds_count =
2273 				dm_segs->ds_len;
2274 		}
2275 		datalen -= dm_segs->ds_len;
2276 		rq->req_seg_count++;
2277 		dm_segs++;
2278 	}
2279 
2280 	while (datalen > 0 && dm_segs != eseg) {
2281 		uint16_t onxti;
2282 		ispcontreq_t local, *crq = &local, *cqe;
2283 
2284 		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2285 		onxti = nxti;
2286 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2287 		if (nxti == mp->optr) {
2288 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2289 			mp->error = MUSHERR_NOQENTRIES;
2290 			return;
2291 		}
2292 		rq->req_header.rqs_entry_count++;
2293 		MEMZERO((void *)crq, sizeof (*crq));
2294 		crq->req_header.rqs_entry_count = 1;
2295 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
2296 
2297 		seglim = 0;
2298 		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
2299 			crq->req_dataseg[seglim].ds_base =
2300 			    DMA_LO32(dm_segs->ds_addr);
2301 			crq->req_dataseg[seglim].ds_count =
2302 			    dm_segs->ds_len;
2303 			rq->req_seg_count++;
2304 			dm_segs++;
2305 			seglim++;
2306 			datalen -= dm_segs->ds_len;
2307 		}
2308 		isp_put_cont_req(isp, crq, cqe);
2309 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2310 	}
2311 	*mp->nxtip = nxti;
2312 }
2313 
2314 /*
2315  * We enter with ISP_LOCK held
2316  */
2317 static int
2318 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
2319 	uint16_t *nxtip, uint16_t optr)
2320 {
2321 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2322 	ispreq_t *qep;
2323 	bus_dmamap_t *dp = NULL;
2324 	mush_t mush, *mp;
2325 	void (*eptr)(void *, bus_dma_segment_t *, int, int);
2326 
2327 	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
2328 #ifdef	ISP_TARGET_MODE
2329 	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
2330 		if (IS_FC(isp)) {
2331 			eptr = tdma_mkfc;
2332 		} else {
2333 			eptr = tdma_mk;
2334 		}
2335 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
2336 		    (csio->dxfer_len == 0)) {
2337 			mp = &mush;
2338 			mp->isp = isp;
2339 			mp->cmd_token = csio;
2340 			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
2341 			mp->nxtip = nxtip;
2342 			mp->optr = optr;
2343 			mp->error = 0;
2344 			ISPLOCK_2_CAMLOCK(isp);
2345 			(*eptr)(mp, NULL, 0, 0);
2346 			CAMLOCK_2_ISPLOCK(isp);
2347 			goto mbxsync;
2348 		}
2349 	} else
2350 #endif
2351 	if (sizeof (bus_addr_t) > 4) {
2352 		eptr = dma2_a64;
2353 	} else {
2354 		eptr = dma2;
2355 	}
2356 
2357 
2358 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
2359 	    (csio->dxfer_len == 0)) {
2360 		rq->req_seg_count = 1;
2361 		goto mbxsync;
2362 	}
2363 
2364 	/*
2365 	 * Do a virtual grapevine step to collect info for
2366 	 * the callback dma allocation that we have to use...
2367 	 */
2368 	mp = &mush;
2369 	mp->isp = isp;
2370 	mp->cmd_token = csio;
2371 	mp->rq = rq;
2372 	mp->nxtip = nxtip;
2373 	mp->optr = optr;
2374 	mp->error = 0;
2375 
2376 	ISPLOCK_2_CAMLOCK(isp);
2377 	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2378 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
2379 			int error, s;
2380 			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2381 			s = splsoftvm();
2382 			error = bus_dmamap_load(pcs->dmat, *dp,
2383 			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
2384 			if (error == EINPROGRESS) {
2385 				bus_dmamap_unload(pcs->dmat, *dp);
2386 				mp->error = EINVAL;
2387 				isp_prt(isp, ISP_LOGERR,
2388 				    "deferred dma allocation not supported");
2389 			} else if (error && mp->error == 0) {
2390 #ifdef	DIAGNOSTIC
2391 				isp_prt(isp, ISP_LOGERR,
2392 				    "error %d in dma mapping code", error);
2393 #endif
2394 				mp->error = error;
2395 			}
2396 			splx(s);
2397 		} else {
2398 			/* Pointer to physical buffer */
2399 			struct bus_dma_segment seg;
2400 			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
2401 			seg.ds_len = csio->dxfer_len;
2402 			(*eptr)(mp, &seg, 1, 0);
2403 		}
2404 	} else {
2405 		struct bus_dma_segment *segs;
2406 
2407 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2408 			isp_prt(isp, ISP_LOGERR,
2409 			    "Physical segment pointers unsupported");
2410 			mp->error = EINVAL;
2411 		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2412 			isp_prt(isp, ISP_LOGERR,
2413 			    "Virtual segment addresses unsupported");
2414 			mp->error = EINVAL;
2415 		} else {
2416 			/* Just use the segments provided */
2417 			segs = (struct bus_dma_segment *) csio->data_ptr;
2418 			(*eptr)(mp, segs, csio->sglist_cnt, 0);
2419 		}
2420 	}
2421 	CAMLOCK_2_ISPLOCK(isp);
2422 	if (mp->error) {
2423 		int retval = CMD_COMPLETE;
2424 		if (mp->error == MUSHERR_NOQENTRIES) {
2425 			retval = CMD_EAGAIN;
2426 		} else if (mp->error == EFBIG) {
2427 			XS_SETERR(csio, CAM_REQ_TOO_BIG);
2428 		} else if (mp->error == EINVAL) {
2429 			XS_SETERR(csio, CAM_REQ_INVALID);
2430 		} else {
2431 			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
2432 		}
2433 		return (retval);
2434 	}
2435 mbxsync:
2436 	switch (rq->req_header.rqs_entry_type) {
2437 	case RQSTYPE_REQUEST:
2438 		isp_put_request(isp, rq, qep);
2439 		break;
2440 	case RQSTYPE_CMDONLY:
2441 		isp_put_extended_request(isp, (ispextreq_t *)rq,
2442 		    (ispextreq_t *)qep);
2443 		break;
2444 	case RQSTYPE_T2RQS:
2445 		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
2446 		break;
2447 	case RQSTYPE_A64:
2448 	case RQSTYPE_T3RQS:
2449 		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
2450 		break;
2451 	}
2452 	return (CMD_QUEUED);
2453 }
2454 
2455 static void
2456 isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint16_t handle)
2457 {
2458 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2459 	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
2460 	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2461 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
2462 	} else {
2463 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
2464 	}
2465 	bus_dmamap_unload(pcs->dmat, *dp);
2466 }
2467 
2468 
2469 static void
2470 isp_pci_reset1(ispsoftc_t *isp)
2471 {
2472 	/* Make sure the BIOS is disabled */
2473 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
2474 	/* and enable interrupts */
2475 	ENABLE_INTS(isp);
2476 }
2477 
2478 static void
2479 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
2480 {
2481 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2482 	if (msg)
2483 		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
2484 	else
2485 		printf("%s:\n", device_get_nameunit(isp->isp_dev));
2486 	if (IS_SCSI(isp))
2487 		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
2488 	else
2489 		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
2490 	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
2491 	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
2492 	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
2493 
2494 
2495 	if (IS_SCSI(isp)) {
2496 		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
2497 		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
2498 			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
2499 			ISP_READ(isp, CDMA_FIFO_STS));
2500 		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
2501 			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
2502 			ISP_READ(isp, DDMA_FIFO_STS));
2503 		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
2504 			ISP_READ(isp, SXP_INTERRUPT),
2505 			ISP_READ(isp, SXP_GROSS_ERR),
2506 			ISP_READ(isp, SXP_PINS_CTRL));
2507 		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
2508 	}
2509 	printf("    mbox regs: %x %x %x %x %x\n",
2510 	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
2511 	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
2512 	    ISP_READ(isp, OUTMAILBOX4));
2513 	printf("    PCI Status Command/Status=%x\n",
2514 	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
2515 }
2516